Revert r326946. It caused stack overflows by significantly increasing the size of a CallArgList.
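
For context, a minimal standalone sketch of why the size increase hurts: the reverted change gave CallArg a union of RValue and LValue, so sizeof(CallArg) jumps to the size of the larger member, and CallArgList keeps an array of CallArgs inline (on the stack), multiplying that growth in every frame that builds a call. The types below are stand-ins, not the real Clang classes; their field layouts and the 16-element inline capacity are illustrative assumptions.

// Standalone sketch; the struct layouts and capacities below are made up for
// illustration and do not match the real Clang classes.
#include <cstdio>
#include <cstdint>

namespace sketch {
struct RValue { void *V1, *V2; unsigned Flags; };                 // small payload
struct LValue { void *Addr; std::uint64_t Bits[6]; unsigned Q; }; // much larger payload

// Shape of CallArg after this revert: an RValue plus a type and a flag
// (int stands in for QualType here).
struct CallArgOld { RValue RV; int Ty; bool NeedsCopy; };

// Shape of CallArg introduced by r326946: a union of RValue and LValue, so
// the whole struct is at least as large as the larger member.
struct CallArgNew {
  union { RValue RV; LValue LV; };
  bool HasLV, IsUsed;
  int Ty;
};

// CallArgList is a SmallVector of CallArg with inline (in-object, i.e. stack)
// storage; 16 elements is an assumed capacity for illustration.
template <typename T, unsigned N>
struct SmallVectorish { T Inline[N]; unsigned Size = 0; };
} // namespace sketch

int main() {
  using namespace sketch;
  std::printf("CallArg            old/new: %3zu / %3zu bytes\n",
              sizeof(CallArgOld), sizeof(CallArgNew));
  std::printf("16-elt inline buf  old/new: %3zu / %3zu bytes\n",
              sizeof(SmallVectorish<CallArgOld, 16>),
              sizeof(SmallVectorish<CallArgNew, 16>));
  // Each CallArgList lives on the stack of code paths that can recurse
  // (nested call expressions), so the per-list growth compounds and can
  // overflow the stack.
  return 0;
}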

llvm-svn: 327195
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index 7e685d2..3d24e1f 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -1160,7 +1160,7 @@
     if (UseOptimizedLibcall && Res.getScalarVal()) {
       llvm::Value *ResVal = Res.getScalarVal();
       if (PostOp) {
-        llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
+        llvm::Value *LoadVal1 = Args[1].RV.getScalarVal();
         ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
       }
       if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index e7b2acc..f7736c1 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -1040,49 +1040,42 @@
 }
 
 void CodeGenFunction::ExpandTypeToArgs(
-    QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
+    QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
     SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
   auto Exp = getTypeExpansion(Ty, getContext());
   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
-    Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
-                                   : Arg.getKnownRValue().getAggregateAddress();
-    forConstantArrayExpansion(
-        *this, CAExp, Addr, [&](Address EltAddr) {
-          CallArg EltArg = CallArg(
-              convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
-              CAExp->EltTy);
-          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
-                           IRCallArgPos);
-        });
+    forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
+                              [&](Address EltAddr) {
+      RValue EltRV =
+          convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
+      ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
+    });
   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
-    Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
-                                   : Arg.getKnownRValue().getAggregateAddress();
+    Address This = RV.getAggregateAddress();
     for (const CXXBaseSpecifier *BS : RExp->Bases) {
       // Perform a single step derived-to-base conversion.
       Address Base =
           GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                 /*NullCheckValue=*/false, SourceLocation());
-      CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());
+      RValue BaseRV = RValue::getAggregate(Base);
 
       // Recurse onto bases.
-      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
+      ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
                        IRCallArgPos);
     }
 
     LValue LV = MakeAddrLValue(This, Ty);
     for (auto FD : RExp->Fields) {
-      CallArg FldArg =
-          CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
-      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
+      RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
+      ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
                        IRCallArgPos);
     }
   } else if (isa<ComplexExpansion>(Exp.get())) {
-    ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
+    ComplexPairTy CV = RV.getComplexVal();
     IRCallArgs[IRCallArgPos++] = CV.first;
     IRCallArgs[IRCallArgPos++] = CV.second;
   } else {
     assert(isa<NoExpansion>(Exp.get()));
-    auto RV = Arg.getKnownRValue();
     assert(RV.isScalar() &&
            "Unexpected non-scalar rvalue during struct expansion.");
 
@@ -3425,17 +3418,13 @@
     assert(InitialArgSize + 1 == Args.size() &&
            "The code below depends on only adding one arg per EmitCallArg");
     (void)InitialArgSize;
-    // Since pointer argument are never emitted as LValue, it is safe to emit
-    // non-null argument check for r-value only.
-    if (!Args.back().hasLValue()) {
-      RValue RVArg = Args.back().getKnownRValue();
-      EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
-                          ParamsToSkip + Idx);
-      // @llvm.objectsize should never have side-effects and shouldn't need
-      // destruction/cleanups, so we can safely "emit" it after its arg,
-      // regardless of right-to-leftness
-      MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
-    }
+    RValue RVArg = Args.back().RV;
+    EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
+                        ParamsToSkip + Idx);
+    // @llvm.objectsize should never have side-effects and shouldn't need
+    // destruction/cleanups, so we can safely "emit" it after its arg,
+    // regardless of right-to-leftness
+    MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
   }
 
   if (!LeftToRight) {
@@ -3482,31 +3471,6 @@
 
 } // end anonymous namespace
 
-RValue CallArg::getRValue(CodeGenFunction &CGF) const {
-  if (!HasLV)
-    return RV;
-  LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
-  CGF.EmitAggregateCopy(Copy, LV, Ty, LV.isVolatile());
-  IsUsed = true;
-  return RValue::getAggregate(Copy.getAddress());
-}
-
-void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
-  LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
-  if (!HasLV && RV.isScalar())
-    CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*init=*/true);
-  else if (!HasLV && RV.isComplex())
-    CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
-  else {
-    auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress();
-    LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
-    CGF.EmitAggregateCopy(Dst, SrcLV, Ty,
-                          HasLV ? LV.isVolatileQualified()
-                                : RV.isVolatileQualified());
-  }
-  IsUsed = true;
-}
-
 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                   QualType type) {
   DisableDebugLocationUpdates Dis(*this, E);
@@ -3572,7 +3536,15 @@
       cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
     LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
     assert(L.isSimple());
-    args.addUncopiedAggregate(L, type);
+    if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
+      args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
+    } else {
+      // We can't represent a misaligned lvalue in the CallArgList, so copy
+      // to an aligned temporary now.
+      LValue Dest = MakeAddrLValue(CreateMemTemp(type), type);
+      EmitAggregateCopy(Dest, L, type, L.isVolatile());
+      args.add(RValue::getAggregate(Dest.getAddress()), type);
+    }
     return;
   }
 
@@ -3730,6 +3702,16 @@
   return llvm::CallSite(Inst);
 }
 
+/// \brief Store a non-aggregate value to an address to initialize it.  For
+/// initialization, a non-atomic store will be used.
+static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
+                                        LValue Dst) {
+  if (Src.isScalar())
+    CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
+  else
+    CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
+}
+
 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
                                                   llvm::Value *New) {
   DeferredReplacements.push_back(std::make_pair(Old, New));
@@ -3821,6 +3803,7 @@
   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
        I != E; ++I, ++info_it, ++ArgNo) {
     const ABIArgInfo &ArgInfo = info_it->info;
+    RValue RV = I->RV;
 
     // Insert a padding argument to ensure proper alignment.
     if (IRFunctionArgs.hasPaddingArg(ArgNo))
@@ -3834,16 +3817,13 @@
     case ABIArgInfo::InAlloca: {
       assert(NumIRArgs == 0);
       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
-      if (I->isAggregate()) {
+      if (RV.isAggregate()) {
         // Replace the placeholder with the appropriate argument slot GEP.
-        Address Addr = I->hasLValue()
-                           ? I->getKnownLValue().getAddress()
-                           : I->getKnownRValue().getAggregateAddress();
         llvm::Instruction *Placeholder =
-            cast<llvm::Instruction>(Addr.getPointer());
+            cast<llvm::Instruction>(RV.getAggregatePointer());
         CGBuilderTy::InsertPoint IP = Builder.saveIP();
         Builder.SetInsertPoint(Placeholder);
-        Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
+        Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
         Builder.restoreIP(IP);
         deferPlaceholderReplacement(Placeholder, Addr.getPointer());
       } else {
@@ -3856,20 +3836,22 @@
         // from {}* to (%struct.foo*)*.
         if (Addr.getType() != MemType)
           Addr = Builder.CreateBitCast(Addr, MemType);
-        I->copyInto(*this, Addr);
+        LValue argLV = MakeAddrLValue(Addr, I->Ty);
+        EmitInitStoreOfNonAggregate(*this, RV, argLV);
       }
       break;
     }
 
     case ABIArgInfo::Indirect: {
       assert(NumIRArgs == 1);
-      if (!I->isAggregate()) {
+      if (RV.isScalar() || RV.isComplex()) {
         // Make a temporary alloca to pass the argument.
         Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign(),
                                      "indirect-arg-temp", false);
         IRCallArgs[FirstIRArg] = Addr.getPointer();
 
-        I->copyInto(*this, Addr);
+        LValue argLV = MakeAddrLValue(Addr, I->Ty);
+        EmitInitStoreOfNonAggregate(*this, RV, argLV);
       } else {
         // We want to avoid creating an unnecessary temporary+copy here;
         // however, we need one in three cases:
@@ -3877,51 +3859,32 @@
         //    source.  (This case doesn't occur on any common architecture.)
         // 2. If the argument is byval, RV is not sufficiently aligned, and
         //    we cannot force it to be sufficiently aligned.
-        // 3. If the argument is byval, but RV is not located in default
-        //    or alloca address space.
-        Address Addr = I->hasLValue()
-                           ? I->getKnownLValue().getAddress()
-                           : I->getKnownRValue().getAggregateAddress();
-        llvm::Value *V = Addr.getPointer();
+        // 3. If the argument is byval, but RV is located in an address space
+        //    different than that of the argument (0).
+        Address Addr = RV.getAggregateAddress();
         CharUnits Align = ArgInfo.getIndirectAlign();
         const llvm::DataLayout *TD = &CGM.getDataLayout();
-
-        assert((FirstIRArg >= IRFuncTy->getNumParams() ||
-                IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
-                    TD->getAllocaAddrSpace()) &&
-               "indirect argument must be in alloca address space");
-
-        bool NeedCopy = false;
-
-        if (Addr.getAlignment() < Align &&
-            llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) <
-                Align.getQuantity()) {
-          NeedCopy = true;
-        } else if (I->hasLValue()) {
-          auto LV = I->getKnownLValue();
-          auto AS = LV.getAddressSpace();
-          if ((!ArgInfo.getIndirectByVal() &&
-               (LV.getAlignment() >=
-                getContext().getTypeAlignInChars(I->Ty))) ||
-              (ArgInfo.getIndirectByVal() &&
-               ((AS != LangAS::Default && AS != LangAS::opencl_private &&
-                 AS != CGM.getASTAllocaAddressSpace())))) {
-            NeedCopy = true;
-          }
-        }
-        if (NeedCopy) {
+        const unsigned RVAddrSpace = Addr.getType()->getAddressSpace();
+        const unsigned ArgAddrSpace =
+            (FirstIRArg < IRFuncTy->getNumParams()
+                 ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
+                 : 0);
+        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
+            (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align &&
+             llvm::getOrEnforceKnownAlignment(Addr.getPointer(),
+                                              Align.getQuantity(), *TD)
+               < Align.getQuantity()) ||
+            (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
           // Create an aligned temporary, and copy to it.
           Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign(),
                                      "byval-temp", false);
           IRCallArgs[FirstIRArg] = AI.getPointer();
-          I->copyInto(*this, AI);
+          LValue Dest = MakeAddrLValue(AI, I->Ty);
+          LValue Src = MakeAddrLValue(Addr, I->Ty);
+          EmitAggregateCopy(Dest, Src, I->Ty, RV.isVolatileQualified());
         } else {
           // Skip the extra memcpy call.
-          auto *T = V->getType()->getPointerElementType()->getPointerTo(
-              CGM.getDataLayout().getAllocaAddrSpace());
-          IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
-              *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
-              true);
+          IRCallArgs[FirstIRArg] = Addr.getPointer();
         }
       }
       break;
@@ -3938,12 +3901,10 @@
           ArgInfo.getDirectOffset() == 0) {
         assert(NumIRArgs == 1);
         llvm::Value *V;
-        if (!I->isAggregate())
-          V = I->getKnownRValue().getScalarVal();
+        if (RV.isScalar())
+          V = RV.getScalarVal();
         else
-          V = Builder.CreateLoad(
-              I->hasLValue() ? I->getKnownLValue().getAddress()
-                             : I->getKnownRValue().getAggregateAddress());
+          V = Builder.CreateLoad(RV.getAggregateAddress());
 
         // Implement swifterror by copying into a new swifterror argument.
         // We'll write back in the normal path out of the call.
@@ -3981,12 +3942,12 @@
 
       // FIXME: Avoid the conversion through memory if possible.
       Address Src = Address::invalid();
-      if (!I->isAggregate()) {
+      if (RV.isScalar() || RV.isComplex()) {
         Src = CreateMemTemp(I->Ty, "coerce");
-        I->copyInto(*this, Src);
+        LValue SrcLV = MakeAddrLValue(Src, I->Ty);
+        EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
       } else {
-        Src = I->hasLValue() ? I->getKnownLValue().getAddress()
-                             : I->getKnownRValue().getAggregateAddress();
+        Src = RV.getAggregateAddress();
       }
 
       // If the value is offset in memory, apply the offset now.
@@ -4040,12 +4001,9 @@
 
       llvm::Value *tempSize = nullptr;
       Address addr = Address::invalid();
-      if (I->isAggregate()) {
-        addr = I->hasLValue() ? I->getKnownLValue().getAddress()
-                              : I->getKnownRValue().getAggregateAddress();
-
+      if (RV.isAggregate()) {
+        addr = RV.getAggregateAddress();
       } else {
-        RValue RV = I->getKnownRValue();
         assert(RV.isScalar()); // complex should always just be direct
 
         llvm::Type *scalarType = RV.getScalarVal()->getType();
@@ -4082,7 +4040,7 @@
 
     case ABIArgInfo::Expand:
       unsigned IRArgPos = FirstIRArg;
-      ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
+      ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
       assert(IRArgPos == FirstIRArg + NumIRArgs);
       break;
     }
@@ -4441,7 +4399,7 @@
                               OffsetValue);
     } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
       llvm::Value *ParamVal =
-          CallArgs[AA->getParamIndex() - 1].getRValue(*this).getScalarVal();
+          CallArgs[AA->getParamIndex() - 1].RV.getScalarVal();
       EmitAlignmentAssumption(Ret.getScalarVal(), ParamVal);
     }
   }
diff --git a/clang/lib/CodeGen/CGCall.h b/clang/lib/CodeGen/CGCall.h
index b66b48b..495baf0 100644
--- a/clang/lib/CodeGen/CGCall.h
+++ b/clang/lib/CodeGen/CGCall.h
@@ -213,46 +213,12 @@
   };
 
   struct CallArg {
-  private:
-    union {
-      RValue RV;
-      LValue LV; /// The argument is semantically a load from this l-value.
-    };
-    bool HasLV;
-
-    /// A data-flow flag to make sure getRValue and/or copyInto are not
-    /// called twice for duplicated IR emission.
-    mutable bool IsUsed;
-
-  public:
+    RValue RV;
     QualType Ty;
-    CallArg(RValue rv, QualType ty)
-        : RV(rv), HasLV(false), IsUsed(false), Ty(ty) {}
-    CallArg(LValue lv, QualType ty)
-        : LV(lv), HasLV(true), IsUsed(false), Ty(ty) {}
-    bool hasLValue() const { return HasLV; }
-    QualType getType() const { return Ty; }
-
-    /// \returns an independent RValue. If the CallArg contains an LValue,
-    /// a temporary copy is returned.
-    RValue getRValue(CodeGenFunction &CGF) const;
-
-    LValue getKnownLValue() const {
-      assert(HasLV && !IsUsed);
-      return LV;
-    }
-    RValue getKnownRValue() const {
-      assert(!HasLV && !IsUsed);
-      return RV;
-    }
-    void setRValue(RValue _RV) {
-      assert(!HasLV);
-      RV = _RV;
-    }
-
-    bool isAggregate() const { return HasLV || RV.isAggregate(); }
-
-    void copyInto(CodeGenFunction &CGF, Address A) const;
+    bool NeedsCopy;
+    CallArg(RValue rv, QualType ty, bool needscopy)
+    : RV(rv), Ty(ty), NeedsCopy(needscopy)
+    { }
   };
 
   /// CallArgList - Type for representing both the value and type of
@@ -282,10 +248,8 @@
       llvm::Instruction *IsActiveIP;
     };
 
-    void add(RValue rvalue, QualType type) { push_back(CallArg(rvalue, type)); }
-
-    void addUncopiedAggregate(LValue LV, QualType type) {
-      push_back(CallArg(LV, type));
+    void add(RValue rvalue, QualType type, bool needscopy = false) {
+      push_back(CallArg(rvalue, type, needscopy));
     }
 
     /// Add all the arguments from another CallArgList to this one. After doing
diff --git a/clang/lib/CodeGen/CGClass.cpp b/clang/lib/CodeGen/CGClass.cpp
index 4ba5c06..56bd250 100644
--- a/clang/lib/CodeGen/CGClass.cpp
+++ b/clang/lib/CodeGen/CGClass.cpp
@@ -2077,8 +2077,7 @@
     assert(Args.size() == 2 && "unexpected argcount for trivial ctor");
 
     QualType SrcTy = D->getParamDecl(0)->getType().getNonReferenceType();
-    Address Src(Args[1].getRValue(*this).getScalarVal(),
-                getNaturalTypeAlignment(SrcTy));
+    Address Src(Args[1].RV.getScalarVal(), getNaturalTypeAlignment(SrcTy));
     LValue SrcLVal = MakeAddrLValue(Src, SrcTy);
     QualType DestTy = getContext().getTypeDeclType(ClassDecl);
     LValue DestLVal = MakeAddrLValue(This, DestTy);
@@ -2132,7 +2131,8 @@
     const CXXConstructorDecl *D, bool ForVirtualBase, Address This,
     bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E) {
   CallArgList Args;
-  CallArg ThisArg(RValue::get(This.getPointer()), D->getThisType(getContext()));
+  CallArg ThisArg(RValue::get(This.getPointer()), D->getThisType(getContext()),
+                  /*NeedsCopy=*/false);
 
   // Forward the parameters.
   if (InheritedFromVBase &&
@@ -2196,7 +2196,7 @@
   assert(Args.size() >= Params.size() && "too few arguments for call");
   for (unsigned I = 0, N = Args.size(); I != N; ++I) {
     if (I < Params.size() && isa<ImplicitParamDecl>(Params[I])) {
-      const RValue &RV = Args[I].getRValue(*this);
+      const RValue &RV = Args[I].RV;
       assert(!RV.isComplex() && "complex indirect params not supported");
       ParamValue Val = RV.isScalar()
                            ? ParamValue::forDirect(RV.getScalarVal())
diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp
index 2314fe4..52294c2 100644
--- a/clang/lib/CodeGen/CGDecl.cpp
+++ b/clang/lib/CodeGen/CGDecl.cpp
@@ -1882,22 +1882,6 @@
     llvm::Type *IRTy = ConvertTypeForMem(Ty)->getPointerTo(AS);
     if (DeclPtr.getType() != IRTy)
       DeclPtr = Builder.CreateBitCast(DeclPtr, IRTy, D.getName());
-    // Indirect argument is in alloca address space, which may be different
-    // from the default address space.
-    auto AllocaAS = CGM.getASTAllocaAddressSpace();
-    auto *V = DeclPtr.getPointer();
-    auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
-    auto DestLangAS =
-        getLangOpts().OpenCL ? LangAS::opencl_private : LangAS::Default;
-    if (SrcLangAS != DestLangAS) {
-      assert(getContext().getTargetAddressSpace(SrcLangAS) ==
-             CGM.getDataLayout().getAllocaAddrSpace());
-      auto DestAS = getContext().getTargetAddressSpace(DestLangAS);
-      auto *T = V->getType()->getPointerElementType()->getPointerTo(DestAS);
-      DeclPtr = Address(getTargetHooks().performAddrSpaceCast(
-                            *this, V, SrcLangAS, DestLangAS, T, true),
-                        DeclPtr.getAlignment());
-    }
 
     // Push a destructor cleanup for this parameter if the ABI requires it.
     // Don't push a cleanup in a thunk for a method that will also emit a
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index b874959..9f70ef3 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -265,7 +265,7 @@
         // when it isn't necessary; just produce the proper effect here.
         LValue RHS = isa<CXXOperatorCallExpr>(CE)
                          ? MakeNaturalAlignAddrLValue(
-                               (*RtlArgs)[0].getRValue(*this).getScalarVal(),
+                               (*RtlArgs)[0].RV.getScalarVal(),
                                (*(CE->arg_begin() + 1))->getType())
                          : EmitLValue(*CE->arg_begin());
         EmitAggregateAssign(This, RHS, CE->getType());
@@ -1490,7 +1490,7 @@
                                            AllocAlign);
     for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
       auto &Arg = NewArgs[I + NumNonPlacementArgs];
-      Cleanup->setPlacementArg(I, Arg.getRValue(CGF), Arg.Ty);
+      Cleanup->setPlacementArg(I, Arg.RV, Arg.Ty);
     }
 
     return;
@@ -1521,8 +1521,8 @@
                                               AllocAlign);
   for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
     auto &Arg = NewArgs[I + NumNonPlacementArgs];
-    Cleanup->setPlacementArg(
-        I, DominatingValue<RValue>::save(CGF, Arg.getRValue(CGF)), Arg.Ty);
+    Cleanup->setPlacementArg(I, DominatingValue<RValue>::save(CGF, Arg.RV),
+                             Arg.Ty);
   }
 
   CGF.initFullExprCleanup();
diff --git a/clang/lib/CodeGen/CGGPUBuiltin.cpp b/clang/lib/CodeGen/CGGPUBuiltin.cpp
index b5375ff..48156b1 100644
--- a/clang/lib/CodeGen/CGGPUBuiltin.cpp
+++ b/clang/lib/CodeGen/CGGPUBuiltin.cpp
@@ -83,9 +83,8 @@
                /* ParamsToSkip = */ 0);
 
   // We don't know how to emit non-scalar varargs.
-  if (std::any_of(Args.begin() + 1, Args.end(), [&](const CallArg &A) {
-        return !A.getRValue(*this).isScalar();
-      })) {
+  if (std::any_of(Args.begin() + 1, Args.end(),
+                  [](const CallArg &A) { return !A.RV.isScalar(); })) {
     CGM.ErrorUnsupported(E, "non-scalar arg to printf");
     return RValue::get(llvm::ConstantInt::get(IntTy, 0));
   }
@@ -98,7 +97,7 @@
   } else {
     llvm::SmallVector<llvm::Type *, 8> ArgTypes;
     for (unsigned I = 1, NumArgs = Args.size(); I < NumArgs; ++I)
-      ArgTypes.push_back(Args[I].getRValue(*this).getScalarVal()->getType());
+      ArgTypes.push_back(Args[I].RV.getScalarVal()->getType());
 
     // Using llvm::StructType is correct only because printf doesn't accept
     // aggregates.  If we had to handle aggregates here, we'd have to manually
@@ -110,7 +109,7 @@
 
     for (unsigned I = 1, NumArgs = Args.size(); I < NumArgs; ++I) {
       llvm::Value *P = Builder.CreateStructGEP(AllocaTy, Alloca, I - 1);
-      llvm::Value *Arg = Args[I].getRValue(*this).getScalarVal();
+      llvm::Value *Arg = Args[I].RV.getScalarVal();
       Builder.CreateAlignedStore(Arg, P, DL.getPrefTypeAlignment(Arg->getType()));
     }
     BufferPtr = Builder.CreatePointerCast(Alloca, llvm::Type::getInt8PtrTy(Ctx));
@@ -118,6 +117,6 @@
 
   // Invoke vprintf and return.
   llvm::Function* VprintfFunc = GetVprintfDeclaration(CGM.getModule());
-  return RValue::get(Builder.CreateCall(
-      VprintfFunc, {Args[0].getRValue(*this).getScalarVal(), BufferPtr}));
+  return RValue::get(
+      Builder.CreateCall(VprintfFunc, {Args[0].RV.getScalarVal(), BufferPtr}));
 }
diff --git a/clang/lib/CodeGen/CGObjCGNU.cpp b/clang/lib/CodeGen/CGObjCGNU.cpp
index a6b6c38..5a025ce 100644
--- a/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -1441,7 +1441,7 @@
   }
 
   // Reset the receiver in case the lookup modified it
-  ActualArgs[0] = CallArg(RValue::get(Receiver), ASTIdTy);
+  ActualArgs[0] = CallArg(RValue::get(Receiver), ASTIdTy, false);
 
   imp = EnforceType(Builder, imp, MSI.MessengerType);
 
diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp
index 64f40e1..c3793b2 100644
--- a/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/clang/lib/CodeGen/CGObjCMac.cpp
@@ -1708,7 +1708,7 @@
            e = Method->param_end(); i != e; ++i, ++I) {
         const ParmVarDecl *ParamDecl = (*i);
         if (ParamDecl->hasAttr<NSConsumedAttr>()) {
-          RValue RV = I->getRValue(CGF);
+          RValue RV = I->RV;
           assert(RV.isScalar() && 
                  "NullReturnState::complete - arg not on object");
           CGF.EmitARCRelease(RV.getScalarVal(), ARCImpreciseLifetime);
@@ -7071,7 +7071,7 @@
             CGF.getPointerAlign());
 
   // Update the message ref argument.
-  args[1].setRValue(RValue::get(mref.getPointer()));
+  args[1].RV = RValue::get(mref.getPointer());
 
   // Load the function to call from the message ref table.
   Address calleeAddr =
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index 1f288c4..95d8229 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -3896,10 +3896,10 @@
   void ExpandTypeFromArgs(QualType Ty, LValue Dst,
                           SmallVectorImpl<llvm::Value *>::iterator &AI);
 
-  /// ExpandTypeToArgs - Expand an CallArg \arg Arg, with the LLVM type for \arg
+  /// ExpandTypeToArgs - Expand an RValue \arg RV, with the LLVM type for \arg
   /// Ty, into individual arguments on the provided vector \arg IRCallArgs,
   /// starting at index \arg IRCallArgPos. See ABIArgInfo::Expand.
-  void ExpandTypeToArgs(QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
+  void ExpandTypeToArgs(QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
                         SmallVectorImpl<llvm::Value *> &IRCallArgs,
                         unsigned &IRCallArgPos);
 
diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp
index 0f0956d..21223ea 100644
--- a/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -1474,7 +1474,8 @@
   llvm::Value *VTT =
       CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
   QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
-  Args.insert(Args.begin() + 1, CallArg(RValue::get(VTT), VTTTy));
+  Args.insert(Args.begin() + 1,
+              CallArg(RValue::get(VTT), VTTTy, /*needscopy=*/false));
   return AddedStructorArgs::prefix(1);  // Added one arg.
 }
 
diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index d48dfbf..787c9ea 100644
--- a/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -1538,7 +1538,8 @@
   }
   RValue RV = RValue::get(MostDerivedArg);
   if (FPT->isVariadic()) {
-    Args.insert(Args.begin() + 1, CallArg(RV, getContext().IntTy));
+    Args.insert(Args.begin() + 1,
+                CallArg(RV, getContext().IntTy, /*needscopy=*/false));
     return AddedStructorArgs::prefix(1);
   }
   Args.add(RV, getContext().IntTy);