CodeGen: Fix address space of indirect function argument
In LLVM IR, an indirect function argument lives in the alloca address space.
However, during Clang CodeGen for C++, the address space of an indirect
argument should match its address space in the source language, i.e., the
default address space, even though the argument is passed indirectly. This is
because the destructor of the indirect argument may be called in the caller
function, and the address of the indirect argument may be taken; in either case
the argument is expected to be in the default address space, not the alloca
address space.
Therefore, the indirect function argument is mapped to a temporary variable
cast to the default address space. The caller casts it back to the alloca
address space when passing it to the callee; in the callee, the argument is
likewise cast to the default address space before it is used.
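For illustration, a minimal C++ sketch of the situation (the type and function
names below are made up for this example and are not part of the patch):

    struct S {
      int Data[16];
      ~S() {}                // user-provided destructor: S is non-trivial for
                             // calls, so a by-value S argument is passed
                             // indirectly
    };

    void Use(int *P);

    void Callee(S ByVal) {
      Use(ByVal.Data);       // the parameter's address is taken; it must be a
                             // default-address-space pointer in the source
                             // language
    }

    void Caller(S &Ref) {
      Callee(Ref);           // the argument temporary is an alloca; on targets
                             // whose alloca address space differs from the
                             // default one, the pointer is addrspacecast to the
                             // alloca AS for the call, and cast back to the
                             // default AS in the callee and for the destructor
                             // call issued in the caller
    }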
CallArg is refactored to facilitate this fix.
Differential Revision: https://reviews.llvm.org/D34367
llvm-svn: 326946
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index 3d24e1f..7e685d2 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -1160,7 +1160,7 @@
if (UseOptimizedLibcall && Res.getScalarVal()) {
llvm::Value *ResVal = Res.getScalarVal();
if (PostOp) {
- llvm::Value *LoadVal1 = Args[1].RV.getScalarVal();
+ llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
}
if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index 48841b5..11bed16 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -1040,42 +1040,49 @@
}
void CodeGenFunction::ExpandTypeToArgs(
- QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
+ QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
auto Exp = getTypeExpansion(Ty, getContext());
if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
- forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
- [&](Address EltAddr) {
- RValue EltRV =
- convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
- ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
- });
+ Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
+ : Arg.getKnownRValue().getAggregateAddress();
+ forConstantArrayExpansion(
+ *this, CAExp, Addr, [&](Address EltAddr) {
+ CallArg EltArg = CallArg(
+ convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
+ CAExp->EltTy);
+ ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
+ IRCallArgPos);
+ });
} else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
- Address This = RV.getAggregateAddress();
+ Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
+ : Arg.getKnownRValue().getAggregateAddress();
for (const CXXBaseSpecifier *BS : RExp->Bases) {
// Perform a single step derived-to-base conversion.
Address Base =
GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
/*NullCheckValue=*/false, SourceLocation());
- RValue BaseRV = RValue::getAggregate(Base);
+ CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());
// Recurse onto bases.
- ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
+ ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
IRCallArgPos);
}
LValue LV = MakeAddrLValue(This, Ty);
for (auto FD : RExp->Fields) {
- RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
- ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
+ CallArg FldArg =
+ CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
+ ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
IRCallArgPos);
}
} else if (isa<ComplexExpansion>(Exp.get())) {
- ComplexPairTy CV = RV.getComplexVal();
+ ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
IRCallArgs[IRCallArgPos++] = CV.first;
IRCallArgs[IRCallArgPos++] = CV.second;
} else {
assert(isa<NoExpansion>(Exp.get()));
+ auto RV = Arg.getKnownRValue();
assert(RV.isScalar() &&
"Unexpected non-scalar rvalue during struct expansion.");
@@ -3418,13 +3425,17 @@
assert(InitialArgSize + 1 == Args.size() &&
"The code below depends on only adding one arg per EmitCallArg");
(void)InitialArgSize;
- RValue RVArg = Args.back().RV;
- EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
- ParamsToSkip + Idx);
- // @llvm.objectsize should never have side-effects and shouldn't need
- // destruction/cleanups, so we can safely "emit" it after its arg,
- // regardless of right-to-leftness
- MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
+ // Since pointer arguments are never emitted as LValues, it is safe to emit
+ // the non-null argument check for r-values only.
+ if (!Args.back().hasLValue()) {
+ RValue RVArg = Args.back().getKnownRValue();
+ EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
+ ParamsToSkip + Idx);
+ // @llvm.objectsize should never have side-effects and shouldn't need
+ // destruction/cleanups, so we can safely "emit" it after its arg,
+ // regardless of right-to-leftness
+ MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
+ }
}
if (!LeftToRight) {
@@ -3471,6 +3482,31 @@
} // end anonymous namespace
+RValue CallArg::getRValue(CodeGenFunction &CGF) const {
+ if (!HasLV)
+ return RV;
+ LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
+ CGF.EmitAggregateCopy(Copy, LV, Ty, LV.isVolatile());
+ IsUsed = true;
+ return RValue::getAggregate(Copy.getAddress());
+}
+
+void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
+ LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
+ if (!HasLV && RV.isScalar())
+ CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*init=*/true);
+ else if (!HasLV && RV.isComplex())
+ CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
+ else {
+ auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress();
+ LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
+ CGF.EmitAggregateCopy(Dst, SrcLV, Ty,
+ HasLV ? LV.isVolatileQualified()
+ : RV.isVolatileQualified());
+ }
+ IsUsed = true;
+}
+
void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
QualType type) {
DisableDebugLocationUpdates Dis(*this, E);
@@ -3536,15 +3572,7 @@
cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
assert(L.isSimple());
- if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
- args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
- } else {
- // We can't represent a misaligned lvalue in the CallArgList, so copy
- // to an aligned temporary now.
- LValue Dest = MakeAddrLValue(CreateMemTemp(type), type);
- EmitAggregateCopy(Dest, L, type, L.isVolatile());
- args.add(RValue::getAggregate(Dest.getAddress()), type);
- }
+ args.addUncopiedAggregate(L, type);
return;
}
@@ -3702,16 +3730,6 @@
return llvm::CallSite(Inst);
}
-/// \brief Store a non-aggregate value to an address to initialize it. For
-/// initialization, a non-atomic store will be used.
-static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
- LValue Dst) {
- if (Src.isScalar())
- CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
- else
- CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
-}
-
void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
llvm::Value *New) {
DeferredReplacements.push_back(std::make_pair(Old, New));
@@ -3804,7 +3822,6 @@
for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
I != E; ++I, ++info_it, ++ArgNo) {
const ABIArgInfo &ArgInfo = info_it->info;
- RValue RV = I->RV;
// Insert a padding argument to ensure proper alignment.
if (IRFunctionArgs.hasPaddingArg(ArgNo))
@@ -3818,13 +3835,16 @@
case ABIArgInfo::InAlloca: {
assert(NumIRArgs == 0);
assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
- if (RV.isAggregate()) {
+ if (I->isAggregate()) {
// Replace the placeholder with the appropriate argument slot GEP.
+ Address Addr = I->hasLValue()
+ ? I->getKnownLValue().getAddress()
+ : I->getKnownRValue().getAggregateAddress();
llvm::Instruction *Placeholder =
- cast<llvm::Instruction>(RV.getAggregatePointer());
+ cast<llvm::Instruction>(Addr.getPointer());
CGBuilderTy::InsertPoint IP = Builder.saveIP();
Builder.SetInsertPoint(Placeholder);
- Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
+ Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
Builder.restoreIP(IP);
deferPlaceholderReplacement(Placeholder, Addr.getPointer());
} else {
@@ -3837,22 +3857,20 @@
// from {}* to (%struct.foo*)*.
if (Addr.getType() != MemType)
Addr = Builder.CreateBitCast(Addr, MemType);
- LValue argLV = MakeAddrLValue(Addr, I->Ty);
- EmitInitStoreOfNonAggregate(*this, RV, argLV);
+ I->copyInto(*this, Addr);
}
break;
}
case ABIArgInfo::Indirect: {
assert(NumIRArgs == 1);
- if (RV.isScalar() || RV.isComplex()) {
+ if (!I->isAggregate()) {
// Make a temporary alloca to pass the argument.
Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign(),
"indirect-arg-temp", false);
IRCallArgs[FirstIRArg] = Addr.getPointer();
- LValue argLV = MakeAddrLValue(Addr, I->Ty);
- EmitInitStoreOfNonAggregate(*this, RV, argLV);
+ I->copyInto(*this, Addr);
} else {
// We want to avoid creating an unnecessary temporary+copy here;
// however, we need one in three cases:
@@ -3860,32 +3878,51 @@
// source. (This case doesn't occur on any common architecture.)
// 2. If the argument is byval, RV is not sufficiently aligned, and
// we cannot force it to be sufficiently aligned.
- // 3. If the argument is byval, but RV is located in an address space
- // different than that of the argument (0).
- Address Addr = RV.getAggregateAddress();
+ // 3. If the argument is byval, but RV is not located in default
+ // or alloca address space.
+ Address Addr = I->hasLValue()
+ ? I->getKnownLValue().getAddress()
+ : I->getKnownRValue().getAggregateAddress();
+ llvm::Value *V = Addr.getPointer();
CharUnits Align = ArgInfo.getIndirectAlign();
const llvm::DataLayout *TD = &CGM.getDataLayout();
- const unsigned RVAddrSpace = Addr.getType()->getAddressSpace();
- const unsigned ArgAddrSpace =
- (FirstIRArg < IRFuncTy->getNumParams()
- ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
- : 0);
- if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
- (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align &&
- llvm::getOrEnforceKnownAlignment(Addr.getPointer(),
- Align.getQuantity(), *TD)
- < Align.getQuantity()) ||
- (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
+
+ assert((FirstIRArg >= IRFuncTy->getNumParams() ||
+ IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
+ TD->getAllocaAddrSpace()) &&
+ "indirect argument must be in alloca address space");
+
+ bool NeedCopy = false;
+
+ if (Addr.getAlignment() < Align &&
+ llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) <
+ Align.getQuantity()) {
+ NeedCopy = true;
+ } else if (I->hasLValue()) {
+ auto LV = I->getKnownLValue();
+ auto AS = LV.getAddressSpace();
+ if ((!ArgInfo.getIndirectByVal() &&
+ (LV.getAlignment() >=
+ getContext().getTypeAlignInChars(I->Ty))) ||
+ (ArgInfo.getIndirectByVal() &&
+ ((AS != LangAS::Default && AS != LangAS::opencl_private &&
+ AS != CGM.getASTAllocaAddressSpace())))) {
+ NeedCopy = true;
+ }
+ }
+ if (NeedCopy) {
// Create an aligned temporary, and copy to it.
Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign(),
"byval-temp", false);
IRCallArgs[FirstIRArg] = AI.getPointer();
- LValue Dest = MakeAddrLValue(AI, I->Ty);
- LValue Src = MakeAddrLValue(Addr, I->Ty);
- EmitAggregateCopy(Dest, Src, I->Ty, RV.isVolatileQualified());
+ I->copyInto(*this, AI);
} else {
// Skip the extra memcpy call.
- IRCallArgs[FirstIRArg] = Addr.getPointer();
+ auto *T = V->getType()->getPointerElementType()->getPointerTo(
+ CGM.getDataLayout().getAllocaAddrSpace());
+ IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
+ *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
+ true);
}
}
break;
@@ -3902,10 +3939,12 @@
ArgInfo.getDirectOffset() == 0) {
assert(NumIRArgs == 1);
llvm::Value *V;
- if (RV.isScalar())
- V = RV.getScalarVal();
+ if (!I->isAggregate())
+ V = I->getKnownRValue().getScalarVal();
else
- V = Builder.CreateLoad(RV.getAggregateAddress());
+ V = Builder.CreateLoad(
+ I->hasLValue() ? I->getKnownLValue().getAddress()
+ : I->getKnownRValue().getAggregateAddress());
// Implement swifterror by copying into a new swifterror argument.
// We'll write back in the normal path out of the call.
@@ -3943,12 +3982,12 @@
// FIXME: Avoid the conversion through memory if possible.
Address Src = Address::invalid();
- if (RV.isScalar() || RV.isComplex()) {
+ if (!I->isAggregate()) {
Src = CreateMemTemp(I->Ty, "coerce");
- LValue SrcLV = MakeAddrLValue(Src, I->Ty);
- EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
+ I->copyInto(*this, Src);
} else {
- Src = RV.getAggregateAddress();
+ Src = I->hasLValue() ? I->getKnownLValue().getAddress()
+ : I->getKnownRValue().getAggregateAddress();
}
// If the value is offset in memory, apply the offset now.
@@ -4002,9 +4041,12 @@
llvm::Value *tempSize = nullptr;
Address addr = Address::invalid();
- if (RV.isAggregate()) {
- addr = RV.getAggregateAddress();
+ if (I->isAggregate()) {
+ addr = I->hasLValue() ? I->getKnownLValue().getAddress()
+ : I->getKnownRValue().getAggregateAddress();
+
} else {
+ RValue RV = I->getKnownRValue();
assert(RV.isScalar()); // complex should always just be direct
llvm::Type *scalarType = RV.getScalarVal()->getType();
@@ -4041,7 +4083,7 @@
case ABIArgInfo::Expand:
unsigned IRArgPos = FirstIRArg;
- ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
+ ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
assert(IRArgPos == FirstIRArg + NumIRArgs);
break;
}
@@ -4393,7 +4435,7 @@
OffsetValue);
} else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
llvm::Value *ParamVal =
- CallArgs[AA->getParamIndex() - 1].RV.getScalarVal();
+ CallArgs[AA->getParamIndex() - 1].getRValue(*this).getScalarVal();
EmitAlignmentAssumption(Ret.getScalarVal(), ParamVal);
}
}
diff --git a/clang/lib/CodeGen/CGCall.h b/clang/lib/CodeGen/CGCall.h
index 495baf0..b66b48b 100644
--- a/clang/lib/CodeGen/CGCall.h
+++ b/clang/lib/CodeGen/CGCall.h
@@ -213,12 +213,46 @@
};
struct CallArg {
- RValue RV;
+ private:
+ union {
+ RValue RV;
+ LValue LV; /// The argument is semantically a load from this l-value.
+ };
+ bool HasLV;
+
+ /// A data-flow flag to make sure getRValue and/or copyInto are not
+ /// called twice, which would result in duplicated IR emission.
+ mutable bool IsUsed;
+
+ public:
QualType Ty;
- bool NeedsCopy;
- CallArg(RValue rv, QualType ty, bool needscopy)
- : RV(rv), Ty(ty), NeedsCopy(needscopy)
- { }
+ CallArg(RValue rv, QualType ty)
+ : RV(rv), HasLV(false), IsUsed(false), Ty(ty) {}
+ CallArg(LValue lv, QualType ty)
+ : LV(lv), HasLV(true), IsUsed(false), Ty(ty) {}
+ bool hasLValue() const { return HasLV; }
+ QualType getType() const { return Ty; }
+
+ /// \returns an independent RValue. If the CallArg contains an LValue,
+ /// a temporary copy is returned.
+ RValue getRValue(CodeGenFunction &CGF) const;
+
+ LValue getKnownLValue() const {
+ assert(HasLV && !IsUsed);
+ return LV;
+ }
+ RValue getKnownRValue() const {
+ assert(!HasLV && !IsUsed);
+ return RV;
+ }
+ void setRValue(RValue _RV) {
+ assert(!HasLV);
+ RV = _RV;
+ }
+
+ bool isAggregate() const { return HasLV || RV.isAggregate(); }
+
+ void copyInto(CodeGenFunction &CGF, Address A) const;
};
/// CallArgList - Type for representing both the value and type of
@@ -248,8 +282,10 @@
llvm::Instruction *IsActiveIP;
};
- void add(RValue rvalue, QualType type, bool needscopy = false) {
- push_back(CallArg(rvalue, type, needscopy));
+ void add(RValue rvalue, QualType type) { push_back(CallArg(rvalue, type)); }
+
+ void addUncopiedAggregate(LValue LV, QualType type) {
+ push_back(CallArg(LV, type));
}
/// Add all the arguments from another CallArgList to this one. After doing
diff --git a/clang/lib/CodeGen/CGClass.cpp b/clang/lib/CodeGen/CGClass.cpp
index 56bd250..4ba5c06 100644
--- a/clang/lib/CodeGen/CGClass.cpp
+++ b/clang/lib/CodeGen/CGClass.cpp
@@ -2077,7 +2077,8 @@
assert(Args.size() == 2 && "unexpected argcount for trivial ctor");
QualType SrcTy = D->getParamDecl(0)->getType().getNonReferenceType();
- Address Src(Args[1].RV.getScalarVal(), getNaturalTypeAlignment(SrcTy));
+ Address Src(Args[1].getRValue(*this).getScalarVal(),
+ getNaturalTypeAlignment(SrcTy));
LValue SrcLVal = MakeAddrLValue(Src, SrcTy);
QualType DestTy = getContext().getTypeDeclType(ClassDecl);
LValue DestLVal = MakeAddrLValue(This, DestTy);
@@ -2131,8 +2132,7 @@
const CXXConstructorDecl *D, bool ForVirtualBase, Address This,
bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E) {
CallArgList Args;
- CallArg ThisArg(RValue::get(This.getPointer()), D->getThisType(getContext()),
- /*NeedsCopy=*/false);
+ CallArg ThisArg(RValue::get(This.getPointer()), D->getThisType(getContext()));
// Forward the parameters.
if (InheritedFromVBase &&
@@ -2196,7 +2196,7 @@
assert(Args.size() >= Params.size() && "too few arguments for call");
for (unsigned I = 0, N = Args.size(); I != N; ++I) {
if (I < Params.size() && isa<ImplicitParamDecl>(Params[I])) {
- const RValue &RV = Args[I].RV;
+ const RValue &RV = Args[I].getRValue(*this);
assert(!RV.isComplex() && "complex indirect params not supported");
ParamValue Val = RV.isScalar()
? ParamValue::forDirect(RV.getScalarVal())
diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp
index 52294c2..2314fe4 100644
--- a/clang/lib/CodeGen/CGDecl.cpp
+++ b/clang/lib/CodeGen/CGDecl.cpp
@@ -1882,6 +1882,22 @@
llvm::Type *IRTy = ConvertTypeForMem(Ty)->getPointerTo(AS);
if (DeclPtr.getType() != IRTy)
DeclPtr = Builder.CreateBitCast(DeclPtr, IRTy, D.getName());
+ // An indirect argument is in the alloca address space, which may differ
+ // from the default address space.
+ auto AllocaAS = CGM.getASTAllocaAddressSpace();
+ auto *V = DeclPtr.getPointer();
+ auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
+ auto DestLangAS =
+ getLangOpts().OpenCL ? LangAS::opencl_private : LangAS::Default;
+ if (SrcLangAS != DestLangAS) {
+ assert(getContext().getTargetAddressSpace(SrcLangAS) ==
+ CGM.getDataLayout().getAllocaAddrSpace());
+ auto DestAS = getContext().getTargetAddressSpace(DestLangAS);
+ auto *T = V->getType()->getPointerElementType()->getPointerTo(DestAS);
+ DeclPtr = Address(getTargetHooks().performAddrSpaceCast(
+ *this, V, SrcLangAS, DestLangAS, T, true),
+ DeclPtr.getAlignment());
+ }
// Push a destructor cleanup for this parameter if the ABI requires it.
// Don't push a cleanup in a thunk for a method that will also emit a
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index 9f70ef3..b874959 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -265,7 +265,7 @@
// when it isn't necessary; just produce the proper effect here.
LValue RHS = isa<CXXOperatorCallExpr>(CE)
? MakeNaturalAlignAddrLValue(
- (*RtlArgs)[0].RV.getScalarVal(),
+ (*RtlArgs)[0].getRValue(*this).getScalarVal(),
(*(CE->arg_begin() + 1))->getType())
: EmitLValue(*CE->arg_begin());
EmitAggregateAssign(This, RHS, CE->getType());
@@ -1490,7 +1490,7 @@
AllocAlign);
for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
auto &Arg = NewArgs[I + NumNonPlacementArgs];
- Cleanup->setPlacementArg(I, Arg.RV, Arg.Ty);
+ Cleanup->setPlacementArg(I, Arg.getRValue(CGF), Arg.Ty);
}
return;
@@ -1521,8 +1521,8 @@
AllocAlign);
for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
auto &Arg = NewArgs[I + NumNonPlacementArgs];
- Cleanup->setPlacementArg(I, DominatingValue<RValue>::save(CGF, Arg.RV),
- Arg.Ty);
+ Cleanup->setPlacementArg(
+ I, DominatingValue<RValue>::save(CGF, Arg.getRValue(CGF)), Arg.Ty);
}
CGF.initFullExprCleanup();
diff --git a/clang/lib/CodeGen/CGGPUBuiltin.cpp b/clang/lib/CodeGen/CGGPUBuiltin.cpp
index 48156b1..b5375ff 100644
--- a/clang/lib/CodeGen/CGGPUBuiltin.cpp
+++ b/clang/lib/CodeGen/CGGPUBuiltin.cpp
@@ -83,8 +83,9 @@
/* ParamsToSkip = */ 0);
// We don't know how to emit non-scalar varargs.
- if (std::any_of(Args.begin() + 1, Args.end(),
- [](const CallArg &A) { return !A.RV.isScalar(); })) {
+ if (std::any_of(Args.begin() + 1, Args.end(), [&](const CallArg &A) {
+ return !A.getRValue(*this).isScalar();
+ })) {
CGM.ErrorUnsupported(E, "non-scalar arg to printf");
return RValue::get(llvm::ConstantInt::get(IntTy, 0));
}
@@ -97,7 +98,7 @@
} else {
llvm::SmallVector<llvm::Type *, 8> ArgTypes;
for (unsigned I = 1, NumArgs = Args.size(); I < NumArgs; ++I)
- ArgTypes.push_back(Args[I].RV.getScalarVal()->getType());
+ ArgTypes.push_back(Args[I].getRValue(*this).getScalarVal()->getType());
// Using llvm::StructType is correct only because printf doesn't accept
// aggregates. If we had to handle aggregates here, we'd have to manually
@@ -109,7 +110,7 @@
for (unsigned I = 1, NumArgs = Args.size(); I < NumArgs; ++I) {
llvm::Value *P = Builder.CreateStructGEP(AllocaTy, Alloca, I - 1);
- llvm::Value *Arg = Args[I].RV.getScalarVal();
+ llvm::Value *Arg = Args[I].getRValue(*this).getScalarVal();
Builder.CreateAlignedStore(Arg, P, DL.getPrefTypeAlignment(Arg->getType()));
}
BufferPtr = Builder.CreatePointerCast(Alloca, llvm::Type::getInt8PtrTy(Ctx));
@@ -117,6 +118,6 @@
// Invoke vprintf and return.
llvm::Function* VprintfFunc = GetVprintfDeclaration(CGM.getModule());
- return RValue::get(
- Builder.CreateCall(VprintfFunc, {Args[0].RV.getScalarVal(), BufferPtr}));
+ return RValue::get(Builder.CreateCall(
+ VprintfFunc, {Args[0].getRValue(*this).getScalarVal(), BufferPtr}));
}
diff --git a/clang/lib/CodeGen/CGObjCGNU.cpp b/clang/lib/CodeGen/CGObjCGNU.cpp
index 5a025ce..a6b6c38 100644
--- a/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -1441,7 +1441,7 @@
}
// Reset the receiver in case the lookup modified it
- ActualArgs[0] = CallArg(RValue::get(Receiver), ASTIdTy, false);
+ ActualArgs[0] = CallArg(RValue::get(Receiver), ASTIdTy);
imp = EnforceType(Builder, imp, MSI.MessengerType);
diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp
index c3793b2..64f40e1 100644
--- a/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/clang/lib/CodeGen/CGObjCMac.cpp
@@ -1708,7 +1708,7 @@
e = Method->param_end(); i != e; ++i, ++I) {
const ParmVarDecl *ParamDecl = (*i);
if (ParamDecl->hasAttr<NSConsumedAttr>()) {
- RValue RV = I->RV;
+ RValue RV = I->getRValue(CGF);
assert(RV.isScalar() &&
"NullReturnState::complete - arg not on object");
CGF.EmitARCRelease(RV.getScalarVal(), ARCImpreciseLifetime);
@@ -7071,7 +7071,7 @@
CGF.getPointerAlign());
// Update the message ref argument.
- args[1].RV = RValue::get(mref.getPointer());
+ args[1].setRValue(RValue::get(mref.getPointer()));
// Load the function to call from the message ref table.
Address calleeAddr =
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index 19deb9b..fa65b1d 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -3899,10 +3899,10 @@
void ExpandTypeFromArgs(QualType Ty, LValue Dst,
SmallVectorImpl<llvm::Value *>::iterator &AI);
- /// ExpandTypeToArgs - Expand an RValue \arg RV, with the LLVM type for \arg
+ /// ExpandTypeToArgs - Expand a CallArg \arg Arg, with the LLVM type for \arg
/// Ty, into individual arguments on the provided vector \arg IRCallArgs,
/// starting at index \arg IRCallArgPos. See ABIArgInfo::Expand.
- void ExpandTypeToArgs(QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
+ void ExpandTypeToArgs(QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
SmallVectorImpl<llvm::Value *> &IRCallArgs,
unsigned &IRCallArgPos);
diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp
index 3b45e57..cb0af7b 100644
--- a/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -1474,8 +1474,7 @@
llvm::Value *VTT =
CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
- Args.insert(Args.begin() + 1,
- CallArg(RValue::get(VTT), VTTTy, /*needscopy=*/false));
+ Args.insert(Args.begin() + 1, CallArg(RValue::get(VTT), VTTTy));
return AddedStructorArgs::prefix(1); // Added one arg.
}
diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index 43c061e..74b744a 100644
--- a/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -1537,8 +1537,7 @@
}
RValue RV = RValue::get(MostDerivedArg);
if (FPT->isVariadic()) {
- Args.insert(Args.begin() + 1,
- CallArg(RV, getContext().IntTy, /*needscopy=*/false));
+ Args.insert(Args.begin() + 1, CallArg(RV, getContext().IntTy));
return AddedStructorArgs::prefix(1);
}
Args.add(RV, getContext().IntTy);