[AST] Change return type of getTypeInfoInChars to a proper struct instead of std::pair.
Follow-up to D85191.
This changes getTypeInfoInChars to return a TypeInfoChars
struct instead of a std::pair of CharUnits. This lets the
interface match getTypeInfo more closely.
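
For reference, a typical call site changes roughly like this (illustrative
sketch only; the local names are invented, while the Width/Align members
are the ones provided by TypeInfoChars):

  // Before: unpack the std::pair<CharUnits, CharUnits> result.
  std::pair<CharUnits, CharUnits> Info = Ctx.getTypeInfoInChars(Ty);
  CharUnits Size  = Info.first;   // size in chars
  CharUnits Align = Info.second;  // alignment in chars

  // After: use the named members of the returned TypeInfoChars.
  TypeInfoChars TInfo = Ctx.getTypeInfoInChars(Ty);
  CharUnits Size2  = TInfo.Width;
  CharUnits Align2 = TInfo.Align;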
Reviewed By: efriedma
Differential Revision: https://reviews.llvm.org/D86447
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index a640cb7..c7256e2 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -806,13 +806,12 @@
return RValue::get(nullptr);
}
- CharUnits sizeChars, alignChars;
- std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
- uint64_t Size = sizeChars.getQuantity();
+ auto TInfo = getContext().getTypeInfoInChars(AtomicTy);
+ uint64_t Size = TInfo.Width.getQuantity();
unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
- bool Oversized = getContext().toBits(sizeChars) > MaxInlineWidthInBits;
- bool Misaligned = (Ptr.getAlignment() % sizeChars) != 0;
+ bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
+ bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
bool UseLibcall = Misaligned | Oversized;
CharUnits MaxInlineWidth =
getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
@@ -821,13 +820,13 @@
if (Misaligned) {
Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
- << (int)sizeChars.getQuantity()
+ << (int)TInfo.Width.getQuantity()
<< (int)Ptr.getAlignment().getQuantity();
}
if (Oversized) {
Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
- << (int)sizeChars.getQuantity() << (int)MaxInlineWidth.getQuantity();
+ << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();
}
llvm::Value *Order = EmitScalarExpr(E->getOrder());
@@ -1080,7 +1079,7 @@
EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
getContext().VoidPtrTy);
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
- MemTy, E->getExprLoc(), sizeChars);
+ MemTy, E->getExprLoc(), TInfo.Width);
Args.add(RValue::get(Order), getContext().IntTy);
Order = OrderFail;
break;
@@ -1093,7 +1092,7 @@
case AtomicExpr::AO__atomic_exchange:
LibCallName = "__atomic_exchange";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- MemTy, E->getExprLoc(), sizeChars);
+ MemTy, E->getExprLoc(), TInfo.Width);
break;
// void __atomic_store(size_t size, void *mem, void *val, int order)
// void __atomic_store_N(T *mem, T val, int order)
@@ -1105,7 +1104,7 @@
RetTy = getContext().VoidTy;
HaveRetTy = true;
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- MemTy, E->getExprLoc(), sizeChars);
+ MemTy, E->getExprLoc(), TInfo.Width);
break;
// void __atomic_load(size_t size, void *mem, void *return, int order)
// T __atomic_load_N(T *mem, int order)
@@ -1125,7 +1124,7 @@
case AtomicExpr::AO__atomic_fetch_add:
LibCallName = "__atomic_fetch_add";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- LoweredMemTy, E->getExprLoc(), sizeChars);
+ LoweredMemTy, E->getExprLoc(), TInfo.Width);
break;
// T __atomic_and_fetch_N(T *mem, T val, int order)
// T __atomic_fetch_and_N(T *mem, T val, int order)
@@ -1137,7 +1136,7 @@
case AtomicExpr::AO__atomic_fetch_and:
LibCallName = "__atomic_fetch_and";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- MemTy, E->getExprLoc(), sizeChars);
+ MemTy, E->getExprLoc(), TInfo.Width);
break;
// T __atomic_or_fetch_N(T *mem, T val, int order)
// T __atomic_fetch_or_N(T *mem, T val, int order)
@@ -1149,7 +1148,7 @@
case AtomicExpr::AO__atomic_fetch_or:
LibCallName = "__atomic_fetch_or";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- MemTy, E->getExprLoc(), sizeChars);
+ MemTy, E->getExprLoc(), TInfo.Width);
break;
// T __atomic_sub_fetch_N(T *mem, T val, int order)
// T __atomic_fetch_sub_N(T *mem, T val, int order)
@@ -1161,7 +1160,7 @@
case AtomicExpr::AO__atomic_fetch_sub:
LibCallName = "__atomic_fetch_sub";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- LoweredMemTy, E->getExprLoc(), sizeChars);
+ LoweredMemTy, E->getExprLoc(), TInfo.Width);
break;
// T __atomic_xor_fetch_N(T *mem, T val, int order)
// T __atomic_fetch_xor_N(T *mem, T val, int order)
@@ -1173,7 +1172,7 @@
case AtomicExpr::AO__atomic_fetch_xor:
LibCallName = "__atomic_fetch_xor";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- MemTy, E->getExprLoc(), sizeChars);
+ MemTy, E->getExprLoc(), TInfo.Width);
break;
case AtomicExpr::AO__atomic_min_fetch:
PostOpMinMax = true;
@@ -1185,7 +1184,7 @@
? "__atomic_fetch_min"
: "__atomic_fetch_umin";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- LoweredMemTy, E->getExprLoc(), sizeChars);
+ LoweredMemTy, E->getExprLoc(), TInfo.Width);
break;
case AtomicExpr::AO__atomic_max_fetch:
PostOpMinMax = true;
@@ -1197,7 +1196,7 @@
? "__atomic_fetch_max"
: "__atomic_fetch_umax";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- LoweredMemTy, E->getExprLoc(), sizeChars);
+ LoweredMemTy, E->getExprLoc(), TInfo.Width);
break;
// T __atomic_nand_fetch_N(T *mem, T val, int order)
// T __atomic_fetch_nand_N(T *mem, T val, int order)
@@ -1207,7 +1206,7 @@
case AtomicExpr::AO__atomic_fetch_nand:
LibCallName = "__atomic_fetch_nand";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
- MemTy, E->getExprLoc(), sizeChars);
+ MemTy, E->getExprLoc(), TInfo.Width);
break;
}
@@ -1225,7 +1224,7 @@
// Value is returned directly.
// The function returns an appropriately sized integer type.
RetTy = getContext().getIntTypeForBitwidth(
- getContext().toBits(sizeChars), /*Signed=*/false);
+ getContext().toBits(TInfo.Width), /*Signed=*/false);
} else {
// Value is returned through parameter before the order.
RetTy = getContext().VoidTy;
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
index ee0c146..a4f09af 100644
--- a/clang/lib/CodeGen/CGBlocks.cpp
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -557,11 +557,10 @@
// Theoretically, this could be in a different address space, so
// don't assume standard pointer size/align.
llvm::Type *llvmType = CGM.getTypes().ConvertType(thisType);
- std::pair<CharUnits,CharUnits> tinfo
- = CGM.getContext().getTypeInfoInChars(thisType);
- maxFieldAlign = std::max(maxFieldAlign, tinfo.second);
+ auto TInfo = CGM.getContext().getTypeInfoInChars(thisType);
+ maxFieldAlign = std::max(maxFieldAlign, TInfo.Align);
- layout.push_back(BlockLayoutChunk(tinfo.second, tinfo.first,
+ layout.push_back(BlockLayoutChunk(TInfo.Align, TInfo.Width,
Qualifiers::OCL_None,
nullptr, llvmType, thisType));
}
diff --git a/clang/lib/CodeGen/CGCUDANV.cpp b/clang/lib/CodeGen/CGCUDANV.cpp
index b0b76ff..7c5ab39 100644
--- a/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/clang/lib/CodeGen/CGCUDANV.cpp
@@ -354,14 +354,12 @@
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
CharUnits Offset = CharUnits::Zero();
for (const VarDecl *A : Args) {
- CharUnits TyWidth, TyAlign;
- std::tie(TyWidth, TyAlign) =
- CGM.getContext().getTypeInfoInChars(A->getType());
- Offset = Offset.alignTo(TyAlign);
+ auto TInfo = CGM.getContext().getTypeInfoInChars(A->getType());
+ Offset = Offset.alignTo(TInfo.Align);
llvm::Value *Args[] = {
CGF.Builder.CreatePointerCast(CGF.GetAddrOfLocalVar(A).getPointer(),
VoidPtrTy),
- llvm::ConstantInt::get(SizeTy, TyWidth.getQuantity()),
+ llvm::ConstantInt::get(SizeTy, TInfo.Width.getQuantity()),
llvm::ConstantInt::get(SizeTy, Offset.getQuantity()),
};
llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(cudaSetupArgFn, Args);
@@ -370,7 +368,7 @@
llvm::BasicBlock *NextBlock = CGF.createBasicBlock("setup.next");
CGF.Builder.CreateCondBr(CBZero, NextBlock, EndBlock);
CGF.EmitBlock(NextBlock);
- Offset += TyWidth;
+ Offset += TInfo.Width;
}
// Emit the call to cudaLaunch
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index cb03e02..0d050dc 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -2285,8 +2285,8 @@
auto PTy = ParamType->getPointeeType();
if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
auto info = getContext().getTypeInfoInChars(PTy);
- Attrs.addDereferenceableAttr(info.first.getQuantity());
- Attrs.addAlignmentAttr(info.second.getAsAlign());
+ Attrs.addDereferenceableAttr(info.Width.getQuantity());
+ Attrs.addAlignmentAttr(info.Align.getAsAlign());
}
break;
}
diff --git a/clang/lib/CodeGen/CGClass.cpp b/clang/lib/CodeGen/CGClass.cpp
index 4d143e3..c41650a 100644
--- a/clang/lib/CodeGen/CGClass.cpp
+++ b/clang/lib/CodeGen/CGClass.cpp
@@ -798,9 +798,8 @@
size_t NumFields = 0;
for (const auto *Field : ClassDecl->fields()) {
const FieldDecl *D = Field;
- std::pair<CharUnits, CharUnits> FieldInfo =
- Context.getTypeInfoInChars(D->getType());
- CharUnits FieldSize = FieldInfo.first;
+ auto FieldInfo = Context.getTypeInfoInChars(D->getType());
+ CharUnits FieldSize = FieldInfo.Width;
assert(NumFields < SSV.size());
SSV[NumFields].Size = D->isBitField() ? 0 : FieldSize.getQuantity();
NumFields++;
@@ -947,7 +946,7 @@
LastField->isBitField()
? LastField->getBitWidthValue(Ctx)
: Ctx.toBits(
- Ctx.getTypeInfoDataSizeInChars(LastField->getType()).first);
+ Ctx.getTypeInfoDataSizeInChars(LastField->getType()).Width);
uint64_t MemcpySizeBits = LastFieldOffset + LastFieldSize -
FirstByteOffset + Ctx.getCharWidth() - 1;
CharUnits MemcpySize = Ctx.toCharUnitsFromBits(MemcpySizeBits);
diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
index 2081576..43e23d9 100644
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -1976,28 +1976,28 @@
// Get data size info for this aggregate. Don't copy the tail padding if this
// might be a potentially-overlapping subobject, since the tail padding might
// be occupied by a different object. Otherwise, copying it is fine.
- std::pair<CharUnits, CharUnits> TypeInfo;
+ TypeInfoChars TypeInfo;
if (MayOverlap)
TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
else
TypeInfo = getContext().getTypeInfoInChars(Ty);
llvm::Value *SizeVal = nullptr;
- if (TypeInfo.first.isZero()) {
+ if (TypeInfo.Width.isZero()) {
// But note that getTypeInfo returns 0 for a VLA.
if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
getContext().getAsArrayType(Ty))) {
QualType BaseEltTy;
SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
- assert(!TypeInfo.first.isZero());
+ assert(!TypeInfo.Width.isZero());
SizeVal = Builder.CreateNUWMul(
SizeVal,
- llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
+ llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity()));
}
}
if (!SizeVal) {
- SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity());
+ SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity());
}
// FIXME: If we have a volatile struct, the optimizer can remove what might
diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp
index f905e17e..a647128 100644
--- a/clang/lib/CodeGen/CGObjC.cpp
+++ b/clang/lib/CodeGen/CGObjC.cpp
@@ -919,8 +919,9 @@
// Evaluate the ivar's size and alignment.
ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
QualType ivarType = ivar->getType();
- std::tie(IvarSize, IvarAlignment) =
- CGM.getContext().getTypeInfoInChars(ivarType);
+ auto TInfo = CGM.getContext().getTypeInfoInChars(ivarType);
+ IvarSize = TInfo.Width;
+ IvarAlignment = TInfo.Align;
// If we have a copy property, we always have to use getProperty/setProperty.
// TODO: we could actually use setProperty and an expression for non-atomics.
diff --git a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index ce35880..880342d 100644
--- a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -742,7 +742,7 @@
assert(Prior->FD->hasAttr<NoUniqueAddressAttr>() &&
"should not have reused this field's tail padding");
Prior->Data = getByteArrayType(
- Context.getTypeInfoDataSizeInChars(Prior->FD->getType()).first);
+ Context.getTypeInfoDataSizeInChars(Prior->FD->getType()).Width);
}
}
if (Member->Data)
diff --git a/clang/lib/CodeGen/CGValue.h b/clang/lib/CodeGen/CGValue.h
index 70e6fed..4b39a05 100644
--- a/clang/lib/CodeGen/CGValue.h
+++ b/clang/lib/CodeGen/CGValue.h
@@ -653,7 +653,7 @@
/// is the type size unless that might overlap another object, in which
/// case it's the dsize.
CharUnits getPreferredSize(ASTContext &Ctx, QualType Type) const {
- return mayOverlap() ? Ctx.getTypeInfoDataSizeInChars(Type).first
+ return mayOverlap() ? Ctx.getTypeInfoDataSizeInChars(Type).Width
: Ctx.getTypeSizeInChars(Type);
}
};
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
index d7c2794..31fbe39 100644
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -359,7 +359,7 @@
/// leaving one or more empty slots behind as padding.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType ValueTy, bool IsIndirect,
- std::pair<CharUnits, CharUnits> ValueInfo,
+ TypeInfoChars ValueInfo,
CharUnits SlotSizeAndAlign,
bool AllowHigherAlign) {
// The size and alignment of the value that was passed directly.
@@ -368,8 +368,8 @@
DirectSize = CGF.getPointerSize();
DirectAlign = CGF.getPointerAlign();
} else {
- DirectSize = ValueInfo.first;
- DirectAlign = ValueInfo.second;
+ DirectSize = ValueInfo.Width;
+ DirectAlign = ValueInfo.Align;
}
// Cast the address we've calculated to the right type.
@@ -383,7 +383,7 @@
AllowHigherAlign);
if (IsIndirect) {
- Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second);
+ Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.Align);
}
return Addr;
@@ -656,7 +656,7 @@
"Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");
auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
- CharUnits TyAlignForABI = TyInfo.second;
+ CharUnits TyAlignForABI = TyInfo.Align;
llvm::Type *BaseTy =
llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
@@ -2062,8 +2062,8 @@
//
// Just messing with TypeInfo like this works because we never pass
// anything indirectly.
- TypeInfo.second = CharUnits::fromQuantity(
- getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity()));
+ TypeInfo.Align = CharUnits::fromQuantity(
+ getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));
return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
TypeInfo, CharUnits::fromQuantity(4),
@@ -4067,10 +4067,9 @@
RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
// Copy to a temporary if necessary to ensure the appropriate alignment.
- std::pair<CharUnits, CharUnits> SizeAlign =
- getContext().getTypeInfoInChars(Ty);
- uint64_t TySize = SizeAlign.first.getQuantity();
- CharUnits TyAlign = SizeAlign.second;
+ auto TInfo = getContext().getTypeInfoInChars(Ty);
+ uint64_t TySize = TInfo.Width.getQuantity();
+ CharUnits TyAlign = TInfo.Align;
// Copy into a temporary if the type is more aligned than the
// register save area.
@@ -4573,7 +4572,7 @@
llvm::report_fatal_error("vector type is not supported on AIX yet");
auto TypeInfo = getContext().getTypeInfoInChars(Ty);
- TypeInfo.second = getParamTypeAlignment(Ty);
+ TypeInfo.Align = getParamTypeAlignment(Ty);
CharUnits SlotSize = CharUnits::fromQuantity(PtrByteSize);
@@ -4692,7 +4691,7 @@
QualType Ty) const {
if (getTarget().getTriple().isOSDarwin()) {
auto TI = getContext().getTypeInfoInChars(Ty);
- TI.second = getParamTypeAlignment(Ty);
+ TI.Align = getParamTypeAlignment(Ty);
CharUnits SlotSize = CharUnits::fromQuantity(4);
return emitVoidPtrVAArg(CGF, VAList, Ty,
@@ -4802,7 +4801,7 @@
CharUnits Size;
if (!isIndirect) {
auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
- Size = TypeInfo.first.alignTo(OverflowAreaAlign);
+ Size = TypeInfo.Width.alignTo(OverflowAreaAlign);
} else {
Size = CGF.getPointerSize();
}
@@ -5365,7 +5364,7 @@
Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const {
auto TypeInfo = getContext().getTypeInfoInChars(Ty);
- TypeInfo.second = getParamTypeAlignment(Ty);
+ TypeInfo.Align = getParamTypeAlignment(Ty);
CharUnits SlotSize = CharUnits::fromQuantity(8);
@@ -5376,7 +5375,7 @@
// loads of the real and imaginary parts relative to the va_list pointer,
// and store them to a temporary structure.
if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
- CharUnits EltSize = TypeInfo.first / 2;
+ CharUnits EltSize = TypeInfo.Width / 2;
if (EltSize < SlotSize) {
Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty,
SlotSize * 2, SlotSize,
@@ -6008,13 +6007,13 @@
llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
Address Tmp = CGF.CreateTempAlloca(HFATy,
- std::max(TyAlign, BaseTyInfo.second));
+ std::max(TyAlign, BaseTyInfo.Align));
// On big-endian platforms, the value will be right-aligned in its slot.
int Offset = 0;
if (CGF.CGM.getDataLayout().isBigEndian() &&
- BaseTyInfo.first.getQuantity() < 16)
- Offset = 16 - BaseTyInfo.first.getQuantity();
+ BaseTyInfo.Width.getQuantity() < 16)
+ Offset = 16 - BaseTyInfo.Width.getQuantity();
for (unsigned i = 0; i < NumMembers; ++i) {
CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
@@ -6138,7 +6137,7 @@
// Arguments bigger than 16 bytes which aren't homogeneous
// aggregates should be passed indirectly.
bool IsIndirect = false;
- if (TyInfo.first.getQuantity() > 16) {
+ if (TyInfo.Width.getQuantity() > 16) {
const Type *Base = nullptr;
uint64_t Members = 0;
IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
@@ -6900,7 +6899,7 @@
TyAlignForABI = CharUnits::fromQuantity(4);
}
- std::pair<CharUnits, CharUnits> TyInfo = { TySize, TyAlignForABI };
+ TypeInfoChars TyInfo(TySize, TyAlignForABI, false);
return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
SlotSize, /*AllowHigherAlign*/ true);
}
@@ -7374,8 +7373,8 @@
ArgTy = AI.getCoerceToType();
InFPRs = (!IsSoftFloatABI && (ArgTy->isFloatTy() || ArgTy->isDoubleTy()));
IsVector = ArgTy->isVectorTy();
- UnpaddedSize = TyInfo.first;
- DirectAlign = TyInfo.second;
+ UnpaddedSize = TyInfo.Width;
+ DirectAlign = TyInfo.Align;
}
CharUnits PaddedSize = CharUnits::fromQuantity(8);
if (IsVector && UnpaddedSize > PaddedSize)
@@ -7396,7 +7395,7 @@
CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
Address OverflowArgArea =
Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
- TyInfo.second);
+ TyInfo.Align);
Address MemAddr =
CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");
@@ -7493,7 +7492,7 @@
if (IsIndirect)
ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"),
- TyInfo.second);
+ TyInfo.Align);
return ResAddr;
}
@@ -7990,8 +7989,8 @@
// The alignment of things in the argument area is never larger than
// StackAlignInBytes.
- TyInfo.second =
- std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes));
+ TyInfo.Align =
+ std::min(TyInfo.Align, CharUnits::fromQuantity(StackAlignInBytes));
// MinABIStackAlignInBytes is the size of argument slots on the stack.
CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
@@ -9454,7 +9453,7 @@
case ABIArgInfo::Extend: {
Stride = SlotSize;
- CharUnits Offset = SlotSize - TypeInfo.first;
+ CharUnits Offset = SlotSize - TypeInfo.Width;
ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
break;
}
@@ -9471,11 +9470,11 @@
Stride = SlotSize;
ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect");
ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"),
- TypeInfo.second);
+ TypeInfo.Align);
break;
case ABIArgInfo::Ignore:
- return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second);
+ return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.Align);
}
// Update VAList.
@@ -10771,13 +10770,12 @@
return Addr;
}
- std::pair<CharUnits, CharUnits> SizeAndAlign =
- getContext().getTypeInfoInChars(Ty);
+ auto TInfo = getContext().getTypeInfoInChars(Ty);
// Arguments bigger than 2*Xlen bytes are passed indirectly.
- bool IsIndirect = SizeAndAlign.first > 2 * SlotSize;
+ bool IsIndirect = TInfo.Width > 2 * SlotSize;
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, SizeAndAlign,
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TInfo,
SlotSize, /*AllowHigherAlign=*/true);
}