Reland "[SelectionDAG] Enable target specific vector scalarization of calls and returns"
By adding calling-convention-specific target hooks for getRegisterType,
getNumRegisters and getVectorTypeBreakdown, backends can request that LLVM
scalarize vector types for calls and returns.
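As a rough illustration only (the class name and the unconditional choice of
i32 are placeholders, and this is not the in-tree MIPS implementation), a
backend overriding the new hooks could look roughly like the fragment below;
the signatures are inferred from the call sites changed in this patch:

    // Sketch: pass every vector argument/return value as enough 32-bit
    // integer registers to hold its bits, instead of the type-legalized form.
    class MyTargetLowering : public TargetLowering {
    public:
      MVT getRegisterTypeForCallingConv(LLVMContext &Context,
                                        EVT VT) const override {
        if (VT.isVector())
          return MVT::i32;
        return TargetLowering::getRegisterTypeForCallingConv(Context, VT);
      }

      unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                             EVT VT) const override {
        if (VT.isVector())
          return (VT.getSizeInBits() + 31) / 32;
        return TargetLowering::getNumRegistersForCallingConv(Context, VT);
      }

      // getVectorTypeBreakdownForCallingConv would be overridden similarly to
      // describe the intermediate vector type and the register count.
    };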
The MIPS vector ABI requires that vector arguments and returns be passed in
integer registers. With SelectionDAG's new hooks, the MIPS backend can now
handle LLVM IR with vector types in calls and returns, e.g.
'call @foo(<4 x i32> %4)'.
Previously, these cases would be scalarized for the MIPS O32/N32/N64 ABIs for
calls and returns if vector types were not legal. If vector types were legal,
a single 128-bit vector argument would be assigned to a single 32-bit/64-bit
integer register.
By teaching the MIPS backend to inspect the original types, it can now
implement the MIPS vector ABI, which requires a particular method of
scalarizing vectors.
Previously, the MIPS backend relied on clang to scalarize types, e.g. turning
"call @foo(<4 x float> %a)" into "call @foo(i32 inreg %1, i32 inreg %2,
i32 inreg %3, i32 inreg %4)".
This patch enables the MIPS backend to take either form for vector types.
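For example (using a hypothetical function name), the backend now accepts both
'call @bar(<4 x float> %a)' and the pre-scalarized
'call @bar(i32 inreg %1, i32 inreg %2, i32 inreg %3, i32 inreg %4)',
performing the required scalarization itself in the former case.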
The previous version of this patch triggered a "conditional move or jump
depends on uninitialized value" error, which is addressed in this reland.
Reviewers: zoran.jovanovic, jaydeep, vkalintiris, slthakur
Differential Revision: https://reviews.llvm.org/D27845
llvm-svn: 305083
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 9b2710c..d34ac40 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -101,7 +101,8 @@
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
const SDValue *Parts, unsigned NumParts,
- MVT PartVT, EVT ValueVT, const Value *V);
+ MVT PartVT, EVT ValueVT, const Value *V,
+ bool IsABIRegCopy);
/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent. If the parts combine to a type
@@ -111,10 +112,11 @@
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
const SDValue *Parts, unsigned NumParts,
MVT PartVT, EVT ValueVT, const Value *V,
- Optional<ISD::NodeType> AssertOp = None) {
+ Optional<ISD::NodeType> AssertOp = None,
+ bool IsABIRegCopy = false) {
if (ValueVT.isVector())
return getCopyFromPartsVector(DAG, DL, Parts, NumParts,
- PartVT, ValueVT, V);
+ PartVT, ValueVT, V, IsABIRegCopy);
assert(NumParts > 0 && "No parts to assemble!");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -258,7 +260,8 @@
/// ValueVT (ISD::AssertSext).
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
const SDValue *Parts, unsigned NumParts,
- MVT PartVT, EVT ValueVT, const Value *V) {
+ MVT PartVT, EVT ValueVT, const Value *V,
+ bool IsABIRegCopy) {
assert(ValueVT.isVector() && "Not a vector value");
assert(NumParts > 0 && "No parts to assemble!");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -269,9 +272,18 @@
EVT IntermediateVT;
MVT RegisterVT;
unsigned NumIntermediates;
- unsigned NumRegs =
- TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
- NumIntermediates, RegisterVT);
+ unsigned NumRegs;
+
+ if (IsABIRegCopy) {
+ NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
+ *DAG.getContext(), ValueVT, IntermediateVT, NumIntermediates,
+ RegisterVT);
+ } else {
+ NumRegs =
+ TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
+ NumIntermediates, RegisterVT);
+ }
+
assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
NumParts = NumRegs; // Silence a compiler warning.
assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
@@ -300,9 +312,14 @@
// Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
// intermediate operands.
+ EVT BuiltVectorTy =
+ EVT::getVectorVT(*DAG.getContext(), IntermediateVT.getScalarType(),
+ (IntermediateVT.isVector()
+ ? IntermediateVT.getVectorNumElements() * NumParts
+ : NumIntermediates));
Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
: ISD::BUILD_VECTOR,
- DL, ValueVT, Ops);
+ DL, BuiltVectorTy, Ops);
}
// There is now one part, held in Val. Correct it to match ValueVT.
@@ -341,13 +358,29 @@
TLI.isTypeLegal(ValueVT))
return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
- // Handle cases such as i8 -> <1 x i1>
if (ValueVT.getVectorNumElements() != 1) {
- diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
- "non-trivial scalar-to-vector conversion");
- return DAG.getUNDEF(ValueVT);
+ // Certain ABIs require that vectors are passed as integers. For vectors
+ // that are the same size, this is an obvious bitcast.
+ if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
+ return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
+ } else if (ValueVT.getSizeInBits() < PartEVT.getSizeInBits()) {
+ // Bitcast Val back to the original type and extract the corresponding
+ // vector we want.
+ unsigned Elts = PartEVT.getSizeInBits() / ValueVT.getScalarSizeInBits();
+ EVT WiderVecType = EVT::getVectorVT(*DAG.getContext(),
+ ValueVT.getVectorElementType(), Elts);
+ Val = DAG.getBitcast(WiderVecType, Val);
+ return DAG.getNode(
+ ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
+ DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
+ }
+
+ diagnosePossiblyInvalidConstraint(
+ *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
+ return DAG.getUNDEF(ValueVT);
}
+ // Handle cases such as i8 -> <1 x i1>
EVT ValueSVT = ValueVT.getVectorElementType();
if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT)
Val = ValueVT.isFloatingPoint() ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
@@ -358,7 +391,7 @@
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
SDValue Val, SDValue *Parts, unsigned NumParts,
- MVT PartVT, const Value *V);
+ MVT PartVT, const Value *V, bool IsABIRegCopy);
/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts. If the parts contain more bits than Val, then, for
@@ -366,12 +399,14 @@
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
SDValue *Parts, unsigned NumParts, MVT PartVT,
const Value *V,
- ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
+ ISD::NodeType ExtendKind = ISD::ANY_EXTEND,
+ bool IsABIRegCopy = false) {
EVT ValueVT = Val.getValueType();
// Handle the vector case separately.
if (ValueVT.isVector())
- return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V);
+ return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
+ IsABIRegCopy);
unsigned PartBits = PartVT.getSizeInBits();
unsigned OrigNumParts = NumParts;
@@ -496,7 +531,9 @@
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
SDValue Val, SDValue *Parts, unsigned NumParts,
- MVT PartVT, const Value *V) {
+ MVT PartVT, const Value *V,
+ bool IsABIRegCopy) {
+
EVT ValueVT = Val.getValueType();
assert(ValueVT.isVector() && "Not a vector");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -537,13 +574,20 @@
// Promoted vector extract
Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
- } else{
- // Vector -> scalar conversion.
- assert(ValueVT.getVectorNumElements() == 1 &&
- "Only trivial vector-to-scalar conversions should get here!");
- Val = DAG.getNode(
- ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
- DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
+ } else {
+ if (ValueVT.getVectorNumElements() == 1) {
+ Val = DAG.getNode(
+ ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
+ DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
+
+ } else {
+ assert(PartVT.getSizeInBits() > ValueVT.getSizeInBits() &&
+ "lossy conversion of vector to scalar type");
+ EVT IntermediateType =
+ EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
+ Val = DAG.getBitcast(IntermediateType, Val);
+ Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
+ }
}
assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
@@ -555,15 +599,31 @@
EVT IntermediateVT;
MVT RegisterVT;
unsigned NumIntermediates;
- unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
- IntermediateVT,
- NumIntermediates, RegisterVT);
+ unsigned NumRegs;
+ if (IsABIRegCopy) {
+ NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
+ *DAG.getContext(), ValueVT, IntermediateVT, NumIntermediates,
+ RegisterVT);
+ } else {
+ NumRegs =
+ TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
+ NumIntermediates, RegisterVT);
+ }
unsigned NumElements = ValueVT.getVectorNumElements();
assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
NumParts = NumRegs; // Silence a compiler warning.
assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
+ // Convert the vector to the appropriate type if necessary.
+ unsigned DestVectorNoElts =
+ NumIntermediates *
+ (IntermediateVT.isVector() ? IntermediateVT.getVectorNumElements() : 1);
+ EVT BuiltVectorTy = EVT::getVectorVT(
+ *DAG.getContext(), IntermediateVT.getScalarType(), DestVectorNoElts);
+ if (Val.getValueType() != BuiltVectorTy)
+ Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
+
// Split the vector into intermediate operands.
SmallVector<SDValue, 8> Ops(NumIntermediates);
for (unsigned i = 0; i != NumIntermediates; ++i) {
@@ -596,22 +656,31 @@
}
}
-RegsForValue::RegsForValue() {}
+RegsForValue::RegsForValue() { IsABIMangled = false; }
RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
- EVT valuevt)
- : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
+ EVT valuevt, bool IsABIMangledValue)
+ : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
+ RegCount(1, regs.size()), IsABIMangled(IsABIMangledValue) {}
RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
- const DataLayout &DL, unsigned Reg, Type *Ty) {
+ const DataLayout &DL, unsigned Reg, Type *Ty,
+ bool IsABIMangledValue) {
ComputeValueVTs(TLI, DL, Ty, ValueVTs);
+ IsABIMangled = IsABIMangledValue;
+
for (EVT ValueVT : ValueVTs) {
- unsigned NumRegs = TLI.getNumRegisters(Context, ValueVT);
- MVT RegisterVT = TLI.getRegisterType(Context, ValueVT);
+ unsigned NumRegs = IsABIMangledValue
+ ? TLI.getNumRegistersForCallingConv(Context, ValueVT)
+ : TLI.getNumRegisters(Context, ValueVT);
+ MVT RegisterVT = IsABIMangledValue
+ ? TLI.getRegisterTypeForCallingConv(Context, ValueVT)
+ : TLI.getRegisterType(Context, ValueVT);
for (unsigned i = 0; i != NumRegs; ++i)
Regs.push_back(Reg + i);
RegVTs.push_back(RegisterVT);
+ RegCount.push_back(NumRegs);
Reg += NumRegs;
}
}
@@ -632,8 +701,10 @@
for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
// Copy the legal parts from the registers.
EVT ValueVT = ValueVTs[Value];
- unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
- MVT RegisterVT = RegVTs[Value];
+ unsigned NumRegs = RegCount[Value];
+ MVT RegisterVT = IsABIMangled
+ ? TLI.getRegisterTypeForCallingConv(RegVTs[Value])
+ : RegVTs[Value];
Parts.resize(NumRegs);
for (unsigned i = 0; i != NumRegs; ++i) {
@@ -728,9 +799,11 @@
unsigned NumRegs = Regs.size();
SmallVector<SDValue, 8> Parts(NumRegs);
for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
- EVT ValueVT = ValueVTs[Value];
- unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
- MVT RegisterVT = RegVTs[Value];
+ unsigned NumParts = RegCount[Value];
+
+ MVT RegisterVT = IsABIMangled
+ ? TLI.getRegisterTypeForCallingConv(RegVTs[Value])
+ : RegVTs[Value];
if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
ExtendKind = ISD::ZERO_EXTEND;
@@ -953,10 +1026,16 @@
if (It != FuncInfo.ValueMap.end()) {
unsigned InReg = It->second;
+ bool IsABIRegCopy =
+ V && ((isa<CallInst>(V) &&
+ !(static_cast<const CallInst *>(V))->isInlineAsm()) ||
+ isa<ReturnInst>(V));
+
RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
- DAG.getDataLayout(), InReg, Ty);
+ DAG.getDataLayout(), InReg, Ty, IsABIRegCopy);
SDValue Chain = DAG.getEntryNode();
- Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
+ Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
+ V);
resolveDanglingDebugInfo(V, Result);
}
@@ -1142,8 +1221,13 @@
// If this is an instruction which fast-isel has deferred, select it now.
if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
+ bool IsABIRegCopy =
+ V && ((isa<CallInst>(V) &&
+ !(static_cast<const CallInst *>(V))->isInlineAsm()) ||
+ isa<ReturnInst>(V));
+
RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
- Inst->getType());
+ Inst->getType(), IsABIRegCopy);
SDValue Chain = DAG.getEntryNode();
return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
}
@@ -1371,12 +1455,12 @@
if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
- unsigned NumParts = TLI.getNumRegisters(Context, VT);
- MVT PartVT = TLI.getRegisterType(Context, VT);
+ unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, VT);
+ MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, VT);
SmallVector<SDValue, 4> Parts(NumParts);
getCopyToParts(DAG, getCurSDLoc(),
SDValue(RetOp.getNode(), RetOp.getResNo() + j),
- &Parts[0], NumParts, PartVT, &I, ExtendKind);
+ &Parts[0], NumParts, PartVT, &I, ExtendKind, true);
// 'inreg' on function refers to return value
ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
@@ -7112,8 +7196,8 @@
SDLoc dl = getCurSDLoc();
// Use the produced MatchedRegs object to
- MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl,
- Chain, &Flag, CS.getInstruction());
+ MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag,
+ CS.getInstruction());
MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
true, OpInfo.getMatchedOperand(), dl,
DAG, AsmNodeOperands);
@@ -7799,8 +7883,10 @@
} else {
for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
EVT VT = RetTys[I];
- MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
- unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
+ MVT RegisterVT =
+ getRegisterTypeForCallingConv(CLI.RetTy->getContext(), VT);
+ unsigned NumRegs =
+ getNumRegistersForCallingConv(CLI.RetTy->getContext(), VT);
for (unsigned i = 0; i != NumRegs; ++i) {
ISD::InputArg MyFlags;
MyFlags.VT = RegisterVT;
@@ -7849,7 +7935,11 @@
SDValue Op = SDValue(Args[i].Node.getNode(),
Args[i].Node.getResNo() + Value);
ISD::ArgFlagsTy Flags;
- unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
+
+ // Certain targets (such as MIPS) may have a different ABI alignment
+ // for a type depending on the context. Give the target a chance to
+ // specify the alignment it wants.
+ unsigned OriginalAlignment = getABIAlignmentForCallingConv(ArgTy, DL);
if (Args[i].IsZExt)
Flags.setZExt();
@@ -7904,8 +7994,9 @@
Flags.setInConsecutiveRegs();
Flags.setOrigAlign(OriginalAlignment);
- MVT PartVT = getRegisterType(CLI.RetTy->getContext(), VT);
- unsigned NumParts = getNumRegisters(CLI.RetTy->getContext(), VT);
+ MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(), VT);
+ unsigned NumParts =
+ getNumRegistersForCallingConv(CLI.RetTy->getContext(), VT);
SmallVector<SDValue, 4> Parts(NumParts);
ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
@@ -7935,7 +8026,8 @@
}
getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT,
- CLI.CS ? CLI.CS->getInstruction() : nullptr, ExtendKind);
+ CLI.CS ? CLI.CS->getInstruction() : nullptr, ExtendKind,
+ true);
for (unsigned j = 0; j != NumParts; ++j) {
// if it isn't first piece, alignment must be 1
@@ -8035,12 +8127,14 @@
unsigned CurReg = 0;
for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
EVT VT = RetTys[I];
- MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
- unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
+ MVT RegisterVT =
+ getRegisterTypeForCallingConv(CLI.RetTy->getContext(), VT);
+ unsigned NumRegs =
+ getNumRegistersForCallingConv(CLI.RetTy->getContext(), VT);
ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
NumRegs, RegisterVT, VT, nullptr,
- AssertOp));
+ AssertOp, true));
CurReg += NumRegs;
}
@@ -8076,8 +8170,15 @@
assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ // If this is an InlineAsm we have to match the registers required, not the
+ // notional registers required by the type.
+ bool IsABIRegCopy =
+ V && ((isa<CallInst>(V) &&
+ !(static_cast<const CallInst *>(V))->isInlineAsm()) ||
+ isa<ReturnInst>(V));
+
RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
- V->getType());
+ V->getType(), IsABIRegCopy);
SDValue Chain = DAG.getEntryNode();
ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
@@ -8319,7 +8420,12 @@
EVT VT = ValueVTs[Value];
Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
ISD::ArgFlagsTy Flags;
- unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
+
+ // Certain targets (such as MIPS) may have a different ABI alignment
+ // for a type depending on the context. Give the target a chance to
+ // specify the alignment it wants.
+ unsigned OriginalAlignment =
+ TLI->getABIAlignmentForCallingConv(ArgTy, DL);
if (Arg.hasAttribute(Attribute::ZExt))
Flags.setZExt();
@@ -8381,8 +8487,10 @@
if (ArgCopyElisionCandidates.count(&Arg))
Flags.setCopyElisionCandidate();
- MVT RegisterVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
- unsigned NumRegs = TLI->getNumRegisters(*CurDAG->getContext(), VT);
+ MVT RegisterVT =
+ TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(), VT);
+ unsigned NumRegs =
+ TLI->getNumRegistersForCallingConv(*CurDAG->getContext(), VT);
for (unsigned i = 0; i != NumRegs; ++i) {
ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
ArgNo, PartBase+i*RegisterVT.getStoreSize());
@@ -8486,8 +8594,10 @@
for (unsigned Val = 0; Val != NumValues; ++Val) {
EVT VT = ValueVTs[Val];
- MVT PartVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
- unsigned NumParts = TLI->getNumRegisters(*CurDAG->getContext(), VT);
+ MVT PartVT =
+ TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(), VT);
+ unsigned NumParts =
+ TLI->getNumRegistersForCallingConv(*CurDAG->getContext(), VT);
// Even an apparant 'unused' swifterror argument needs to be returned. So
// we do generate a copy for it that can be used on return from the
@@ -8500,7 +8610,8 @@
AssertOp = ISD::AssertZext;
ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
- PartVT, VT, nullptr, AssertOp));
+ PartVT, VT, nullptr, AssertOp,
+ true));
}
i += NumParts;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index 77e131f..431d52b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -975,18 +975,28 @@
/// expanded value requires multiple registers.
SmallVector<unsigned, 4> Regs;
+ /// This list holds the number of registers for each value.
+ SmallVector<unsigned, 4> RegCount;
+
+ /// Records if this value needs to be treated in an ABI-dependent manner,
+ /// different from normal type legalization.
+ bool IsABIMangled;
+
RegsForValue();
- RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt, EVT valuevt);
+ RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt, EVT valuevt,
+ bool IsABIMangledValue = false);
RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
- const DataLayout &DL, unsigned Reg, Type *Ty);
+ const DataLayout &DL, unsigned Reg, Type *Ty,
+ bool IsABIMangledValue = false);
/// Add the specified values to this one.
void append(const RegsForValue &RHS) {
ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
Regs.append(RHS.Regs.begin(), RHS.Regs.end());
+ RegCount.push_back(RHS.Regs.size());
}
/// Emit a series of CopyFromReg nodes that copies from this value and returns
diff --git a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
index ac727ce..5d78bba 100644
--- a/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
@@ -840,7 +840,7 @@
// completely and make statepoint call to return a tuple.
unsigned Reg = FuncInfo.CreateRegs(RetTy);
RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
- DAG.getDataLayout(), Reg, RetTy);
+ DAG.getDataLayout(), Reg, RetTy, true);
SDValue Chain = DAG.getEntryNode();
RFV.getCopyToRegs(ReturnValue, DAG, getCurSDLoc(), Chain, nullptr);