[GlobalISel][CallLowering] Add support for splitting types according to calling conventions.
On AArch64, s128 types have to be split into s64 GPRs when passed as arguments.
This change adds generic support in call lowering for handling multiple
registers, for both incoming and outgoing arguments.
Support for splitting return types is not yet implemented.
Differential Revision: https://reviews.llvm.org/D66180
llvm-svn: 370822
diff --git a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
index e389aaa..9ace33b 100644
--- a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
@@ -174,12 +174,13 @@
bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
const CallLowering::ArgInfo &Info,
+ ISD::ArgFlagsTy Flags,
CCState &State) override {
bool Res;
if (Info.IsFixed)
- Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
+ Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
else
- Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
+ Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, State);
StackSize = State.getNextStackOffset();
return Res;
@@ -208,7 +209,7 @@
// No splitting to do, but we want to replace the original type (e.g. [1 x
// double] -> double).
SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
- OrigArg.Flags, OrigArg.IsFixed);
+ OrigArg.Flags[0], OrigArg.IsFixed);
return;
}
@@ -219,13 +220,13 @@
OrigArg.Ty, CallConv, false);
for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
- SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.Flags,
+ SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.Flags[0],
OrigArg.IsFixed);
if (NeedsRegBlock)
- SplitArgs.back().Flags.setInConsecutiveRegs();
+ SplitArgs.back().Flags[0].setInConsecutiveRegs();
}
- SplitArgs.back().Flags.setInConsecutiveRegsLast();
+ SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
}
bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
@@ -419,7 +420,7 @@
splitToValueTypes(OrigArg, SplitArgs, DL, MRI, Info.CallConv);
// AAPCS requires that we zero-extend i1 to 8 bits by the caller.
if (OrigArg.Ty->isIntegerTy(1))
- SplitArgs.back().Flags.setZExt();
+ SplitArgs.back().Flags[0].setZExt();
}
// Find out which ABI gets to decide where things go.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index 6b194b1..df235cb 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -64,8 +64,9 @@
bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
const CallLowering::ArgInfo &Info,
+ ISD::ArgFlagsTy Flags,
CCState &State) override {
- return AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
+ return AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
}
};
diff --git a/llvm/lib/Target/ARM/ARMCallLowering.cpp b/llvm/lib/Target/ARM/ARMCallLowering.cpp
index 3f1c7d4..52dba00 100644
--- a/llvm/lib/Target/ARM/ARMCallLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMCallLowering.cpp
@@ -169,8 +169,9 @@
bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
- const CallLowering::ArgInfo &Info, CCState &State) override {
- if (AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State))
+ const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
+ CCState &State) override {
+ if (AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State))
return true;
StackSize =
@@ -199,7 +200,7 @@
if (SplitVTs.size() == 1) {
// Even if there is no splitting to do, we still want to replace the
// original type (e.g. pointer type -> integer).
- auto Flags = OrigArg.Flags;
+ auto Flags = OrigArg.Flags[0];
unsigned OriginalAlignment = DL.getABITypeAlignment(OrigArg.Ty);
Flags.setOrigAlign(OriginalAlignment);
SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
@@ -211,7 +212,7 @@
for (unsigned i = 0, e = SplitVTs.size(); i != e; ++i) {
EVT SplitVT = SplitVTs[i];
Type *SplitTy = SplitVT.getTypeForEVT(Ctx);
- auto Flags = OrigArg.Flags;
+ auto Flags = OrigArg.Flags[0];
unsigned OriginalAlignment = DL.getABITypeAlignment(SplitTy);
Flags.setOrigAlign(OriginalAlignment);
@@ -547,7 +548,7 @@
if (!Arg.IsFixed)
IsVarArg = true;
- if (Arg.Flags.isByVal())
+ if (Arg.Flags[0].isByVal())
return false;
splitToValueTypes(Arg, ArgInfos, MF);
diff --git a/llvm/lib/Target/Mips/MipsCallLowering.cpp b/llvm/lib/Target/Mips/MipsCallLowering.cpp
index 8691365..0d0e446 100644
--- a/llvm/lib/Target/Mips/MipsCallLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsCallLowering.cpp
@@ -508,7 +508,7 @@
for (auto &Arg : Info.OrigArgs) {
if (!isSupportedType(Arg.Ty))
return false;
- if (Arg.Flags.isByVal() || Arg.Flags.isSRet())
+ if (Arg.Flags[0].isByVal() || Arg.Flags[0].isSRet())
return false;
}
@@ -641,7 +641,7 @@
F.getContext(), F.getCallingConv(), VT);
for (unsigned i = 0; i < NumRegs; ++i) {
- ISD::ArgFlagsTy Flags = Arg.Flags;
+ ISD::ArgFlagsTy Flags = Arg.Flags[0];
if (i == 0)
Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL));
diff --git a/llvm/lib/Target/X86/X86CallLowering.cpp b/llvm/lib/Target/X86/X86CallLowering.cpp
index d559b2e..1dddf78 100644
--- a/llvm/lib/Target/X86/X86CallLowering.cpp
+++ b/llvm/lib/Target/X86/X86CallLowering.cpp
@@ -155,8 +155,9 @@
bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
- const CallLowering::ArgInfo &Info, CCState &State) override {
- bool Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
+ const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
+ CCState &State) override {
+ bool Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
StackSize = State.getNextStackOffset();
static const MCPhysReg XMMArgRegs[] = {X86::XMM0, X86::XMM1, X86::XMM2,
@@ -405,7 +406,7 @@
for (const auto &OrigArg : Info.OrigArgs) {
// TODO: handle not simple cases.
- if (OrigArg.Flags.isByVal())
+ if (OrigArg.Flags[0].isByVal())
return false;
if (OrigArg.Regs.size() > 1)