GlobalISel: Remove unsigned variant of SrcOp
Force using Register.
One downside is that the generated register enums require explicit
conversion.
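
Since SrcOp no longer has an unsigned constructor, and C++ applies at most
one user-defined conversion implicitly, a raw generated enum value can no
longer reach SrcOp through unsigned -> Register -> SrcOp on its own. A
minimal sketch of the call-site change this forces (illustrative only,
mirroring the AArch64 hunks below):

  // Before: SrcOp's unsigned constructor accepted the raw enum value.
  MIRBuilder.buildCopy(SPReg, AArch64::SP);
  // After: the conversion to Register must be spelled out explicitly.
  MIRBuilder.buildCopy(SPReg, Register(AArch64::SP));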
llvm-svn: 364194
diff --git a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
index 1e1c20d..a497bd9 100644
--- a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
@@ -57,18 +57,18 @@
CCAssignFn *AssignFn)
: ValueHandler(MIRBuilder, MRI, AssignFn), StackUsed(0) {}
- unsigned getStackAddress(uint64_t Size, int64_t Offset,
+ Register getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO) override {
auto &MFI = MIRBuilder.getMF().getFrameInfo();
int FI = MFI.CreateFixedObject(Size, Offset, true);
MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
- unsigned AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 64));
+ Register AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 64));
MIRBuilder.buildFrameIndex(AddrReg, FI);
StackUsed = std::max(StackUsed, Size + Offset);
return AddrReg;
}
- void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
+ void assignValueToReg(Register ValVReg, Register PhysReg,
CCValAssign &VA) override {
markPhysRegUsed(PhysReg);
switch (VA.getLocInfo()) {
@@ -85,7 +85,7 @@
}
}
- void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
+ void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
MachinePointerInfo &MPO, CCValAssign &VA) override {
// FIXME: Get alignment
auto MMO = MIRBuilder.getMF().getMachineMemOperand(
@@ -133,31 +133,31 @@
: ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB),
AssignFnVarArg(AssignFnVarArg), StackSize(0) {}
- unsigned getStackAddress(uint64_t Size, int64_t Offset,
+ Register getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO) override {
LLT p0 = LLT::pointer(0, 64);
LLT s64 = LLT::scalar(64);
- unsigned SPReg = MRI.createGenericVirtualRegister(p0);
- MIRBuilder.buildCopy(SPReg, AArch64::SP);
+ Register SPReg = MRI.createGenericVirtualRegister(p0);
+ MIRBuilder.buildCopy(SPReg, Register(AArch64::SP));
- unsigned OffsetReg = MRI.createGenericVirtualRegister(s64);
+ Register OffsetReg = MRI.createGenericVirtualRegister(s64);
MIRBuilder.buildConstant(OffsetReg, Offset);
- unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
+ Register AddrReg = MRI.createGenericVirtualRegister(p0);
MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
return AddrReg;
}
- void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
+ void assignValueToReg(Register ValVReg, Register PhysReg,
CCValAssign &VA) override {
MIB.addUse(PhysReg, RegState::Implicit);
- unsigned ExtReg = extendRegister(ValVReg, VA);
+ Register ExtReg = extendRegister(ValVReg, VA);
MIRBuilder.buildCopy(PhysReg, ExtReg);
}
- void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
+ void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
MachinePointerInfo &MPO, CCValAssign &VA) override {
if (VA.getLocInfo() == CCValAssign::LocInfo::AExt) {
Size = VA.getLocVT().getSizeInBits() / 8;
@@ -263,7 +263,7 @@
return false;
}
- unsigned CurVReg = VRegs[i];
+ Register CurVReg = VRegs[i];
ArgInfo CurArgInfo = ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx)};
setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
@@ -367,7 +367,7 @@
setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F);
bool Split = false;
LLT Ty = MRI.getType(VRegs[i]);
- unsigned Dst = VRegs[i];
+ Register Dst = VRegs[i];
splitToValueTypes(OrigArg, SplitArgs, DL, MRI, F.getCallingConv(),
[&](unsigned Reg, uint64_t Offset) {
@@ -436,7 +436,7 @@
SmallVector<ArgInfo, 8> SplitArgs;
for (auto &OrigArg : OrigArgs) {
splitToValueTypes(OrigArg, SplitArgs, DL, MRI, CallConv,
- [&](unsigned Reg, uint64_t Offset) {
+ [&](Register Reg, uint64_t Offset) {
MIRBuilder.buildExtract(Reg, OrigArg.Reg, Offset);
});
// AAPCS requires that we zero-extend i1 to 8 bits by the caller.
@@ -512,7 +512,7 @@
if (SwiftErrorVReg) {
MIB.addDef(AArch64::X21, RegState::Implicit);
- MIRBuilder.buildCopy(SwiftErrorVReg, AArch64::X21);
+ MIRBuilder.buildCopy(SwiftErrorVReg, Register(AArch64::X21));
}
CallSeqStart.addImm(Handler.StackSize).addImm(0);
diff --git a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
index 3c8d0f1..c87267a 100644
--- a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -74,7 +74,7 @@
// returned via 'Dst'.
MachineInstr *emitScalarToVector(unsigned EltSize,
const TargetRegisterClass *DstRC,
- unsigned Scalar,
+ Register Scalar,
MachineIRBuilder &MIRBuilder) const;
/// Emit a lane insert into \p DstReg, or a new vector register if None is
@@ -83,8 +83,8 @@
/// The lane inserted into is defined by \p LaneIdx. The vector source
/// register is given by \p SrcReg. The register containing the element is
/// given by \p EltReg.
- MachineInstr *emitLaneInsert(Optional<unsigned> DstReg, unsigned SrcReg,
- unsigned EltReg, unsigned LaneIdx,
+ MachineInstr *emitLaneInsert(Optional<Register> DstReg, Register SrcReg,
+ Register EltReg, unsigned LaneIdx,
const RegisterBank &RB,
MachineIRBuilder &MIRBuilder) const;
bool selectInsertElt(MachineInstr &I, MachineRegisterInfo &MRI) const;
@@ -113,12 +113,12 @@
MachineIRBuilder &MIRBuilder) const;
// Emit a vector concat operation.
- MachineInstr *emitVectorConcat(Optional<unsigned> Dst, unsigned Op1,
- unsigned Op2,
+ MachineInstr *emitVectorConcat(Optional<Register> Dst, Register Op1,
+ Register Op2,
MachineIRBuilder &MIRBuilder) const;
- MachineInstr *emitExtractVectorElt(Optional<unsigned> DstReg,
+ MachineInstr *emitExtractVectorElt(Optional<Register> DstReg,
const RegisterBank &DstRB, LLT ScalarTy,
- unsigned VecReg, unsigned LaneIdx,
+ Register VecReg, unsigned LaneIdx,
MachineIRBuilder &MIRBuilder) const;
/// Helper function for selecting G_FCONSTANT. If the G_FCONSTANT can be
@@ -128,7 +128,7 @@
MachineRegisterInfo &MRI) const;
/// Emit a CSet for a compare.
- MachineInstr *emitCSetForICMP(unsigned DefReg, unsigned Pred,
+ MachineInstr *emitCSetForICMP(Register DefReg, unsigned Pred,
MachineIRBuilder &MIRBuilder) const;
ComplexRendererFns selectArithImmed(MachineOperand &Root) const;
@@ -861,7 +861,7 @@
bool AArch64InstructionSelector::selectCompareBranch(
MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
- const unsigned CondReg = I.getOperand(0).getReg();
+ const Register CondReg = I.getOperand(0).getReg();
MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
MachineInstr *CCMI = MRI.getVRegDef(CondReg);
if (CCMI->getOpcode() == TargetOpcode::G_TRUNC)
@@ -869,8 +869,8 @@
if (CCMI->getOpcode() != TargetOpcode::G_ICMP)
return false;
- unsigned LHS = CCMI->getOperand(2).getReg();
- unsigned RHS = CCMI->getOperand(3).getReg();
+ Register LHS = CCMI->getOperand(2).getReg();
+ Register RHS = CCMI->getOperand(3).getReg();
if (!getConstantVRegVal(RHS, MRI))
std::swap(RHS, LHS);
@@ -907,10 +907,10 @@
bool AArch64InstructionSelector::selectVectorSHL(
MachineInstr &I, MachineRegisterInfo &MRI) const {
assert(I.getOpcode() == TargetOpcode::G_SHL);
- unsigned DstReg = I.getOperand(0).getReg();
+ Register DstReg = I.getOperand(0).getReg();
const LLT Ty = MRI.getType(DstReg);
- unsigned Src1Reg = I.getOperand(1).getReg();
- unsigned Src2Reg = I.getOperand(2).getReg();
+ Register Src1Reg = I.getOperand(1).getReg();
+ Register Src2Reg = I.getOperand(2).getReg();
if (!Ty.isVector())
return false;
@@ -935,10 +935,10 @@
bool AArch64InstructionSelector::selectVectorASHR(
MachineInstr &I, MachineRegisterInfo &MRI) const {
assert(I.getOpcode() == TargetOpcode::G_ASHR);
- unsigned DstReg = I.getOperand(0).getReg();
+ Register DstReg = I.getOperand(0).getReg();
const LLT Ty = MRI.getType(DstReg);
- unsigned Src1Reg = I.getOperand(1).getReg();
- unsigned Src2Reg = I.getOperand(2).getReg();
+ Register Src1Reg = I.getOperand(1).getReg();
+ Register Src2Reg = I.getOperand(2).getReg();
if (!Ty.isVector())
return false;
@@ -980,9 +980,9 @@
bool AArch64InstructionSelector::selectVaStartDarwin(
MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
- unsigned ListReg = I.getOperand(0).getReg();
+ Register ListReg = I.getOperand(0).getReg();
- unsigned ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
+ Register ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
auto MIB =
BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::ADDXri))
@@ -1036,7 +1036,7 @@
constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI);
return DstReg;
};
- unsigned DstReg = BuildMovK(MovZ.getReg(0),
+ Register DstReg = BuildMovK(MovZ.getReg(0),
AArch64II::MO_G1 | AArch64II::MO_NC, 16, 0);
DstReg = BuildMovK(DstReg, AArch64II::MO_G2 | AArch64II::MO_NC, 32, 0);
BuildMovK(DstReg, AArch64II::MO_G3, 48, I.getOperand(0).getReg());
@@ -1061,7 +1061,7 @@
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
if (Opcode == TargetOpcode::PHI || Opcode == TargetOpcode::G_PHI) {
- const unsigned DefReg = I.getOperand(0).getReg();
+ const Register DefReg = I.getOperand(0).getReg();
const LLT DefTy = MRI.getType(DefReg);
const TargetRegisterClass *DefRC = nullptr;
@@ -1122,7 +1122,7 @@
return false;
}
- const unsigned CondReg = I.getOperand(0).getReg();
+ const Register CondReg = I.getOperand(0).getReg();
MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
// Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z
@@ -1167,7 +1167,7 @@
case TargetOpcode::G_BSWAP: {
// Handle vector types for G_BSWAP directly.
- unsigned DstReg = I.getOperand(0).getReg();
+ Register DstReg = I.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
// We should only get vector types here; everything else is handled by the
@@ -1212,7 +1212,7 @@
const LLT s64 = LLT::scalar(64);
const LLT p0 = LLT::pointer(0, 64);
- const unsigned DefReg = I.getOperand(0).getReg();
+ const Register DefReg = I.getOperand(0).getReg();
const LLT DefTy = MRI.getType(DefReg);
const unsigned DefSize = DefTy.getSizeInBits();
const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
@@ -1270,7 +1270,7 @@
return true;
// Nope. Emit a copy and use a normal mov instead.
- const unsigned DefGPRReg = MRI.createVirtualRegister(&GPRRC);
+ const Register DefGPRReg = MRI.createVirtualRegister(&GPRRC);
MachineOperand &RegOp = I.getOperand(0);
RegOp.setReg(DefGPRReg);
MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator()));
@@ -1317,7 +1317,7 @@
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
- unsigned DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
+ Register DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator()));
MIB.buildInstr(TargetOpcode::COPY, {I.getOperand(0).getReg()}, {})
.addReg(DstReg, 0, AArch64::sub_32);
@@ -1349,7 +1349,7 @@
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
- unsigned SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
+ Register SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
BuildMI(MBB, I.getIterator(), I.getDebugLoc(),
TII.get(AArch64::SUBREG_TO_REG))
.addDef(SrcReg)
@@ -1427,7 +1427,7 @@
}
unsigned MemSizeInBits = MemOp.getSize() * 8;
- const unsigned PtrReg = I.getOperand(1).getReg();
+ const Register PtrReg = I.getOperand(1).getReg();
#ifndef NDEBUG
const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
// Sanity-check the pointer register.
@@ -1437,7 +1437,7 @@
"Load/Store pointer operand isn't a pointer");
#endif
- const unsigned ValReg = I.getOperand(0).getReg();
+ const Register ValReg = I.getOperand(0).getReg();
const RegisterBank &RB = *RBI.getRegBank(ValReg, MRI, TRI);
const unsigned NewOpc =
@@ -1488,8 +1488,8 @@
return false;
// If we have a ZEXTLOAD then change the load's type to be a narrower reg
// and zero_extend with SUBREG_TO_REG.
- unsigned LdReg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
- unsigned DstReg = I.getOperand(0).getReg();
+ Register LdReg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
+ Register DstReg = I.getOperand(0).getReg();
I.getOperand(0).setReg(LdReg);
MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator()));
@@ -1510,7 +1510,7 @@
if (unsupportedBinOp(I, RBI, MRI, TRI))
return false;
- const unsigned DefReg = I.getOperand(0).getReg();
+ const Register DefReg = I.getOperand(0).getReg();
const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
if (RB.getID() != AArch64::GPRRegBankID) {
@@ -1555,7 +1555,7 @@
const unsigned OpSize = Ty.getSizeInBits();
- const unsigned DefReg = I.getOperand(0).getReg();
+ const Register DefReg = I.getOperand(0).getReg();
const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
const unsigned NewOpc = selectBinaryOp(I.getOpcode(), RB.getID(), OpSize);
@@ -1600,7 +1600,7 @@
// this case, we want to increment when carry is set.
auto CsetMI = MIRBuilder
.buildInstr(AArch64::CSINCWr, {I.getOperand(1).getReg()},
- {AArch64::WZR, AArch64::WZR})
+ {Register(AArch64::WZR), Register(AArch64::WZR)})
.addImm(getInvertedCondCode(AArch64CC::HS));
constrainSelectedInstRegOperands(*CsetMI, TII, TRI, RBI);
I.eraseFromParent();
@@ -1623,8 +1623,8 @@
const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
- const unsigned DstReg = I.getOperand(0).getReg();
- const unsigned SrcReg = I.getOperand(1).getReg();
+ const Register DstReg = I.getOperand(0).getReg();
+ const Register SrcReg = I.getOperand(1).getReg();
const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
@@ -1681,8 +1681,8 @@
}
case TargetOpcode::G_ANYEXT: {
- const unsigned DstReg = I.getOperand(0).getReg();
- const unsigned SrcReg = I.getOperand(1).getReg();
+ const Register DstReg = I.getOperand(0).getReg();
+ const Register SrcReg = I.getOperand(1).getReg();
const RegisterBank &RBDst = *RBI.getRegBank(DstReg, MRI, TRI);
if (RBDst.getID() != AArch64::GPRRegBankID) {
@@ -1713,7 +1713,7 @@
// At this point G_ANYEXT is just like a plain COPY, but we need
// to explicitly form the 64-bit value if any.
if (DstSize > 32) {
- unsigned ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass);
+ Register ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass);
BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
.addDef(ExtSrc)
.addImm(0)
@@ -1730,8 +1730,8 @@
const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
SrcTy = MRI.getType(I.getOperand(1).getReg());
const bool isSigned = Opcode == TargetOpcode::G_SEXT;
- const unsigned DefReg = I.getOperand(0).getReg();
- const unsigned SrcReg = I.getOperand(1).getReg();
+ const Register DefReg = I.getOperand(0).getReg();
+ const Register SrcReg = I.getOperand(1).getReg();
const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
if (RB.getID() != AArch64::GPRRegBankID) {
@@ -1749,7 +1749,7 @@
return false;
}
- const unsigned SrcXReg =
+ const Register SrcXReg =
MRI.createVirtualRegister(&AArch64::GPR64RegClass);
BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
.addDef(SrcXReg)
@@ -1817,9 +1817,9 @@
return false;
}
- const unsigned CondReg = I.getOperand(1).getReg();
- const unsigned TReg = I.getOperand(2).getReg();
- const unsigned FReg = I.getOperand(3).getReg();
+ const Register CondReg = I.getOperand(1).getReg();
+ const Register TReg = I.getOperand(2).getReg();
+ const Register FReg = I.getOperand(3).getReg();
// If we have a floating-point result, then we should use a floating point
// select instead of an integer select.
@@ -1829,7 +1829,7 @@
if (IsFP && tryOptSelect(I))
return true;
unsigned CSelOpc = selectSelectOpc(I, MRI, RBI);
MachineInstr &TstMI =
*BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri))
.addDef(AArch64::WZR)
@@ -1859,7 +1859,7 @@
}
unsigned CmpOpc = 0;
- unsigned ZReg = 0;
+ Register ZReg;
// Check if this compare can be represented as a cmn, and perform any
// necessary transformations to do so.
@@ -1930,8 +1930,8 @@
if (CmpOpc != AArch64::FCMPSri && CmpOpc != AArch64::FCMPDri)
CmpMI = CmpMI.addUse(I.getOperand(3).getReg());
- const unsigned DefReg = I.getOperand(0).getReg();
- unsigned Def1Reg = DefReg;
+ const Register DefReg = I.getOperand(0).getReg();
+ Register Def1Reg = DefReg;
if (CC2 != AArch64CC::AL)
Def1Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
@@ -1943,7 +1943,7 @@
.addImm(getInvertedCondCode(CC1));
if (CC2 != AArch64CC::AL) {
- unsigned Def2Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
+ Register Def2Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
MachineInstr &CSet2MI =
*BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
.addDef(Def2Reg)
@@ -1974,7 +1974,7 @@
case TargetOpcode::G_IMPLICIT_DEF: {
I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
- const unsigned DstReg = I.getOperand(0).getReg();
+ const Register DstReg = I.getOperand(0).getReg();
const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
const TargetRegisterClass *DstRC =
getRegClassForTypeOnBank(DstTy, DstRB, RBI);
@@ -2027,13 +2027,13 @@
bool AArch64InstructionSelector::selectBrJT(MachineInstr &I,
MachineRegisterInfo &MRI) const {
assert(I.getOpcode() == TargetOpcode::G_BRJT && "Expected G_BRJT");
- unsigned JTAddr = I.getOperand(0).getReg();
+ Register JTAddr = I.getOperand(0).getReg();
unsigned JTI = I.getOperand(1).getIndex();
- unsigned Index = I.getOperand(2).getReg();
+ Register Index = I.getOperand(2).getReg();
MachineIRBuilder MIB(I);
- unsigned TargetReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
- unsigned ScratchReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
+ Register TargetReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
+ Register ScratchReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
MIB.buildInstr(AArch64::JumpTableDest32, {TargetReg, ScratchReg},
{JTAddr, Index})
.addJumpTableIndex(JTI);
@@ -2049,7 +2049,7 @@
assert(I.getOpcode() == TargetOpcode::G_JUMP_TABLE && "Expected jump table");
assert(I.getOperand(1).isJTI() && "Jump table op should have a JTI!");
- unsigned DstReg = I.getOperand(0).getReg();
+ Register DstReg = I.getOperand(0).getReg();
unsigned JTI = I.getOperand(1).getIndex();
// We generate a MOVaddrJT which will get expanded to an ADRP + ADD later.
MachineIRBuilder MIB(I);
@@ -2173,10 +2173,10 @@
bool AArch64InstructionSelector::selectVectorICmp(
MachineInstr &I, MachineRegisterInfo &MRI) const {
- unsigned DstReg = I.getOperand(0).getReg();
+ Register DstReg = I.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
- unsigned SrcReg = I.getOperand(2).getReg();
- unsigned Src2Reg = I.getOperand(3).getReg();
+ Register SrcReg = I.getOperand(2).getReg();
+ Register Src2Reg = I.getOperand(3).getReg();
LLT SrcTy = MRI.getType(SrcReg);
unsigned SrcEltSize = SrcTy.getElementType().getSizeInBits();
@@ -2344,7 +2344,7 @@
}
MachineInstr *AArch64InstructionSelector::emitScalarToVector(
- unsigned EltSize, const TargetRegisterClass *DstRC, unsigned Scalar,
+ unsigned EltSize, const TargetRegisterClass *DstRC, Register Scalar,
MachineIRBuilder &MIRBuilder) const {
auto Undef = MIRBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF, {DstRC}, {});
@@ -2387,14 +2387,14 @@
return false;
auto *DstRC = &AArch64::GPR64RegClass;
- unsigned SubToRegDef = MRI.createVirtualRegister(DstRC);
+ Register SubToRegDef = MRI.createVirtualRegister(DstRC);
MachineInstr &SubRegMI = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
TII.get(TargetOpcode::SUBREG_TO_REG))
.addDef(SubToRegDef)
.addImm(0)
.addUse(I.getOperand(1).getReg())
.addImm(AArch64::sub_32);
- unsigned SubToRegDef2 = MRI.createVirtualRegister(DstRC);
+ Register SubToRegDef2 = MRI.createVirtualRegister(DstRC);
// Need to anyext the second scalar before we can use bfm
MachineInstr &SubRegMI2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
TII.get(TargetOpcode::SUBREG_TO_REG))
@@ -2442,8 +2442,8 @@
}
MachineInstr *AArch64InstructionSelector::emitExtractVectorElt(
- Optional<unsigned> DstReg, const RegisterBank &DstRB, LLT ScalarTy,
- unsigned VecReg, unsigned LaneIdx, MachineIRBuilder &MIRBuilder) const {
+ Optional<Register> DstReg, const RegisterBank &DstRB, LLT ScalarTy,
+ Register VecReg, unsigned LaneIdx, MachineIRBuilder &MIRBuilder) const {
MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
unsigned CopyOpc = 0;
unsigned ExtractSubReg = 0;
@@ -2470,7 +2470,7 @@
}
// The register that we're going to copy into.
- unsigned InsertReg = VecReg;
+ Register InsertReg = VecReg;
if (!DstReg)
DstReg = MRI.createVirtualRegister(DstRC);
// If the lane index is 0, we just use a subregister COPY.
@@ -2505,9 +2505,9 @@
MachineInstr &I, MachineRegisterInfo &MRI) const {
assert(I.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT &&
"unexpected opcode!");
- unsigned DstReg = I.getOperand(0).getReg();
+ Register DstReg = I.getOperand(0).getReg();
const LLT NarrowTy = MRI.getType(DstReg);
- const unsigned SrcReg = I.getOperand(1).getReg();
+ const Register SrcReg = I.getOperand(1).getReg();
const LLT WideTy = MRI.getType(SrcReg);
(void)WideTy;
assert(WideTy.getSizeInBits() >= NarrowTy.getSizeInBits() &&
@@ -2544,7 +2544,7 @@
bool AArch64InstructionSelector::selectSplitVectorUnmerge(
MachineInstr &I, MachineRegisterInfo &MRI) const {
unsigned NumElts = I.getNumOperands() - 1;
- unsigned SrcReg = I.getOperand(NumElts).getReg();
+ Register SrcReg = I.getOperand(NumElts).getReg();
const LLT NarrowTy = MRI.getType(I.getOperand(0).getReg());
const LLT SrcTy = MRI.getType(SrcReg);
@@ -2561,7 +2561,7 @@
const RegisterBank &DstRB =
*RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI);
for (unsigned OpIdx = 0; OpIdx < NumElts; ++OpIdx) {
- unsigned Dst = I.getOperand(OpIdx).getReg();
+ Register Dst = I.getOperand(OpIdx).getReg();
MachineInstr *Extract =
emitExtractVectorElt(Dst, DstRB, NarrowTy, SrcReg, OpIdx, MIB);
if (!Extract)
@@ -2589,7 +2589,7 @@
// The last operand is the vector source register, and every other operand is
// a register to unpack into.
unsigned NumElts = I.getNumOperands() - 1;
- unsigned SrcReg = I.getOperand(NumElts).getReg();
+ Register SrcReg = I.getOperand(NumElts).getReg();
const LLT NarrowTy = MRI.getType(I.getOperand(0).getReg());
const LLT WideTy = MRI.getType(SrcReg);
(void)WideTy;
@@ -2613,7 +2613,7 @@
MachineBasicBlock &MBB = *I.getParent();
// Stores the registers we'll be copying from.
- SmallVector<unsigned, 4> InsertRegs;
+ SmallVector<Register, 4> InsertRegs;
// We'll use the first register twice, so we only need NumElts-1 registers.
unsigned NumInsertRegs = NumElts - 1;
@@ -2622,18 +2622,18 @@
// directly. Otherwise, we need to do a bit of setup with some subregister
// inserts.
if (NarrowTy.getSizeInBits() * NumElts == 128) {
- InsertRegs = SmallVector<unsigned, 4>(NumInsertRegs, SrcReg);
+ InsertRegs = SmallVector<Register, 4>(NumInsertRegs, SrcReg);
} else {
// No. We have to perform subregister inserts. For each insert, create an
// implicit def and a subregister insert, and save the register we create.
for (unsigned Idx = 0; Idx < NumInsertRegs; ++Idx) {
- unsigned ImpDefReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
+ Register ImpDefReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
MachineInstr &ImpDefMI =
*BuildMI(MBB, I, I.getDebugLoc(), TII.get(TargetOpcode::IMPLICIT_DEF),
ImpDefReg);
// Now, create the subregister insert from SrcReg.
- unsigned InsertReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
+ Register InsertReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass);
MachineInstr &InsMI =
*BuildMI(MBB, I, I.getDebugLoc(),
TII.get(TargetOpcode::INSERT_SUBREG), InsertReg)
@@ -2653,15 +2653,15 @@
// create the copies.
//
// Perform the first copy separately as a subregister copy.
- unsigned CopyTo = I.getOperand(0).getReg();
+ Register CopyTo = I.getOperand(0).getReg();
auto FirstCopy = MIB.buildInstr(TargetOpcode::COPY, {CopyTo}, {})
.addReg(InsertRegs[0], 0, ExtractSubReg);
constrainSelectedInstRegOperands(*FirstCopy, TII, TRI, RBI);
// Now, perform the remaining copies as vector lane copies.
unsigned LaneIdx = 1;
- for (unsigned InsReg : InsertRegs) {
- unsigned CopyTo = I.getOperand(LaneIdx).getReg();
+ for (Register InsReg : InsertRegs) {
+ Register CopyTo = I.getOperand(LaneIdx).getReg();
MachineInstr &CopyInst =
*BuildMI(MBB, I, I.getDebugLoc(), TII.get(CopyOpc), CopyTo)
.addUse(InsReg)
@@ -2689,9 +2689,9 @@
MachineInstr &I, MachineRegisterInfo &MRI) const {
assert(I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
"Unexpected opcode");
- unsigned Dst = I.getOperand(0).getReg();
- unsigned Op1 = I.getOperand(1).getReg();
- unsigned Op2 = I.getOperand(2).getReg();
+ Register Dst = I.getOperand(0).getReg();
+ Register Op1 = I.getOperand(1).getReg();
+ Register Op2 = I.getOperand(2).getReg();
MachineIRBuilder MIRBuilder(I);
MachineInstr *ConcatMI = emitVectorConcat(Dst, Op1, Op2, MIRBuilder);
if (!ConcatMI)
@@ -2807,7 +2807,7 @@
}
MachineInstr *AArch64InstructionSelector::emitVectorConcat(
- Optional<unsigned> Dst, unsigned Op1, unsigned Op2,
+ Optional<Register> Dst, Register Op1, Register Op2,
MachineIRBuilder &MIRBuilder) const {
// We implement a vector concat by:
// 1. Use scalar_to_vector to insert the lower vector into the larger dest
@@ -2900,14 +2900,14 @@
}
MachineInstr *
-AArch64InstructionSelector::emitCSetForICMP(unsigned DefReg, unsigned Pred,
+AArch64InstructionSelector::emitCSetForICMP(Register DefReg, unsigned Pred,
MachineIRBuilder &MIRBuilder) const {
// CSINC increments the result when the predicate is false. Invert it.
const AArch64CC::CondCode InvCC = changeICMPPredToAArch64CC(
CmpInst::getInversePredicate((CmpInst::Predicate)Pred));
auto I =
MIRBuilder
- .buildInstr(AArch64::CSINCWr, {DefReg}, {AArch64::WZR, AArch64::WZR})
+ .buildInstr(AArch64::CSINCWr, {DefReg},
+ {Register(AArch64::WZR), Register(AArch64::WZR)})
.addImm(InvCC);
constrainSelectedInstRegOperands(*I, TII, TRI, RBI);
return &*I;
@@ -3011,7 +3011,7 @@
// cmn z, y
// Helper lambda to find the def.
- auto FindDef = [&](unsigned VReg) {
+ auto FindDef = [&](Register VReg) {
MachineInstr *Def = MRI.getVRegDef(VReg);
while (Def) {
if (Def->getOpcode() != TargetOpcode::COPY)
@@ -3091,7 +3091,7 @@
(MRI.getType(I.getOperand(2).getReg()).getSizeInBits() == 32);
auto ImmFns = selectArithImmed(I.getOperand(3));
unsigned Opc = OpcTable[Is32Bit][ImmFns.hasValue()];
- unsigned ZReg = Is32Bit ? AArch64::WZR : AArch64::XZR;
+ Register ZReg = Is32Bit ? AArch64::WZR : AArch64::XZR;
auto CmpMI = MIRBuilder.buildInstr(Opc, {ZReg}, {I.getOperand(2).getReg()});
@@ -3145,7 +3145,7 @@
if (!UndefMI)
return false;
// Match the scalar being splatted.
- unsigned ScalarReg = InsMI->getOperand(2).getReg();
+ Register ScalarReg = InsMI->getOperand(2).getReg();
const RegisterBank *ScalarRB = RBI.getRegBank(ScalarReg, MRI, TRI);
// Match the index constant 0.
int64_t Index = 0;
@@ -3206,9 +3206,9 @@
if (tryOptVectorShuffle(I))
return true;
const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
- unsigned Src1Reg = I.getOperand(1).getReg();
+ Register Src1Reg = I.getOperand(1).getReg();
const LLT Src1Ty = MRI.getType(Src1Reg);
- unsigned Src2Reg = I.getOperand(2).getReg();
+ Register Src2Reg = I.getOperand(2).getReg();
const LLT Src2Ty = MRI.getType(Src2Reg);
MachineBasicBlock &MBB = *I.getParent();
@@ -3302,7 +3302,7 @@
}
MachineInstr *AArch64InstructionSelector::emitLaneInsert(
- Optional<unsigned> DstReg, unsigned SrcReg, unsigned EltReg,
+ Optional<Register> DstReg, Register SrcReg, Register EltReg,
unsigned LaneIdx, const RegisterBank &RB,
MachineIRBuilder &MIRBuilder) const {
MachineInstr *InsElt = nullptr;
@@ -3337,12 +3337,12 @@
assert(I.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT);
// Get information on the destination.
- unsigned DstReg = I.getOperand(0).getReg();
+ Register DstReg = I.getOperand(0).getReg();
const LLT DstTy = MRI.getType(DstReg);
unsigned VecSize = DstTy.getSizeInBits();
// Get information on the element we want to insert into the destination.
- unsigned EltReg = I.getOperand(2).getReg();
+ Register EltReg = I.getOperand(2).getReg();
const LLT EltTy = MRI.getType(EltReg);
unsigned EltSize = EltTy.getSizeInBits();
if (EltSize < 16 || EltSize > 64)
@@ -3350,14 +3350,14 @@
// Find the definition of the index. Bail out if it's not defined by a
// G_CONSTANT.
- unsigned IdxReg = I.getOperand(3).getReg();
+ Register IdxReg = I.getOperand(3).getReg();
auto VRegAndVal = getConstantVRegValWithLookThrough(IdxReg, MRI);
if (!VRegAndVal)
return false;
unsigned LaneIdx = VRegAndVal->Value;
// Perform the lane insert.
- unsigned SrcReg = I.getOperand(1).getReg();
+ Register SrcReg = I.getOperand(1).getReg();
const RegisterBank &EltRB = *RBI.getRegBank(EltReg, MRI, TRI);
MachineIRBuilder MIRBuilder(I);
@@ -3380,7 +3380,7 @@
if (VecSize < 128) {
// If we had to widen to perform the insert, then we have to demote back to
// the original size to get the result we want.
- unsigned DemoteVec = InsMI->getOperand(0).getReg();
+ Register DemoteVec = InsMI->getOperand(0).getReg();
const TargetRegisterClass *RC =
getMinClassForRegBank(*RBI.getRegBank(DemoteVec, MRI, TRI), VecSize);
if (RC != &AArch64::FPR32RegClass && RC != &AArch64::FPR64RegClass) {
@@ -3428,7 +3428,7 @@
if (!ScalarToVec)
return false;
- unsigned DstVec = ScalarToVec->getOperand(0).getReg();
+ Register DstVec = ScalarToVec->getOperand(0).getReg();
unsigned DstSize = DstTy.getSizeInBits();
// Keep track of the last MI we inserted. Later on, we might be able to save
@@ -3464,8 +3464,8 @@
return false;
}
- unsigned Reg = MRI.createVirtualRegister(RC);
- unsigned DstReg = I.getOperand(0).getReg();
+ Register Reg = MRI.createVirtualRegister(RC);
+ Register DstReg = I.getOperand(0).getReg();
MIRBuilder.buildInstr(TargetOpcode::COPY, {DstReg}, {})
.addReg(DstVec, 0, SubReg);
@@ -3531,17 +3531,17 @@
MIRBuilder.buildInstr(AArch64::BRK, {}, {}).addImm(0xF000);
break;
case Intrinsic::aarch64_stlxr:
- unsigned StatReg = I.getOperand(0).getReg();
+ Register StatReg = I.getOperand(0).getReg();
assert(RBI.getSizeInBits(StatReg, MRI, TRI) == 32 &&
"Status register must be 32 bits!");
- unsigned SrcReg = I.getOperand(2).getReg();
+ Register SrcReg = I.getOperand(2).getReg();
if (RBI.getSizeInBits(SrcReg, MRI, TRI) != 64) {
LLVM_DEBUG(dbgs() << "Only support 64-bit sources right now.\n");
return false;
}
- unsigned PtrReg = I.getOperand(3).getReg();
+ Register PtrReg = I.getOperand(3).getReg();
assert(MRI.getType(PtrReg).isPointer() && "Expected pointer operand");
// Expect only one memory operand.
@@ -3573,8 +3573,8 @@
default:
break;
case Intrinsic::aarch64_crypto_sha1h:
- unsigned DstReg = I.getOperand(0).getReg();
- unsigned SrcReg = I.getOperand(2).getReg();
+ Register DstReg = I.getOperand(0).getReg();
+ Register SrcReg = I.getOperand(2).getReg();
// FIXME: Should this be an assert?
if (MRI.getType(DstReg).getSizeInBits() != 32 ||
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index 5751a89..c2340d2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -37,17 +37,17 @@
MachineInstrBuilder MIB;
- unsigned getStackAddress(uint64_t Size, int64_t Offset,
+ Register getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO) override {
llvm_unreachable("not implemented");
}
- void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
+ void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
MachinePointerInfo &MPO, CCValAssign &VA) override {
llvm_unreachable("not implemented");
}
- void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
+ void assignValueToReg(Register ValVReg, Register PhysReg,
CCValAssign &VA) override {
MIB.addUse(PhysReg);
MIRBuilder.buildCopy(PhysReg, ValVReg);
@@ -111,7 +111,7 @@
return true;
}
-unsigned AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &MIRBuilder,
+Register AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &MIRBuilder,
Type *ParamTy,
uint64_t Offset) const {
@@ -122,12 +122,12 @@
const DataLayout &DL = F.getParent()->getDataLayout();
PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
LLT PtrType = getLLTForType(*PtrTy, DL);
- unsigned DstReg = MRI.createGenericVirtualRegister(PtrType);
- unsigned KernArgSegmentPtr =
+ Register DstReg = MRI.createGenericVirtualRegister(PtrType);
+ Register KernArgSegmentPtr =
MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
- unsigned KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);
+ Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);
- unsigned OffsetReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
+ Register OffsetReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
MIRBuilder.buildConstant(OffsetReg, Offset);
MIRBuilder.buildGEP(DstReg, KernArgSegmentVReg, OffsetReg);
@@ -156,7 +156,7 @@
MIRBuilder.buildLoad(DstReg, PtrReg, *MMO);
}
-static unsigned findFirstFreeSGPR(CCState &CCInfo) {
+static Register findFirstFreeSGPR(CCState &CCInfo) {
unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
@@ -215,27 +215,27 @@
// FIXME: How should these inputs interact with inreg / custom SGPR inputs?
if (Info->hasPrivateSegmentBuffer()) {
- unsigned PrivateSegmentBufferReg = Info->addPrivateSegmentBuffer(*TRI);
+ Register PrivateSegmentBufferReg = Info->addPrivateSegmentBuffer(*TRI);
MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SReg_128RegClass);
CCInfo.AllocateReg(PrivateSegmentBufferReg);
}
if (Info->hasDispatchPtr()) {
- unsigned DispatchPtrReg = Info->addDispatchPtr(*TRI);
+ Register DispatchPtrReg = Info->addDispatchPtr(*TRI);
// FIXME: Need to add reg as live-in
CCInfo.AllocateReg(DispatchPtrReg);
}
if (Info->hasQueuePtr()) {
- unsigned QueuePtrReg = Info->addQueuePtr(*TRI);
+ Register QueuePtrReg = Info->addQueuePtr(*TRI);
// FIXME: Need to add reg as live-in
CCInfo.AllocateReg(QueuePtrReg);
}
if (Info->hasKernargSegmentPtr()) {
- unsigned InputPtrReg = Info->addKernargSegmentPtr(*TRI);
+ Register InputPtrReg = Info->addKernargSegmentPtr(*TRI);
const LLT P2 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
- unsigned VReg = MRI.createGenericVirtualRegister(P2);
+ Register VReg = MRI.createGenericVirtualRegister(P2);
MRI.addLiveIn(InputPtrReg, VReg);
MIRBuilder.getMBB().addLiveIn(InputPtrReg);
MIRBuilder.buildCopy(VReg, InputPtrReg);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.h
index 99b8244..b46e54b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.h
@@ -22,7 +22,7 @@
class AMDGPUTargetLowering;
class AMDGPUCallLowering: public CallLowering {
- unsigned lowerParameterPtr(MachineIRBuilder &MIRBuilder, Type *ParamTy,
+ Register lowerParameterPtr(MachineIRBuilder &MIRBuilder, Type *ParamTy,
uint64_t Offset) const;
void lowerParameter(MachineIRBuilder &MIRBuilder, Type *ParamTy,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index ca415833..81ead53 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -835,12 +835,12 @@
auto SegmentNull = MIRBuilder.buildConstant(DstTy, NullVal);
auto FlatNull = MIRBuilder.buildConstant(SrcTy, 0);
- unsigned PtrLo32 = MRI.createGenericVirtualRegister(DstTy);
+ Register PtrLo32 = MRI.createGenericVirtualRegister(DstTy);
// Extract low 32-bits of the pointer.
MIRBuilder.buildExtract(PtrLo32, Src, 0);
- unsigned CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
+ Register CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, FlatNull.getReg(0));
MIRBuilder.buildSelect(Dst, CmpRes, PtrLo32, SegmentNull.getReg(0));
@@ -856,15 +856,15 @@
auto FlatNull =
MIRBuilder.buildConstant(DstTy, TM.getNullPointerValue(DestAS));
- unsigned ApertureReg = getSegmentAperture(DestAS, MRI, MIRBuilder);
+ Register ApertureReg = getSegmentAperture(DestAS, MRI, MIRBuilder);
- unsigned CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
+ Register CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, SegmentNull.getReg(0));
- unsigned BuildPtr = MRI.createGenericVirtualRegister(DstTy);
+ Register BuildPtr = MRI.createGenericVirtualRegister(DstTy);
// Coerce the type of the low half of the result so we can use merge_values.
- unsigned SrcAsInt = MRI.createGenericVirtualRegister(LLT::scalar(32));
+ Register SrcAsInt = MRI.createGenericVirtualRegister(LLT::scalar(32));
MIRBuilder.buildInstr(TargetOpcode::G_PTRTOINT)
.addDef(SrcAsInt)
.addUse(Src);
@@ -883,7 +883,7 @@
MachineIRBuilder &MIRBuilder) const {
MIRBuilder.setInstr(MI);
- unsigned Src = MI.getOperand(1).getReg();
+ Register Src = MI.getOperand(1).getReg();
LLT Ty = MRI.getType(Src);
assert(Ty.isScalar() && Ty.getSizeInBits() == 64);
@@ -913,7 +913,7 @@
const LLT S1 = LLT::scalar(1);
const LLT S64 = LLT::scalar(64);
- unsigned Src = MI.getOperand(1).getReg();
+ Register Src = MI.getOperand(1).getReg();
assert(MRI.getType(Src) == S64);
// result = trunc(src)
@@ -959,12 +959,12 @@
const LLT S32 = LLT::scalar(32);
const LLT S64 = LLT::scalar(64);
- unsigned Src = MI.getOperand(1).getReg();
+ Register Src = MI.getOperand(1).getReg();
assert(MRI.getType(Src) == S64);
// TODO: Should this use extract since the low half is unused?
auto Unmerge = B.buildUnmerge({S32, S32}, Src);
- unsigned Hi = Unmerge.getReg(1);
+ Register Hi = Unmerge.getReg(1);
// Extract the upper half, since this is where we will find the sign and
// exponent.
@@ -1001,8 +1001,8 @@
MachineIRBuilder &B, bool Signed) const {
B.setInstr(MI);
- unsigned Dst = MI.getOperand(0).getReg();
- unsigned Src = MI.getOperand(1).getReg();
+ Register Dst = MI.getOperand(0).getReg();
+ Register Src = MI.getOperand(1).getReg();
const LLT S64 = LLT::scalar(64);
const LLT S32 = LLT::scalar(32);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index a5e24ef..c8c40f0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -828,14 +828,14 @@
}
case AMDGPU::G_SEXT:
case AMDGPU::G_ZEXT: {
- unsigned SrcReg = MI.getOperand(1).getReg();
+ Register SrcReg = MI.getOperand(1).getReg();
LLT SrcTy = MRI.getType(SrcReg);
if (SrcTy != LLT::scalar(1))
return;
MachineIRBuilder B(MI);
bool Signed = Opc == AMDGPU::G_SEXT;
- unsigned DstReg = MI.getOperand(0).getReg();
+ Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
const RegisterBank *SrcBank = getRegBank(SrcReg, MRI, *TRI);
if (SrcBank->getID() == AMDGPU::SCCRegBankID ||
diff --git a/llvm/lib/Target/ARM/ARMCallLowering.cpp b/llvm/lib/Target/ARM/ARMCallLowering.cpp
index c298576..fa54a9e 100644
--- a/llvm/lib/Target/ARM/ARMCallLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMCallLowering.cpp
@@ -90,27 +90,27 @@
MachineInstrBuilder &MIB, CCAssignFn *AssignFn)
: ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}
- unsigned getStackAddress(uint64_t Size, int64_t Offset,
+ Register getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO) override {
assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
"Unsupported size");
LLT p0 = LLT::pointer(0, 32);
LLT s32 = LLT::scalar(32);
- unsigned SPReg = MRI.createGenericVirtualRegister(p0);
- MIRBuilder.buildCopy(SPReg, ARM::SP);
+ Register SPReg = MRI.createGenericVirtualRegister(p0);
+ MIRBuilder.buildCopy(SPReg, Register(ARM::SP));
- unsigned OffsetReg = MRI.createGenericVirtualRegister(s32);
+ Register OffsetReg = MRI.createGenericVirtualRegister(s32);
MIRBuilder.buildConstant(OffsetReg, Offset);
- unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
+ Register AddrReg = MRI.createGenericVirtualRegister(p0);
MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
return AddrReg;
}
- void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
+ void assignValueToReg(Register ValVReg, Register PhysReg,
CCValAssign &VA) override {
assert(VA.isRegLoc() && "Value shouldn't be assigned to reg");
assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");
@@ -118,17 +118,17 @@
assert(VA.getValVT().getSizeInBits() <= 64 && "Unsupported value size");
assert(VA.getLocVT().getSizeInBits() <= 64 && "Unsupported location size");
- unsigned ExtReg = extendRegister(ValVReg, VA);
+ Register ExtReg = extendRegister(ValVReg, VA);
MIRBuilder.buildCopy(PhysReg, ExtReg);
MIB.addUse(PhysReg, RegState::Implicit);
}
- void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
+ void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
MachinePointerInfo &MPO, CCValAssign &VA) override {
assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
"Unsupported size");
- unsigned ExtReg = extendRegister(ValVReg, VA);
+ Register ExtReg = extendRegister(ValVReg, VA);
auto MMO = MIRBuilder.getMF().getMachineMemOperand(
MPO, MachineMemOperand::MOStore, VA.getLocVT().getStoreSize(),
/* Alignment */ 1);
@@ -298,7 +298,7 @@
bool isArgumentHandler() const override { return true; }
- unsigned getStackAddress(uint64_t Size, int64_t Offset,
+ Register getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO) override {
assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
"Unsupported size");
@@ -315,7 +315,7 @@
return AddrReg;
}
- void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
+ void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
MachinePointerInfo &MPO, CCValAssign &VA) override {
assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
"Unsupported size");
@@ -336,14 +336,14 @@
}
}
- void buildLoad(unsigned Val, unsigned Addr, uint64_t Size, unsigned Alignment,
+ void buildLoad(Register Val, Register Addr, uint64_t Size, unsigned Alignment,
MachinePointerInfo &MPO) {
auto MMO = MIRBuilder.getMF().getMachineMemOperand(
MPO, MachineMemOperand::MOLoad, Size, Alignment);
MIRBuilder.buildLoad(Val, Addr, *MMO);
}
- void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
+ void assignValueToReg(Register ValVReg, Register PhysReg,
CCValAssign &VA) override {
assert(VA.isRegLoc() && "Value shouldn't be assigned to reg");
assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");
diff --git a/llvm/lib/Target/ARM/ARMLegalizerInfo.cpp b/llvm/lib/Target/ARM/ARMLegalizerInfo.cpp
index 701c736..796eaee 100644
--- a/llvm/lib/Target/ARM/ARMLegalizerInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMLegalizerInfo.cpp
@@ -423,7 +423,7 @@
auto *ArgTy = OpSize == 32 ? Type::getFloatTy(Ctx) : Type::getDoubleTy(Ctx);
auto *RetTy = Type::getInt32Ty(Ctx);
- SmallVector<unsigned, 2> Results;
+ SmallVector<Register, 2> Results;
for (auto Libcall : Libcalls) {
auto LibcallResult = MRI.createGenericVirtualRegister(LLT::scalar(32));
auto Status =
diff --git a/llvm/lib/Target/Mips/MipsCallLowering.cpp b/llvm/lib/Target/Mips/MipsCallLowering.cpp
index 097ad0d..dc4eaf8 100644
--- a/llvm/lib/Target/Mips/MipsCallLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsCallLowering.cpp
@@ -93,7 +93,7 @@
void assignValueToReg(Register ValVReg, const CCValAssign &VA,
const EVT &VT) override;
- unsigned getStackAddress(const CCValAssign &VA,
+ Register getStackAddress(const CCValAssign &VA,
MachineMemOperand *&MMO) override;
void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;
@@ -134,7 +134,7 @@
const EVT &VT) {
const MipsSubtarget &STI =
static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
- unsigned PhysReg = VA.getLocReg();
+ Register PhysReg = VA.getLocReg();
if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
const MipsSubtarget &STI =
static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
@@ -173,7 +173,7 @@
}
}
-unsigned IncomingValueHandler::getStackAddress(const CCValAssign &VA,
+Register IncomingValueHandler::getStackAddress(const CCValAssign &VA,
MachineMemOperand *&MMO) {
MachineFunction &MF = MIRBuilder.getMF();
unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
@@ -188,7 +188,7 @@
unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, Size, Align);
- unsigned AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 32));
+ Register AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 32));
MIRBuilder.buildFrameIndex(AddrReg, FI);
return AddrReg;
@@ -228,7 +228,7 @@
void assignValueToReg(Register ValVReg, const CCValAssign &VA,
const EVT &VT) override;
- unsigned getStackAddress(const CCValAssign &VA,
+ Register getStackAddress(const CCValAssign &VA,
MachineMemOperand *&MMO) override;
void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;
@@ -237,7 +237,7 @@
ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
Register ArgsReg, const EVT &VT) override;
- unsigned extendRegister(Register ValReg, const CCValAssign &VA);
+ Register extendRegister(Register ValReg, const CCValAssign &VA);
MachineInstrBuilder &MIB;
};
@@ -274,13 +274,13 @@
.constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
*STI.getRegBankInfo());
} else {
- unsigned ExtReg = extendRegister(ValVReg, VA);
+ Register ExtReg = extendRegister(ValVReg, VA);
MIRBuilder.buildCopy(PhysReg, ExtReg);
MIB.addUse(PhysReg, RegState::Implicit);
}
}
-unsigned OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
+Register OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
MachineMemOperand *&MMO) {
MachineFunction &MF = MIRBuilder.getMF();
const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
@@ -288,7 +288,7 @@
LLT p0 = LLT::pointer(0, 32);
LLT s32 = LLT::scalar(32);
Register SPReg = MRI.createGenericVirtualRegister(p0);
- MIRBuilder.buildCopy(SPReg, Mips::SP);
+ MIRBuilder.buildCopy(SPReg, Register(Mips::SP));
Register OffsetReg = MRI.createGenericVirtualRegister(s32);
unsigned Offset = VA.getLocMemOffset();
@@ -310,11 +310,11 @@
const CCValAssign &VA) {
MachineMemOperand *MMO;
Register Addr = getStackAddress(VA, MMO);
- unsigned ExtReg = extendRegister(ValVReg, VA);
+ Register ExtReg = extendRegister(ValVReg, VA);
MIRBuilder.buildStore(ExtReg, Addr, *MMO);
}
-unsigned OutgoingValueHandler::extendRegister(Register ValReg,
+Register OutgoingValueHandler::extendRegister(Register ValReg,
const CCValAssign &VA) {
LLT LocTy{VA.getLocVT()};
switch (VA.getLocInfo()) {
@@ -530,7 +530,7 @@
Callee.isReg() || IsCalleeGlobalPIC ? Mips::JALRPseudo : Mips::JAL);
MIB.addDef(Mips::SP, RegState::Implicit);
if (IsCalleeGlobalPIC) {
- unsigned CalleeReg =
+ Register CalleeReg =
MF.getRegInfo().createGenericVirtualRegister(LLT::pointer(0, 32));
MachineInstr *CalleeGlobalValue =
MIRBuilder.buildGlobalValue(CalleeReg, Callee.getGlobal());
@@ -583,8 +583,8 @@
if (IsCalleeGlobalPIC) {
MIRBuilder.buildCopy(
- Mips::GP,
- MF.getInfo<MipsFunctionInfo>()->getGlobalBaseRegForGlobalISel());
+ Register(Mips::GP),
+ MF.getInfo<MipsFunctionInfo>()->getGlobalBaseRegForGlobalISel());
MIB.addDef(Mips::GP, RegState::Implicit);
}
MIRBuilder.insertInstr(MIB);
diff --git a/llvm/lib/Target/Mips/MipsCallLowering.h b/llvm/lib/Target/Mips/MipsCallLowering.h
index 4eacb7c..4ed75a3 100644
--- a/llvm/lib/Target/Mips/MipsCallLowering.h
+++ b/llvm/lib/Target/Mips/MipsCallLowering.h
@@ -45,7 +45,7 @@
private:
bool assign(Register VReg, const CCValAssign &VA, const EVT &VT);
- virtual unsigned getStackAddress(const CCValAssign &VA,
+ virtual Register getStackAddress(const CCValAssign &VA,
MachineMemOperand *&MMO) = 0;
virtual void assignValueToReg(Register ValVReg, const CCValAssign &VA,
diff --git a/llvm/lib/Target/Mips/MipsInstructionSelector.cpp b/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
index fe01b15..8e597dc 100644
--- a/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
+++ b/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
@@ -38,7 +38,7 @@
private:
bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
- bool materialize32BitImm(unsigned DestReg, APInt Imm,
+ bool materialize32BitImm(Register DestReg, APInt Imm,
MachineIRBuilder &B) const;
bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
@@ -80,7 +80,7 @@
bool MipsInstructionSelector::selectCopy(MachineInstr &I,
MachineRegisterInfo &MRI) const {
- unsigned DstReg = I.getOperand(0).getReg();
+ Register DstReg = I.getOperand(0).getReg();
if (TargetRegisterInfo::isPhysicalRegister(DstReg))
return true;
@@ -104,12 +104,12 @@
return true;
}
-bool MipsInstructionSelector::materialize32BitImm(unsigned DestReg, APInt Imm,
+bool MipsInstructionSelector::materialize32BitImm(Register DestReg, APInt Imm,
MachineIRBuilder &B) const {
assert(Imm.getBitWidth() == 32 && "Unsupported immediate size.");
// Ori zero extends immediate. Used for values with zeros in high 16 bits.
if (Imm.getHiBits(16).isNullValue()) {
- MachineInstr *Inst = B.buildInstr(Mips::ORi, {DestReg}, {Mips::ZERO})
+ MachineInstr *Inst =
+ B.buildInstr(Mips::ORi, {DestReg}, {Register(Mips::ZERO)})
.addImm(Imm.getLoBits(16).getLimitedValue());
return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
}
@@ -121,12 +121,12 @@
}
// ADDiu sign extends immediate. Used for values with 1s in high 17 bits.
if (Imm.isSignedIntN(16)) {
- MachineInstr *Inst = B.buildInstr(Mips::ADDiu, {DestReg}, {Mips::ZERO})
+ MachineInstr *Inst =
+ B.buildInstr(Mips::ADDiu, {DestReg}, {Register(Mips::ZERO)})
.addImm(Imm.getLoBits(16).getLimitedValue());
return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
}
// Values that cannot be materialized with a single immediate instruction.
- unsigned LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass);
+ Register LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass);
MachineInstr *LUi = B.buildInstr(Mips::LUi, {LUiReg}, {})
.addImm(Imm.getHiBits(16).getLimitedValue());
MachineInstr *ORi = B.buildInstr(Mips::ORi, {DestReg}, {LUiReg})
@@ -201,7 +201,7 @@
switch (I.getOpcode()) {
case G_UMULH: {
- unsigned PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
+ Register PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
MachineInstr *PseudoMULTu, *PseudoMove;
PseudoMULTu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMULTu))
@@ -242,7 +242,7 @@
break;
}
case G_PHI: {
- const unsigned DestReg = I.getOperand(0).getReg();
+ const Register DestReg = I.getOperand(0).getReg();
const unsigned DestRegBank = RBI.getRegBank(DestReg, MRI, TRI)->getID();
const unsigned OpSize = MRI.getType(DestReg).getSizeInBits();
@@ -257,7 +257,7 @@
case G_LOAD:
case G_ZEXTLOAD:
case G_SEXTLOAD: {
- const unsigned DestReg = I.getOperand(0).getReg();
+ const Register DestReg = I.getOperand(0).getReg();
const unsigned DestRegBank = RBI.getRegBank(DestReg, MRI, TRI)->getID();
const unsigned OpSize = MRI.getType(DestReg).getSizeInBits();
const unsigned OpMemSizeInBytes = (*I.memoperands_begin())->getSize();
@@ -281,7 +281,7 @@
case G_UREM:
case G_SDIV:
case G_SREM: {
- unsigned HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
+ Register HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV;
bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV;
@@ -328,7 +328,7 @@
unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
if (Size == 32) {
- unsigned GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
+ Register GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
MachineIRBuilder B(I);
if (!materialize32BitImm(GPRReg, APImm, B))
return false;
@@ -339,8 +339,8 @@
return false;
}
if (Size == 64) {
- unsigned GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
- unsigned GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass);
+ Register GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
+ Register GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass);
MachineIRBuilder B(I);
if (!materialize32BitImm(GPRRegHigh, APImm.getHiBits(32).trunc(32), B))
return false;
@@ -419,7 +419,7 @@
return false;
if (GVal->hasLocalLinkage()) {
- unsigned LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
+ Register LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
LWGOT->getOperand(0).setReg(LWGOTDef);
MachineInstr *ADDiu =
@@ -432,7 +432,7 @@
return false;
}
} else {
- unsigned LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
+ Register LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
MachineInstr *LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
.addDef(LUiReg)
@@ -455,8 +455,9 @@
}
case G_ICMP: {
struct Instr {
- unsigned Opcode, Def, LHS, RHS;
- Instr(unsigned Opcode, unsigned Def, unsigned LHS, unsigned RHS)
+ unsigned Opcode;
+ Register Def, LHS, RHS;
+ Instr(unsigned Opcode, Register Def, Register LHS, Register RHS)
: Opcode(Opcode), Def(Def), LHS(LHS), RHS(RHS){};
bool hasImm() const {
@@ -467,10 +468,10 @@
};
SmallVector<struct Instr, 2> Instructions;
- unsigned ICMPReg = I.getOperand(0).getReg();
- unsigned Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
- unsigned LHS = I.getOperand(2).getReg();
- unsigned RHS = I.getOperand(3).getReg();
+ Register ICMPReg = I.getOperand(0).getReg();
+ Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
+ Register LHS = I.getOperand(2).getReg();
+ Register RHS = I.getOperand(3).getReg();
CmpInst::Predicate Cond =
static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());
diff --git a/llvm/lib/Target/Mips/MipsMachineFunction.cpp b/llvm/lib/Target/Mips/MipsMachineFunction.cpp
index d489fac..85b20fc 100644
--- a/llvm/lib/Target/Mips/MipsMachineFunction.cpp
+++ b/llvm/lib/Target/Mips/MipsMachineFunction.cpp
@@ -44,14 +44,14 @@
return Mips::GPR32RegClass;
}
-unsigned MipsFunctionInfo::getGlobalBaseReg() {
+Register MipsFunctionInfo::getGlobalBaseReg() {
if (!GlobalBaseReg)
GlobalBaseReg =
MF.getRegInfo().createVirtualRegister(&getGlobalBaseRegClass(MF));
return GlobalBaseReg;
}
-unsigned MipsFunctionInfo::getGlobalBaseRegForGlobalISel() {
+Register MipsFunctionInfo::getGlobalBaseRegForGlobalISel() {
if (!GlobalBaseReg) {
getGlobalBaseReg();
initGlobalBaseReg();
diff --git a/llvm/lib/Target/Mips/MipsMachineFunction.h b/llvm/lib/Target/Mips/MipsMachineFunction.h
index d9d53c8..aaa1e0e 100644
--- a/llvm/lib/Target/Mips/MipsMachineFunction.h
+++ b/llvm/lib/Target/Mips/MipsMachineFunction.h
@@ -32,8 +32,8 @@
void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
bool globalBaseRegSet() const;
- unsigned getGlobalBaseReg();
- unsigned getGlobalBaseRegForGlobalISel();
+ Register getGlobalBaseReg();
+ Register getGlobalBaseRegForGlobalISel();
// Insert instructions to initialize the global base register in the
// first MBB of the function.
diff --git a/llvm/lib/Target/X86/X86CallLowering.cpp b/llvm/lib/Target/X86/X86CallLowering.cpp
index 2592ec0..3f5b007 100644
--- a/llvm/lib/Target/X86/X86CallLowering.cpp
+++ b/llvm/lib/Target/X86/X86CallLowering.cpp
@@ -101,28 +101,28 @@
DL(MIRBuilder.getMF().getDataLayout()),
STI(MIRBuilder.getMF().getSubtarget<X86Subtarget>()) {}
- unsigned getStackAddress(uint64_t Size, int64_t Offset,
+ Register getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO) override {
LLT p0 = LLT::pointer(0, DL.getPointerSizeInBits(0));
LLT SType = LLT::scalar(DL.getPointerSizeInBits(0));
- unsigned SPReg = MRI.createGenericVirtualRegister(p0);
+ Register SPReg = MRI.createGenericVirtualRegister(p0);
MIRBuilder.buildCopy(SPReg, STI.getRegisterInfo()->getStackRegister());
- unsigned OffsetReg = MRI.createGenericVirtualRegister(SType);
+ Register OffsetReg = MRI.createGenericVirtualRegister(SType);
MIRBuilder.buildConstant(OffsetReg, Offset);
- unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
+ Register AddrReg = MRI.createGenericVirtualRegister(p0);
MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
return AddrReg;
}
- void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
+ void assignValueToReg(Register ValVReg, Register PhysReg,
CCValAssign &VA) override {
MIB.addUse(PhysReg, RegState::Implicit);
- unsigned ExtReg;
+ Register ExtReg;
// If we are copying the value to a physical register with the
// size larger than the size of the value itself - build AnyExt
// to the size of the register first and only then do the copy.
@@ -143,9 +143,9 @@
MIRBuilder.buildCopy(PhysReg, ExtReg);
}
- void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
+ void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
MachinePointerInfo &MPO, CCValAssign &VA) override {
- unsigned ExtReg = extendRegister(ValVReg, VA);
+ Register ExtReg = extendRegister(ValVReg, VA);
auto MMO = MIRBuilder.getMF().getMachineMemOperand(
MPO, MachineMemOperand::MOStore, VA.getLocVT().getStoreSize(),
/* Alignment */ 1);
@@ -230,7 +230,7 @@
bool isArgumentHandler() const override { return true; }
- unsigned getStackAddress(uint64_t Size, int64_t Offset,
+ Register getStackAddress(uint64_t Size, int64_t Offset,
MachinePointerInfo &MPO) override {
auto &MFI = MIRBuilder.getMF().getFrameInfo();
int FI = MFI.CreateFixedObject(Size, Offset, true);
@@ -242,7 +242,7 @@
return AddrReg;
}
- void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
+ void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
MachinePointerInfo &MPO, CCValAssign &VA) override {
auto MMO = MIRBuilder.getMF().getMachineMemOperand(
MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, Size,
@@ -250,7 +250,7 @@
MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
}
- void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
+ void assignValueToReg(Register ValVReg, Register PhysReg,
CCValAssign &VA) override {
markPhysRegUsed(PhysReg);
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.h b/llvm/lib/Target/X86/X86RegisterInfo.h
index 75c0514..b829208 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.h
+++ b/llvm/lib/Target/X86/X86RegisterInfo.h
@@ -136,13 +136,13 @@
Register getFrameRegister(const MachineFunction &MF) const override;
unsigned getPtrSizedFrameRegister(const MachineFunction &MF) const;
unsigned getPtrSizedStackRegister(const MachineFunction &MF) const;
- unsigned getStackRegister() const { return StackPtr; }
- unsigned getBaseRegister() const { return BasePtr; }
+ Register getStackRegister() const { return StackPtr; }
+ Register getBaseRegister() const { return BasePtr; }
/// Returns physical register used as frame pointer.
/// This will always return the frame pointer register, contrary to
/// getFrameRegister() which returns the "base pointer" in situations
/// involving a stack, frame and base pointer.
- unsigned getFramePtr() const { return FramePtr; }
+ Register getFramePtr() const { return FramePtr; }
// FIXME: Move to FrameInfo
unsigned getSlotSize() const { return SlotSize; }
};