CodeGen: Introduce a class for registers
Avoids using a plain unsigned for registers throughout codegen.
Doesn't attempt to change every register use, just a little more than
the set of changes needed to keep building after changing the return
type of MachineOperand::getReg().
llvm-svn: 364191
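
The new Register class itself is not part of this excerpt. As a rough
sketch of the kind of wrapper being introduced (assumed interface, not
the verbatim header), the property the call sites below rely on is
implicit conversion to and from unsigned:

  // Sketch only: a thin value wrapper around the existing unsigned
  // register encoding. Implicit conversion keeps the mostly mechanical
  // unsigned -> Register substitutions below compiling against APIs
  // that still traffic in plain unsigned.
  class Register {
    unsigned Reg;

  public:
    Register(unsigned Val = 0) : Reg(Val) {}

    // Interoperate with code that still passes registers as unsigned.
    operator unsigned() const { return Reg; }

    // A default-constructed Register() is the "no register" value,
    // matching the old convention of returning 0.
    bool isValid() const { return Reg != 0; }
  };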
diff --git a/llvm/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp b/llvm/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp
index ee37552..2435daa 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp
@@ -38,12 +38,12 @@
// If @MI is a DBG_VALUE with debug value described by a
// defined register, returns the number of this register.
// In the other case, returns 0.
-static unsigned isDescribedByReg(const MachineInstr &MI) {
+static Register isDescribedByReg(const MachineInstr &MI) {
assert(MI.isDebugValue());
assert(MI.getNumOperands() == 4);
// If location of variable is described using a register (directly or
// indirectly), this register is always a first operand.
- return MI.getOperand(0).isReg() ? MI.getOperand(0).getReg() : 0;
+ return MI.getOperand(0).isReg() ? MI.getOperand(0).getReg() : Register();
}
bool DbgValueHistoryMap::startDbgValue(InlinedEntity Var,
diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index 9372740..c996312 100644
--- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -27,8 +27,8 @@
void CallLowering::anchor() {}
bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
- unsigned ResReg, ArrayRef<unsigned> ArgRegs,
- unsigned SwiftErrorVReg,
+ Register ResReg, ArrayRef<Register> ArgRegs,
+ Register SwiftErrorVReg,
std::function<unsigned()> GetCalleeReg) const {
auto &DL = CS.getParent()->getParent()->getParent()->getDataLayout();
@@ -131,7 +131,7 @@
if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i], CCInfo)) {
// Try to use the register type if we couldn't assign the VT.
if (!Handler.isArgumentHandler() || !CurVT.isValid())
- return false;
+ return false;
CurVT = TLI->getRegisterTypeForCallingConv(
F.getContext(), F.getCallingConv(), EVT(CurVT));
if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i], CCInfo))
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 35c9132..b47f6d9 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -169,7 +169,7 @@
return *Regs;
}
-ArrayRef<unsigned> IRTranslator::getOrCreateVRegs(const Value &Val) {
+ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
auto VRegsIt = VMap.findVRegs(Val);
if (VRegsIt != VMap.vregs_end())
return *VRegsIt->second;
@@ -363,11 +363,11 @@
if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
Ret = nullptr;
- ArrayRef<unsigned> VRegs;
+ ArrayRef<Register> VRegs;
if (Ret)
VRegs = getOrCreateVRegs(*Ret);
- unsigned SwiftErrorVReg = 0;
+ Register SwiftErrorVReg = 0;
if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
&RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
@@ -858,7 +858,7 @@
if (DL->getTypeStoreSize(LI.getType()) == 0)
return true;
- ArrayRef<unsigned> Regs = getOrCreateVRegs(LI);
+ ArrayRef<Register> Regs = getOrCreateVRegs(LI);
ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
unsigned Base = getOrCreateVReg(*LI.getPointerOperand());
@@ -875,7 +875,7 @@
for (unsigned i = 0; i < Regs.size(); ++i) {
- unsigned Addr = 0;
+ Register Addr;
MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);
MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
@@ -899,7 +899,7 @@
if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
return true;
- ArrayRef<unsigned> Vals = getOrCreateVRegs(*SI.getValueOperand());
+ ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
unsigned Base = getOrCreateVReg(*SI.getPointerOperand());
@@ -916,7 +916,7 @@
}
for (unsigned i = 0; i < Vals.size(); ++i) {
- unsigned Addr = 0;
+ Register Addr;
MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);
MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
@@ -958,7 +958,7 @@
MachineIRBuilder &MIRBuilder) {
const Value *Src = U.getOperand(0);
uint64_t Offset = getOffsetFromIndices(U, *DL);
- ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
+ ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
auto &DstRegs = allocateVRegs(U);
@@ -975,8 +975,8 @@
uint64_t Offset = getOffsetFromIndices(U, *DL);
auto &DstRegs = allocateVRegs(U);
ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
- ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
- ArrayRef<unsigned> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
+ ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
+ ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
auto InsertedIt = InsertedRegs.begin();
for (unsigned i = 0; i < DstRegs.size(); ++i) {
@@ -992,9 +992,9 @@
bool IRTranslator::translateSelect(const User &U,
MachineIRBuilder &MIRBuilder) {
unsigned Tst = getOrCreateVReg(*U.getOperand(0));
- ArrayRef<unsigned> ResRegs = getOrCreateVRegs(U);
- ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
- ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
+ ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
+ ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
+ ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
const SelectInst &SI = cast<SelectInst>(U);
uint16_t Flags = 0;
@@ -1186,7 +1186,7 @@
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
MachineIRBuilder &MIRBuilder) {
- ArrayRef<unsigned> ResRegs = getOrCreateVRegs(CI);
+ ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
MIRBuilder.buildInstr(Op)
.addDef(ResRegs[0])
.addDef(ResRegs[1])
@@ -1539,7 +1539,7 @@
unsigned IRTranslator::packRegs(const Value &V,
MachineIRBuilder &MIRBuilder) {
- ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
+ ArrayRef<Register> Regs = getOrCreateVRegs(V);
ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
LLT BigTy = getLLTForType(*V.getType(), *DL);
@@ -1558,7 +1558,7 @@
void IRTranslator::unpackRegs(const Value &V, unsigned Src,
MachineIRBuilder &MIRBuilder) {
- ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
+ ArrayRef<Register> Regs = getOrCreateVRegs(V);
ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
for (unsigned i = 0; i < Regs.size(); ++i)
@@ -1586,12 +1586,12 @@
if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
bool IsSplitType = valueIsSplit(CI);
- unsigned Res = IsSplitType ? MRI->createGenericVirtualRegister(
+ Register Res = IsSplitType ? MRI->createGenericVirtualRegister(
getLLTForType(*CI.getType(), *DL))
: getOrCreateVReg(CI);
- SmallVector<unsigned, 8> Args;
- unsigned SwiftErrorVReg = 0;
+ SmallVector<Register, 8> Args;
+ Register SwiftErrorVReg;
for (auto &Arg: CI.arg_operands()) {
if (CLI->supportSwiftError() && isSwiftError(Arg)) {
LLT Ty = getLLTForType(*Arg->getType(), *DL);
@@ -1622,7 +1622,7 @@
if (translateKnownIntrinsic(CI, ID, MIRBuilder))
return true;
- ArrayRef<unsigned> ResultRegs;
+ ArrayRef<Register> ResultRegs;
if (!CI.getType()->isVoidTy())
ResultRegs = getOrCreateVRegs(CI);
@@ -1690,8 +1690,8 @@
unsigned Res = 0;
if (!I.getType()->isVoidTy())
Res = MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL));
- SmallVector<unsigned, 8> Args;
- unsigned SwiftErrorVReg = 0;
+ SmallVector<Register, 8> Args;
+ Register SwiftErrorVReg;
for (auto &Arg : I.arg_operands()) {
if (CLI->supportSwiftError() && isSwiftError(Arg)) {
LLT Ty = getLLTForType(*Arg->getType(), *DL);
@@ -1776,7 +1776,7 @@
return false;
MBB.addLiveIn(ExceptionReg);
- ArrayRef<unsigned> ResRegs = getOrCreateVRegs(LP);
+ ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
@@ -2069,7 +2069,7 @@
SmallSet<const MachineBasicBlock *, 16> SeenPreds;
for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
auto IRPred = PI->getIncomingBlock(i);
- ArrayRef<unsigned> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
+ ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
if (SeenPreds.count(Pred))
continue;
@@ -2136,7 +2136,7 @@
// Return the scalar if it is a <1 x Ty> vector.
if (CAZ->getNumElements() == 1)
return translate(*CAZ->getElementValue(0u), Reg);
- SmallVector<unsigned, 4> Ops;
+ SmallVector<Register, 4> Ops;
for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
Constant &Elt = *CAZ->getElementValue(i);
Ops.push_back(getOrCreateVReg(Elt));
@@ -2146,7 +2146,7 @@
// Return the scalar if it is a <1 x Ty> vector.
if (CV->getNumElements() == 1)
return translate(*CV->getElementAsConstant(0), Reg);
- SmallVector<unsigned, 4> Ops;
+ SmallVector<Register, 4> Ops;
for (unsigned i = 0; i < CV->getNumElements(); ++i) {
Constant &Elt = *CV->getElementAsConstant(i);
Ops.push_back(getOrCreateVReg(Elt));
@@ -2164,7 +2164,7 @@
} else if (auto CV = dyn_cast<ConstantVector>(&C)) {
if (CV->getNumOperands() == 1)
return translate(*CV->getOperand(0), Reg);
- SmallVector<unsigned, 4> Ops;
+ SmallVector<Register, 4> Ops;
for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
}
@@ -2274,7 +2274,7 @@
EntryBB->addSuccessor(&getMBB(F.front()));
// Lower the actual args into this basic block.
- SmallVector<unsigned, 8> VRegArgs;
+ SmallVector<Register, 8> VRegArgs;
for (const Argument &Arg: F.args()) {
if (DL->getTypeStoreSize(Arg.getType()) == 0)
continue; // Don't handle zero sized types.
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index d153a35..823bcab 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -115,17 +115,17 @@
}
}
-void LegalizerHelper::extractParts(unsigned Reg, LLT Ty, int NumParts,
- SmallVectorImpl<unsigned> &VRegs) {
+void LegalizerHelper::extractParts(Register Reg, LLT Ty, int NumParts,
+ SmallVectorImpl<Register> &VRegs) {
for (int i = 0; i < NumParts; ++i)
VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
MIRBuilder.buildUnmerge(VRegs, Reg);
}
-bool LegalizerHelper::extractParts(unsigned Reg, LLT RegTy,
+bool LegalizerHelper::extractParts(Register Reg, LLT RegTy,
LLT MainTy, LLT &LeftoverTy,
- SmallVectorImpl<unsigned> &VRegs,
- SmallVectorImpl<unsigned> &LeftoverRegs) {
+ SmallVectorImpl<Register> &VRegs,
+ SmallVectorImpl<Register> &LeftoverRegs) {
assert(!LeftoverTy.isValid() && "this is an out argument");
unsigned RegSize = RegTy.getSizeInBits();
@@ -152,14 +152,14 @@
// For irregular sizes, extract the individual parts.
for (unsigned I = 0; I != NumParts; ++I) {
- unsigned NewReg = MRI.createGenericVirtualRegister(MainTy);
+ Register NewReg = MRI.createGenericVirtualRegister(MainTy);
VRegs.push_back(NewReg);
MIRBuilder.buildExtract(NewReg, Reg, MainSize * I);
}
for (unsigned Offset = MainSize * NumParts; Offset < RegSize;
Offset += LeftoverSize) {
- unsigned NewReg = MRI.createGenericVirtualRegister(LeftoverTy);
+ Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy);
LeftoverRegs.push_back(NewReg);
MIRBuilder.buildExtract(NewReg, Reg, Offset);
}
@@ -167,11 +167,11 @@
return true;
}
-void LegalizerHelper::insertParts(unsigned DstReg,
+void LegalizerHelper::insertParts(Register DstReg,
LLT ResultTy, LLT PartTy,
- ArrayRef<unsigned> PartRegs,
+ ArrayRef<Register> PartRegs,
LLT LeftoverTy,
- ArrayRef<unsigned> LeftoverRegs) {
+ ArrayRef<Register> LeftoverRegs) {
if (!LeftoverTy.isValid()) {
assert(LeftoverRegs.empty());
@@ -469,7 +469,7 @@
return UnableToLegalize;
int NumParts = SizeOp0 / NarrowSize;
- SmallVector<unsigned, 2> DstRegs;
+ SmallVector<Register, 2> DstRegs;
for (int i = 0; i < NumParts; ++i)
DstRegs.push_back(
MIRBuilder.buildUndef(NarrowTy)->getOperand(0).getReg());
@@ -489,7 +489,7 @@
unsigned NarrowSize = NarrowTy.getSizeInBits();
int NumParts = TotalSize / NarrowSize;
- SmallVector<unsigned, 4> PartRegs;
+ SmallVector<Register, 4> PartRegs;
for (int I = 0; I != NumParts; ++I) {
unsigned Offset = I * NarrowSize;
auto K = MIRBuilder.buildConstant(NarrowTy,
@@ -499,7 +499,7 @@
LLT LeftoverTy;
unsigned LeftoverBits = TotalSize - NumParts * NarrowSize;
- SmallVector<unsigned, 1> LeftoverRegs;
+ SmallVector<Register, 1> LeftoverRegs;
if (LeftoverBits != 0) {
LeftoverTy = LLT::scalar(LeftoverBits);
auto K = MIRBuilder.buildConstant(
@@ -522,7 +522,7 @@
// Expand in terms of carry-setting/consuming G_ADDE instructions.
int NumParts = SizeOp0 / NarrowTy.getSizeInBits();
- SmallVector<unsigned, 2> Src1Regs, Src2Regs, DstRegs;
+ SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs;
extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);
@@ -555,7 +555,7 @@
int NumParts = SizeOp0 / NarrowTy.getSizeInBits();
- SmallVector<unsigned, 2> Src1Regs, Src2Regs, DstRegs;
+ SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs;
extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);
@@ -761,7 +761,7 @@
// Use concat_vectors if the result is a multiple of the number of elements.
if (NumParts * OldElts == NewElts) {
- SmallVector<unsigned, 8> Parts;
+ SmallVector<Register, 8> Parts;
Parts.push_back(MO.getReg());
unsigned ImpDef = MIRBuilder.buildUndef(OldTy).getReg(0);
@@ -785,7 +785,7 @@
if (TypeIdx != 1)
return UnableToLegalize;
- unsigned DstReg = MI.getOperand(0).getReg();
+ Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
if (!DstTy.isScalar())
return UnableToLegalize;
@@ -795,17 +795,17 @@
unsigned PartSize = DstTy.getSizeInBits() / NumSrc;
unsigned Src1 = MI.getOperand(1).getReg();
- unsigned ResultReg = MIRBuilder.buildZExt(DstTy, Src1)->getOperand(0).getReg();
+ Register ResultReg = MIRBuilder.buildZExt(DstTy, Src1)->getOperand(0).getReg();
for (unsigned I = 2; I != NumOps; ++I) {
const unsigned Offset = (I - 1) * PartSize;
- unsigned SrcReg = MI.getOperand(I).getReg();
+ Register SrcReg = MI.getOperand(I).getReg();
assert(MRI.getType(SrcReg) == LLT::scalar(PartSize));
auto ZextInput = MIRBuilder.buildZExt(DstTy, SrcReg);
- unsigned NextResult = I + 1 == NumOps ? DstReg :
+ Register NextResult = I + 1 == NumOps ? DstReg :
MRI.createGenericVirtualRegister(DstTy);
auto ShiftAmt = MIRBuilder.buildConstant(DstTy, Offset);
@@ -825,12 +825,12 @@
return UnableToLegalize;
unsigned NumDst = MI.getNumOperands() - 1;
- unsigned SrcReg = MI.getOperand(NumDst).getReg();
+ Register SrcReg = MI.getOperand(NumDst).getReg();
LLT SrcTy = MRI.getType(SrcReg);
if (!SrcTy.isScalar())
return UnableToLegalize;
- unsigned Dst0Reg = MI.getOperand(0).getReg();
+ Register Dst0Reg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(Dst0Reg);
if (!DstTy.isScalar())
return UnableToLegalize;
@@ -861,8 +861,8 @@
LegalizerHelper::LegalizeResult
LegalizerHelper::widenScalarExtract(MachineInstr &MI, unsigned TypeIdx,
LLT WideTy) {
- unsigned DstReg = MI.getOperand(0).getReg();
- unsigned SrcReg = MI.getOperand(1).getReg();
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = MI.getOperand(1).getReg();
LLT SrcTy = MRI.getType(SrcReg);
LLT DstTy = MRI.getType(DstReg);
@@ -1617,7 +1617,7 @@
LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorImplicitDef(
MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy) {
- SmallVector<unsigned, 2> DstRegs;
+ SmallVector<Register, 2> DstRegs;
unsigned NarrowSize = NarrowTy.getSizeInBits();
unsigned DstReg = MI.getOperand(0).getReg();
@@ -1702,7 +1702,7 @@
return Legalized;
}
- SmallVector<unsigned, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs;
+ SmallVector<Register, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs;
extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src0Regs);
@@ -1773,8 +1773,8 @@
SmallVector<MachineInstrBuilder, 4> NewInsts;
- SmallVector<unsigned, 4> DstRegs, LeftoverDstRegs;
- SmallVector<unsigned, 4> PartRegs, LeftoverRegs;
+ SmallVector<Register, 4> DstRegs, LeftoverDstRegs;
+ SmallVector<Register, 4> PartRegs, LeftoverRegs;
for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
LLT LeftoverTy;
@@ -1861,7 +1861,7 @@
NarrowTy1 = SrcTy.getElementType();
}
- SmallVector<unsigned, 4> SrcRegs, DstRegs;
+ SmallVector<Register, 4> SrcRegs, DstRegs;
extractParts(SrcReg, NarrowTy1, NumParts, SrcRegs);
for (unsigned I = 0; I < NumParts; ++I) {
@@ -1924,7 +1924,7 @@
CmpInst::Predicate Pred
= static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
- SmallVector<unsigned, 2> Src1Regs, Src2Regs, DstRegs;
+ SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs;
extractParts(MI.getOperand(2).getReg(), NarrowTy1, NumParts, Src1Regs);
extractParts(MI.getOperand(3).getReg(), NarrowTy1, NumParts, Src2Regs);
@@ -1953,8 +1953,8 @@
LegalizerHelper::LegalizeResult
LegalizerHelper::fewerElementsVectorSelect(MachineInstr &MI, unsigned TypeIdx,
LLT NarrowTy) {
- unsigned DstReg = MI.getOperand(0).getReg();
- unsigned CondReg = MI.getOperand(1).getReg();
+ Register DstReg = MI.getOperand(0).getReg();
+ Register CondReg = MI.getOperand(1).getReg();
unsigned NumParts = 0;
LLT NarrowTy0, NarrowTy1;
@@ -1999,7 +1999,7 @@
}
}
- SmallVector<unsigned, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs;
+ SmallVector<Register, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs;
if (CondTy.isVector())
extractParts(MI.getOperand(1).getReg(), NarrowTy1, NumParts, Src0Regs);
@@ -2007,7 +2007,7 @@
extractParts(MI.getOperand(3).getReg(), NarrowTy0, NumParts, Src2Regs);
for (unsigned i = 0; i < NumParts; ++i) {
- unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
+ Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
MIRBuilder.buildSelect(DstReg, CondTy.isVector() ? Src0Regs[i] : CondReg,
Src1Regs[i], Src2Regs[i]);
DstRegs.push_back(DstReg);
@@ -2038,7 +2038,7 @@
if (NumParts < 0)
return UnableToLegalize;
- SmallVector<unsigned, 4> DstRegs, LeftoverDstRegs;
+ SmallVector<Register, 4> DstRegs, LeftoverDstRegs;
SmallVector<MachineInstrBuilder, 4> NewInsts;
const int TotalNumParts = NumParts + NumLeftover;
@@ -2046,7 +2046,7 @@
// Insert the new phis in the result block first.
for (int I = 0; I != TotalNumParts; ++I) {
LLT Ty = I < NumParts ? NarrowTy : LeftoverTy;
- unsigned PartDstReg = MRI.createGenericVirtualRegister(Ty);
+ Register PartDstReg = MRI.createGenericVirtualRegister(Ty);
NewInsts.push_back(MIRBuilder.buildInstr(TargetOpcode::G_PHI)
.addDef(PartDstReg));
if (I < NumParts)
@@ -2059,7 +2059,7 @@
MIRBuilder.setInsertPt(*MBB, MBB->getFirstNonPHI());
insertParts(DstReg, PhiTy, NarrowTy, DstRegs, LeftoverTy, LeftoverDstRegs);
- SmallVector<unsigned, 4> PartRegs, LeftoverRegs;
+ SmallVector<Register, 4> PartRegs, LeftoverRegs;
// Insert code to extract the incoming values in each predecessor block.
for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
@@ -2105,14 +2105,14 @@
return UnableToLegalize;
bool IsLoad = MI.getOpcode() == TargetOpcode::G_LOAD;
- unsigned ValReg = MI.getOperand(0).getReg();
- unsigned AddrReg = MI.getOperand(1).getReg();
+ Register ValReg = MI.getOperand(0).getReg();
+ Register AddrReg = MI.getOperand(1).getReg();
LLT ValTy = MRI.getType(ValReg);
int NumParts = -1;
int NumLeftover = -1;
LLT LeftoverTy;
- SmallVector<unsigned, 8> NarrowRegs, NarrowLeftoverRegs;
+ SmallVector<Register, 8> NarrowRegs, NarrowLeftoverRegs;
if (IsLoad) {
std::tie(NumParts, NumLeftover) = getNarrowTypeBreakDown(ValTy, NarrowTy, LeftoverTy);
} else {
@@ -2134,7 +2134,7 @@
// is a load, return the new registers in ValRegs. For a store, each elements
// of ValRegs should be PartTy. Returns the next offset that needs to be
// handled.
- auto splitTypePieces = [=](LLT PartTy, SmallVectorImpl<unsigned> &ValRegs,
+ auto splitTypePieces = [=](LLT PartTy, SmallVectorImpl<Register> &ValRegs,
unsigned Offset) -> unsigned {
MachineFunction &MF = MIRBuilder.getMF();
unsigned PartSize = PartTy.getSizeInBits();
@@ -2142,7 +2142,7 @@
Offset += PartSize, ++Idx) {
unsigned ByteSize = PartSize / 8;
unsigned ByteOffset = Offset / 8;
- unsigned NewAddrReg = 0;
+ Register NewAddrReg;
MIRBuilder.materializeGEP(NewAddrReg, AddrReg, OffsetTy, ByteOffset);
@@ -2150,7 +2150,7 @@
MF.getMachineMemOperand(MMO, ByteOffset, ByteSize);
if (IsLoad) {
- unsigned Dst = MRI.createGenericVirtualRegister(PartTy);
+ Register Dst = MRI.createGenericVirtualRegister(PartTy);
ValRegs.push_back(Dst);
MIRBuilder.buildLoad(Dst, NewAddrReg, *NewMMO);
} else {
@@ -2401,7 +2401,7 @@
auto IsShort = MIRBuilder.buildICmp(ICmpInst::ICMP_ULT, CondTy, Amt, NewBits);
auto IsZero = MIRBuilder.buildICmp(ICmpInst::ICMP_EQ, CondTy, Amt, Zero);
- unsigned ResultRegs[2];
+ Register ResultRegs[2];
switch (MI.getOpcode()) {
case TargetOpcode::G_SHL: {
// Short: ShAmt < NewBitSize
@@ -2556,9 +2556,9 @@
}
}
-void LegalizerHelper::multiplyRegisters(SmallVectorImpl<unsigned> &DstRegs,
- ArrayRef<unsigned> Src1Regs,
- ArrayRef<unsigned> Src2Regs,
+void LegalizerHelper::multiplyRegisters(SmallVectorImpl<Register> &DstRegs,
+ ArrayRef<Register> Src1Regs,
+ ArrayRef<Register> Src2Regs,
LLT NarrowTy) {
MachineIRBuilder &B = MIRBuilder;
unsigned SrcParts = Src1Regs.size();
@@ -2570,7 +2570,7 @@
DstRegs[DstIdx] = FactorSum;
unsigned CarrySumPrevDstIdx;
- SmallVector<unsigned, 4> Factors;
+ SmallVector<Register, 4> Factors;
for (DstIdx = 1; DstIdx < DstParts; DstIdx++) {
// Collect low parts of muls for DstIdx.
@@ -2621,9 +2621,9 @@
LegalizerHelper::LegalizeResult
LegalizerHelper::narrowScalarMul(MachineInstr &MI, LLT NarrowTy) {
- unsigned DstReg = MI.getOperand(0).getReg();
- unsigned Src1 = MI.getOperand(1).getReg();
- unsigned Src2 = MI.getOperand(2).getReg();
+ Register DstReg = MI.getOperand(0).getReg();
+ Register Src1 = MI.getOperand(1).getReg();
+ Register Src2 = MI.getOperand(2).getReg();
LLT Ty = MRI.getType(DstReg);
if (Ty.isVector())
@@ -2640,14 +2640,14 @@
bool IsMulHigh = MI.getOpcode() == TargetOpcode::G_UMULH;
unsigned DstTmpParts = NumDstParts * (IsMulHigh ? 2 : 1);
- SmallVector<unsigned, 2> Src1Parts, Src2Parts, DstTmpRegs;
+ SmallVector<Register, 2> Src1Parts, Src2Parts, DstTmpRegs;
extractParts(Src1, NarrowTy, NumSrcParts, Src1Parts);
extractParts(Src2, NarrowTy, NumSrcParts, Src2Parts);
DstTmpRegs.resize(DstTmpParts);
multiplyRegisters(DstTmpRegs, Src1Parts, Src2Parts, NarrowTy);
// Take only high half of registers if this is high mul.
- ArrayRef<unsigned> DstRegs(
+ ArrayRef<Register> DstRegs(
IsMulHigh ? &DstTmpRegs[DstTmpParts / 2] : &DstTmpRegs[0], NumDstParts);
MIRBuilder.buildMerge(DstReg, DstRegs);
MI.eraseFromParent();
@@ -2669,7 +2669,7 @@
return UnableToLegalize;
int NumParts = SizeOp1 / NarrowSize;
- SmallVector<unsigned, 2> SrcRegs, DstRegs;
+ SmallVector<Register, 2> SrcRegs, DstRegs;
SmallVector<uint64_t, 2> Indexes;
extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs);
@@ -2736,7 +2736,7 @@
int NumParts = SizeOp0 / NarrowSize;
- SmallVector<unsigned, 2> SrcRegs, DstRegs;
+ SmallVector<Register, 2> SrcRegs, DstRegs;
SmallVector<uint64_t, 2> Indexes;
extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs);
@@ -2802,9 +2802,9 @@
assert(MI.getNumOperands() == 3 && TypeIdx == 0);
- SmallVector<unsigned, 4> DstRegs, DstLeftoverRegs;
- SmallVector<unsigned, 4> Src0Regs, Src0LeftoverRegs;
- SmallVector<unsigned, 4> Src1Regs, Src1LeftoverRegs;
+ SmallVector<Register, 4> DstRegs, DstLeftoverRegs;
+ SmallVector<Register, 4> Src0Regs, Src0LeftoverRegs;
+ SmallVector<Register, 4> Src1Regs, Src1LeftoverRegs;
LLT LeftoverTy;
if (!extractParts(MI.getOperand(1).getReg(), DstTy, NarrowTy, LeftoverTy,
Src0Regs, Src0LeftoverRegs))
@@ -2849,9 +2849,9 @@
unsigned DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
- SmallVector<unsigned, 4> DstRegs, DstLeftoverRegs;
- SmallVector<unsigned, 4> Src1Regs, Src1LeftoverRegs;
- SmallVector<unsigned, 4> Src2Regs, Src2LeftoverRegs;
+ SmallVector<Register, 4> DstRegs, DstLeftoverRegs;
+ SmallVector<Register, 4> Src1Regs, Src1LeftoverRegs;
+ SmallVector<Register, 4> Src2Regs, Src2LeftoverRegs;
LLT LeftoverTy;
if (!extractParts(MI.getOperand(2).getReg(), DstTy, NarrowTy, LeftoverTy,
Src1Regs, Src1LeftoverRegs))
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index df5616e..b116991 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -210,7 +210,7 @@
}
Optional<MachineInstrBuilder>
-MachineIRBuilder::materializeGEP(unsigned &Res, unsigned Op0,
+MachineIRBuilder::materializeGEP(Register &Res, Register Op0,
const LLT &ValueTy, uint64_t Value) {
assert(Res == 0 && "Res is a result argument");
assert(ValueTy.isScalar() && "invalid offset type");
@@ -506,7 +506,7 @@
return Extract;
}
-void MachineIRBuilder::buildSequence(unsigned Res, ArrayRef<unsigned> Ops,
+void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops,
ArrayRef<uint64_t> Indices) {
#ifndef NDEBUG
assert(Ops.size() == Indices.size() && "incompatible args");
@@ -535,11 +535,11 @@
return;
}
- unsigned ResIn = getMRI()->createGenericVirtualRegister(ResTy);
+ Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
buildUndef(ResIn);
for (unsigned i = 0; i < Ops.size(); ++i) {
- unsigned ResOut = i + 1 == Ops.size()
+ Register ResOut = i + 1 == Ops.size()
? Res
: getMRI()->createGenericVirtualRegister(ResTy);
buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
@@ -552,7 +552,7 @@
}
MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
- ArrayRef<unsigned> Ops) {
+ ArrayRef<Register> Ops) {
// Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>,
// we need some temporary storage for the DstOp objects. Here we use a
// sufficiently large SmallVector to not go through the heap.
@@ -572,13 +572,13 @@
MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
const SrcOp &Op) {
unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
- SmallVector<unsigned, 8> TmpVec;
+ SmallVector<Register, 8> TmpVec;
for (unsigned I = 0; I != NumReg; ++I)
TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res));
return buildUnmerge(TmpVec, Op);
}
-MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<unsigned> Res,
+MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
const SrcOp &Op) {
// Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<DstOp>,
// we need some temporary storage for the DstOp objects. Here we use a
@@ -588,7 +588,7 @@
}
MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
- ArrayRef<unsigned> Ops) {
+ ArrayRef<Register> Ops) {
// Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<SrcOp>,
// we need some temporary storage for the DstOp objects. Here we use a
// sufficiently large SmallVector to not go through the heap.
@@ -604,7 +604,7 @@
MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
- ArrayRef<unsigned> Ops) {
+ ArrayRef<Register> Ops) {
// Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<SrcOp>,
// we need some temporary storage for the DstOp objects. Here we use a
// sufficiently large SmallVector to not go through the heap.
@@ -613,7 +613,7 @@
}
MachineInstrBuilder
-MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<unsigned> Ops) {
+MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
// Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<SrcOp>,
// we need some temporary storage for the DstOp objects. Here we use a
// sufficiently large SmallVector to not go through the heap.
@@ -621,8 +621,8 @@
return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}
-MachineInstrBuilder MachineIRBuilder::buildInsert(unsigned Res, unsigned Src,
- unsigned Op, unsigned Index) {
+MachineInstrBuilder MachineIRBuilder::buildInsert(Register Res, Register Src,
+ Register Op, unsigned Index) {
assert(Index + getMRI()->getType(Op).getSizeInBits() <=
getMRI()->getType(Res).getSizeInBits() &&
"insertion past the end of a register");
@@ -640,7 +640,7 @@
}
MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
- ArrayRef<unsigned> ResultRegs,
+ ArrayRef<Register> ResultRegs,
bool HasSideEffects) {
auto MIB =
buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
diff --git a/llvm/lib/CodeGen/LiveDebugValues.cpp b/llvm/lib/CodeGen/LiveDebugValues.cpp
index 5eb2440..19bd354 100644
--- a/llvm/lib/CodeGen/LiveDebugValues.cpp
+++ b/llvm/lib/CodeGen/LiveDebugValues.cpp
@@ -70,12 +70,12 @@
// If @MI is a DBG_VALUE with debug value described by a defined
// register, returns the number of this register. In the other case, returns 0.
-static unsigned isDbgValueDescribedByReg(const MachineInstr &MI) {
+static Register isDbgValueDescribedByReg(const MachineInstr &MI) {
assert(MI.isDebugValue() && "expected a DBG_VALUE");
assert(MI.getNumOperands() == 4 && "malformed DBG_VALUE");
// If location of variable is described using a register (directly
// or indirectly), this register is always a first operand.
- return MI.getOperand(0).isReg() ? MI.getOperand(0).getReg() : 0;
+ return MI.getOperand(0).isReg() ? MI.getOperand(0).getReg() : Register();
}
namespace {
diff --git a/llvm/lib/CodeGen/MachineOperand.cpp b/llvm/lib/CodeGen/MachineOperand.cpp
index 5765415..4fa4ea7 100644
--- a/llvm/lib/CodeGen/MachineOperand.cpp
+++ b/llvm/lib/CodeGen/MachineOperand.cpp
@@ -342,7 +342,7 @@
switch (MO.getType()) {
case MachineOperand::MO_Register:
// Register operands don't have target flags.
- return hash_combine(MO.getType(), MO.getReg(), MO.getSubReg(), MO.isDef());
+ return hash_combine(MO.getType(), (unsigned)MO.getReg(), MO.getSubReg(), MO.isDef());
case MachineOperand::MO_Immediate:
return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getImm());
case MachineOperand::MO_CImmediate:
diff --git a/llvm/lib/CodeGen/MachineRegisterInfo.cpp b/llvm/lib/CodeGen/MachineRegisterInfo.cpp
index a632286..be4b13b 100644
--- a/llvm/lib/CodeGen/MachineRegisterInfo.cpp
+++ b/llvm/lib/CodeGen/MachineRegisterInfo.cpp
@@ -154,7 +154,7 @@
/// createVirtualRegister - Create and return a new virtual register in the
/// function with the specified register class.
///
-unsigned
+Register
MachineRegisterInfo::createVirtualRegister(const TargetRegisterClass *RegClass,
StringRef Name) {
assert(RegClass && "Cannot create register without RegClass!");
@@ -169,7 +169,7 @@
return Reg;
}
-unsigned MachineRegisterInfo::cloneVirtualRegister(unsigned VReg,
+Register MachineRegisterInfo::cloneVirtualRegister(Register VReg,
StringRef Name) {
unsigned Reg = createIncompleteVirtualRegister(Name);
VRegInfo[Reg].first = VRegInfo[VReg].first;
@@ -184,7 +184,7 @@
VRegToType[VReg] = Ty;
}
-unsigned
+Register
MachineRegisterInfo::createGenericVirtualRegister(LLT Ty, StringRef Name) {
// New virtual register number.
unsigned Reg = createIncompleteVirtualRegister(Name);
diff --git a/llvm/lib/CodeGen/RegAllocGreedy.cpp b/llvm/lib/CodeGen/RegAllocGreedy.cpp
index defc30b..6485040 100644
--- a/llvm/lib/CodeGen/RegAllocGreedy.cpp
+++ b/llvm/lib/CodeGen/RegAllocGreedy.cpp
@@ -2874,14 +2874,14 @@
if (!Instr.isFullCopy())
continue;
// Look for the other end of the copy.
- unsigned OtherReg = Instr.getOperand(0).getReg();
+ Register OtherReg = Instr.getOperand(0).getReg();
if (OtherReg == Reg) {
OtherReg = Instr.getOperand(1).getReg();
if (OtherReg == Reg)
continue;
}
// Get the current assignment.
- unsigned OtherPhysReg = TargetRegisterInfo::isPhysicalRegister(OtherReg)
+ Register OtherPhysReg = TargetRegisterInfo::isPhysicalRegister(OtherReg)
? OtherReg
: VRM->getPhys(OtherReg);
// Push the collected information.
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index f5a7c08..8a04d62 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -7879,7 +7879,7 @@
for (; NumRegs; --NumRegs, ++I) {
assert(I != RC->end() && "Ran out of registers to allocate!");
- auto R = (AssignedReg) ? *I : RegInfo.createVirtualRegister(RC);
+ Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
Regs.push_back(R);
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 5c41e6a..9d86778 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -570,7 +570,7 @@
for (unsigned i = 0, e = FuncInfo->ArgDbgValues.size(); i != e; ++i) {
MachineInstr *MI = FuncInfo->ArgDbgValues[e-i-1];
bool hasFI = MI->getOperand(0).isFI();
- unsigned Reg =
+ Register Reg =
hasFI ? TRI.getFrameRegister(*MF) : MI->getOperand(0).getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg))
EntryMBB->insert(EntryMBB->begin(), MI);
diff --git a/llvm/lib/CodeGen/SwiftErrorValueTracking.cpp b/llvm/lib/CodeGen/SwiftErrorValueTracking.cpp
index 0359053..e59cd8e 100644
--- a/llvm/lib/CodeGen/SwiftErrorValueTracking.cpp
+++ b/llvm/lib/CodeGen/SwiftErrorValueTracking.cpp
@@ -42,7 +42,7 @@
}
void SwiftErrorValueTracking::setCurrentVReg(const MachineBasicBlock *MBB,
- const Value *Val, unsigned VReg) {
+ const Value *Val, Register VReg) {
VRegDefMap[std::make_pair(MBB, Val)] = VReg;
}
@@ -161,7 +161,7 @@
auto UUseIt = VRegUpwardsUse.find(Key);
auto VRegDefIt = VRegDefMap.find(Key);
bool UpwardsUse = UUseIt != VRegUpwardsUse.end();
- unsigned UUseVReg = UpwardsUse ? UUseIt->second : 0;
+ Register UUseVReg = UpwardsUse ? UUseIt->second : Register();
bool DownwardDef = VRegDefIt != VRegDefMap.end();
assert(!(UpwardsUse && !DownwardDef) &&
"We can't have an upwards use but no downwards def");
@@ -238,7 +238,7 @@
// destination virtual register number otherwise we generate a new one.
auto &DL = MF->getDataLayout();
auto const *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
- unsigned PHIVReg =
+ Register PHIVReg =
UpwardsUse ? UUseVReg : MF->getRegInfo().createVirtualRegister(RC);
MachineInstrBuilder PHI =
BuildMI(*MBB, MBB->getFirstNonPHI(), DLoc,
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index 0e30d6e..cc3012a 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -163,9 +163,9 @@
assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
"This only knows how to commute register operands so far");
- unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
- unsigned Reg1 = MI.getOperand(Idx1).getReg();
- unsigned Reg2 = MI.getOperand(Idx2).getReg();
+ Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
+ Register Reg1 = MI.getOperand(Idx1).getReg();
+ Register Reg2 = MI.getOperand(Idx2).getReg();
unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
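
One hunk above that is more than a type substitution is the one in
MachineOperand.cpp: hash_combine deduces its argument types, so it
would now see a Register rather than an unsigned, and the explicit
(unsigned) cast keeps the previous hashing behavior (this reading is
inferred from the hunk, not stated in the patch). A self-contained,
hypothetical illustration of why implicit conversion does not help
during template argument deduction:

  #include <type_traits>

  // Hypothetical stand-in for the real Register; constexpr only so the
  // static_asserts below can exercise it at compile time.
  class Register {
    unsigned Reg;

  public:
    constexpr Register(unsigned Val = 0) : Reg(Val) {}
    constexpr operator unsigned() const { return Reg; }
  };

  // A template deduces the wrapper type itself; the implicit conversion
  // to unsigned never participates in deduction.
  template <typename T> constexpr bool DeducesUnsigned(T) {
    return std::is_same<T, unsigned>::value;
  }

  static_assert(DeducesUnsigned(1u), "plain unsigned stays unsigned");
  static_assert(!DeducesUnsigned(Register(1u)),
                "Register is deduced as Register, not unsigned");
  static_assert(DeducesUnsigned(static_cast<unsigned>(Register(1u))),
                "an explicit cast restores the old deduction, as the "
                "hash_combine call above does");

  int main() { return 0; }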