Move some more instruction creation methods (loadRegFromStackSlot, storeRegToStackSlot) from MRegisterInfo into TargetInstrInfo.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@45484 91177308-0d34-0410-b5e6-96231b3b80d8
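
For reference, call sites now fetch a TargetInstrInfo from the TargetMachine instead of going through MRegisterInfo for spill/reload insertion. The sketch below restates the call-site pattern visible in the diff; the wrapper function, its name, and the header list are illustrative assumptions and not part of this change.

    // Illustrative sketch only: the spill/reload calls and argument lists are
    // the ones that appear in the diff below; the wrapper is assumed.
    #include "llvm/ADT/STLExtras.h"             // for next()
    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/Target/MRegisterInfo.h"      // TargetRegisterClass
    #include "llvm/Target/TargetInstrInfo.h"
    #include "llvm/Target/TargetMachine.h"
    using namespace llvm;

    static void reloadThenSpill(MachineFunction &MF, MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MII, MachineInstr &MI,
                                unsigned PhysReg, int StackSlot,
                                const TargetRegisterClass *RC) {
      // Old: const MRegisterInfo &MRI = *MF.getTarget().getRegisterInfo();
      //      MRI.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
      // New: the same helpers are reached through TargetInstrInfo.
      const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
      TII.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);            // reload
      TII.storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC); // spill (isKill=true)
    }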
diff --git a/lib/CodeGen/VirtRegMap.cpp b/lib/CodeGen/VirtRegMap.cpp
index dc21518..6592732 100644
--- a/lib/CodeGen/VirtRegMap.cpp
+++ b/lib/CodeGen/VirtRegMap.cpp
@@ -173,7 +173,8 @@
DOUT << "********** REWRITE MACHINE CODE **********\n";
DOUT << "********** Function: " << MF.getFunction()->getName() << '\n';
const TargetMachine &TM = MF.getTarget();
- const MRegisterInfo &MRI = *TM.getRegisterInfo();
+ const TargetInstrInfo &TII = *TM.getInstrInfo();
+
// LoadedRegs - Keep track of which vregs are loaded, so that we only load
// each vreg once (in the case where a spilled vreg is used by multiple
@@ -202,14 +203,14 @@
if (MO.isUse() &&
std::find(LoadedRegs.begin(), LoadedRegs.end(), VirtReg)
== LoadedRegs.end()) {
- MRI.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
+ TII.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
LoadedRegs.push_back(VirtReg);
++NumLoads;
DOUT << '\t' << *prior(MII);
}
if (MO.isDef()) {
- MRI.storeRegToStackSlot(MBB, next(MII), PhysReg, true,
+ TII.storeRegToStackSlot(MBB, next(MII), PhysReg, true,
StackSlot, RC);
++NumStores;
}
@@ -645,6 +646,9 @@
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps,
VirtRegMap &VRM) {
+ const TargetInstrInfo* TII = MI->getParent()->getParent()->getTarget()
+ .getInstrInfo();
+
if (Reuses.empty()) return PhysReg; // This is most often empty.
for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
@@ -693,7 +697,7 @@
VRM.getReMaterializedMI(NewOp.VirtReg));
++NumReMats;
} else {
- MRI->loadRegFromStackSlot(*MBB, MI, NewPhysReg,
+ TII->loadRegFromStackSlot(*MBB, MI, NewPhysReg,
NewOp.StackSlotOrReMat, AliasRC);
// Any stores to this stack slot are not dead anymore.
MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
@@ -876,7 +880,7 @@
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps,
VirtRegMap &VRM) {
- MRI->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC);
+ TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC);
DOUT << "Store:\t" << *next(MII);
// If there is a dead store to this stack slot, nuke it now.
@@ -979,7 +983,7 @@
++NumReMats;
} else {
const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
- MRI->loadRegFromStackSlot(MBB, &MI, Phys, VRM.getStackSlot(VirtReg),
+ TII->loadRegFromStackSlot(MBB, &MI, Phys, VRM.getStackSlot(VirtReg),
RC);
++NumLoads;
}
@@ -1002,7 +1006,7 @@
const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
unsigned Phys = VRM.getPhys(VirtReg);
int StackSlot = VRM.getStackSlot(VirtReg);
- MRI->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC);
+ TII->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC);
MachineInstr *StoreMI = next(MII);
DOUT << "Store:\t" << StoreMI;
VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
@@ -1218,7 +1222,7 @@
++NumReMats;
} else {
const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
- MRI->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
+ TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
++NumLoads;
}
// This invalidates PhysReg.