//===-- llvm/CodeGen/VirtRegRewriter.cpp - Virtual Register Rewriter -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "virtregrewriter"
#include "VirtRegRewriter.h"
#include "llvm/Support/Compiler.h"
// This file uses cl::opt and DOUT directly, so include their headers here
// rather than relying on VirtRegRewriter.h to pull them in transitively.
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumDSE     , "Number of dead stores elided");
STATISTIC(NumDSS     , "Number of dead spill slots removed");
STATISTIC(NumCommutes, "Number of instructions commuted");
STATISTIC(NumDRM     , "Number of re-materializable defs elided");
STATISTIC(NumStores  , "Number of stores added");
STATISTIC(NumPSpills , "Number of physical register spills");
STATISTIC(NumOmitted , "Number of reloads omitted");
STATISTIC(NumAvoided , "Number of reloads deemed unnecessary");
STATISTIC(NumCopified, "Number of available reloads turned into copies");
STATISTIC(NumReMats  , "Number of re-materializations");
STATISTIC(NumLoads   , "Number of loads added");
STATISTIC(NumReused  , "Number of values reused");
STATISTIC(NumDCE     , "Number of copies elided");
STATISTIC(NumSUnfold , "Number of stores unfolded");
STATISTIC(NumModRefUnfold, "Number of modref unfolded");

namespace {
  enum RewriterName { simple, local };
}

static cl::opt<RewriterName>
RewriterOpt("rewriter",
            cl::desc("Rewriter to use: (default: local)"),
            cl::Prefix,
            cl::values(clEnumVal(simple, "simple rewriter"),
                       clEnumVal(local,  "local rewriter"),
                       clEnumValEnd),
            cl::init(local));
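
// The cl::opt above surfaces this choice as a command line flag; an
// illustrative invocation would be "llc -rewriter=simple foo.bc".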

VirtRegRewriter::~VirtRegRewriter() {}


// ****************************** //
// Simple Spiller Implementation  //
// ****************************** //

struct VISIBILITY_HIDDEN SimpleRewriter : public VirtRegRewriter {

  bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
                            LiveIntervals* LIs) {
    DOUT << "********** REWRITE MACHINE CODE **********\n";
    DOUT << "********** Function: " << MF.getFunction()->getName() << '\n';
    const TargetMachine &TM = MF.getTarget();
    const TargetInstrInfo &TII = *TM.getInstrInfo();
    const TargetRegisterInfo &TRI = *TM.getRegisterInfo();


    // LoadedRegs - Keep track of which vregs are loaded, so that we only load
    // each vreg once (in the case where a spilled vreg is used by multiple
    // operands).  This is always smaller than the number of operands to the
    // current machine instr, so it should be small.
    std::vector<unsigned> LoadedRegs;

    for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
         MBBI != E; ++MBBI) {
      DOUT << MBBI->getBasicBlock()->getName() << ":\n";
      MachineBasicBlock &MBB = *MBBI;
      for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
           MII != E; ++MII) {
        MachineInstr &MI = *MII;
        for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
          MachineOperand &MO = MI.getOperand(i);
          if (MO.isReg() && MO.getReg()) {
            if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
              unsigned VirtReg = MO.getReg();
              unsigned SubIdx = MO.getSubReg();
              unsigned PhysReg = VRM.getPhys(VirtReg);
              unsigned RReg = SubIdx ? TRI.getSubReg(PhysReg, SubIdx)
                                     : PhysReg;
              if (!VRM.isAssignedReg(VirtReg)) {
                int StackSlot = VRM.getStackSlot(VirtReg);
                const TargetRegisterClass* RC =
                  MF.getRegInfo().getRegClass(VirtReg);

                if (MO.isUse() &&
                    std::find(LoadedRegs.begin(), LoadedRegs.end(), VirtReg)
                      == LoadedRegs.end()) {
                  TII.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
                  MachineInstr *LoadMI = prior(MII);
                  VRM.addSpillSlotUse(StackSlot, LoadMI);
                  LoadedRegs.push_back(VirtReg);
                  ++NumLoads;
                  DOUT << '\t' << *LoadMI;
                }

                if (MO.isDef()) {
                  TII.storeRegToStackSlot(MBB, next(MII), PhysReg, true,
                                          StackSlot, RC);
                  MachineInstr *StoreMI = next(MII);
                  VRM.addSpillSlotUse(StackSlot, StoreMI);
                  ++NumStores;
                }
              }
              MF.getRegInfo().setPhysRegUsed(RReg);
              MI.getOperand(i).setReg(RReg);
              MI.getOperand(i).setSubReg(0);
            } else {
              MF.getRegInfo().setPhysRegUsed(MO.getReg());
            }
          }
        }

        DOUT << '\t' << MI;
        LoadedRegs.clear();
      }
    }
    return true;
  }

};

// ************************************************************************ //

/// AvailableSpills - As the local rewriter is scanning and rewriting an MBB
/// from top down, keep track of which spill slot or remat'ed values are
/// available in each physical register.
///
/// Note that not all physregs are created equal here.  In particular, some
/// physregs are reloads that we are allowed to clobber or ignore at any time.
/// Other physregs are values that the register allocated program is using
/// that we cannot CHANGE, but we can read if we like.  We keep track of this
/// on a per-stack-slot / remat id basis as the low bit in the value of the
/// SpillSlotsOrReMatsAvailable entries.  The predicate 'canClobberPhysReg()'
/// checks this bit and addAvailable sets it.
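///
/// For example (an illustration, not part of the original comment): if the
/// value of stack slot #4 is available in EAX and may be freely clobbered,
/// then SpillSlotsOrReMatsAvailable[4] == (EAX << 1) | 1.  Remat values are
/// keyed the same way, using ids numbered above VirtRegMap::MAX_STACK_SLOT.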
class VISIBILITY_HIDDEN AvailableSpills {
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;

  // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled
  // or remat'ed virtual register values that are still available, due to
  // being loaded or stored to, but not invalidated yet.
  std::map<int, unsigned> SpillSlotsOrReMatsAvailable;

  // PhysRegsAvailable - This is the inverse of SpillSlotsOrReMatsAvailable,
  // indicating which stack slot values are currently held by a physreg.  This
  // is used to invalidate entries in SpillSlotsOrReMatsAvailable when a
  // physreg is modified.
  std::multimap<unsigned, int> PhysRegsAvailable;

  void disallowClobberPhysRegOnly(unsigned PhysReg);

  void ClobberPhysRegOnly(unsigned PhysReg);
public:
  AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii)
    : TRI(tri), TII(tii) {
  }

  /// clear - Reset the state.
  void clear() {
    SpillSlotsOrReMatsAvailable.clear();
    PhysRegsAvailable.clear();
  }

  const TargetRegisterInfo *getRegInfo() const { return TRI; }

  /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is
  /// available in a physical register, return that PhysReg, otherwise
  /// return 0.
  unsigned getSpillSlotOrReMatPhysReg(int Slot) const {
    std::map<int, unsigned>::const_iterator I =
      SpillSlotsOrReMatsAvailable.find(Slot);
    if (I != SpillSlotsOrReMatsAvailable.end()) {
      return I->second >> 1;  // Remove the CanClobber bit.
    }
    return 0;
  }

  /// addAvailable - Mark that the specified stack slot / remat is available
  /// in the specified physreg.  If CanClobber is true, the physreg can be
  /// modified at any time without changing the semantics of the program.
  void addAvailable(int SlotOrReMat, unsigned Reg, bool CanClobber = true) {
    // If this stack slot is thought to be available in some other physreg,
    // remove its record.
    ModifyStackSlotOrReMat(SlotOrReMat);

    PhysRegsAvailable.insert(std::make_pair(Reg, SlotOrReMat));
    SpillSlotsOrReMatsAvailable[SlotOrReMat] = (Reg << 1) |
                                               (unsigned)CanClobber;

    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DOUT << "Remembering RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1;
    else
      DOUT << "Remembering SS#" << SlotOrReMat;
    DOUT << " in physreg " << TRI->getName(Reg) << "\n";
  }

  /// canClobberPhysRegForSS - Return true if the spiller is allowed to change
  /// the value of the specified stackslot register if it desires.  The
  /// specified stack slot must be available in a physreg for this query to
  /// make sense.
  bool canClobberPhysRegForSS(int SlotOrReMat) const {
    assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) &&
           "Value not available!");
    return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1;
  }

  /// canClobberPhysReg - Return true if the spiller is allowed to clobber the
  /// physical register where values for some stack slot(s) might be
  /// available.
  bool canClobberPhysReg(unsigned PhysReg) const {
    std::multimap<unsigned, int>::const_iterator I =
      PhysRegsAvailable.lower_bound(PhysReg);
    while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
      int SlotOrReMat = I->second;
      I++;
      if (!canClobberPhysRegForSS(SlotOrReMat))
        return false;
    }
    return true;
  }

  /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
  /// stackslot register.  The register is still available but is no longer
  /// allowed to be modified.
  void disallowClobberPhysReg(unsigned PhysReg);

  /// ClobberPhysReg - This is called when the specified physreg changes
  /// value.  We use this to invalidate any info about stuff that lives in
  /// it and any of its aliases.
  void ClobberPhysReg(unsigned PhysReg);

  /// ModifyStackSlotOrReMat - This method is called when the value in a stack
  /// slot changes.  This removes information about which register the
  /// previous value for this slot lives in (as the previous value is dead
  /// now).
  void ModifyStackSlotOrReMat(int SlotOrReMat);

  /// AddAvailableRegsToLiveIn - Availability information is being kept coming
  /// into the specified MBB.  Add available physical registers as potential
  /// live-ins.  If they are reused in the MBB, they will be added to the
  /// live-in set to keep the register scavenger and post-allocation scheduler
  /// happy.
  void AddAvailableRegsToLiveIn(MachineBasicBlock &MBB, BitVector &RegKills,
                                std::vector<MachineOperand*> &KillOps);
};
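
// Typical flow (an illustrative sketch of how RewriteMBB below drives this
// class): after emitting a reload of SS#4 into EAX, call addAvailable(4, EAX);
// when EAX is overwritten, ClobberPhysReg(EAX) forgets the mapping; when SS#4
// is stored to again, ModifyStackSlotOrReMat(4) drops the stale association.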

// ************************************************************************ //

// ReusedOp - For each reused operand, we keep track of a bit of information,
// in case we need to roll back upon processing a new operand.  See comments
// below.
struct ReusedOp {
  // The MachineInstr operand that reused an available value.
  unsigned Operand;

  // StackSlotOrReMat - The spill slot or remat id of the value being reused.
  unsigned StackSlotOrReMat;

  // PhysRegReused - The physical register the value was available in.
  unsigned PhysRegReused;

  // AssignedPhysReg - The physreg that was assigned for use by the reload.
  unsigned AssignedPhysReg;

  // VirtReg - The virtual register itself.
  unsigned VirtReg;

  ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr,
           unsigned vreg)
    : Operand(o), StackSlotOrReMat(ss), PhysRegReused(prr),
      AssignedPhysReg(apr), VirtReg(vreg) {}
};

/// ReuseInfo - This maintains a collection of ReuseOp's for each operand that
/// is reused instead of reloaded.
class VISIBILITY_HIDDEN ReuseInfo {
  MachineInstr &MI;
  std::vector<ReusedOp> Reuses;
  BitVector PhysRegsClobbered;
public:
  ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) {
    PhysRegsClobbered.resize(tri->getNumRegs());
  }

  bool hasReuses() const {
    return !Reuses.empty();
  }

  /// addReuse - If we choose to reuse a virtual register that is already
  /// available instead of reloading it, remember that we did so.
  void addReuse(unsigned OpNo, unsigned StackSlotOrReMat,
                unsigned PhysRegReused, unsigned AssignedPhysReg,
                unsigned VirtReg) {
    // If the reload is to the assigned register anyway, no undo will be
    // required.
    if (PhysRegReused == AssignedPhysReg) return;

    // Otherwise, remember this.
    Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused,
                              AssignedPhysReg, VirtReg));
  }

  void markClobbered(unsigned PhysReg) {
    PhysRegsClobbered.set(PhysReg);
  }

  bool isClobbered(unsigned PhysReg) const {
    return PhysRegsClobbered.test(PhysReg);
  }

  /// GetRegForReload - We are about to emit a reload into PhysReg.  If there
  /// is some other operand that is using the specified register, either pick
  /// a new register to use, or evict the previous reload and use this reg.
  unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI,
                           AvailableSpills &Spills,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           SmallSet<unsigned, 8> &Rejected,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM);

  /// GetRegForReload - Helper for the above GetRegForReload().  Add a
  /// 'Rejected' set to remember which registers have been considered and
  /// rejected for the reload.  This avoids infinite looping in cases like
  /// this:
  ///    t1 := op t2, t3
  ///    t2 <- assigned r0 for use by the reload but ended up reuse r1
  ///    t3 <- assigned r1 for use by the reload but ended up reuse r0
  ///    t1 <- desires r1
  ///          sees r1 is taken by t2, tries t2's reload register r0
  ///          sees r0 is taken by t3, tries t3's reload register r1
  ///          sees r1 is taken by t2, tries t2's reload register r0 ...
  unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI,
                           AvailableSpills &Spills,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM) {
    SmallSet<unsigned, 8> Rejected;
    return GetRegForReload(PhysReg, MI, Spills, MaybeDeadStores, Rejected,
                           RegKills, KillOps, VRM);
  }
};


// ****************** //
// Utility Functions  //
// ****************** //

/// InvalidateKill - An MI that defines the specified register is being
/// deleted; invalidate the register kill information.
static void InvalidateKill(unsigned Reg, BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps) {
  if (RegKills[Reg]) {
    KillOps[Reg]->setIsKill(false);
    KillOps[Reg] = NULL;
    RegKills.reset(Reg);
  }
}

/// findSinglePredSuccessor - Return via reference a vector of machine basic
/// blocks each of which is a successor of the specified BB and has no other
/// predecessor.
static void findSinglePredSuccessor(MachineBasicBlock *MBB,
                                    SmallVectorImpl<MachineBasicBlock *> &Succs) {
  for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
         SE = MBB->succ_end(); SI != SE; ++SI) {
    MachineBasicBlock *SuccMBB = *SI;
    if (SuccMBB->pred_size() == 1)
      Succs.push_back(SuccMBB);
  }
}

/// InvalidateKills - MI is going to be deleted.  If any of its operands are
/// marked kill, then invalidate the information.
static void InvalidateKills(MachineInstr &MI, BitVector &RegKills,
                            std::vector<MachineOperand*> &KillOps,
                            SmallVector<unsigned, 2> *KillRegs = NULL) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse() || !MO.isKill())
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (KillRegs)
      KillRegs->push_back(Reg);
    assert(Reg < KillOps.size());
    if (KillOps[Reg] == &MO) {
      RegKills.reset(Reg);
      KillOps[Reg] = NULL;
    }
  }
}

/// InvalidateRegDef - If the def operand of the specified def MI is now dead
/// (since its spill instruction was removed), mark it isDead.  Also check
/// whether the def MI has other definition operands that are not dead, and
/// return that fact by reference in HasLiveDef.
static bool InvalidateRegDef(MachineBasicBlock::iterator I,
                             MachineInstr &NewDef, unsigned Reg,
                             bool &HasLiveDef) {
  // Due to remat, it's possible this reg isn't being reused.  That is,
  // the def of this reg (by prev MI) is now dead.
  MachineInstr *DefMI = I;
  MachineOperand *DefOp = NULL;
  for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = DefMI->getOperand(i);
    if (MO.isReg() && MO.isDef()) {
      if (MO.getReg() == Reg)
        DefOp = &MO;
      else if (!MO.isDead())
        HasLiveDef = true;
    }
  }
  if (!DefOp)
    return false;

  bool FoundUse = false, Done = false;
  MachineBasicBlock::iterator E = &NewDef;
  ++I; ++E;
  for (; !Done && I != E; ++I) {
    MachineInstr *NMI = I;
    for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
      MachineOperand &MO = NMI->getOperand(j);
      if (!MO.isReg() || MO.getReg() != Reg)
        continue;
      if (MO.isUse())
        FoundUse = true;
      Done = true;  // Stop after scanning all the operands of this MI.
    }
  }
  if (!FoundUse) {
    // Def is dead!
    DefOp->setIsDead();
    return true;
  }
  return false;
}

/// UpdateKills - Track and update kill info.  If an MI reads a register that
/// is marked kill, then it must be due to register reuse.  Transfer the kill
/// info over.
static void UpdateKills(MachineInstr &MI, BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps,
                        const TargetRegisterInfo* TRI) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
      // That can't be right.  Register is killed but not re-defined and it's
      // being reused.  Let's fix that.
      KillOps[Reg]->setIsKill(false);
      KillOps[Reg] = NULL;
      RegKills.reset(Reg);
      if (!MI.isRegTiedToDefOperand(i))
        // Unless it's a two-address operand, this is the new kill.
        MO.setIsKill();
    }
    if (MO.isKill()) {
      RegKills.set(Reg);
      KillOps[Reg] = &MO;
    }
  }

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    RegKills.reset(Reg);
    KillOps[Reg] = NULL;
    // It also defines (or partially defines) aliases.
    for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS) {
      RegKills.reset(*AS);
      KillOps[*AS] = NULL;
    }
  }
}

/// ReMaterialize - Re-materialize definition for Reg targeting DestReg.
///
static void ReMaterialize(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MII,
                          unsigned DestReg, unsigned Reg,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          VirtRegMap &VRM) {
  TII->reMaterialize(MBB, MII, DestReg, VRM.getReMaterializedMI(Reg));
  MachineInstr *NewMI = prior(MII);
  for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = NewMI->getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg))
      continue;
    assert(MO.isUse());
    unsigned SubIdx = MO.getSubReg();
    unsigned Phys = VRM.getPhys(VirtReg);
    assert(Phys);
    unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
    MO.setReg(RReg);
    MO.setSubReg(0);
  }
  ++NumReMats;
}

/// findSuperReg - Find the register of the given register class whose SubIdx
/// sub-register is SubReg.
static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
                             unsigned SubIdx, const TargetRegisterInfo *TRI) {
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
       I != E; ++I) {
    unsigned Reg = *I;
    if (TRI->getSubReg(Reg, SubIdx) == SubReg)
      return Reg;
  }
  return 0;
}
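
// For example, on X86 a query over a 32-bit register class with SubReg = AX
// and the 16-bit sub-register index returns EAX (an illustrative case; the
// numeric sub-register index values themselves are target-defined).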

// ******************************** //
// Available Spills Implementation  //
// ******************************** //

/// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
/// stackslot register.  The register is still available but is no longer
/// allowed to be modified.
void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    I++;
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " copied, it is available for use but can no longer be modified\n";
  }
}

/// disallowClobberPhysReg - Unset the CanClobber bit of the specified
/// stackslot register and its aliases.  The register and its aliases may
/// still be available, but they are no longer allowed to be modified.
void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    disallowClobberPhysRegOnly(*AS);
  disallowClobberPhysRegOnly(PhysReg);
}

/// ClobberPhysRegOnly - This is called when the specified physreg changes
/// value.  We use this to invalidate any info about stuff we think lives in it.
void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    PhysRegsAvailable.erase(I++);
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " clobbered, invalidating ";
    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DOUT << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 << "\n";
    else
      DOUT << "SS#" << SlotOrReMat << "\n";
  }
}

/// ClobberPhysReg - This is called when the specified physreg changes
/// value.  We use this to invalidate any info about stuff we think lives in
/// it and any of its aliases.
void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    ClobberPhysRegOnly(*AS);
  ClobberPhysRegOnly(PhysReg);
}

/// AddAvailableRegsToLiveIn - Availability information is being kept coming
/// into the specified MBB.  Add available physical registers as potential
/// live-ins.  If they are reused in the MBB, they will be added to the
/// live-in set to keep the register scavenger and post-allocation scheduler
/// happy.
void AvailableSpills::AddAvailableRegsToLiveIn(MachineBasicBlock &MBB,
                                      BitVector &RegKills,
                                      std::vector<MachineOperand*> &KillOps) {
  std::set<unsigned> NotAvailable;
  for (std::multimap<unsigned, int>::iterator
         I = PhysRegsAvailable.begin(), E = PhysRegsAvailable.end();
       I != E; ++I) {
    unsigned Reg = I->first;
    const TargetRegisterClass* RC = TRI->getPhysicalRegisterRegClass(Reg);
    // FIXME: A temporary workaround.  We can't reuse an available value if
    // it's not safe to move the defs of the virtual register's class, e.g.
    // the X86::RFP* register classes.  Do not add it as a live-in.
    if (!TII->isSafeToMoveRegClassDefs(RC))
      // This is no longer available.
      NotAvailable.insert(Reg);
    else {
      MBB.addLiveIn(Reg);
      InvalidateKill(Reg, RegKills, KillOps);
    }

    // Skip over the same register.
    std::multimap<unsigned, int>::iterator NI = next(I);
    while (NI != E && NI->first == Reg) {
      ++I;
      ++NI;
    }
  }

  for (std::set<unsigned>::iterator I = NotAvailable.begin(),
         E = NotAvailable.end(); I != E; ++I) {
    ClobberPhysReg(*I);
    for (const unsigned *SubRegs = TRI->getSubRegisters(*I);
         *SubRegs; ++SubRegs)
      ClobberPhysReg(*SubRegs);
  }
}

/// ModifyStackSlotOrReMat - This method is called when the value in a stack
/// slot changes.  This removes information about which register the previous
/// value for this slot lives in (as the previous value is dead now).
void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
  std::map<int, unsigned>::iterator It =
    SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
  if (It == SpillSlotsOrReMatsAvailable.end()) return;
  unsigned Reg = It->second >> 1;
  SpillSlotsOrReMatsAvailable.erase(It);

  // This register may hold the value of multiple stack slots, only remove this
  // stack slot from the set of values the register contains.
  std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
  for (; ; ++I) {
    assert(I != PhysRegsAvailable.end() && I->first == Reg &&
           "Map inverse broken!");
    if (I->second == SlotOrReMat) break;
  }
  PhysRegsAvailable.erase(I);
}

// ************************** //
// Reuse Info Implementation  //
// ************************** //

/// GetRegForReload - We are about to emit a reload into PhysReg.  If there
/// is some other operand that is using the specified register, either pick
/// a new register to use, or evict the previous reload and use this reg.
unsigned ReuseInfo::GetRegForReload(unsigned PhysReg, MachineInstr *MI,
                                    AvailableSpills &Spills,
                                    std::vector<MachineInstr*> &MaybeDeadStores,
                                    SmallSet<unsigned, 8> &Rejected,
                                    BitVector &RegKills,
                                    std::vector<MachineOperand*> &KillOps,
                                    VirtRegMap &VRM) {
  const TargetInstrInfo* TII = MI->getParent()->getParent()->getTarget()
                                 .getInstrInfo();

  if (Reuses.empty()) return PhysReg;  // This is most often empty.

  for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
    ReusedOp &Op = Reuses[ro];
    // If we find some other reuse that was supposed to use this register
    // exactly for its reload, we can change this reload to use ITS reload
    // register.  That is, unless its reload register has already been
    // considered and subsequently rejected because it has also been reused
    // by another operand.
    if (Op.PhysRegReused == PhysReg &&
        Rejected.count(Op.AssignedPhysReg) == 0) {
      // Yup, use the reload register that we didn't use before.
      unsigned NewReg = Op.AssignedPhysReg;
      Rejected.insert(PhysReg);
      return GetRegForReload(NewReg, MI, Spills, MaybeDeadStores, Rejected,
                             RegKills, KillOps, VRM);
    } else {
      // Otherwise, we might also have a problem if a previously reused
      // value aliases the new register.  If so, codegen the previous reload
      // and use this one.
      unsigned PRRU = Op.PhysRegReused;
      const TargetRegisterInfo *TRI = Spills.getRegInfo();
      if (TRI->areAliases(PRRU, PhysReg)) {
        // Okay, we found out that an alias of a reused register
        // was used.  This isn't good because it means we have
        // to undo a previous reuse.
        MachineBasicBlock *MBB = MI->getParent();
        const TargetRegisterClass *AliasRC =
          MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg);

        // Copy Op out of the vector and remove it, we're going to insert an
        // explicit load for it.
        ReusedOp NewOp = Op;
        Reuses.erase(Reuses.begin()+ro);

        // Ok, we're going to try to reload the assigned physreg into the
        // slot that we were supposed to in the first place.  However, that
        // register could hold a reuse.  Check to see if it conflicts or
        // would prefer us to use a different register.
        unsigned NewPhysReg = GetRegForReload(NewOp.AssignedPhysReg,
                                              MI, Spills, MaybeDeadStores,
                                              Rejected, RegKills, KillOps, VRM);

        MachineBasicBlock::iterator MII = MI;
        if (NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT) {
          ReMaterialize(*MBB, MII, NewPhysReg, NewOp.VirtReg, TII, TRI, VRM);
        } else {
          TII->loadRegFromStackSlot(*MBB, MII, NewPhysReg,
                                    NewOp.StackSlotOrReMat, AliasRC);
          MachineInstr *LoadMI = prior(MII);
          VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI);
          // Any stores to this stack slot are not dead anymore.
          MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
          ++NumLoads;
        }
        Spills.ClobberPhysReg(NewPhysReg);
        Spills.ClobberPhysReg(NewOp.PhysRegReused);

        unsigned SubIdx = MI->getOperand(NewOp.Operand).getSubReg();
        unsigned RReg = SubIdx ? TRI->getSubReg(NewPhysReg, SubIdx)
                               : NewPhysReg;
        MI->getOperand(NewOp.Operand).setReg(RReg);
        MI->getOperand(NewOp.Operand).setSubReg(0);

        Spills.addAvailable(NewOp.StackSlotOrReMat, NewPhysReg);
        --MII;
        UpdateKills(*MII, RegKills, KillOps, TRI);
        DOUT << '\t' << *MII;

        DOUT << "Reuse undone!\n";
        --NumReused;

        // Finally, PhysReg is now available, go ahead and use it.
        return PhysReg;
      }
    }
  }
  return PhysReg;
}

// ************************************************************************ //

/// FoldsStackSlotModRef - Return true if the specified MI folds the specified
/// stack slot mod/ref.  It also checks if it's possible to unfold the
/// instruction by having it define a specified physical register instead.
static bool FoldsStackSlotModRef(MachineInstr &MI, int SS, unsigned PhysReg,
                                 const TargetInstrInfo *TII,
                                 const TargetRegisterInfo *TRI,
                                 VirtRegMap &VRM) {
  if (VRM.hasEmergencySpills(&MI) || VRM.isSpillPt(&MI))
    return false;

  bool Found = false;
  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
  for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) {
    unsigned VirtReg = I->second.first;
    VirtRegMap::ModRef MR = I->second.second;
    if (MR & VirtRegMap::isModRef)
      if (VRM.getStackSlot(VirtReg) == SS) {
        Found = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(), true, true) != 0;
        break;
      }
  }
  if (!Found)
    return false;

  // Does the instruction use a register that overlaps the scratch register?
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg)) {
      if (!VRM.hasPhys(Reg))
        continue;
      Reg = VRM.getPhys(Reg);
    }
    if (TRI->regsOverlap(PhysReg, Reg))
      return false;
  }
  return true;
}

/// FindFreeRegister - Find a free register of a given register class by
/// looking at (at most) the last two machine instructions.
static unsigned FindFreeRegister(MachineBasicBlock::iterator MII,
                                 MachineBasicBlock &MBB,
                                 const TargetRegisterClass *RC,
                                 const TargetRegisterInfo *TRI,
                                 BitVector &AllocatableRegs) {
  BitVector Defs(TRI->getNumRegs());
  BitVector Uses(TRI->getNumRegs());
  SmallVector<unsigned, 4> LocalUses;
  SmallVector<unsigned, 4> Kills;

  // Take a look at 2 instructions at most.
  for (unsigned Count = 0; Count < 2; ++Count) {
    if (MII == MBB.begin())
      break;
    MachineInstr *PrevMI = prior(MII);
    for (unsigned i = 0, e = PrevMI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = PrevMI->getOperand(i);
      if (!MO.isReg() || MO.getReg() == 0)
        continue;
      unsigned Reg = MO.getReg();
      if (MO.isDef()) {
        Defs.set(Reg);
        for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
          Defs.set(*AS);
      } else {
        LocalUses.push_back(Reg);
        if (MO.isKill() && AllocatableRegs[Reg])
          Kills.push_back(Reg);
      }
    }

    for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
      unsigned Kill = Kills[i];
      if (!Defs[Kill] && !Uses[Kill] &&
          TRI->getPhysicalRegisterRegClass(Kill) == RC)
        return Kill;
    }
    for (unsigned i = 0, e = LocalUses.size(); i != e; ++i) {
      unsigned Reg = LocalUses[i];
      Uses.set(Reg);
      for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
        Uses.set(*AS);
    }

    MII = PrevMI;
  }

  return 0;
}
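
// Illustrative scenario (not from the original source): if one of the last two
// instructions kills %eax, %eax is allocatable and of the requested class, and
// nothing in between redefines or otherwise uses it, FindFreeRegister returns
// %eax as a scratch register.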

static
void AssignPhysToVirtReg(MachineInstr *MI, unsigned VirtReg, unsigned PhysReg) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() == VirtReg)
      MO.setReg(PhysReg);
  }
}

namespace {
  struct RefSorter {
    bool operator()(const std::pair<MachineInstr*, int> &A,
                    const std::pair<MachineInstr*, int> &B) {
      return A.second < B.second;
    }
  };
}

// ***************************** //
// Local Spiller Implementation  //
// ***************************** //

class VISIBILITY_HIDDEN LocalRewriter : public VirtRegRewriter {
  MachineRegisterInfo *RegInfo;
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;
  BitVector AllocatableRegs;
  DenseMap<MachineInstr*, unsigned> DistanceMap;
public:

  bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
                            LiveIntervals* LIs) {
    RegInfo = &MF.getRegInfo();
    TRI = MF.getTarget().getRegisterInfo();
    TII = MF.getTarget().getInstrInfo();
    AllocatableRegs = TRI->getAllocatableSet(MF);
    DOUT << "\n**** Local spiller rewriting function '"
         << MF.getFunction()->getName() << "':\n";
    DOUT << "**** Machine Instrs (NOTE! Does not include spills and reloads!)"
            " ****\n";
    DEBUG(MF.dump());

    // Spills - Keep track of which spilled values are available in physregs
    // so that we can choose to reuse the physregs instead of emitting
    // reloads.  This is usually refreshed per basic block.
    AvailableSpills Spills(TRI, TII);

    // Keep track of kill information.
    BitVector RegKills(TRI->getNumRegs());
    std::vector<MachineOperand*> KillOps;
    KillOps.resize(TRI->getNumRegs(), NULL);

    // SinglePredSuccs - Successor blocks which have a single predecessor.
    SmallVector<MachineBasicBlock*, 4> SinglePredSuccs;
    SmallPtrSet<MachineBasicBlock*,16> EarlyVisited;

    // Traverse the basic blocks depth first.
    MachineBasicBlock *Entry = MF.begin();
    SmallPtrSet<MachineBasicBlock*,16> Visited;
    for (df_ext_iterator<MachineBasicBlock*,
           SmallPtrSet<MachineBasicBlock*,16> >
           DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
         DFI != E; ++DFI) {
      MachineBasicBlock *MBB = *DFI;
      if (!EarlyVisited.count(MBB))
        RewriteMBB(*MBB, VRM, LIs, Spills, RegKills, KillOps);

      // If this MBB is the only predecessor of a successor, keep the
      // availability information and visit it next.
      do {
        // Keep visiting single predecessor successors as long as possible.
        SinglePredSuccs.clear();
        findSinglePredSuccessor(MBB, SinglePredSuccs);
        if (SinglePredSuccs.empty())
          MBB = 0;
        else {
          // FIXME: There may be more than one successor, each of which has
          // MBB as its only predecessor.
          MBB = SinglePredSuccs[0];
          if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) {
            Spills.AddAvailableRegsToLiveIn(*MBB, RegKills, KillOps);
            RewriteMBB(*MBB, VRM, LIs, Spills, RegKills, KillOps);
          }
        }
      } while (MBB);

      // Clear the availability info.
      Spills.clear();
    }

    DOUT << "**** Post Machine Instrs ****\n";
    DEBUG(MF.dump());

    // Mark unused spill slots.
    MachineFrameInfo *MFI = MF.getFrameInfo();
    int SS = VRM.getLowSpillSlot();
    if (SS != VirtRegMap::NO_STACK_SLOT)
      for (int e = VRM.getHighSpillSlot(); SS <= e; ++SS)
        if (!VRM.isSpillSlotUsed(SS)) {
          MFI->RemoveStackObject(SS);
          ++NumDSS;
        }

    return true;
  }

private:

  /// OptimizeByUnfold2 - Unfold a series of load / store folding instructions
  /// if a scratch register is available.
  ///     xorq  %r12<kill>, %r13
  ///     addq  %rax, -184(%rbp)
  ///     addq  %r13, -184(%rbp)
  /// ==>
  ///     xorq  %r12<kill>, %r13
  ///     movq  -184(%rbp), %r12
  ///     addq  %rax, %r12
  ///     addq  %r13, %r12
  ///     movq  %r12, -184(%rbp)
  bool OptimizeByUnfold2(unsigned VirtReg, int SS,
                         MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator &MII,
                         std::vector<MachineInstr*> &MaybeDeadStores,
                         AvailableSpills &Spills,
                         BitVector &RegKills,
                         std::vector<MachineOperand*> &KillOps,
                         VirtRegMap &VRM) {

    MachineBasicBlock::iterator NextMII = next(MII);
    if (NextMII == MBB.end())
      return false;

    if (TII->getOpcodeAfterMemoryUnfold(MII->getOpcode(), true, true) == 0)
      return false;

    // Now let's see if the last couple of instructions happen to have freed up
    // a register.
    const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
    unsigned PhysReg = FindFreeRegister(MII, MBB, RC, TRI, AllocatableRegs);
    if (!PhysReg)
      return false;

    MachineFunction &MF = *MBB.getParent();
    TRI = MF.getTarget().getRegisterInfo();
    MachineInstr &MI = *MII;
    if (!FoldsStackSlotModRef(MI, SS, PhysReg, TII, TRI, VRM))
      return false;

    // If the next instruction also folds the same SS modref and can be
    // unfolded,
    // then it's worthwhile to issue a load from SS into the free register and
    // then unfold these instructions.
    if (!FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, VRM))
      return false;

    // Load from SS to the spare physical register.
    TII->loadRegFromStackSlot(MBB, MII, PhysReg, SS, RC);
    // This invalidates PhysReg.
    Spills.ClobberPhysReg(PhysReg);
    // Remember it's available.
    Spills.addAvailable(SS, PhysReg);
    MaybeDeadStores[SS] = NULL;

    // Unfold current MI.
    SmallVector<MachineInstr*, 4> NewMIs;
    if (!TII->unfoldMemoryOperand(MF, &MI, VirtReg, false, false, NewMIs))
      assert(0 && "Unable to unfold the load / store folding instruction!");
    assert(NewMIs.size() == 1);
    AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg);
    VRM.transferRestorePts(&MI, NewMIs[0]);
    MII = MBB.insert(MII, NewMIs[0]);
    InvalidateKills(MI, RegKills, KillOps);
    VRM.RemoveMachineInstrFromMaps(&MI);
    MBB.erase(&MI);
    ++NumModRefUnfold;

    // Unfold next instructions that fold the same SS.
    do {
      MachineInstr &NextMI = *NextMII;
      NextMII = next(NextMII);
      NewMIs.clear();
      if (!TII->unfoldMemoryOperand(MF, &NextMI, VirtReg, false, false, NewMIs))
        assert(0 && "Unable to unfold the load / store folding instruction!");
      assert(NewMIs.size() == 1);
      AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg);
      VRM.transferRestorePts(&NextMI, NewMIs[0]);
      MBB.insert(NextMII, NewMIs[0]);
      InvalidateKills(NextMI, RegKills, KillOps);
      VRM.RemoveMachineInstrFromMaps(&NextMI);
      MBB.erase(&NextMI);
      ++NumModRefUnfold;
    } while (FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, VRM));

    // Store the value back into SS.
    TII->storeRegToStackSlot(MBB, NextMII, PhysReg, true, SS, RC);
    MachineInstr *StoreMI = prior(NextMII);
    VRM.addSpillSlotUse(SS, StoreMI);
    VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);

    return true;
  }

  /// OptimizeByUnfold - Turn a store folding instruction into a load folding
  /// instruction.  e.g.
  ///     xorl  %edi, %eax
  ///     movl  %eax, -32(%ebp)
  ///     movl  -36(%ebp), %eax
  ///     orl   %eax, -32(%ebp)
  /// ==>
  ///     xorl  %edi, %eax
  ///     orl   -36(%ebp), %eax
  ///     mov   %eax, -32(%ebp)
  /// This enables unfolding optimization for a subsequent instruction which
  /// will also eliminate the newly introduced store instruction.
  bool OptimizeByUnfold(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MII,
                        std::vector<MachineInstr*> &MaybeDeadStores,
                        AvailableSpills &Spills,
                        BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps,
                        VirtRegMap &VRM) {
    MachineFunction &MF = *MBB.getParent();
    MachineInstr &MI = *MII;
    unsigned UnfoldedOpc = 0;
    unsigned UnfoldPR = 0;
    unsigned UnfoldVR = 0;
    int FoldedSS = VirtRegMap::NO_STACK_SLOT;
    VirtRegMap::MI2VirtMapTy::const_iterator I, End;
    for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
      // Only transform an MI that folds a single register.
      if (UnfoldedOpc)
        return false;
      UnfoldVR = I->second.first;
      VirtRegMap::ModRef MR = I->second.second;
      // MI2VirtMap can be updated, which invalidates the iterator.
      // Increment the iterator first.
      ++I;
      if (VRM.isAssignedReg(UnfoldVR))
        continue;
      // If this reference is not a use, any previous store is now dead.
      // Otherwise, the store to this stack slot is not dead anymore.
      FoldedSS = VRM.getStackSlot(UnfoldVR);
      MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
      if (DeadStore && (MR & VirtRegMap::isModRef)) {
        unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
        if (!PhysReg || !DeadStore->readsRegister(PhysReg))
          continue;
        UnfoldPR = PhysReg;
        UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
                                                      false, true);
      }
    }

    if (!UnfoldedOpc) {
      if (!UnfoldVR)
        return false;

      // Look for other unfolding opportunities.
      return OptimizeByUnfold2(UnfoldVR, FoldedSS, MBB, MII,
                               MaybeDeadStores, Spills, RegKills, KillOps, VRM);
    }

    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
        continue;
      unsigned VirtReg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
        continue;
      if (VRM.isAssignedReg(VirtReg)) {
        unsigned PhysReg = VRM.getPhys(VirtReg);
        if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
          return false;
      } else if (VRM.isReMaterialized(VirtReg))
        continue;
      int SS = VRM.getStackSlot(VirtReg);
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
      if (PhysReg) {
        if (TRI->regsOverlap(PhysReg, UnfoldPR))
          return false;
        continue;
      }
      if (VRM.hasPhys(VirtReg)) {
        PhysReg = VRM.getPhys(VirtReg);
        if (!TRI->regsOverlap(PhysReg, UnfoldPR))
          continue;
      }

      // Ok, we'll need to reload the value into a register which makes
      // it impossible to perform the store unfolding optimization later.
      // Let's see if it is possible to fold the load if the store is
      // unfolded.  This allows us to perform the store unfolding
      // optimization.
      SmallVector<MachineInstr*, 4> NewMIs;
      if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
        assert(NewMIs.size() == 1);
        MachineInstr *NewMI = NewMIs.back();
        NewMIs.clear();
        int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
        assert(Idx != -1);
        SmallVector<unsigned, 1> Ops;
        Ops.push_back(Idx);
        MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
        if (FoldedMI) {
          VRM.addSpillSlotUse(SS, FoldedMI);
          if (!VRM.hasPhys(UnfoldVR))
            VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
          VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
          MII = MBB.insert(MII, FoldedMI);
          InvalidateKills(MI, RegKills, KillOps);
          VRM.RemoveMachineInstrFromMaps(&MI);
          MBB.erase(&MI);
          MF.DeleteMachineInstr(NewMI);
          return true;
        }
        MF.DeleteMachineInstr(NewMI);
      }
    }

    return false;
  }

  /// CommuteToFoldReload -
  /// Look for
  ///   r1 = load fi#1
  ///   r1 = op r1, r2<kill>
  ///   store r1, fi#1
  ///
  /// If op is commutable and r2 is killed, then we can xform these to
  ///   r2 = op r2, fi#1
  ///   store r2, fi#1
  bool CommuteToFoldReload(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator &MII,
                           unsigned VirtReg, unsigned SrcReg, int SS,
                           AvailableSpills &Spills,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           const TargetRegisterInfo *TRI,
                           VirtRegMap &VRM) {
    if (MII == MBB.begin() || !MII->killsRegister(SrcReg))
      return false;

    MachineFunction &MF = *MBB.getParent();
    MachineInstr &MI = *MII;
    MachineBasicBlock::iterator DefMII = prior(MII);
    MachineInstr *DefMI = DefMII;
    const TargetInstrDesc &TID = DefMI->getDesc();
    unsigned NewDstIdx;
    if (DefMII != MBB.begin() &&
        TID.isCommutable() &&
        TII->CommuteChangesDestination(DefMI, NewDstIdx)) {
      MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
      unsigned NewReg = NewDstMO.getReg();
      if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
        return false;
      MachineInstr *ReloadMI = prior(DefMII);
      int FrameIdx;
      unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
      if (DestReg != SrcReg || FrameIdx != SS)
        return false;
      int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
      if (UseIdx == -1)
        return false;
      unsigned DefIdx;
      if (!MI.isRegTiedToDefOperand(UseIdx, &DefIdx))
        return false;
      assert(DefMI->getOperand(DefIdx).isReg() &&
             DefMI->getOperand(DefIdx).getReg() == SrcReg);

      // Now commute def instruction.
      MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
      if (!CommutedMI)
        return false;
      SmallVector<unsigned, 1> Ops;
      Ops.push_back(NewDstIdx);
      MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS);
      // Not needed since foldMemoryOperand returns a new MI.
      MF.DeleteMachineInstr(CommutedMI);
      if (!FoldedMI)
        return false;

      VRM.addSpillSlotUse(SS, FoldedMI);
      VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
      // Insert new def MI and spill MI.
      const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
      TII->storeRegToStackSlot(MBB, &MI, NewReg, true, SS, RC);
      MII = prior(MII);
      MachineInstr *StoreMI = MII;
      VRM.addSpillSlotUse(SS, StoreMI);
      VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
      MII = MBB.insert(MII, FoldedMI);  // Update MII to backtrack.

      // Delete all 3 old instructions.
      InvalidateKills(*ReloadMI, RegKills, KillOps);
      VRM.RemoveMachineInstrFromMaps(ReloadMI);
      MBB.erase(ReloadMI);
      InvalidateKills(*DefMI, RegKills, KillOps);
      VRM.RemoveMachineInstrFromMaps(DefMI);
      MBB.erase(DefMI);
      InvalidateKills(MI, RegKills, KillOps);
      VRM.RemoveMachineInstrFromMaps(&MI);
      MBB.erase(&MI);

      // If NewReg was previously holding value of some SS, it's now clobbered.
      // This has to be done now because it's a physical register.  When this
      // instruction is re-visited, it's ignored.
      Spills.ClobberPhysReg(NewReg);

      ++NumCommutes;
      return true;
    }

    return false;
  }

  /// SpillRegToStackSlot - Spill a register to a specified stack slot.  Check
  /// if the last store to the same slot is now dead.  If so, remove the last
  /// store.
  void SpillRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator &MII,
                           int Idx, unsigned PhysReg, int StackSlot,
                           const TargetRegisterClass *RC,
                           bool isAvailable, MachineInstr *&LastStore,
                           AvailableSpills &Spills,
                           SmallSet<MachineInstr*, 4> &ReMatDefs,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM) {

    TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC);
    MachineInstr *StoreMI = next(MII);
    VRM.addSpillSlotUse(StackSlot, StoreMI);
    DOUT << "Store:\t" << *StoreMI;

    // If there is a dead store to this stack slot, nuke it now.
    if (LastStore) {
      DOUT << "Removed dead store:\t" << *LastStore;
      ++NumDSE;
      SmallVector<unsigned, 2> KillRegs;
      InvalidateKills(*LastStore, RegKills, KillOps, &KillRegs);
      MachineBasicBlock::iterator PrevMII = LastStore;
      bool CheckDef = PrevMII != MBB.begin();
      if (CheckDef)
        --PrevMII;
      VRM.RemoveMachineInstrFromMaps(LastStore);
      MBB.erase(LastStore);
      if (CheckDef) {
        // Look at defs of killed registers on the store.  Mark the defs
        // as dead since the store has been deleted and they aren't
        // being reused.
        for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
          bool HasOtherDef = false;
          if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef)) {
            MachineInstr *DeadDef = PrevMII;
            if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
              // FIXME: This assumes a remat def does not have side
              // effects.
              VRM.RemoveMachineInstrFromMaps(DeadDef);
              MBB.erase(DeadDef);
              ++NumDRM;
            }
          }
        }
      }
    }

    LastStore = next(MII);

    // If the stack slot value was previously available in some other
    // register, change it now.  Otherwise, make the register available
    // in PhysReg.
    Spills.ModifyStackSlotOrReMat(StackSlot);
    Spills.ClobberPhysReg(PhysReg);
    Spills.addAvailable(StackSlot, PhysReg, isAvailable);
    ++NumStores;
  }

  /// TransferDeadness - An identity copy definition is dead and it's being
  /// removed.  Find the last def or use and mark it as dead / kill.
  void TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
                        unsigned Reg, BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps,
                        VirtRegMap &VRM) {
    SmallPtrSet<MachineInstr*, 4> Seens;
    SmallVector<std::pair<MachineInstr*, int>,8> Refs;
    for (MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(Reg),
           RE = RegInfo->reg_end(); RI != RE; ++RI) {
      MachineInstr *UDMI = &*RI;
      if (UDMI->getParent() != MBB)
        continue;
      DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
      if (DI == DistanceMap.end() || DI->second > CurDist)
        continue;
      if (Seens.insert(UDMI))
        Refs.push_back(std::make_pair(UDMI, DI->second));
    }

    if (Refs.empty())
      return;
    std::sort(Refs.begin(), Refs.end(), RefSorter());

    while (!Refs.empty()) {
      MachineInstr *LastUDMI = Refs.back().first;
      Refs.pop_back();

      MachineOperand *LastUD = NULL;
      for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = LastUDMI->getOperand(i);
        if (!MO.isReg() || MO.getReg() != Reg)
          continue;
        if (!LastUD || (LastUD->isUse() && MO.isDef()))
          LastUD = &MO;
        if (LastUDMI->isRegTiedToDefOperand(i))
          break;
      }
      if (LastUD->isDef()) {
        // If the instruction has no side effect, delete it and propagate
        // backward further.  Otherwise, mark it dead and we are done.
        const TargetInstrDesc &TID = LastUDMI->getDesc();
        if (TID.mayStore() || TID.isCall() || TID.isTerminator() ||
            TID.hasUnmodeledSideEffects()) {
          LastUD->setIsDead();
          break;
        }
        VRM.RemoveMachineInstrFromMaps(LastUDMI);
        MBB->erase(LastUDMI);
      } else {
        LastUD->setIsKill();
        RegKills.set(Reg);
        KillOps[Reg] = LastUD;
        break;
      }
    }
  }

  /// RewriteMBB - Keep track of which spills are available even after the
  /// register allocator is done with them.  If possible, avoid reloading vregs.
  void RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM,
                  LiveIntervals *LIs,
                  AvailableSpills &Spills, BitVector &RegKills,
                  std::vector<MachineOperand*> &KillOps) {

    DOUT << "\n**** Local spiller rewriting MBB '"
         << MBB.getBasicBlock()->getName() << "':\n";

    MachineFunction &MF = *MBB.getParent();

    // MaybeDeadStores - When we need to write a value back into a stack slot,
    // keep track of the inserted store.  If the stack slot value is never read
    // (because the value was used from some available register, for example),
    // and subsequently stored to, the original store is dead.  This map keeps
    // track of inserted stores that are not used.  If we see a subsequent
    // store to the same stack slot, the original store is deleted.
    std::vector<MachineInstr*> MaybeDeadStores;
    MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);

    // ReMatDefs - These are rematerializable def MIs which are not deleted.
    SmallSet<MachineInstr*, 4> ReMatDefs;

    // Clear kill info.
    SmallSet<unsigned, 2> KilledMIRegs;
    RegKills.reset();
    KillOps.clear();
    KillOps.resize(TRI->getNumRegs(), NULL);

    unsigned Dist = 0;
    DistanceMap.clear();
    for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
         MII != E; ) {
      MachineBasicBlock::iterator NextMII = next(MII);

      VirtRegMap::MI2VirtMapTy::const_iterator I, End;
      bool Erased = false;
      bool BackTracked = false;
      if (OptimizeByUnfold(MBB, MII,
                           MaybeDeadStores, Spills, RegKills, KillOps, VRM))
        NextMII = next(MII);

      MachineInstr &MI = *MII;

      if (VRM.hasEmergencySpills(&MI)) {
        // Spill physical register(s) in the rare case the allocator has run
        // out of registers to allocate.
        SmallSet<int, 4> UsedSS;
        std::vector<unsigned> &EmSpills = VRM.getEmergencySpills(&MI);
        for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
          unsigned PhysReg = EmSpills[i];
          const TargetRegisterClass *RC =
            TRI->getPhysicalRegisterRegClass(PhysReg);
          assert(RC && "Unable to determine register class!");
          int SS = VRM.getEmergencySpillSlot(RC);
          if (UsedSS.count(SS))
            assert(0 && "Need to spill more than one physical register!");
          UsedSS.insert(SS);
          TII->storeRegToStackSlot(MBB, MII, PhysReg, true, SS, RC);
          MachineInstr *StoreMI = prior(MII);
          VRM.addSpillSlotUse(SS, StoreMI);
          TII->loadRegFromStackSlot(MBB, next(MII), PhysReg, SS, RC);
          MachineInstr *LoadMI = next(MII);
          VRM.addSpillSlotUse(SS, LoadMI);
          ++NumPSpills;
        }
        NextMII = next(MII);
      }

      // Insert restores here if asked to.
      if (VRM.isRestorePt(&MI)) {
        std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI);
        for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
          unsigned VirtReg = RestoreRegs[e-i-1];  // Reverse order.
          if (!VRM.getPreSplitReg(VirtReg))
            continue; // Split interval spilled again.
          unsigned Phys = VRM.getPhys(VirtReg);
          RegInfo->setPhysRegUsed(Phys);

          // Check if the value being restored is available.  If so, it must be
          // from a predecessor BB that falls through into this BB.  We do not
          // expect:
          // BB1:
          // r1 = load fi#1
          // ...
          //    = r1<kill>
          // ... # r1 not clobbered
          // ...
          //    = load fi#1
          bool DoReMat = VRM.isReMaterialized(VirtReg);
          int SSorRMId = DoReMat
            ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
          const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
          unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
          if (InReg == Phys) {
            // If the value is already available in the expected register, save
            // a reload / remat.
            if (SSorRMId)
              DOUT << "Reusing RM#" << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1;
            else
              DOUT << "Reusing SS#" << SSorRMId;
            DOUT << " from physreg "
                 << TRI->getName(InReg) << " for vreg"
                 << VirtReg << " instead of reloading into physreg "
                 << TRI->getName(Phys) << "\n";
            ++NumOmitted;
            continue;
          } else if (InReg && InReg != Phys) {
            if (SSorRMId)
              DOUT << "Reusing RM#" << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1;
            else
              DOUT << "Reusing SS#" << SSorRMId;
            DOUT << " from physreg "
                 << TRI->getName(InReg) << " for vreg"
                 << VirtReg << " by copying it into physreg "
                 << TRI->getName(Phys) << "\n";

            // If the reloaded / remat value is available in another register,
            // copy it to the desired register.
            TII->copyRegToReg(MBB, &MI, Phys, InReg, RC, RC);

            // This invalidates Phys.
            Spills.ClobberPhysReg(Phys);
            // Remember it's available.
            Spills.addAvailable(SSorRMId, Phys);

            // Mark it killed.
1506 MachineInstr *CopyMI = prior(MII);
1507 MachineOperand *KillOpnd = CopyMI->findRegisterUseOperand(InReg);
1508 KillOpnd->setIsKill();
1509 UpdateKills(*CopyMI, RegKills, KillOps, TRI);
1510
1511 DOUT << '\t' << *CopyMI;
1512 ++NumCopified;
1513 continue;
1514 }

          if (VRM.isReMaterialized(VirtReg)) {
            ReMaterialize(MBB, MII, Phys, VirtReg, TII, TRI, VRM);
          } else {
            const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
            TII->loadRegFromStackSlot(MBB, &MI, Phys, SSorRMId, RC);
            MachineInstr *LoadMI = prior(MII);
            VRM.addSpillSlotUse(SSorRMId, LoadMI);
            ++NumLoads;
          }

          // This invalidates Phys.
          Spills.ClobberPhysReg(Phys);
          // Remember it's available.
          Spills.addAvailable(SSorRMId, Phys);

          UpdateKills(*prior(MII), RegKills, KillOps, TRI);
          DOUT << '\t' << *prior(MII);
        }
      }

      // Insert spills here if asked to.
      if (VRM.isSpillPt(&MI)) {
        std::vector<std::pair<unsigned,bool> > &SpillRegs =
          VRM.getSpillPtSpills(&MI);
        for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
          unsigned VirtReg = SpillRegs[i].first;
          bool isKill = SpillRegs[i].second;
          if (!VRM.getPreSplitReg(VirtReg))
            continue; // Split interval spilled again.
          const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
          unsigned Phys = VRM.getPhys(VirtReg);
          int StackSlot = VRM.getStackSlot(VirtReg);
          TII->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC);
          MachineInstr *StoreMI = next(MII);
          VRM.addSpillSlotUse(StackSlot, StoreMI);
          DOUT << "Store:\t" << *StoreMI;
          VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
        }
        NextMII = next(MII);
      }

      /// ReusedOperands - Keep track of operand reuse in case we need to undo
      /// reuse.
      ReuseInfo ReusedOperands(MI, TRI);
      SmallVector<unsigned, 4> VirtUseOps;
      for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI.getOperand(i);
        if (!MO.isReg() || MO.getReg() == 0)
          continue;   // Ignore non-register operands.

        unsigned VirtReg = MO.getReg();
        if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
          // Ignore physregs for spilling, but remember that it is used by
          // this function.
          RegInfo->setPhysRegUsed(VirtReg);
          continue;
        }

        // We want to process implicit virtual register uses first.
        if (MO.isImplicit())
          // If the virtual register is implicitly defined, emit an
          // implicit_def before it so the scavenger knows it's "defined".
          VirtUseOps.insert(VirtUseOps.begin(), i);
        else
          VirtUseOps.push_back(i);
      }

      // Process all of the spilled uses and all non-spilled reg references.
      SmallVector<int, 2> PotentialDeadStoreSlots;
      KilledMIRegs.clear();
      for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
        unsigned i = VirtUseOps[j];
        MachineOperand &MO = MI.getOperand(i);
        unsigned VirtReg = MO.getReg();
        assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
               "Not a virtual register?");

        unsigned SubIdx = MO.getSubReg();
        if (VRM.isAssignedReg(VirtReg)) {
          // This virtual register was assigned a physreg!
          unsigned Phys = VRM.getPhys(VirtReg);
          RegInfo->setPhysRegUsed(Phys);
          if (MO.isDef())
            ReusedOperands.markClobbered(Phys);
          unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
          MI.getOperand(i).setReg(RReg);
          MI.getOperand(i).setSubReg(0);
          if (VRM.isImplicitlyDefined(VirtReg))
            BuildMI(MBB, &MI, MI.getDebugLoc(),
                    TII->get(TargetInstrInfo::IMPLICIT_DEF), RReg);
          continue;
        }

        // This virtual register is now known to be a spilled value.
        if (!MO.isUse())
          continue;  // Handle defs in the loop below (handle use&def here though)

        bool AvoidReload = false;
        if (LIs->hasInterval(VirtReg)) {
          LiveInterval &LI = LIs->getInterval(VirtReg);
          if (!LI.liveAt(LIs->getUseIndex(LI.beginNumber())))
            // Must be defined by an implicit def. It should not be spilled.
            // Note, this is for correctness reasons. e.g.
            // 8   %reg1024<def> = IMPLICIT_DEF
            // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
            // The live range [12, 14) is not part of the r1024 live interval
            // since it's defined by an implicit def. It will not conflict
            // with the live interval of r1025. Now suppose both registers
            // are spilled: you can easily see a situation where both
            // registers are reloaded before the INSERT_SUBREG into target
            // registers that overlap.
            AvoidReload = true;
        }

        bool DoReMat = VRM.isReMaterialized(VirtReg);
        int SSorRMId = DoReMat
          ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
        int ReuseSlot = SSorRMId;

        // Check to see if this stack slot is available.
        unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);

        // If this is a sub-register use, make sure the reuse register is in
        // the right register class. For example, for x86 not all of the
        // 32-bit registers have accessible sub-registers.
        // Similarly so for EXTRACT_SUBREG. Consider this:
        //   EDI = op
        //   MOV32_mr fi#1, EDI
        //   ...
        //       = EXTRACT_SUBREG fi#1
        // fi#1 is available in EDI, but it cannot be reused because it's not
        // in the right register file.
        if (PhysReg && !AvoidReload &&
            (SubIdx || MI.getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)) {
          const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
          if (!RC->contains(PhysReg))
            PhysReg = 0;
        }

        if (PhysReg && !AvoidReload) {
          // This spilled operand might be part of a two-address operand. If
          // this is the case, then changing it will necessarily require
          // changing the def part of the instruction as well. However, in
          // some cases, we aren't allowed to modify the reused register. If
          // none of these cases apply, reuse it.
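          //
          // E.g. (a hypothetical two-address x86 instruction):
          //   ADD32rr EAX<def&use>, ECX
          // If the reloaded value happens to already live in EAX, reusing
          // EAX is only safe when that value may be clobbered, because the
          // ADD overwrites its tied operand in place.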
          bool CanReuse = true;
          bool isTied = MI.isRegTiedToDefOperand(i);
          if (isTied) {
            // Okay, we have a two address operand. We can reuse this physreg
            // as long as we are allowed to clobber the value and there isn't
            // an earlier def that has already clobbered the physreg.
            CanReuse = !ReusedOperands.isClobbered(PhysReg) &&
                       Spills.canClobberPhysReg(PhysReg);
          }

          if (CanReuse) {
            // If this stack slot value is already available, reuse it!
            if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
              DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
            else
              DOUT << "Reusing SS#" << ReuseSlot;
            DOUT << " from physreg "
                 << TRI->getName(PhysReg) << " for vreg"
                 << VirtReg << " instead of reloading into physreg "
                 << TRI->getName(VRM.getPhys(VirtReg)) << "\n";
            unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
            MI.getOperand(i).setReg(RReg);
            MI.getOperand(i).setSubReg(0);

            // The only technical detail we have is that we don't know that
            // PhysReg won't be clobbered by a reloaded stack slot that occurs
            // later in the instruction. In particular, consider 'op V1, V2'.
            // If V1 is available in physreg R0, we would choose to reuse it
            // here, instead of reloading it into the register the allocator
            // indicated (say R1). However, V2 might have to be reloaded
            // later, and it might indicate that it needs to live in R0. When
            // this occurs, we need to have information available that
            // indicates it is safe to use R1 for the reload instead of R0.
            //
            // To further complicate matters, we might conflict with an alias,
            // or R0 and R1 might not be compatible with each other. In this
            // case, we actually insert a reload for V1 in R1, ensuring that
            // we can get at R0 or its alias.
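            //
            // A hypothetical instance of the conflict:
            //   op V1, V2    ; V1 is available in R0, but the allocator
            //                ; assigned R0 to V2's reload
            // Reusing R0 for V1 would be clobbered by V2's reload, so the
            // reuse is undone and V1 is reloaded into its assigned register
            // R1 via GetRegForReload below.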
            ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
                                    VRM.getPhys(VirtReg), VirtReg);
            if (isTied)
              // Only mark it clobbered if this is a use&def operand.
              ReusedOperands.markClobbered(PhysReg);
            ++NumReused;

            if (MI.getOperand(i).isKill() &&
                ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {

              // The store of this spilled value is potentially dead, but we
              // won't know for certain until we've confirmed that the re-use
              // above is valid, which means waiting until the other operands
              // are processed. For now we just track the spill slot; we'll
              // remove it after the other operands are processed if valid.

              PotentialDeadStoreSlots.push_back(ReuseSlot);
            }

            // Mark it isKill if there are no other uses of the same virtual
            // register and it's not a two-address operand. IsKill will be
            // unset if reg is reused.
            if (!isTied && KilledMIRegs.count(VirtReg) == 0) {
              MI.getOperand(i).setIsKill();
              KilledMIRegs.insert(VirtReg);
            }

            continue;
          }  // CanReuse

          // Otherwise we have a situation where we have a two-address
          // instruction whose mod/ref operand needs to be reloaded. This
          // reload is already available in some register "PhysReg", but if we
          // used PhysReg as the operand to our 2-addr instruction, the
          // instruction would modify PhysReg. This isn't cool if something
          // later uses PhysReg and expects to get its initial value.
          //
          // To avoid this problem, and to avoid doing a load right after a
          // store, we emit a copy from PhysReg into the designated register
          // for this operand.
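          //
          // A sketch of the emitted code (hypothetical registers):
          //   MOV32rr ECX, EAX            ; copy PhysReg (EAX) into the
          //                               ; designated register (ECX)
          //   ADD32rr ECX<def&use>, ...   ; MI clobbers ECX; the value in
          //                               ; EAX survives for later uses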
          unsigned DesignatedReg = VRM.getPhys(VirtReg);
          assert(DesignatedReg && "Must map virtreg to physreg!");

          // Note that, if we reused a register for a previous operand, the
          // register we want to reload into might not actually be
          // available. If this occurs, use the register indicated by the
          // reuser.
          if (ReusedOperands.hasReuses())
            DesignatedReg = ReusedOperands.GetRegForReload(DesignatedReg, &MI,
                              Spills, MaybeDeadStores, RegKills, KillOps, VRM);

          // If the mapped designated register is actually the physreg we
          // have incoming, we don't need to insert a dead copy.
          if (DesignatedReg == PhysReg) {
            // If this stack slot value is already available, reuse it!
            if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
              DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
            else
              DOUT << "Reusing SS#" << ReuseSlot;
            DOUT << " from physreg " << TRI->getName(PhysReg)
                 << " for vreg" << VirtReg
                 << " instead of reloading into same physreg.\n";
            unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
            MI.getOperand(i).setReg(RReg);
            MI.getOperand(i).setSubReg(0);
            ReusedOperands.markClobbered(RReg);
            ++NumReused;
            continue;
          }

          const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
          RegInfo->setPhysRegUsed(DesignatedReg);
          ReusedOperands.markClobbered(DesignatedReg);
          TII->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC, RC);

          MachineInstr *CopyMI = prior(MII);
          UpdateKills(*CopyMI, RegKills, KillOps, TRI);

          // This invalidates DesignatedReg.
          Spills.ClobberPhysReg(DesignatedReg);

          Spills.addAvailable(ReuseSlot, DesignatedReg);
          unsigned RReg =
            SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
          MI.getOperand(i).setReg(RReg);
          MI.getOperand(i).setSubReg(0);
          DOUT << '\t' << *prior(MII);
          ++NumReused;
          continue;
        }  // if (PhysReg)

        // Otherwise, reload it and remember that we have it.
        PhysReg = VRM.getPhys(VirtReg);
        assert(PhysReg && "Must map virtreg to physreg!");

        // Note that, if we reused a register for a previous operand, the
        // register we want to reload into might not actually be
        // available. If this occurs, use the register indicated by the
        // reuser.
        if (ReusedOperands.hasReuses())
          PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
                      Spills, MaybeDeadStores, RegKills, KillOps, VRM);

        RegInfo->setPhysRegUsed(PhysReg);
        ReusedOperands.markClobbered(PhysReg);
        if (AvoidReload)
          ++NumAvoided;
        else {
          if (DoReMat) {
            ReMaterialize(MBB, MII, PhysReg, VirtReg, TII, TRI, VRM);
          } else {
            const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
            TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
            MachineInstr *LoadMI = prior(MII);
            VRM.addSpillSlotUse(SSorRMId, LoadMI);
            ++NumLoads;
          }
          // This invalidates PhysReg.
          Spills.ClobberPhysReg(PhysReg);

          // Any stores to this stack slot are not dead anymore.
          if (!DoReMat)
            MaybeDeadStores[SSorRMId] = NULL;
          Spills.addAvailable(SSorRMId, PhysReg);
          // Assumes this is the last use. IsKill will be unset if reg is
          // reused unless it's a two-address operand.
          if (!MI.isRegTiedToDefOperand(i) &&
              KilledMIRegs.count(VirtReg) == 0) {
            MI.getOperand(i).setIsKill();
            KilledMIRegs.insert(VirtReg);
          }

          UpdateKills(*prior(MII), RegKills, KillOps, TRI);
          DOUT << '\t' << *prior(MII);
        }
        unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
        MI.getOperand(i).setReg(RReg);
        MI.getOperand(i).setSubReg(0);
      }

      // Ok - now we can remove stores that have been confirmed dead.
      for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
        // This was the last use and the spilled value is still available
        // for reuse. That means the spill was unnecessary!
        int PDSSlot = PotentialDeadStoreSlots[j];
        MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
        if (DeadStore) {
          DOUT << "Removed dead store:\t" << *DeadStore;
          InvalidateKills(*DeadStore, RegKills, KillOps);
          VRM.RemoveMachineInstrFromMaps(DeadStore);
          MBB.erase(DeadStore);
          MaybeDeadStores[PDSSlot] = NULL;
          ++NumDSE;
        }
      }
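
      // E.g. (hypothetical): if the last use of a spilled value was satisfied
      // directly out of EDX by the reuse logic above, the store that fed the
      // slot is dead and has just been erased:
      //   MOV32mr <fi#1>, EDX   ; removed: fi#1 is never read again
      //   ...
      //   USE EDX               ; the use was rewritten to reuse EDX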

      DOUT << '\t' << MI;

      // If we have folded references to memory operands, make sure we clear
      // all physical registers that may contain the value of the spilled
      // virtual register.
      SmallSet<int, 2> FoldedSS;
      for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
        unsigned VirtReg = I->second.first;
        VirtRegMap::ModRef MR = I->second.second;
        DOUT << "Folded vreg: " << VirtReg << " MR: " << MR;

        // MI2VirtMap can be updated, which invalidates the iterator, so
        // increment the iterator first.
        ++I;
        int SS = VRM.getStackSlot(VirtReg);
        if (SS == VirtRegMap::NO_STACK_SLOT)
          continue;
        FoldedSS.insert(SS);
        DOUT << " - StackSlot: " << SS << "\n";

        // If this folded instruction is just a use, check to see if it's a
        // straight load from the virt reg slot.
        if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
          int FrameIdx;
          unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
          if (DestReg && FrameIdx == SS) {
            // If this spill slot is available, turn it into a copy (or
            // nothing) instead of leaving it as a load!
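            //
            // E.g. (hypothetical): the folded load
            //   EAX = MOV32rm <fi#1>
            // becomes, when fi#1 is already available in EBX,
            //   MOV32rr EAX, EBX
            // and disappears entirely when EBX happens to be EAX itself.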
            if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
              DOUT << "Promoted Load To Copy: " << MI;
              if (DestReg != InReg) {
                const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
                TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
                MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
                unsigned SubIdx = DefMO->getSubReg();
                // Revisit the copy so we make sure to notice the effects of
                // the operation on the destreg (either needing to RA it if
                // it's virtual or needing to clobber any values if it's
                // physical).
                NextMII = &MI;
                --NextMII;  // backtrack to the copy.
                // Propagate the sub-register index over.
                if (SubIdx) {
                  DefMO = NextMII->findRegisterDefOperand(DestReg);
                  DefMO->setSubReg(SubIdx);
                }

                // Mark it killed.
                MachineOperand *KillOpnd =
                  NextMII->findRegisterUseOperand(InReg);
                KillOpnd->setIsKill();

                BackTracked = true;
              } else {
                DOUT << "Removing now-noop copy: " << MI;
                // Unset last kill since it's being reused.
                InvalidateKill(InReg, RegKills, KillOps);
                Spills.disallowClobberPhysReg(InReg);
              }

              InvalidateKills(MI, RegKills, KillOps);
              VRM.RemoveMachineInstrFromMaps(&MI);
              MBB.erase(&MI);
              Erased = true;
              goto ProcessNextInst;
            }
          } else {
            unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
            SmallVector<MachineInstr*, 4> NewMIs;
            if (PhysReg &&
                TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false,
                                         NewMIs)) {
              MBB.insert(MII, NewMIs[0]);
              InvalidateKills(MI, RegKills, KillOps);
              VRM.RemoveMachineInstrFromMaps(&MI);
              MBB.erase(&MI);
              Erased = true;
              --NextMII;  // backtrack to the unfolded instruction.
              BackTracked = true;
              goto ProcessNextInst;
            }
          }
        }

        // If this reference is not a use, any previous store is now dead.
        // Otherwise, the store to this stack slot is not dead anymore.
        MachineInstr* DeadStore = MaybeDeadStores[SS];
        if (DeadStore) {
          bool isDead = !(MR & VirtRegMap::isRef);
          MachineInstr *NewStore = NULL;
          if (MR & VirtRegMap::isModRef) {
            unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
            SmallVector<MachineInstr*, 4> NewMIs;
            // We can reuse this physreg as long as we are allowed to clobber
            // the value and there isn't an earlier def that has already
            // clobbered the physreg.
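            //
            // A sketch of the unfold this enables (hypothetical x86 modref
            // instruction):
            //   ADD32mr <fi#1>, EAX       ; folded load-op-store
            // becomes
            //   ADD32rr EBX<def&use>, EAX ; NewMIs[0], operating on the copy
            //                             ; of fi#1 already live in EBX
            //   MOV32mr <fi#1>, EBX       ; NewStore == NewMIs[1]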
            if (PhysReg &&
                !ReusedOperands.isClobbered(PhysReg) &&
                Spills.canClobberPhysReg(PhysReg) &&
                !TII->isStoreToStackSlot(&MI, SS)) {  // Not profitable!
              MachineOperand *KillOpnd =
                DeadStore->findRegisterUseOperand(PhysReg, true);
              // Note, if the store is storing a sub-register, it's possible
              // the super-register is needed below.
              if (KillOpnd && !KillOpnd->getSubReg() &&
                  TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true,
                                           NewMIs)) {
                MBB.insert(MII, NewMIs[0]);
                NewStore = NewMIs[1];
                MBB.insert(MII, NewStore);
                VRM.addSpillSlotUse(SS, NewStore);
                InvalidateKills(MI, RegKills, KillOps);
                VRM.RemoveMachineInstrFromMaps(&MI);
                MBB.erase(&MI);
                Erased = true;
                --NextMII;
                --NextMII;  // backtrack to the unfolded instruction.
                BackTracked = true;
                isDead = true;
                ++NumSUnfold;
              }
            }
          }

          if (isDead) {
            // If we get here, the previous store is dead; nuke it now.
            DOUT << "Removed dead store:\t" << *DeadStore;
            InvalidateKills(*DeadStore, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(DeadStore);
            MBB.erase(DeadStore);
            if (!NewStore)
              ++NumDSE;
          }

          MaybeDeadStores[SS] = NULL;
          if (NewStore) {
            // Treat this store as a spill merged into a copy. That makes the
            // stack slot value available.
            VRM.virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
            goto ProcessNextInst;
          }
        }

        // If the spill slot value is available, and this is a new definition
        // of the value, the value is not available anymore.
        if (MR & VirtRegMap::isMod) {
          // Notice that the value in this stack slot has been modified.
          Spills.ModifyStackSlotOrReMat(SS);

          // If this is *just* a mod of the value, check to see if this is
          // just a store to the spill slot (i.e. the spill got merged into
          // the copy). If so, realize that the vreg is available now, and add
          // the store to the MaybeDeadStore info.
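          //
          // E.g. (hypothetical): when MI is the pure spill
          //   MOV32mr <fi#2>, EAX
          // fi#2 becomes known to be available in EAX, and the store is
          // remembered as potentially dead in case fi#2 is overwritten
          // before it is ever read.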
          int StackSlot;
          if (!(MR & VirtRegMap::isRef)) {
            if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
              assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
                     "Src hasn't been allocated yet?");

              if (CommuteToFoldReload(MBB, MII, VirtReg, SrcReg, StackSlot,
                                      Spills, RegKills, KillOps, TRI, VRM)) {
                NextMII = next(MII);
                BackTracked = true;
                goto ProcessNextInst;
              }

              // Okay, this is certainly a store of SrcReg to [StackSlot].
              // Mark this as a potentially dead store in case there is a
              // subsequent store into the stack slot without a read from it.
              MaybeDeadStores[StackSlot] = &MI;

              // If the stack slot value was previously available in some
              // other register, change it now. Otherwise, make the value
              // available in SrcReg.
              Spills.addAvailable(StackSlot, SrcReg, MI.killsRegister(SrcReg));
            }
          }
        }
      }

      // Process all of the spilled defs.
      for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI.getOperand(i);
        if (!(MO.isReg() && MO.getReg() && MO.isDef()))
          continue;

        unsigned VirtReg = MO.getReg();
        if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
          // Check to see if this is a noop copy. If so, eliminate the
          // instruction before considering the dest reg to be changed.
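          //
          // E.g. after operand rewriting, a copy may have become (with
          // hypothetical registers)
          //   MOV32rr EAX, EAX    ; source == dest
          // which computes nothing and can simply be erased.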
          unsigned Src, Dst, SrcSR, DstSR;
          if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
            ++NumDCE;
            DOUT << "Removing now-noop copy: " << MI;
            SmallVector<unsigned, 2> KillRegs;
            InvalidateKills(MI, RegKills, KillOps, &KillRegs);
            if (MO.isDead() && !KillRegs.empty()) {
              // Source register or an implicit super/sub-register use is
              // killed.
              assert(KillRegs[0] == Dst ||
                     TRI->isSubRegister(KillRegs[0], Dst) ||
                     TRI->isSuperRegister(KillRegs[0], Dst));
              // Last def is now dead.
              TransferDeadness(&MBB, Dist, Src, RegKills, KillOps, VRM);
            }
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            Spills.disallowClobberPhysReg(VirtReg);
            goto ProcessNextInst;
          }

          // If it's not a no-op copy, it clobbers the value in the destreg.
          Spills.ClobberPhysReg(VirtReg);
          ReusedOperands.markClobbered(VirtReg);

          // Check to see if this instruction is a load from a stack slot
          // into a register. If so, this provides the stack slot value in
          // the reg.
          int FrameIdx;
          if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
            assert(DestReg == VirtReg && "Unknown load situation!");

            // If it is a folded reference, then it's not safe to clobber.
            bool Folded = FoldedSS.count(FrameIdx);
            // Otherwise, if it wasn't available, remember that it is now!
            Spills.addAvailable(FrameIdx, DestReg, !Folded);
            goto ProcessNextInst;
          }

          continue;
        }

        unsigned SubIdx = MO.getSubReg();
        bool DoReMat = VRM.isReMaterialized(VirtReg);
        if (DoReMat)
          ReMatDefs.insert(&MI);

        // The only vregs left are stack slot definitions.
        int StackSlot = VRM.getStackSlot(VirtReg);
        const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);

        // If this def is part of a two-address operand, make sure to execute
        // the store from the correct physical register.
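        //
        // E.g. (hypothetical): if the tied use operand was rewritten to the
        // sub-register AX, the store of the full slot must be executed from
        // the enclosing super-register EAX, located via findSuperReg below.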
        unsigned PhysReg;
        unsigned TiedOp;
        if (MI.isRegTiedToUseOperand(i, &TiedOp)) {
          PhysReg = MI.getOperand(TiedOp).getReg();
          if (SubIdx) {
            unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
            assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
                   "Can't find corresponding super-register!");
            PhysReg = SuperReg;
          }
        } else {
          PhysReg = VRM.getPhys(VirtReg);
          if (ReusedOperands.isClobbered(PhysReg)) {
            // Another def has taken the assigned physreg. It must have been a
            // use&def which got it due to reuse. Undo the reuse!
            PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
                        Spills, MaybeDeadStores, RegKills, KillOps, VRM);
          }
        }

        assert(PhysReg && "VR not assigned a physical register?");
        RegInfo->setPhysRegUsed(PhysReg);
        unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
        ReusedOperands.markClobbered(RReg);
        MI.getOperand(i).setReg(RReg);
        MI.getOperand(i).setSubReg(0);

        if (!MO.isDead()) {
          MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
          SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, true,
                              LastStore, Spills, ReMatDefs, RegKills, KillOps,
                              VRM);
          NextMII = next(MII);

          // Check to see if this is a noop copy. If so, eliminate the
          // instruction before considering the dest reg to be changed.
          {
            unsigned Src, Dst, SrcSR, DstSR;
            if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
              ++NumDCE;
              DOUT << "Removing now-noop copy: " << MI;
              InvalidateKills(MI, RegKills, KillOps);
              VRM.RemoveMachineInstrFromMaps(&MI);
              MBB.erase(&MI);
              Erased = true;
              UpdateKills(*LastStore, RegKills, KillOps, TRI);
              goto ProcessNextInst;
            }
          }
        }
      }
    ProcessNextInst:
      DistanceMap.insert(std::make_pair(&MI, Dist++));
      if (!Erased && !BackTracked) {
        for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
          UpdateKills(*II, RegKills, KillOps, TRI);
      }
      MII = NextMII;
    }

  }

};

llvm::VirtRegRewriter* llvm::createVirtRegRewriter() {
  switch (RewriterOpt) {
  default: assert(0 && "Unreachable!");
  case local:
    return new LocalRewriter();
  case simple:
    return new SimpleRewriter();
  }
}