//===-- llvm/CodeGen/VirtRegRewriter.cpp - Rewriter ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "virtregrewriter"
#include "VirtRegRewriter.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Statistic.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumDSE     , "Number of dead stores elided");
STATISTIC(NumDSS     , "Number of dead spill slots removed");
STATISTIC(NumCommutes, "Number of instructions commuted");
STATISTIC(NumDRM     , "Number of re-materializable defs elided");
STATISTIC(NumStores  , "Number of stores added");
STATISTIC(NumPSpills , "Number of physical register spills");
STATISTIC(NumOmitted , "Number of reloads omitted");
STATISTIC(NumAvoided , "Number of reloads deemed unnecessary");
STATISTIC(NumCopified, "Number of available reloads turned into copies");
STATISTIC(NumReMats  , "Number of re-materializations");
STATISTIC(NumLoads   , "Number of loads added");
STATISTIC(NumReused  , "Number of values reused");
STATISTIC(NumDCE     , "Number of copies elided");
STATISTIC(NumSUnfold , "Number of stores unfolded");
STATISTIC(NumModRefUnfold, "Number of modref unfolded");

namespace {
  enum RewriterName { local, trivial };
}

static cl::opt<RewriterName>
RewriterOpt("rewriter",
            cl::desc("Rewriter to use: (default: local)"),
            cl::Prefix,
            cl::values(clEnumVal(local,   "local rewriter"),
                       clEnumVal(trivial, "trivial rewriter"),
                       clEnumValEnd),
            cl::init(local));

static cl::opt<bool>
ScheduleSpills("schedule-spills",
               cl::desc("Schedule spill code"),
               cl::init(false));

VirtRegRewriter::~VirtRegRewriter() {}

namespace {

/// This class is intended for use with the new spilling framework only. It
/// rewrites vreg def/uses to use the assigned preg, but does not insert any
/// spill code.
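///
/// For example (illustrative): if %reg1024 was assigned physreg %EAX, every
/// def and use of %reg1024 is rewritten in place to %EAX; no loads or stores
/// are inserted, since spill code is assumed to be emitted elsewhere by the
/// new framework.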
struct TrivialRewriter : public VirtRegRewriter {

  bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
                            LiveIntervals* LIs) {
    DEBUG(errs() << "********** REWRITE MACHINE CODE **********\n");
    DEBUG(errs() << "********** Function: "
          << MF.getFunction()->getName() << '\n');
    DEBUG(errs() << "**** Machine Instrs"
          << "(NOTE! Does not include spills and reloads!) ****\n");
    DEBUG(MF.dump());

    MachineRegisterInfo *mri = &MF.getRegInfo();

    bool changed = false;

    for (LiveIntervals::iterator liItr = LIs->begin(), liEnd = LIs->end();
         liItr != liEnd; ++liItr) {

      if (TargetRegisterInfo::isVirtualRegister(liItr->first)) {
        if (VRM.hasPhys(liItr->first)) {
          unsigned preg = VRM.getPhys(liItr->first);
          mri->replaceRegWith(liItr->first, preg);
          mri->setPhysRegUsed(preg);
          changed = true;
        }
      }
      else {
        if (!liItr->second->empty()) {
          mri->setPhysRegUsed(liItr->first);
        }
      }
    }

    DEBUG(errs() << "**** Post Machine Instrs ****\n");
    DEBUG(MF.dump());

    return changed;
  }

};

}

// ************************************************************************ //

namespace {

/// AvailableSpills - As the local rewriter is scanning and rewriting an MBB
/// from top down, keep track of which spill slots or remat are available in
/// each register.
///
/// Note that not all physregs are created equal here. In particular, some
/// physregs are reloads that we are allowed to clobber or ignore at any time.
/// Other physregs are values that the register allocated program is using
/// that we cannot CHANGE, but we can read if we like. We keep track of this
/// on a per-stack-slot / remat id basis as the low bit in the value of the
/// SpillSlotsAvailable entries. The predicate 'canClobberPhysReg()' checks
/// this bit and addAvailable sets it if CanClobber is true.
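///
/// Illustration (hypothetical values): if stack slot #3 is available in
/// physreg 17 and may be clobbered, the maps below record
///   SpillSlotsOrReMatsAvailable[3] == (17 << 1) | 1
/// and getSpillSlotOrReMatPhysReg(3) recovers physreg 17 with a right shift.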
class AvailableSpills {
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;

  // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled
  // or remat'ed virtual register values that are still available, due to
  // being loaded or stored to, but not invalidated yet.
  std::map<int, unsigned> SpillSlotsOrReMatsAvailable;

  // PhysRegsAvailable - This is the inverse of SpillSlotsOrReMatsAvailable,
  // indicating which stack slot values are currently held by a physreg. This
  // is used to invalidate entries in SpillSlotsOrReMatsAvailable when a
  // physreg is modified.
  std::multimap<unsigned, int> PhysRegsAvailable;

  void disallowClobberPhysRegOnly(unsigned PhysReg);

  void ClobberPhysRegOnly(unsigned PhysReg);
public:
  AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii)
    : TRI(tri), TII(tii) {
  }

  /// clear - Reset the state.
  void clear() {
    SpillSlotsOrReMatsAvailable.clear();
    PhysRegsAvailable.clear();
  }

  const TargetRegisterInfo *getRegInfo() const { return TRI; }

  /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is
  /// available in a physical register, return that PhysReg, otherwise
  /// return 0.
  unsigned getSpillSlotOrReMatPhysReg(int Slot) const {
    std::map<int, unsigned>::const_iterator I =
      SpillSlotsOrReMatsAvailable.find(Slot);
    if (I != SpillSlotsOrReMatsAvailable.end()) {
      return I->second >> 1;  // Remove the CanClobber bit.
    }
    return 0;
  }

  /// addAvailable - Mark that the specified stack slot / remat is available
  /// in the specified physreg. If CanClobber is true, the physreg can be
  /// modified at any time without changing the semantics of the program.
  void addAvailable(int SlotOrReMat, unsigned Reg, bool CanClobber = true) {
    // If this stack slot is thought to be available in some other physreg,
    // remove its record.
    ModifyStackSlotOrReMat(SlotOrReMat);

    PhysRegsAvailable.insert(std::make_pair(Reg, SlotOrReMat));
    SpillSlotsOrReMatsAvailable[SlotOrReMat] = (Reg << 1) |
                                               (unsigned)CanClobber;

    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DEBUG(errs() << "Remembering RM#"
                   << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1);
    else
      DEBUG(errs() << "Remembering SS#" << SlotOrReMat);
    DEBUG(errs() << " in physreg " << TRI->getName(Reg) << "\n");
  }

  /// canClobberPhysRegForSS - Return true if the spiller is allowed to change
  /// the value of the specified stackslot register if it desires. The
  /// specified stack slot must be available in a physreg for this query to
  /// make sense.
  bool canClobberPhysRegForSS(int SlotOrReMat) const {
    assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) &&
           "Value not available!");
    return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1;
  }

  /// canClobberPhysReg - Return true if the spiller is allowed to clobber the
  /// physical register where values for some stack slot(s) might be
  /// available.
  bool canClobberPhysReg(unsigned PhysReg) const {
    std::multimap<unsigned, int>::const_iterator I =
      PhysRegsAvailable.lower_bound(PhysReg);
    while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
      int SlotOrReMat = I->second;
      I++;
      if (!canClobberPhysRegForSS(SlotOrReMat))
        return false;
    }
    return true;
  }

  /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
  /// stackslot register. The register is still available but is no longer
  /// allowed to be modified.
  void disallowClobberPhysReg(unsigned PhysReg);

  /// ClobberPhysReg - This is called when the specified physreg changes
  /// value. We use this to invalidate any info about stuff that lives in
  /// it and any of its aliases.
  void ClobberPhysReg(unsigned PhysReg);

  /// ModifyStackSlotOrReMat - This method is called when the value in a stack
  /// slot changes. This removes information about which register the
  /// previous value for this slot lives in (as the previous value is dead
  /// now).
  void ModifyStackSlotOrReMat(int SlotOrReMat);

  /// AddAvailableRegsToLiveIn - Availability information is being kept coming
  /// into the specified MBB. Add available physical registers as potential
  /// live-ins. If they are reused in the MBB, they will be added to the
  /// live-in set so that the register scavenger and post-allocation scheduler
  /// see them.
  void AddAvailableRegsToLiveIn(MachineBasicBlock &MBB, BitVector &RegKills,
                                std::vector<MachineOperand*> &KillOps);
};

}

// ************************************************************************ //

// Given a location where a reload of a spilled register or a remat of
// a constant is to be inserted, attempt to find a safe location to
// insert the load at an earlier point in the basic-block, to hide
// latency of the load and to avoid address-generation interlock
// issues.
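//
// For example (illustrative): a reload of %EAX from SS#4 may be hoisted above
// any preceding instruction that neither references SS#4 nor reads, defines,
// or aliases %EAX, hiding the load latency from the consuming instruction.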
static MachineBasicBlock::iterator
ComputeReloadLoc(MachineBasicBlock::iterator const InsertLoc,
                 MachineBasicBlock::iterator const Begin,
                 unsigned PhysReg,
                 const TargetRegisterInfo *TRI,
                 bool DoReMat,
                 int SSorRMId,
                 const TargetInstrInfo *TII,
                 const MachineFunction &MF)
{
  if (!ScheduleSpills)
    return InsertLoc;

  // Spill backscheduling is of primary interest to addresses, so
  // don't do anything if the register isn't in the register class
  // used for pointers.

  const TargetLowering *TL = MF.getTarget().getTargetLowering();

  if (!TL->isTypeLegal(TL->getPointerTy()))
    // Believe it or not, this is true on PIC16.
    return InsertLoc;

  const TargetRegisterClass *ptrRegClass =
    TL->getRegClassFor(TL->getPointerTy());
  if (!ptrRegClass->contains(PhysReg))
    return InsertLoc;

  // Scan upwards through the preceding instructions. If an instruction doesn't
  // reference the stack slot or the register we're loading, we can
  // backschedule the reload up past it.
  MachineBasicBlock::iterator NewInsertLoc = InsertLoc;
  while (NewInsertLoc != Begin) {
    MachineBasicBlock::iterator Prev = prior(NewInsertLoc);
    for (unsigned i = 0; i < Prev->getNumOperands(); ++i) {
      MachineOperand &Op = Prev->getOperand(i);
      if (!DoReMat && Op.isFI() && Op.getIndex() == SSorRMId)
        goto stop;
    }
    if (Prev->findRegisterUseOperandIdx(PhysReg) != -1 ||
        Prev->findRegisterDefOperand(PhysReg))
      goto stop;
    for (const unsigned *Alias = TRI->getAliasSet(PhysReg); *Alias; ++Alias)
      if (Prev->findRegisterUseOperandIdx(*Alias) != -1 ||
          Prev->findRegisterDefOperand(*Alias))
        goto stop;
    NewInsertLoc = Prev;
  }
stop:;

  // If we made it to the beginning of the block, turn around and move back
  // down just past any existing reloads. They're likely to be reloads/remats
  // for instructions earlier than what our current reload/remat is for, so
  // they should be scheduled earlier.
  if (NewInsertLoc == Begin) {
    int FrameIdx;
    while (InsertLoc != NewInsertLoc &&
           (TII->isLoadFromStackSlot(NewInsertLoc, FrameIdx) ||
            TII->isTriviallyReMaterializable(NewInsertLoc)))
      ++NewInsertLoc;
  }

  return NewInsertLoc;
}

namespace {

// ReusedOp - For each reused operand, we keep track of a bit of information,
// in case we need to roll back upon processing a new operand. See comments
// below.
struct ReusedOp {
  // The MachineInstr operand that reused an available value.
  unsigned Operand;

  // StackSlotOrReMat - The spill slot or remat id of the value being reused.
  unsigned StackSlotOrReMat;

  // PhysRegReused - The physical register the value was available in.
  unsigned PhysRegReused;

  // AssignedPhysReg - The physreg that was assigned for use by the reload.
  unsigned AssignedPhysReg;

  // VirtReg - The virtual register itself.
  unsigned VirtReg;

  ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr,
           unsigned vreg)
    : Operand(o), StackSlotOrReMat(ss), PhysRegReused(prr),
      AssignedPhysReg(apr), VirtReg(vreg) {}
};

/// ReuseInfo - This maintains a collection of ReusedOps for each operand that
/// is reused instead of reloaded.
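///
/// For example (illustrative): if operand 2 of MI would reload stack slot #5
/// into r3, but slot #5 is already available in r2, the reload is skipped and
/// ReusedOp(2, 5, r2, r3, vreg) records enough information to undo the reuse
/// should r2 later need to be clobbered.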
class ReuseInfo {
  MachineInstr &MI;
  std::vector<ReusedOp> Reuses;
  BitVector PhysRegsClobbered;
public:
  ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) {
    PhysRegsClobbered.resize(tri->getNumRegs());
  }

  bool hasReuses() const {
    return !Reuses.empty();
  }

  /// addReuse - If we choose to reuse a virtual register that is already
  /// available instead of reloading it, remember that we did so.
  void addReuse(unsigned OpNo, unsigned StackSlotOrReMat,
                unsigned PhysRegReused, unsigned AssignedPhysReg,
                unsigned VirtReg) {
    // If the reload is to the assigned register anyway, no undo will be
    // required.
    if (PhysRegReused == AssignedPhysReg) return;

    // Otherwise, remember this.
    Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused,
                              AssignedPhysReg, VirtReg));
  }

  void markClobbered(unsigned PhysReg) {
    PhysRegsClobbered.set(PhysReg);
  }

  bool isClobbered(unsigned PhysReg) const {
    return PhysRegsClobbered.test(PhysReg);
  }

  /// GetRegForReload - We are about to emit a reload into PhysReg. If there
  /// is some other operand that is using the specified register, either pick
  /// a new register to use, or evict the previous reload and use this reg.
  unsigned GetRegForReload(const TargetRegisterClass *RC, unsigned PhysReg,
                           MachineFunction &MF, MachineInstr *MI,
                           AvailableSpills &Spills,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           SmallSet<unsigned, 8> &Rejected,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM);

  /// GetRegForReload - Helper for the above GetRegForReload(). Add a
  /// 'Rejected' set to remember which registers have been considered and
  /// rejected for the reload. This avoids infinite looping in cases like
  /// this:
  ///   t1 := op t2, t3
  ///   t2 <- assigned r0 for use by the reload but ended up reusing r1
  ///   t3 <- assigned r1 for use by the reload but ended up reusing r0
  ///   t1 <- desires r1
  ///         sees r1 is taken by t2, tries t2's reload register r0
  ///         sees r0 is taken by t3, tries t3's reload register r1
  ///         sees r1 is taken by t2, tries t2's reload register r0 ...
  unsigned GetRegForReload(unsigned VirtReg, unsigned PhysReg,
                           MachineInstr *MI,
                           AvailableSpills &Spills,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM) {
    SmallSet<unsigned, 8> Rejected;
    MachineFunction &MF = *MI->getParent()->getParent();
    const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(VirtReg);
    return GetRegForReload(RC, PhysReg, MF, MI, Spills, MaybeDeadStores,
                           Rejected, RegKills, KillOps, VRM);
  }
};

}

// ****************** //
// Utility Functions  //
// ****************** //

/// findSinglePredSuccessor - Return via reference a vector of machine basic
/// blocks each of which is a successor of the specified BB and has no other
/// predecessor.
static void findSinglePredSuccessor(MachineBasicBlock *MBB,
                                 SmallVectorImpl<MachineBasicBlock *> &Succs) {
  for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
         SE = MBB->succ_end(); SI != SE; ++SI) {
    MachineBasicBlock *SuccMBB = *SI;
    if (SuccMBB->pred_size() == 1)
      Succs.push_back(SuccMBB);
  }
}

/// InvalidateKill - Invalidate register kill information for a specific
/// register. This also unsets the kills marker on the last kill operand.
static void InvalidateKill(unsigned Reg,
                           const TargetRegisterInfo* TRI,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps) {
  if (RegKills[Reg]) {
    KillOps[Reg]->setIsKill(false);
    // KillOps[Reg] might be a def of a super-register.
    unsigned KReg = KillOps[Reg]->getReg();
    KillOps[KReg] = NULL;
    RegKills.reset(KReg);
    for (const unsigned *SR = TRI->getSubRegisters(KReg); *SR; ++SR) {
      if (RegKills[*SR]) {
        KillOps[*SR]->setIsKill(false);
        KillOps[*SR] = NULL;
        RegKills.reset(*SR);
      }
    }
  }
}

/// InvalidateKills - MI is going to be deleted. If any of its operands are
/// marked kill, then invalidate the information.
static void InvalidateKills(MachineInstr &MI,
                            const TargetRegisterInfo* TRI,
                            BitVector &RegKills,
                            std::vector<MachineOperand*> &KillOps,
                            SmallVector<unsigned, 2> *KillRegs = NULL) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse() || !MO.isKill() || MO.isUndef())
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (KillRegs)
      KillRegs->push_back(Reg);
    assert(Reg < KillOps.size());
    if (KillOps[Reg] == &MO) {
      KillOps[Reg] = NULL;
      RegKills.reset(Reg);
      for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
        if (RegKills[*SR]) {
          KillOps[*SR] = NULL;
          RegKills.reset(*SR);
        }
      }
    }
  }
}

/// InvalidateRegDef - If the def operand of the specified def MI is now dead
/// (since its spill instruction is removed), mark it isDead. Also checks if
/// the def MI has other definition operands that are not dead. Returns it by
/// reference.
static bool InvalidateRegDef(MachineBasicBlock::iterator I,
                             MachineInstr &NewDef, unsigned Reg,
                             bool &HasLiveDef) {
  // Due to remat, it's possible this reg isn't being reused. That is,
  // the def of this reg (by prev MI) is now dead.
  MachineInstr *DefMI = I;
  MachineOperand *DefOp = NULL;
  for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = DefMI->getOperand(i);
    if (!MO.isReg() || !MO.isUse() || !MO.isKill() || MO.isUndef())
      continue;
    if (MO.getReg() == Reg)
      DefOp = &MO;
    else if (!MO.isDead())
      HasLiveDef = true;
  }
  if (!DefOp)
    return false;

  bool FoundUse = false, Done = false;
  MachineBasicBlock::iterator E = &NewDef;
  ++I; ++E;
  for (; !Done && I != E; ++I) {
    MachineInstr *NMI = I;
    for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
      MachineOperand &MO = NMI->getOperand(j);
      if (!MO.isReg() || MO.getReg() != Reg)
        continue;
      if (MO.isUse())
        FoundUse = true;
      Done = true; // Stop after scanning all the operands of this MI.
    }
  }
  if (!FoundUse) {
    // Def is dead!
    DefOp->setIsDead();
    return true;
  }
  return false;
}

/// UpdateKills - Track and update kill info. If an MI reads a register that
/// is marked kill, then it must be due to register reuse. Transfer the kill
/// info over.
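///
/// For example (illustrative): if an earlier instruction was marked as
/// killing %r1 but MI reads %r1 again because a reload was reused, the stale
/// kill flag is cleared and MI's use becomes the new kill (unless that
/// operand is tied to a def).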
static void UpdateKills(MachineInstr &MI, const TargetRegisterInfo* TRI,
                        BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse() || MO.isUndef())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
      // That can't be right. Register is killed but not re-defined and it's
      // being reused. Let's fix that.
      KillOps[Reg]->setIsKill(false);
      // KillOps[Reg] might be a def of a super-register.
      unsigned KReg = KillOps[Reg]->getReg();
      KillOps[KReg] = NULL;
      RegKills.reset(KReg);

      // Must be a def of a super-register. Its other sub-registers are no
      // longer killed as well.
      for (const unsigned *SR = TRI->getSubRegisters(KReg); *SR; ++SR) {
        KillOps[*SR] = NULL;
        RegKills.reset(*SR);
      }

      if (!MI.isRegTiedToDefOperand(i))
        // Unless it's a two-address operand, this is the new kill.
        MO.setIsKill();
    }
    if (MO.isKill()) {
      RegKills.set(Reg);
      KillOps[Reg] = &MO;
      for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
        RegKills.set(*SR);
        KillOps[*SR] = &MO;
      }
    }
  }

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    RegKills.reset(Reg);
    KillOps[Reg] = NULL;
    // It also defines (or partially defines) aliases.
    for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
      RegKills.reset(*SR);
      KillOps[*SR] = NULL;
    }
  }
}

/// ReMaterialize - Re-materialize definition for Reg targeting DestReg.
///
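/// For example (illustrative): rather than reloading a spilled constant from
/// its stack slot, re-emit its trivially rematerializable def (such as an
/// immediate move) directly before the use, rewriting any virtual register
/// operands to their assigned physregs.
///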
static void ReMaterialize(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MII,
                          unsigned DestReg, unsigned Reg,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          VirtRegMap &VRM) {
  MachineInstr *ReMatDefMI = VRM.getReMaterializedMI(Reg);
#ifndef NDEBUG
  const TargetInstrDesc &TID = ReMatDefMI->getDesc();
  assert(TID.getNumDefs() == 1 &&
         "Don't know how to remat instructions that define > 1 values!");
#endif
  TII->reMaterialize(MBB, MII, DestReg,
                     ReMatDefMI->getOperand(0).getSubReg(), ReMatDefMI);
  MachineInstr *NewMI = prior(MII);
  for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = NewMI->getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg))
      continue;
    assert(MO.isUse());
    unsigned SubIdx = MO.getSubReg();
    unsigned Phys = VRM.getPhys(VirtReg);
    assert(Phys && "Virtual register is not assigned a register?");
    unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
    MO.setReg(RReg);
    MO.setSubReg(0);
  }
  ++NumReMats;
}

/// findSuperReg - Find the register in the given register class whose SubIdx
/// sub-register is SubReg.
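///
/// For example (illustrative, x86): within GR32, the register whose low
/// 8-bit sub-register is AL is EAX, so querying with SubReg = AL and the
/// corresponding SubIdx would return EAX.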
static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
                             unsigned SubIdx, const TargetRegisterInfo *TRI) {
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
       I != E; ++I) {
    unsigned Reg = *I;
    if (TRI->getSubReg(Reg, SubIdx) == SubReg)
      return Reg;
  }
  return 0;
}

// ******************************** //
// Available Spills Implementation  //
// ******************************** //

/// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
/// stackslot register. The register is still available but is no longer
/// allowed to be modified.
void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    I++;
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
    DEBUG(errs() << "PhysReg " << TRI->getName(PhysReg)
          << " copied, it is available for use but can no longer be modified\n");
  }
}

/// disallowClobberPhysReg - Unset the CanClobber bit of the specified
/// stackslot register and its aliases. The register and its aliases may
/// still be available but are no longer allowed to be modified.
void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    disallowClobberPhysRegOnly(*AS);
  disallowClobberPhysRegOnly(PhysReg);
}

/// ClobberPhysRegOnly - This is called when the specified physreg changes
/// value. We use this to invalidate any info about stuff we think lives in it.
void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    PhysRegsAvailable.erase(I++);
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
    DEBUG(errs() << "PhysReg " << TRI->getName(PhysReg)
                 << " clobbered, invalidating ");
    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DEBUG(errs() << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 <<"\n");
    else
      DEBUG(errs() << "SS#" << SlotOrReMat << "\n");
  }
}

/// ClobberPhysReg - This is called when the specified physreg changes
/// value. We use this to invalidate any info about stuff we think lives in
/// it and any of its aliases.
void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    ClobberPhysRegOnly(*AS);
  ClobberPhysRegOnly(PhysReg);
}

/// AddAvailableRegsToLiveIn - Availability information is being kept coming
/// into the specified MBB. Add available physical registers as potential
/// live-ins. If they are reused in the MBB, they will be added to the
/// live-in set so that the register scavenger and post-allocation scheduler
/// see them.
void AvailableSpills::AddAvailableRegsToLiveIn(MachineBasicBlock &MBB,
                                        BitVector &RegKills,
                                        std::vector<MachineOperand*> &KillOps) {
  std::set<unsigned> NotAvailable;
  for (std::multimap<unsigned, int>::iterator
         I = PhysRegsAvailable.begin(), E = PhysRegsAvailable.end();
       I != E; ++I) {
    unsigned Reg = I->first;
    const TargetRegisterClass* RC = TRI->getPhysicalRegisterRegClass(Reg);
    // FIXME: A temporary workaround. We can't reuse available value if it's
    // not safe to move the def of the virtual register's class. e.g.
    // X86::RFP* register classes. Do not add it as a live-in.
    if (!TII->isSafeToMoveRegClassDefs(RC))
      // This is no longer available.
      NotAvailable.insert(Reg);
    else {
      MBB.addLiveIn(Reg);
      InvalidateKill(Reg, TRI, RegKills, KillOps);
    }

    // Skip over the same register.
    std::multimap<unsigned, int>::iterator NI = next(I);
    while (NI != E && NI->first == Reg) {
      ++I;
      ++NI;
    }
  }

  for (std::set<unsigned>::iterator I = NotAvailable.begin(),
         E = NotAvailable.end(); I != E; ++I) {
    ClobberPhysReg(*I);
    for (const unsigned *SubRegs = TRI->getSubRegisters(*I);
         *SubRegs; ++SubRegs)
      ClobberPhysReg(*SubRegs);
  }
}

/// ModifyStackSlotOrReMat - This method is called when the value in a stack
/// slot changes. This removes information about which register the previous
/// value for this slot lives in (as the previous value is dead now).
void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
  std::map<int, unsigned>::iterator It =
    SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
  if (It == SpillSlotsOrReMatsAvailable.end()) return;
  unsigned Reg = It->second >> 1;
  SpillSlotsOrReMatsAvailable.erase(It);

  // This register may hold the value of multiple stack slots, only remove this
  // stack slot from the set of values the register contains.
  std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
  for (; ; ++I) {
    assert(I != PhysRegsAvailable.end() && I->first == Reg &&
           "Map inverse broken!");
    if (I->second == SlotOrReMat) break;
  }
  PhysRegsAvailable.erase(I);
}

// ************************** //
// Reuse Info Implementation  //
// ************************** //

/// GetRegForReload - We are about to emit a reload into PhysReg. If there
/// is some other operand that is using the specified register, either pick
/// a new register to use, or evict the previous reload and use this reg.
unsigned ReuseInfo::GetRegForReload(const TargetRegisterClass *RC,
                         unsigned PhysReg,
                         MachineFunction &MF,
                         MachineInstr *MI, AvailableSpills &Spills,
                         std::vector<MachineInstr*> &MaybeDeadStores,
                         SmallSet<unsigned, 8> &Rejected,
                         BitVector &RegKills,
                         std::vector<MachineOperand*> &KillOps,
                         VirtRegMap &VRM) {
  const TargetInstrInfo* TII = MF.getTarget().getInstrInfo();
  const TargetRegisterInfo *TRI = Spills.getRegInfo();

  if (Reuses.empty()) return PhysReg;  // This is most often empty.

  for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
    ReusedOp &Op = Reuses[ro];
    // If we find some other reuse that was supposed to use this register
    // exactly for its reload, we can change this reload to use ITS reload
    // register. That is, unless its reload register has already been
    // considered and subsequently rejected because it has also been reused
    // by another operand.
    if (Op.PhysRegReused == PhysReg &&
        Rejected.count(Op.AssignedPhysReg) == 0 &&
        RC->contains(Op.AssignedPhysReg)) {
      // Yup, use the reload register that we didn't use before.
      unsigned NewReg = Op.AssignedPhysReg;
      Rejected.insert(PhysReg);
      return GetRegForReload(RC, NewReg, MF, MI, Spills, MaybeDeadStores,
                             Rejected, RegKills, KillOps, VRM);
    } else {
      // Otherwise, we might also have a problem if a previously reused
      // value aliases the new register. If so, codegen the previous reload
      // and use this one.
      unsigned PRRU = Op.PhysRegReused;
      if (TRI->regsOverlap(PRRU, PhysReg)) {
        // Okay, we found out that an alias of a reused register
        // was used. This isn't good because it means we have
        // to undo a previous reuse.
        MachineBasicBlock *MBB = MI->getParent();
        const TargetRegisterClass *AliasRC =
          MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg);

        // Copy Op out of the vector and remove it, we're going to insert an
        // explicit load for it.
        ReusedOp NewOp = Op;
        Reuses.erase(Reuses.begin()+ro);

        // MI may be using only a sub-register of PhysRegUsed.
        unsigned RealPhysRegUsed = MI->getOperand(NewOp.Operand).getReg();
        unsigned SubIdx = 0;
        assert(TargetRegisterInfo::isPhysicalRegister(RealPhysRegUsed) &&
               "A reuse cannot be a virtual register");
        if (PRRU != RealPhysRegUsed) {
          // What was the sub-register index?
          unsigned SubReg;
          for (SubIdx = 1; (SubReg = TRI->getSubReg(PRRU, SubIdx)); SubIdx++)
            if (SubReg == RealPhysRegUsed)
              break;
          assert(SubReg == RealPhysRegUsed &&
                 "Operand physreg is not a sub-register of PhysRegUsed");
        }

        // Ok, we're going to try to reload the assigned physreg into the
        // slot that we were supposed to in the first place. However, that
        // register could hold a reuse. Check to see if it conflicts or
        // would prefer us to use a different register.
        unsigned NewPhysReg = GetRegForReload(RC, NewOp.AssignedPhysReg,
                                              MF, MI, Spills, MaybeDeadStores,
                                              Rejected, RegKills, KillOps, VRM);

        bool DoReMat = NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT;
        int SSorRMId = DoReMat
          ? VRM.getReMatId(NewOp.VirtReg) : NewOp.StackSlotOrReMat;

        // Back-schedule reloads and remats.
        MachineBasicBlock::iterator InsertLoc =
          ComputeReloadLoc(MI, MBB->begin(), PhysReg, TRI,
                           DoReMat, SSorRMId, TII, MF);

        if (DoReMat) {
          ReMaterialize(*MBB, InsertLoc, NewPhysReg, NewOp.VirtReg, TII,
                        TRI, VRM);
        } else {
          TII->loadRegFromStackSlot(*MBB, InsertLoc, NewPhysReg,
                                    NewOp.StackSlotOrReMat, AliasRC);
          MachineInstr *LoadMI = prior(InsertLoc);
          VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI);
          // Any stores to this stack slot are not dead anymore.
          MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
          ++NumLoads;
        }
        Spills.ClobberPhysReg(NewPhysReg);
        Spills.ClobberPhysReg(NewOp.PhysRegReused);

        unsigned RReg = SubIdx ? TRI->getSubReg(NewPhysReg, SubIdx) : NewPhysReg;
        MI->getOperand(NewOp.Operand).setReg(RReg);
        MI->getOperand(NewOp.Operand).setSubReg(0);

        Spills.addAvailable(NewOp.StackSlotOrReMat, NewPhysReg);
        UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
        DEBUG(errs() << '\t' << *prior(InsertLoc));

        DEBUG(errs() << "Reuse undone!\n");
        --NumReused;

        // Finally, PhysReg is now available, go ahead and use it.
        return PhysReg;
      }
    }
  }
  return PhysReg;
}

// ************************************************************************ //

/// FoldsStackSlotModRef - Return true if the specified MI folds the specified
/// stack slot mod/ref. It also checks if it's possible to unfold the
/// instruction by having it define a specified physical register instead.
static bool FoldsStackSlotModRef(MachineInstr &MI, int SS, unsigned PhysReg,
                                 const TargetInstrInfo *TII,
                                 const TargetRegisterInfo *TRI,
                                 VirtRegMap &VRM) {
  if (VRM.hasEmergencySpills(&MI) || VRM.isSpillPt(&MI))
    return false;

  bool Found = false;
  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
  for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) {
    unsigned VirtReg = I->second.first;
    VirtRegMap::ModRef MR = I->second.second;
    if (MR & VirtRegMap::isModRef)
      if (VRM.getStackSlot(VirtReg) == SS) {
        Found =
          TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(), true, true) != 0;
        break;
      }
  }
  if (!Found)
    return false;

  // Does the instruction use a register that overlaps the scratch register?
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg)) {
      if (!VRM.hasPhys(Reg))
        continue;
      Reg = VRM.getPhys(Reg);
    }
    if (TRI->regsOverlap(PhysReg, Reg))
      return false;
  }
  return true;
}

/// FindFreeRegister - Find a free register of a given register class by
/// looking at (at most) the last two machine instructions.
static unsigned FindFreeRegister(MachineBasicBlock::iterator MII,
                                 MachineBasicBlock &MBB,
                                 const TargetRegisterClass *RC,
                                 const TargetRegisterInfo *TRI,
                                 BitVector &AllocatableRegs) {
  BitVector Defs(TRI->getNumRegs());
  BitVector Uses(TRI->getNumRegs());
  SmallVector<unsigned, 4> LocalUses;
  SmallVector<unsigned, 4> Kills;

  // Take a look at 2 instructions at most.
  for (unsigned Count = 0; Count < 2; ++Count) {
    if (MII == MBB.begin())
      break;
    MachineInstr *PrevMI = prior(MII);
    for (unsigned i = 0, e = PrevMI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = PrevMI->getOperand(i);
      if (!MO.isReg() || MO.getReg() == 0)
        continue;
      unsigned Reg = MO.getReg();
      if (MO.isDef()) {
        Defs.set(Reg);
        for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
          Defs.set(*AS);
      } else {
        LocalUses.push_back(Reg);
        if (MO.isKill() && AllocatableRegs[Reg])
          Kills.push_back(Reg);
      }
    }

    for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
      unsigned Kill = Kills[i];
      if (!Defs[Kill] && !Uses[Kill] &&
          TRI->getPhysicalRegisterRegClass(Kill) == RC)
        return Kill;
    }
    for (unsigned i = 0, e = LocalUses.size(); i != e; ++i) {
      unsigned Reg = LocalUses[i];
      Uses.set(Reg);
      for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
        Uses.set(*AS);
    }

    MII = PrevMI;
  }

  return 0;
}

static
void AssignPhysToVirtReg(MachineInstr *MI, unsigned VirtReg, unsigned PhysReg) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() == VirtReg)
      MO.setReg(PhysReg);
  }
}

namespace {
  struct RefSorter {
    bool operator()(const std::pair<MachineInstr*, int> &A,
                    const std::pair<MachineInstr*, int> &B) {
      return A.second < B.second;
    }
  };
}

// ***************************** //
// Local Spiller Implementation  //
// ***************************** //

namespace {

class LocalRewriter : public VirtRegRewriter {
  MachineRegisterInfo *RegInfo;
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;
  BitVector AllocatableRegs;
  DenseMap<MachineInstr*, unsigned> DistanceMap;
public:

  bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
                            LiveIntervals* LIs) {
    RegInfo = &MF.getRegInfo();
    TRI = MF.getTarget().getRegisterInfo();
    TII = MF.getTarget().getInstrInfo();
    AllocatableRegs = TRI->getAllocatableSet(MF);
    DEBUG(errs() << "\n**** Local spiller rewriting function '"
          << MF.getFunction()->getName() << "':\n");
    DEBUG(errs() << "**** Machine Instrs (NOTE! Does not include spills and"
                    " reloads!) ****\n");
    DEBUG(MF.dump());

    // Spills - Keep track of which spilled values are available in physregs
    // so that we can choose to reuse the physregs instead of emitting
    // reloads. This is usually refreshed per basic block.
    AvailableSpills Spills(TRI, TII);

    // Keep track of kill information.
    BitVector RegKills(TRI->getNumRegs());
    std::vector<MachineOperand*> KillOps;
    KillOps.resize(TRI->getNumRegs(), NULL);

    // SingleEntrySuccs - Successor blocks which have a single predecessor.
    SmallVector<MachineBasicBlock*, 4> SinglePredSuccs;
    SmallPtrSet<MachineBasicBlock*,16> EarlyVisited;

    // Traverse the basic blocks depth first.
    MachineBasicBlock *Entry = MF.begin();
    SmallPtrSet<MachineBasicBlock*,16> Visited;
    for (df_ext_iterator<MachineBasicBlock*,
           SmallPtrSet<MachineBasicBlock*,16> >
           DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
         DFI != E; ++DFI) {
      MachineBasicBlock *MBB = *DFI;
      if (!EarlyVisited.count(MBB))
        RewriteMBB(*MBB, VRM, LIs, Spills, RegKills, KillOps);

      // If this MBB is the only predecessor of a successor, keep the
      // availability information and visit it next.
      do {
        // Keep visiting single predecessor successors as long as possible.
        SinglePredSuccs.clear();
        findSinglePredSuccessor(MBB, SinglePredSuccs);
        if (SinglePredSuccs.empty())
          MBB = 0;
        else {
          // FIXME: More than one successor, each of which has MBB as its
          // only predecessor.
          MBB = SinglePredSuccs[0];
          if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) {
            Spills.AddAvailableRegsToLiveIn(*MBB, RegKills, KillOps);
            RewriteMBB(*MBB, VRM, LIs, Spills, RegKills, KillOps);
          }
        }
      } while (MBB);

      // Clear the availability info.
      Spills.clear();
    }

    DEBUG(errs() << "**** Post Machine Instrs ****\n");
    DEBUG(MF.dump());

    // Mark unused spill slots.
    MachineFrameInfo *MFI = MF.getFrameInfo();
    int SS = VRM.getLowSpillSlot();
    if (SS != VirtRegMap::NO_STACK_SLOT)
      for (int e = VRM.getHighSpillSlot(); SS <= e; ++SS)
        if (!VRM.isSpillSlotUsed(SS)) {
          MFI->RemoveStackObject(SS);
          ++NumDSS;
        }

    return true;
  }

private:

  /// OptimizeByUnfold2 - Unfold a series of load / store folding instructions
  /// if a scratch register is available.
  ///     xorq  %r12<kill>, %r13
  ///     addq  %rax, -184(%rbp)
  ///     addq  %r13, -184(%rbp)
  /// ==>
  ///     xorq  %r12<kill>, %r13
  ///     movq  -184(%rbp), %r12
  ///     addq  %rax, %r12
  ///     addq  %r13, %r12
  ///     movq  %r12, -184(%rbp)
  bool OptimizeByUnfold2(unsigned VirtReg, int SS,
                         MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator &MII,
                         std::vector<MachineInstr*> &MaybeDeadStores,
                         AvailableSpills &Spills,
                         BitVector &RegKills,
                         std::vector<MachineOperand*> &KillOps,
                         VirtRegMap &VRM) {

    MachineBasicBlock::iterator NextMII = next(MII);
    if (NextMII == MBB.end())
      return false;

    if (TII->getOpcodeAfterMemoryUnfold(MII->getOpcode(), true, true) == 0)
      return false;

    // Now let's see if the last couple of instructions happen to have freed
    // up a register.
    const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
    unsigned PhysReg = FindFreeRegister(MII, MBB, RC, TRI, AllocatableRegs);
    if (!PhysReg)
      return false;

    MachineFunction &MF = *MBB.getParent();
    TRI = MF.getTarget().getRegisterInfo();
    MachineInstr &MI = *MII;
    if (!FoldsStackSlotModRef(MI, SS, PhysReg, TII, TRI, VRM))
      return false;

    // If the next instruction also folds the same SS modref and can be
    // unfolded, then it's worthwhile to issue a load from SS into the free
    // register and then unfold these instructions.
    if (!FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, VRM))
      return false;

    // Back-schedule reloads and remats.
    ComputeReloadLoc(MII, MBB.begin(), PhysReg, TRI, false, SS, TII, MF);

    // Load from SS to the spare physical register.
    TII->loadRegFromStackSlot(MBB, MII, PhysReg, SS, RC);
    // This invalidates Phys.
    Spills.ClobberPhysReg(PhysReg);
    // Remember it's available.
    Spills.addAvailable(SS, PhysReg);
    MaybeDeadStores[SS] = NULL;

    // Unfold current MI.
    SmallVector<MachineInstr*, 4> NewMIs;
    if (!TII->unfoldMemoryOperand(MF, &MI, VirtReg, false, false, NewMIs))
      llvm_unreachable("Unable to unfold the load / store folding instruction!");
    assert(NewMIs.size() == 1);
    AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg);
    VRM.transferRestorePts(&MI, NewMIs[0]);
    MII = MBB.insert(MII, NewMIs[0]);
    InvalidateKills(MI, TRI, RegKills, KillOps);
    VRM.RemoveMachineInstrFromMaps(&MI);
    MBB.erase(&MI);
    ++NumModRefUnfold;

    // Unfold next instructions that fold the same SS.
    do {
      MachineInstr &NextMI = *NextMII;
      NextMII = next(NextMII);
      NewMIs.clear();
      if (!TII->unfoldMemoryOperand(MF, &NextMI, VirtReg, false, false, NewMIs))
        llvm_unreachable("Unable to unfold the load / store folding instruction!");
      assert(NewMIs.size() == 1);
      AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg);
      VRM.transferRestorePts(&NextMI, NewMIs[0]);
      MBB.insert(NextMII, NewMIs[0]);
      InvalidateKills(NextMI, TRI, RegKills, KillOps);
      VRM.RemoveMachineInstrFromMaps(&NextMI);
      MBB.erase(&NextMI);
      ++NumModRefUnfold;
      if (NextMII == MBB.end())
        break;
    } while (FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, VRM));

    // Store the value back into SS.
    TII->storeRegToStackSlot(MBB, NextMII, PhysReg, true, SS, RC);
    MachineInstr *StoreMI = prior(NextMII);
    VRM.addSpillSlotUse(SS, StoreMI);
    VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);

    return true;
  }

  /// OptimizeByUnfold - Turn a store folding instruction into a load folding
  /// instruction. e.g.
  ///     xorl  %edi, %eax
  ///     movl  %eax, -32(%ebp)
  ///     movl  -36(%ebp), %eax
  ///     orl   %eax, -32(%ebp)
  /// ==>
  ///     xorl  %edi, %eax
  ///     orl   -36(%ebp), %eax
  ///     mov   %eax, -32(%ebp)
  /// This enables unfolding optimization for a subsequent instruction which
  /// will also eliminate the newly introduced store instruction.
  bool OptimizeByUnfold(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MII,
                        std::vector<MachineInstr*> &MaybeDeadStores,
                        AvailableSpills &Spills,
                        BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps,
                        VirtRegMap &VRM) {
    MachineFunction &MF = *MBB.getParent();
    MachineInstr &MI = *MII;
    unsigned UnfoldedOpc = 0;
    unsigned UnfoldPR = 0;
    unsigned UnfoldVR = 0;
    int FoldedSS = VirtRegMap::NO_STACK_SLOT;
    VirtRegMap::MI2VirtMapTy::const_iterator I, End;
    for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
      // Only transform a MI that folds a single register.
      if (UnfoldedOpc)
        return false;
      UnfoldVR = I->second.first;
      VirtRegMap::ModRef MR = I->second.second;
      // MI2VirtMap can be updated, which invalidates the iterator.
      // Increment the iterator first.
      ++I;
      if (VRM.isAssignedReg(UnfoldVR))
        continue;
      // If this reference is not a use, any previous store is now dead.
      // Otherwise, the store to this stack slot is not dead anymore.
      FoldedSS = VRM.getStackSlot(UnfoldVR);
      MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
      if (DeadStore && (MR & VirtRegMap::isModRef)) {
        unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
        if (!PhysReg || !DeadStore->readsRegister(PhysReg))
          continue;
        UnfoldPR = PhysReg;
        UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
                                                      false, true);
      }
    }

    if (!UnfoldedOpc) {
      if (!UnfoldVR)
        return false;

      // Look for other unfolding opportunities.
      return OptimizeByUnfold2(UnfoldVR, FoldedSS, MBB, MII,
                               MaybeDeadStores, Spills, RegKills, KillOps, VRM);
    }

    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
        continue;
      unsigned VirtReg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
        continue;
      if (VRM.isAssignedReg(VirtReg)) {
        unsigned PhysReg = VRM.getPhys(VirtReg);
        if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
          return false;
      } else if (VRM.isReMaterialized(VirtReg))
        continue;
      int SS = VRM.getStackSlot(VirtReg);
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
      if (PhysReg) {
        if (TRI->regsOverlap(PhysReg, UnfoldPR))
          return false;
        continue;
      }
      if (VRM.hasPhys(VirtReg)) {
        PhysReg = VRM.getPhys(VirtReg);
        if (!TRI->regsOverlap(PhysReg, UnfoldPR))
          continue;
      }

      // Ok, we'll need to reload the value into a register which makes
      // it impossible to perform the store unfolding optimization later.
      // Let's see if it is possible to fold the load if the store is
      // unfolded. This allows us to perform the store unfolding
      // optimization.
      SmallVector<MachineInstr*, 4> NewMIs;
      if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
        assert(NewMIs.size() == 1);
        MachineInstr *NewMI = NewMIs.back();
        NewMIs.clear();
        int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
        assert(Idx != -1);
        SmallVector<unsigned, 1> Ops;
        Ops.push_back(Idx);
        MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
        if (FoldedMI) {
          VRM.addSpillSlotUse(SS, FoldedMI);
          if (!VRM.hasPhys(UnfoldVR))
            VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
          VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
          MII = MBB.insert(MII, FoldedMI);
          InvalidateKills(MI, TRI, RegKills, KillOps);
          VRM.RemoveMachineInstrFromMaps(&MI);
          MBB.erase(&MI);
          MF.DeleteMachineInstr(NewMI);
          return true;
        }
        MF.DeleteMachineInstr(NewMI);
      }
    }

    return false;
  }

  /// CommuteChangesDestination - We are looking for r0 = op r1, r2, where
  /// SrcReg is r1 and it is tied to r0. Return true if after commuting this
  /// instruction it will be r0 = op r2, r1.
  static bool CommuteChangesDestination(MachineInstr *DefMI,
                                        const TargetInstrDesc &TID,
                                        unsigned SrcReg,
                                        const TargetInstrInfo *TII,
                                        unsigned &DstIdx) {
    if (TID.getNumDefs() != 1 && TID.getNumOperands() != 3)
      return false;
    if (!DefMI->getOperand(1).isReg() ||
        DefMI->getOperand(1).getReg() != SrcReg)
      return false;
    unsigned DefIdx;
    if (!DefMI->isRegTiedToDefOperand(1, &DefIdx) || DefIdx != 0)
      return false;
    unsigned SrcIdx1, SrcIdx2;
    if (!TII->findCommutedOpIndices(DefMI, SrcIdx1, SrcIdx2))
      return false;
    if (SrcIdx1 == 1 && SrcIdx2 == 2) {
      DstIdx = 2;
      return true;
    }
    return false;
  }

  /// CommuteToFoldReload -
  /// Look for
  ///   r1 = load fi#1
  ///   r1 = op r1, r2<kill>
  ///   store r1, fi#1
  ///
  /// If op is commutable and r2 is killed, then we can xform these to
  ///   r2 = op r2, fi#1
  ///   store r2, fi#1
  bool CommuteToFoldReload(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator &MII,
                           unsigned VirtReg, unsigned SrcReg, int SS,
                           AvailableSpills &Spills,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           const TargetRegisterInfo *TRI,
                           VirtRegMap &VRM) {
    if (MII == MBB.begin() || !MII->killsRegister(SrcReg))
      return false;

    MachineFunction &MF = *MBB.getParent();
    MachineInstr &MI = *MII;
    MachineBasicBlock::iterator DefMII = prior(MII);
    MachineInstr *DefMI = DefMII;
    const TargetInstrDesc &TID = DefMI->getDesc();
    unsigned NewDstIdx;
    if (DefMII != MBB.begin() &&
        TID.isCommutable() &&
        CommuteChangesDestination(DefMI, TID, SrcReg, TII, NewDstIdx)) {
      MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
      unsigned NewReg = NewDstMO.getReg();
      if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
        return false;
      MachineInstr *ReloadMI = prior(DefMII);
      int FrameIdx;
      unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
      if (DestReg != SrcReg || FrameIdx != SS)
        return false;
      int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
      if (UseIdx == -1)
        return false;
      unsigned DefIdx;
      if (!MI.isRegTiedToDefOperand(UseIdx, &DefIdx))
        return false;
      assert(DefMI->getOperand(DefIdx).isReg() &&
             DefMI->getOperand(DefIdx).getReg() == SrcReg);

      // Now commute def instruction.
      MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
      if (!CommutedMI)
        return false;
      SmallVector<unsigned, 1> Ops;
      Ops.push_back(NewDstIdx);
      MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS);
      // Not needed since foldMemoryOperand returns new MI.
      MF.DeleteMachineInstr(CommutedMI);
      if (!FoldedMI)
        return false;

      VRM.addSpillSlotUse(SS, FoldedMI);
      VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
      // Insert new def MI and spill MI.
      const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
      TII->storeRegToStackSlot(MBB, &MI, NewReg, true, SS, RC);
      MII = prior(MII);
      MachineInstr *StoreMI = MII;
      VRM.addSpillSlotUse(SS, StoreMI);
      VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
      MII = MBB.insert(MII, FoldedMI);  // Update MII to backtrack.

      // Delete all 3 old instructions.
      InvalidateKills(*ReloadMI, TRI, RegKills, KillOps);
      VRM.RemoveMachineInstrFromMaps(ReloadMI);
      MBB.erase(ReloadMI);
      InvalidateKills(*DefMI, TRI, RegKills, KillOps);
      VRM.RemoveMachineInstrFromMaps(DefMI);
      MBB.erase(DefMI);
      InvalidateKills(MI, TRI, RegKills, KillOps);
      VRM.RemoveMachineInstrFromMaps(&MI);
      MBB.erase(&MI);

      // If NewReg was previously holding value of some SS, it's now clobbered.
      // This has to be done now because it's a physical register. When this
      // instruction is re-visited, it's ignored.
      Spills.ClobberPhysReg(NewReg);

      ++NumCommutes;
      return true;
    }

    return false;
  }
1419
1420 /// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
1421 /// the last store to the same slot is now dead. If so, remove the last store.
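  /// For example (illustrative only):
  ///   store r1, fi#1    <- LastStore; fi#1 is not read in between
  ///   ...
  ///   store r2, fi#1    <- LastStore is elided as a dead store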
  void SpillRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator &MII,
                           int Idx, unsigned PhysReg, int StackSlot,
                           const TargetRegisterClass *RC,
                           bool isAvailable, MachineInstr *&LastStore,
                           AvailableSpills &Spills,
                           SmallSet<MachineInstr*, 4> &ReMatDefs,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM) {

    MachineBasicBlock::iterator oldNextMII = next(MII);
    TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC);
    MachineInstr *StoreMI = prior(oldNextMII);
    VRM.addSpillSlotUse(StackSlot, StoreMI);
    DEBUG(errs() << "Store:\t" << *StoreMI);

    // If there is a dead store to this stack slot, nuke it now.
    if (LastStore) {
      DEBUG(errs() << "Removed dead store:\t" << *LastStore);
      ++NumDSE;
      SmallVector<unsigned, 2> KillRegs;
      InvalidateKills(*LastStore, TRI, RegKills, KillOps, &KillRegs);
      MachineBasicBlock::iterator PrevMII = LastStore;
      bool CheckDef = PrevMII != MBB.begin();
      if (CheckDef)
        --PrevMII;
      VRM.RemoveMachineInstrFromMaps(LastStore);
      MBB.erase(LastStore);
      if (CheckDef) {
        // Look at defs of killed registers on the store. Mark the defs
        // as dead since the store has been deleted and they aren't
        // being reused.
        for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
          bool HasOtherDef = false;
          if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef)) {
            MachineInstr *DeadDef = PrevMII;
            if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
              // FIXME: This assumes a remat def does not have side effects.
              VRM.RemoveMachineInstrFromMaps(DeadDef);
              MBB.erase(DeadDef);
              ++NumDRM;
            }
          }
        }
      }
    }

    // Allow for multi-instruction spill sequences, as on PPC Altivec. Presume
    // the last of multiple instructions is the actual store.
    LastStore = prior(oldNextMII);

    // If the stack slot value was previously available in some other
    // register, change it now. Otherwise, make the value available
    // in PhysReg.
    Spills.ModifyStackSlotOrReMat(StackSlot);
    Spills.ClobberPhysReg(PhysReg);
    Spills.addAvailable(StackSlot, PhysReg, isAvailable);
    ++NumStores;
  }

  /// isSafeToDelete - Return true if this instruction doesn't produce any side
  /// effects and all of its defs are dead.
  static bool isSafeToDelete(MachineInstr &MI) {
    const TargetInstrDesc &TID = MI.getDesc();
    if (TID.mayLoad() || TID.mayStore() || TID.isCall() || TID.isTerminator() ||
        TID.isBarrier() || TID.isReturn() ||
        TID.hasUnmodeledSideEffects())
      return false;
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!MO.isReg() || !MO.getReg())
        continue;
      if (MO.isDef() && !MO.isDead())
        return false;
      if (MO.isUse() && MO.isKill())
        // FIXME: We can't remove kill markers or else the scavenger will
        // assert. An alternative is to add an ADD pseudo instruction to
        // replace kill markers.
        return false;
    }
    return true;
  }

  /// TransferDeadness - An identity copy definition is dead and it's being
  /// removed. Find the last def or use and mark it as dead / kill.
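  /// For example (hypothetical sequence):
  ///   r1 = op ...    <- last def of r1: marked <dead>, or deleted if safe
  ///   ...
  ///   r1 = r1        <- identity copy being removed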
  void TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
                        unsigned Reg, BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps,
                        VirtRegMap &VRM) {
    SmallPtrSet<MachineInstr*, 4> Seens;
    SmallVector<std::pair<MachineInstr*, int>, 8> Refs;
    for (MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(Reg),
           RE = RegInfo->reg_end(); RI != RE; ++RI) {
      MachineInstr *UDMI = &*RI;
      if (UDMI->getParent() != MBB)
        continue;
      DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
      if (DI == DistanceMap.end() || DI->second > CurDist)
        continue;
      if (Seens.insert(UDMI))
        Refs.push_back(std::make_pair(UDMI, DI->second));
    }

    if (Refs.empty())
      return;
    std::sort(Refs.begin(), Refs.end(), RefSorter());

    while (!Refs.empty()) {
      MachineInstr *LastUDMI = Refs.back().first;
      Refs.pop_back();

      MachineOperand *LastUD = NULL;
      for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = LastUDMI->getOperand(i);
        if (!MO.isReg() || MO.getReg() != Reg)
          continue;
        if (!LastUD || (LastUD->isUse() && MO.isDef()))
          LastUD = &MO;
        if (LastUDMI->isRegTiedToDefOperand(i))
          break;
      }
      if (LastUD->isDef()) {
        // If the instruction has no side effects, delete it and propagate
        // backward further. Otherwise, mark it dead and we are done.
        if (!isSafeToDelete(*LastUDMI)) {
          LastUD->setIsDead();
          break;
        }
        VRM.RemoveMachineInstrFromMaps(LastUDMI);
        MBB->erase(LastUDMI);
      } else {
        LastUD->setIsKill();
        RegKills.set(Reg);
        KillOps[Reg] = LastUD;
        break;
      }
    }
  }

  /// RewriteMBB - Keep track of which spills are available even after the
  /// register allocator is done with them. If possible, avoid reloading vregs.
  void RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM,
                  LiveIntervals *LIs,
                  AvailableSpills &Spills, BitVector &RegKills,
                  std::vector<MachineOperand*> &KillOps) {

    DEBUG(errs() << "\n**** Local spiller rewriting MBB '"
                 << MBB.getBasicBlock()->getName() << "':\n");

    MachineFunction &MF = *MBB.getParent();

    // MaybeDeadStores - When we need to write a value back into a stack slot,
    // keep track of the inserted store. If the stack slot value is never read
    // (because the value was used from some available register, for example),
    // and subsequently stored to, the original store is dead. This map keeps
    // track of inserted stores that are not used. If we see a subsequent store
    // to the same stack slot, the original store is deleted.
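    // For example (illustrative only):
    //   store r0, fi#3    <- recorded in MaybeDeadStores[3]
    //   ...               <- any read of fi#3 clears the entry
    //   store r1, fi#3    <- otherwise the first store is deleted as dead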
    std::vector<MachineInstr*> MaybeDeadStores;
    MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);

    // ReMatDefs - These are rematerializable def MIs which are not deleted.
    SmallSet<MachineInstr*, 4> ReMatDefs;

    // Clear kill info.
    SmallSet<unsigned, 2> KilledMIRegs;
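    // KilledMIRegs tracks, per instruction, which virtual registers have
    // already received a kill marker, so repeated uses of the same vreg in
    // one MI get only a single kill.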
    RegKills.reset();
    KillOps.clear();
    KillOps.resize(TRI->getNumRegs(), NULL);

    unsigned Dist = 0;
    DistanceMap.clear();
    for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
         MII != E; ) {
      MachineBasicBlock::iterator NextMII = next(MII);

      VirtRegMap::MI2VirtMapTy::const_iterator I, End;
      bool Erased = false;
      bool BackTracked = false;
      if (OptimizeByUnfold(MBB, MII,
                           MaybeDeadStores, Spills, RegKills, KillOps, VRM))
        NextMII = next(MII);

      MachineInstr &MI = *MII;

      if (VRM.hasEmergencySpills(&MI)) {
        // Spill physical register(s) in the rare case the allocator has run
        // out of registers to allocate.
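        // Illustrative shape of the emitted code, assuming one emergency
        // spill of r0 to its emergency slot fi#E and no spill scheduling:
        //   store r0, fi#E
        //   MI              <- the instruction that ran out of registers
        //   r0 = load fi#E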
        SmallSet<int, 4> UsedSS;
        std::vector<unsigned> &EmSpills = VRM.getEmergencySpills(&MI);
        for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
          unsigned PhysReg = EmSpills[i];
          const TargetRegisterClass *RC =
            TRI->getPhysicalRegisterRegClass(PhysReg);
          assert(RC && "Unable to determine register class!");
          int SS = VRM.getEmergencySpillSlot(RC);
          if (UsedSS.count(SS))
            llvm_unreachable("Need to spill more than one physical register!");
          UsedSS.insert(SS);
          TII->storeRegToStackSlot(MBB, MII, PhysReg, true, SS, RC);
          MachineInstr *StoreMI = prior(MII);
          VRM.addSpillSlotUse(SS, StoreMI);

          // Back-schedule reloads and remats.
          MachineBasicBlock::iterator InsertLoc =
            ComputeReloadLoc(next(MII), MBB.begin(), PhysReg, TRI, false,
                             SS, TII, MF);

          TII->loadRegFromStackSlot(MBB, InsertLoc, PhysReg, SS, RC);

          MachineInstr *LoadMI = prior(InsertLoc);
          VRM.addSpillSlotUse(SS, LoadMI);
          ++NumPSpills;
          DistanceMap.insert(std::make_pair(LoadMI, Dist++));
        }
        NextMII = next(MII);
      }

      // Insert restores here if asked to.
      if (VRM.isRestorePt(&MI)) {
        std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI);
        for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
          unsigned VirtReg = RestoreRegs[e-i-1];  // Reverse order.
          if (!VRM.getPreSplitReg(VirtReg))
            continue; // Split interval spilled again.
          unsigned Phys = VRM.getPhys(VirtReg);
          RegInfo->setPhysRegUsed(Phys);

          // Check if the value being restored is available. If so, it must be
          // from a predecessor BB that falls through into this BB. We do not
          // expect:
          // BB1:
          //   r1 = load fi#1
          //   ...
          //      = r1<kill>
          //   ... # r1 not clobbered
          //   ...
          //      = load fi#1
          bool DoReMat = VRM.isReMaterialized(VirtReg);
          int SSorRMId = DoReMat
            ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
          const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
          unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
          if (InReg == Phys) {
            // If the value is already available in the expected register, save
            // a reload / remat.
            if (SSorRMId > VirtRegMap::MAX_STACK_SLOT)
              DEBUG(errs() << "Reusing RM#"
                           << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
            else
              DEBUG(errs() << "Reusing SS#" << SSorRMId);
            DEBUG(errs() << " from physreg "
                         << TRI->getName(InReg) << " for vreg"
                         << VirtReg << " instead of reloading into physreg "
                         << TRI->getName(Phys) << '\n');
            ++NumOmitted;
            continue;
          } else if (InReg && InReg != Phys) {
            if (SSorRMId > VirtRegMap::MAX_STACK_SLOT)
              DEBUG(errs() << "Reusing RM#"
                           << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
            else
              DEBUG(errs() << "Reusing SS#" << SSorRMId);
            DEBUG(errs() << " from physreg "
                         << TRI->getName(InReg) << " for vreg"
                         << VirtReg << " by copying it into physreg "
                         << TRI->getName(Phys) << '\n');

            // If the reloaded / remat value is available in another register,
            // copy it to the desired register.

            // Back-schedule reloads and remats.
            MachineBasicBlock::iterator InsertLoc =
              ComputeReloadLoc(MII, MBB.begin(), Phys, TRI, DoReMat,
                               SSorRMId, TII, MF);

            TII->copyRegToReg(MBB, InsertLoc, Phys, InReg, RC, RC);

            // This invalidates Phys.
            Spills.ClobberPhysReg(Phys);
            // Remember it's available.
            Spills.addAvailable(SSorRMId, Phys);

            // Mark it killed.
            MachineInstr *CopyMI = prior(InsertLoc);
            CopyMI->setAsmPrinterFlag(AsmPrinter::ReloadReuse);
            MachineOperand *KillOpnd = CopyMI->findRegisterUseOperand(InReg);
            KillOpnd->setIsKill();
            UpdateKills(*CopyMI, TRI, RegKills, KillOps);

            DEBUG(errs() << '\t' << *CopyMI);
            ++NumCopified;
            continue;
          }

          // Back-schedule reloads and remats.
          MachineBasicBlock::iterator InsertLoc =
            ComputeReloadLoc(MII, MBB.begin(), Phys, TRI, DoReMat,
                             SSorRMId, TII, MF);

          if (VRM.isReMaterialized(VirtReg)) {
            ReMaterialize(MBB, InsertLoc, Phys, VirtReg, TII, TRI, VRM);
          } else {
            const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
            TII->loadRegFromStackSlot(MBB, InsertLoc, Phys, SSorRMId, RC);
            MachineInstr *LoadMI = prior(InsertLoc);
            VRM.addSpillSlotUse(SSorRMId, LoadMI);
            ++NumLoads;
            DistanceMap.insert(std::make_pair(LoadMI, Dist++));
          }

          // This invalidates Phys.
          Spills.ClobberPhysReg(Phys);
          // Remember it's available.
          Spills.addAvailable(SSorRMId, Phys);

          UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
          DEBUG(errs() << '\t' << *prior(InsertLoc));
        }
      }

      // Insert spills here if asked to.
      if (VRM.isSpillPt(&MI)) {
        std::vector<std::pair<unsigned,bool> > &SpillRegs =
          VRM.getSpillPtSpills(&MI);
        for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
          unsigned VirtReg = SpillRegs[i].first;
          bool isKill = SpillRegs[i].second;
          if (!VRM.getPreSplitReg(VirtReg))
            continue; // Split interval spilled again.
          const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
          unsigned Phys = VRM.getPhys(VirtReg);
          int StackSlot = VRM.getStackSlot(VirtReg);
          MachineBasicBlock::iterator oldNextMII = next(MII);
          TII->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC);
          MachineInstr *StoreMI = prior(oldNextMII);
          VRM.addSpillSlotUse(StackSlot, StoreMI);
          DEBUG(errs() << "Store:\t" << *StoreMI);
          VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
        }
        NextMII = next(MII);
      }

      /// ReusedOperands - Keep track of operand reuse in case we need to undo
      /// reuse.
      ReuseInfo ReusedOperands(MI, TRI);
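      // For example (illustrative only): if an early operand reuses r0 as the
      // value of fi#1 and a later operand was assigned r0 by the allocator,
      // GetRegForReload below undoes the reuse and reloads fi#1 elsewhere.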
      SmallVector<unsigned, 4> VirtUseOps;
      for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI.getOperand(i);
        if (!MO.isReg() || MO.getReg() == 0)
          continue;   // Ignore non-register operands.

        unsigned VirtReg = MO.getReg();
        if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
          // Ignore physregs for spilling, but remember that it is used by this
          // function.
          RegInfo->setPhysRegUsed(VirtReg);
          continue;
        }

        // We want to process implicit virtual register uses first.
        if (MO.isImplicit())
          // If the virtual register is implicitly defined, emit an
          // implicit_def before so the scavenger knows it's "defined".
          // FIXME: This is a horrible hack done by the register allocator to
          // remat a definition with a virtual register operand.
          VirtUseOps.insert(VirtUseOps.begin(), i);
        else
          VirtUseOps.push_back(i);
      }

      // Process all of the spilled uses and all non-spilled reg references.
      SmallVector<int, 2> PotentialDeadStoreSlots;
      KilledMIRegs.clear();
      for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
        unsigned i = VirtUseOps[j];
        MachineOperand &MO = MI.getOperand(i);
        unsigned VirtReg = MO.getReg();
        assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
               "Not a virtual register?");

        unsigned SubIdx = MO.getSubReg();
        if (VRM.isAssignedReg(VirtReg)) {
          // This virtual register was assigned a physreg!
          unsigned Phys = VRM.getPhys(VirtReg);
          RegInfo->setPhysRegUsed(Phys);
          if (MO.isDef())
            ReusedOperands.markClobbered(Phys);
          unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
          MI.getOperand(i).setReg(RReg);
          MI.getOperand(i).setSubReg(0);
          if (VRM.isImplicitlyDefined(VirtReg))
            // FIXME: Is this needed?
            BuildMI(MBB, &MI, MI.getDebugLoc(),
                    TII->get(TargetInstrInfo::IMPLICIT_DEF), RReg);
          continue;
        }

        // This virtual register is now known to be a spilled value.
        if (!MO.isUse())
          continue;  // Handle defs in the loop below (handle use&def here though)

        bool AvoidReload = MO.isUndef();
        // Check if it is defined by an implicit def. It should not be spilled.
        // Note, this is for correctness reasons, e.g.
        //   8   %reg1024<def> = IMPLICIT_DEF
        //   12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
        // The live range [12, 14) is not part of the r1024 live interval since
        // it's defined by an implicit def. It will not conflict with the live
        // interval of r1025. Now suppose both registers are spilled: you can
        // easily see a situation where both registers are reloaded before
        // the INSERT_SUBREG and both target registers that would overlap.
        bool DoReMat = VRM.isReMaterialized(VirtReg);
        int SSorRMId = DoReMat
          ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
        int ReuseSlot = SSorRMId;

        // Check to see if this stack slot is available.
        unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);

        // If this is a sub-register use, make sure the reuse register is in the
        // right register class. For example, for x86 not all of the 32-bit
        // registers have accessible sub-registers.
        // Similarly so for EXTRACT_SUBREG. Consider this:
        //   EDI = op
        //   MOV32_mr fi#1, EDI
        //   ...
        //       = EXTRACT_SUBREG fi#1
        // fi#1 is available in EDI, but it cannot be reused because it's not in
        // the right register file.
        if (PhysReg && !AvoidReload &&
            (SubIdx || MI.getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)) {
          const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
          if (!RC->contains(PhysReg))
            PhysReg = 0;
        }

        if (PhysReg && !AvoidReload) {
          // This spilled operand might be part of a two-address operand. If this
          // is the case, then changing it will necessarily require changing the
          // def part of the instruction as well. However, in some cases, we
          // aren't allowed to modify the reused register. If none of these cases
          // apply, reuse it.
          bool CanReuse = true;
          bool isTied = MI.isRegTiedToDefOperand(i);
          if (isTied) {
            // Okay, we have a two address operand. We can reuse this physreg as
            // long as we are allowed to clobber the value and there isn't an
            // earlier def that has already clobbered the physreg.
            CanReuse = !ReusedOperands.isClobbered(PhysReg) &&
              Spills.canClobberPhysReg(PhysReg);
          }

          if (CanReuse) {
            // If this stack slot value is already available, reuse it!
            if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
              DEBUG(errs() << "Reusing RM#"
                           << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
            else
              DEBUG(errs() << "Reusing SS#" << ReuseSlot);
            DEBUG(errs() << " from physreg "
                         << TRI->getName(PhysReg) << " for vreg"
                         << VirtReg << " instead of reloading into physreg "
                         << TRI->getName(VRM.getPhys(VirtReg)) << '\n');
            unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
            MI.getOperand(i).setReg(RReg);
            MI.getOperand(i).setSubReg(0);

            // The only technical detail we have is that we don't know that
            // PhysReg won't be clobbered by a reloaded stack slot that occurs
            // later in the instruction. In particular, consider 'op V1, V2'.
            // If V1 is available in physreg R0, we would choose to reuse it
            // here, instead of reloading it into the register the allocator
            // indicated (say R1). However, V2 might have to be reloaded
            // later, and it might indicate that it needs to live in R0. When
            // this occurs, we need to have information available that
            // indicates it is safe to use R1 for the reload instead of R0.
            //
            // To further complicate matters, we might conflict with an alias,
            // or R0 and R1 might not be compatible with each other. In this
            // case, we actually insert a reload for V1 in R1, ensuring that
            // we can get at R0 or its alias.
            ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
                                    VRM.getPhys(VirtReg), VirtReg);
            if (isTied)
              // Only mark it clobbered if this is a use&def operand.
              ReusedOperands.markClobbered(PhysReg);
            ++NumReused;

            if (MI.getOperand(i).isKill() &&
                ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {

              // The store of this spilled value is potentially dead, but we
              // won't know for certain until we've confirmed that the re-use
              // above is valid, which means waiting until the other operands
              // are processed. For now we just track the spill slot; we'll
              // remove it after the other operands are processed if valid.

              PotentialDeadStoreSlots.push_back(ReuseSlot);
            }

            // Mark it as isKill if there are no other uses of the same virtual
            // register and it's not a two-address operand. IsKill will be
            // unset if the reg is reused.
            if (!isTied && KilledMIRegs.count(VirtReg) == 0) {
              MI.getOperand(i).setIsKill();
              KilledMIRegs.insert(VirtReg);
            }

            continue;
          }  // CanReuse

          // Otherwise we have a situation where we have a two-address instruction
          // whose mod/ref operand needs to be reloaded. This reload is already
          // available in some register "PhysReg", but if we used PhysReg as the
          // operand to our 2-addr instruction, the instruction would modify
          // PhysReg. This isn't cool if something later uses PhysReg and expects
          // to get its initial value.
          //
          // To avoid this problem, and to avoid doing a load right after a store,
          // we emit a copy from PhysReg into the designated register for this
          // operand.
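          // For example (illustrative only): if 'r1 = op r1, r2' has its use
          // of r1 available in r0, reusing r0 directly would let the op
          // clobber r0, so we emit 'r1 = r0' and let the op modify r1.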
          unsigned DesignatedReg = VRM.getPhys(VirtReg);
          assert(DesignatedReg && "Must map virtreg to physreg!");

          // Note that, if we reused a register for a previous operand, the
          // register we want to reload into might not actually be
          // available. If this occurs, use the register indicated by the
          // reuser.
          if (ReusedOperands.hasReuses())
            DesignatedReg = ReusedOperands.GetRegForReload(VirtReg,
                                DesignatedReg, &MI, Spills,
                                MaybeDeadStores, RegKills, KillOps, VRM);

          // If the mapped designated register is actually the physreg we have
          // incoming, we don't need to insert a dead copy.
          if (DesignatedReg == PhysReg) {
            // If this stack slot value is already available, reuse it!
            if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
              DEBUG(errs() << "Reusing RM#"
                           << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
            else
              DEBUG(errs() << "Reusing SS#" << ReuseSlot);
            DEBUG(errs() << " from physreg " << TRI->getName(PhysReg)
                         << " for vreg" << VirtReg
                         << " instead of reloading into same physreg.\n");
            unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
            MI.getOperand(i).setReg(RReg);
            MI.getOperand(i).setSubReg(0);
            ReusedOperands.markClobbered(RReg);
            ++NumReused;
            continue;
          }

          const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
          RegInfo->setPhysRegUsed(DesignatedReg);
          ReusedOperands.markClobbered(DesignatedReg);

          // Back-schedule reloads and remats.
          MachineBasicBlock::iterator InsertLoc =
            ComputeReloadLoc(&MI, MBB.begin(), PhysReg, TRI, DoReMat,
                             SSorRMId, TII, MF);

          TII->copyRegToReg(MBB, InsertLoc, DesignatedReg, PhysReg, RC, RC);

          MachineInstr *CopyMI = prior(InsertLoc);
          CopyMI->setAsmPrinterFlag(AsmPrinter::ReloadReuse);
          UpdateKills(*CopyMI, TRI, RegKills, KillOps);

          // This invalidates DesignatedReg.
          Spills.ClobberPhysReg(DesignatedReg);

          Spills.addAvailable(ReuseSlot, DesignatedReg);
          unsigned RReg =
            SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
          MI.getOperand(i).setReg(RReg);
          MI.getOperand(i).setSubReg(0);
          DEBUG(errs() << '\t' << *prior(MII));
          ++NumReused;
          continue;
        }  // if (PhysReg)

        // Otherwise, reload it and remember that we have it.
        PhysReg = VRM.getPhys(VirtReg);
        assert(PhysReg && "Must map virtreg to physreg!");

        // Note that, if we reused a register for a previous operand, the
        // register we want to reload into might not actually be
        // available. If this occurs, use the register indicated by the
        // reuser.
        if (ReusedOperands.hasReuses())
          PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
                      Spills, MaybeDeadStores, RegKills, KillOps, VRM);

        RegInfo->setPhysRegUsed(PhysReg);
        ReusedOperands.markClobbered(PhysReg);
        if (AvoidReload)
          ++NumAvoided;
        else {
          // Back-schedule reloads and remats.
          MachineBasicBlock::iterator InsertLoc =
            ComputeReloadLoc(MII, MBB.begin(), PhysReg, TRI, DoReMat,
                             SSorRMId, TII, MF);

          if (DoReMat) {
            ReMaterialize(MBB, InsertLoc, PhysReg, VirtReg, TII, TRI, VRM);
          } else {
            const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
            TII->loadRegFromStackSlot(MBB, InsertLoc, PhysReg, SSorRMId, RC);
            MachineInstr *LoadMI = prior(InsertLoc);
            VRM.addSpillSlotUse(SSorRMId, LoadMI);
            ++NumLoads;
            DistanceMap.insert(std::make_pair(LoadMI, Dist++));
          }
          // This invalidates PhysReg.
          Spills.ClobberPhysReg(PhysReg);

          // Any stores to this stack slot are not dead anymore.
          if (!DoReMat)
            MaybeDeadStores[SSorRMId] = NULL;
          Spills.addAvailable(SSorRMId, PhysReg);
          // Assumes this is the last use. IsKill will be unset if the reg is
          // reused unless it's a two-address operand.
          if (!MI.isRegTiedToDefOperand(i) &&
              KilledMIRegs.count(VirtReg) == 0) {
            MI.getOperand(i).setIsKill();
            KilledMIRegs.insert(VirtReg);
          }

          UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
          DEBUG(errs() << '\t' << *prior(InsertLoc));
        }
        unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
        MI.getOperand(i).setReg(RReg);
        MI.getOperand(i).setSubReg(0);
      }

      // Ok - now we can remove stores that have been confirmed dead.
      for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
        // This was the last use and the spilled value is still available
        // for reuse. That means the spill was unnecessary!
        int PDSSlot = PotentialDeadStoreSlots[j];
        MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
        if (DeadStore) {
          DEBUG(errs() << "Removed dead store:\t" << *DeadStore);
          InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
          VRM.RemoveMachineInstrFromMaps(DeadStore);
          MBB.erase(DeadStore);
          MaybeDeadStores[PDSSlot] = NULL;
          ++NumDSE;
        }
      }

      DEBUG(errs() << '\t' << MI);

      // If we have folded references to memory operands, make sure we clear all
      // physical registers that may contain the value of the spilled virtual
      // register.
      SmallSet<int, 2> FoldedSS;
      for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
        unsigned VirtReg = I->second.first;
        VirtRegMap::ModRef MR = I->second.second;
        DEBUG(errs() << "Folded vreg: " << VirtReg << " MR: " << MR);

        // MI2VirtMap can be updated, which invalidates the iterator, so
        // increment the iterator first.
        ++I;
        int SS = VRM.getStackSlot(VirtReg);
        if (SS == VirtRegMap::NO_STACK_SLOT)
          continue;
        FoldedSS.insert(SS);
        DEBUG(errs() << " - StackSlot: " << SS << "\n");

        // If this folded instruction is just a use, check to see if it's a
        // straight load from the virt reg slot.
        if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
          int FrameIdx;
          unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
          if (DestReg && FrameIdx == SS) {
            // If this spill slot is available, turn it into a copy (or nothing)
            // instead of leaving it as a load!
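            // For example (illustrative only):
            //   r0 = load fi#2    ; fi#2 already available in r1
            // becomes
            //   r0 = r1           ; or is deleted outright when r0 == r1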
            if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
              DEBUG(errs() << "Promoted Load To Copy: " << MI);
              if (DestReg != InReg) {
                const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
                TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
                MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
                unsigned SubIdx = DefMO->getSubReg();
                // Revisit the copy so we make sure to notice the effects of the
                // operation on the destreg (either needing to RA it if it's
                // virtual or needing to clobber any values if it's physical).
                NextMII = &MI;
                --NextMII;  // backtrack to the copy.
                NextMII->setAsmPrinterFlag(AsmPrinter::ReloadReuse);
                // Propagate the sub-register index over.
                if (SubIdx) {
                  DefMO = NextMII->findRegisterDefOperand(DestReg);
                  DefMO->setSubReg(SubIdx);
                }

                // Mark it killed.
                MachineOperand *KillOpnd = NextMII->findRegisterUseOperand(InReg);
                KillOpnd->setIsKill();

                BackTracked = true;
              } else {
                DEBUG(errs() << "Removing now-noop copy: " << MI);
                // Unset the last kill since it's being reused.
                InvalidateKill(InReg, TRI, RegKills, KillOps);
                Spills.disallowClobberPhysReg(InReg);
              }

              InvalidateKills(MI, TRI, RegKills, KillOps);
              VRM.RemoveMachineInstrFromMaps(&MI);
              MBB.erase(&MI);
              Erased = true;
              goto ProcessNextInst;
            }
          } else {
            unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
            SmallVector<MachineInstr*, 4> NewMIs;
            if (PhysReg &&
                TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
              MBB.insert(MII, NewMIs[0]);
              InvalidateKills(MI, TRI, RegKills, KillOps);
              VRM.RemoveMachineInstrFromMaps(&MI);
              MBB.erase(&MI);
              Erased = true;
              --NextMII;  // backtrack to the unfolded instruction.
              BackTracked = true;
              goto ProcessNextInst;
            }
          }
        }

        // If this reference is not a use, any previous store is now dead.
        // Otherwise, the store to this stack slot is not dead anymore.
        MachineInstr* DeadStore = MaybeDeadStores[SS];
        if (DeadStore) {
          bool isDead = !(MR & VirtRegMap::isRef);
          MachineInstr *NewStore = NULL;
          if (MR & VirtRegMap::isModRef) {
            unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
            SmallVector<MachineInstr*, 4> NewMIs;
            // We can reuse this physreg as long as we are allowed to clobber
            // the value and there isn't an earlier def that has already
            // clobbered the physreg.
            if (PhysReg &&
                !ReusedOperands.isClobbered(PhysReg) &&
                Spills.canClobberPhysReg(PhysReg) &&
                !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
              MachineOperand *KillOpnd =
                DeadStore->findRegisterUseOperand(PhysReg, true);
              // Note, if the store is storing a sub-register, it's possible the
              // super-register is needed below.
              if (KillOpnd && !KillOpnd->getSubReg() &&
                  TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true,
                                           NewMIs)) {
                MBB.insert(MII, NewMIs[0]);
                NewStore = NewMIs[1];
                MBB.insert(MII, NewStore);
                VRM.addSpillSlotUse(SS, NewStore);
                InvalidateKills(MI, TRI, RegKills, KillOps);
                VRM.RemoveMachineInstrFromMaps(&MI);
                MBB.erase(&MI);
                Erased = true;
                --NextMII;
                --NextMII;  // backtrack to the unfolded instruction.
                BackTracked = true;
                isDead = true;
                ++NumSUnfold;
              }
            }
          }

          if (isDead) {  // Previous store is dead.
            // If we get here, the store is dead, nuke it now.
            DEBUG(errs() << "Removed dead store:\t" << *DeadStore);
            InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(DeadStore);
            MBB.erase(DeadStore);
            if (!NewStore)
              ++NumDSE;
          }

          MaybeDeadStores[SS] = NULL;
          if (NewStore) {
            // Treat this store as a spill merged into a copy. That makes the
            // stack slot value available.
            VRM.virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
            goto ProcessNextInst;
          }
        }

        // If the spill slot value is available, and this is a new definition of
        // the value, the value is not available anymore.
        if (MR & VirtRegMap::isMod) {
          // Notice that the value in this stack slot has been modified.
          Spills.ModifyStackSlotOrReMat(SS);

          // If this is *just* a mod of the value, check to see if this is just a
          // store to the spill slot (i.e. the spill got merged into the copy). If
          // so, realize that the vreg is available now, and add the store to the
          // MaybeDeadStore info.
          int StackSlot;
          if (!(MR & VirtRegMap::isRef)) {
            if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
              assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
                     "Src hasn't been allocated yet?");

              if (CommuteToFoldReload(MBB, MII, VirtReg, SrcReg, StackSlot,
                                      Spills, RegKills, KillOps, TRI, VRM)) {
                NextMII = next(MII);
                BackTracked = true;
                goto ProcessNextInst;
              }

              // Okay, this is certainly a store of SrcReg to [StackSlot]. Mark
              // this as a potentially dead store in case there is a subsequent
              // store into the stack slot without a read from it.
              MaybeDeadStores[StackSlot] = &MI;

              // If the stack slot value was previously available in some other
              // register, change it now. Otherwise, make the value
              // available in SrcReg.
              Spills.addAvailable(StackSlot, SrcReg, MI.killsRegister(SrcReg));
            }
          }
        }
      }

      // Process all of the spilled defs.
      for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI.getOperand(i);
        if (!(MO.isReg() && MO.getReg() && MO.isDef()))
          continue;

        unsigned VirtReg = MO.getReg();
        if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
          // Check to see if this is a noop copy. If so, eliminate the
          // instruction before considering the dest reg to be changed.
          // Also check if it's copying from an "undef"; if so, we can't
          // eliminate the copy or else the undef marker is lost and it will
          // confuse the scavenger. This is extremely rare.
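          // For example (illustrative only): a 'r0 = r0' copy left over from
          // rewriting is deleted here, unless its source operand carries an
          // <undef> marker.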
          unsigned Src, Dst, SrcSR, DstSR;
          if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst &&
              !MI.findRegisterUseOperand(Src)->isUndef()) {
            ++NumDCE;
            DEBUG(errs() << "Removing now-noop copy: " << MI);
            SmallVector<unsigned, 2> KillRegs;
            InvalidateKills(MI, TRI, RegKills, KillOps, &KillRegs);
            if (MO.isDead() && !KillRegs.empty()) {
              // Source register or an implicit super/sub-register use is killed.
              assert(KillRegs[0] == Dst ||
                     TRI->isSubRegister(KillRegs[0], Dst) ||
                     TRI->isSuperRegister(KillRegs[0], Dst));
              // Last def is now dead.
              TransferDeadness(&MBB, Dist, Src, RegKills, KillOps, VRM);
            }
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            Spills.disallowClobberPhysReg(VirtReg);
            goto ProcessNextInst;
          }

          // If it's not a no-op copy, it clobbers the value in the destreg.
          Spills.ClobberPhysReg(VirtReg);
          ReusedOperands.markClobbered(VirtReg);

          // Check to see if this instruction is a load from a stack slot into
          // a register. If so, this provides the stack slot value in the reg.
          int FrameIdx;
          if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
            assert(DestReg == VirtReg && "Unknown load situation!");

            // If it is a folded reference, then it's not safe to clobber.
            bool Folded = FoldedSS.count(FrameIdx);
            // Otherwise, if it wasn't available, remember that it is now!
            Spills.addAvailable(FrameIdx, DestReg, !Folded);
            goto ProcessNextInst;
          }

          continue;
        }

        unsigned SubIdx = MO.getSubReg();
        bool DoReMat = VRM.isReMaterialized(VirtReg);
        if (DoReMat)
          ReMatDefs.insert(&MI);

        // The only vregs left are stack slot definitions.
        int StackSlot = VRM.getStackSlot(VirtReg);
        const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);

        // If this def is part of a two-address operand, make sure to execute
        // the store from the correct physical register.
        unsigned PhysReg;
        unsigned TiedOp;
        if (MI.isRegTiedToUseOperand(i, &TiedOp)) {
          PhysReg = MI.getOperand(TiedOp).getReg();
          if (SubIdx) {
            unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
            assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
                   "Can't find corresponding super-register!");
            PhysReg = SuperReg;
          }
        } else {
          PhysReg = VRM.getPhys(VirtReg);
          if (ReusedOperands.isClobbered(PhysReg)) {
            // Another def has taken the assigned physreg. It must have been a
            // use&def which got it due to reuse. Undo the reuse!
            PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
                        Spills, MaybeDeadStores, RegKills, KillOps, VRM);
          }
        }

        assert(PhysReg && "VR not assigned a physical register?");
        RegInfo->setPhysRegUsed(PhysReg);
        unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
        ReusedOperands.markClobbered(RReg);
        MI.getOperand(i).setReg(RReg);
        MI.getOperand(i).setSubReg(0);

        if (!MO.isDead()) {
          MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
          SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, true,
                              LastStore, Spills, ReMatDefs, RegKills,
                              KillOps, VRM);
          NextMII = next(MII);

          // Check to see if this is a noop copy. If so, eliminate the
          // instruction before considering the dest reg to be changed.
          {
            unsigned Src, Dst, SrcSR, DstSR;
            if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
              ++NumDCE;
              DEBUG(errs() << "Removing now-noop copy: " << MI);
              InvalidateKills(MI, TRI, RegKills, KillOps);
              VRM.RemoveMachineInstrFromMaps(&MI);
              MBB.erase(&MI);
              Erased = true;
              UpdateKills(*LastStore, TRI, RegKills, KillOps);
              goto ProcessNextInst;
            }
          }
        }
      }
    ProcessNextInst:
      // Delete dead instructions without side effects.
      if (!Erased && !BackTracked && isSafeToDelete(MI)) {
        InvalidateKills(MI, TRI, RegKills, KillOps);
        VRM.RemoveMachineInstrFromMaps(&MI);
        MBB.erase(&MI);
        Erased = true;
      }
      if (!Erased)
        DistanceMap.insert(std::make_pair(&MI, Dist++));
      if (!Erased && !BackTracked) {
        for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
          UpdateKills(*II, TRI, RegKills, KillOps);
      }
      MII = NextMII;
    }

  }

};

}

llvm::VirtRegRewriter* llvm::createVirtRegRewriter() {
  switch (RewriterOpt) {
  default: llvm_unreachable("Unreachable!");
  case local:
    return new LocalRewriter();
  case trivial:
    return new TrivialRewriter();
  }
}