//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;
// Hidden options to help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
        OS << SlotIndex::getEmptyKey() << '\t' << *mii;
      else
        OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}

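/// conflictsWithPhysReg - Conservatively check whether assigning li to the
/// physical register reg would conflict with another use of reg. Anything
/// spanning more than one live range is rejected outright; otherwise every
/// instruction covered by the range is scanned for an operand that overlaps
/// reg (copies to and from li.reg itself are allowed).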
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries.
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
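  // Each instruction owns four consecutive slots (load, use, def and store),
  // so rounding start down to its base index and rounding end up to the next
  // base index lets the loop below step through whole instructions.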

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysReg except
/// it can check uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue; // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

#ifndef NDEBUG
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
#endif

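/// handleVirtualRegisterDef - Update the interval for the virtual register
/// defined at MIIdx. The first def of a vreg builds its entire live range
/// from LiveVariables info; later defs come from two-address rewriting or
/// phi elimination and only split or extend the existing interval.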
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a new
      // valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create an interval with a new value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
                                      VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else {
      assert(lv_->isPHIJoin(interval.reg) && "Multiply defined register");
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    }
  }

  DEBUG(dbgs() << '\n');
}

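/// handlePhysicalRegisterDef - Update the interval for the physical register
/// defined at MIIdx. Physical register ranges never leave the defining
/// block, so this only scans forward for a kill or redefinition.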
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case where we should have a dead physreg here without a killing
  // instruction is if it is live-in to the function and never used. Another
  // possible case is that the implicit use of the physical register has been
  // deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

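/// handleRegisterDef - Dispatch the register def at MIIdx to the virtual
/// or physical handler above; a physical register def also defines each of
/// its sub-registers.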
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

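/// handleLiveInRegister - Create the live range for a register live into
/// MBB at MIIdx; isAlias marks ranges created for aliases of an actual
/// live-in register.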
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills; if the register reaches a def before it's killed, then
  // it shouldn't be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  MachineBasicBlock::iterator E = MBB->end();
  while (mi != E) {
    if (mi->isDebugValue()) {
      ++mi;
      if (mi != E && !mi->isDebugValue()) {
        baseIndex = indexes_->getNextNonNullIndex(baseIndex);
      }
      continue;
    }
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    ++mi;
    if (mi != E && !mi->isDebugValue()) {
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// computeIntervals - computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j), where 1 <= i <= j < N, over
/// which a variable is live.
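/// e.g. a register defined by instruction d and killed by instruction k
/// gets the range [defSlot(d), useSlot(k)+1), with the special cases
/// handled in handleVirtualRegisterDef and handlePhysicalRegisterDef above.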
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << MBB->getName() << ":\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_defs that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
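  // Physical registers can never be spilled, so they start with infinite
  // spill weight; virtual registers start at zero and accumulate weight
  // from their uses.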
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->isExtractSubreg()) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
        return Reg;
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    }
    return Reg;
  } else if (VNI->getCopy()->isInsertSubreg() ||
             VNI->getCopy()->isSubregToReg())
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
782 "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition MI of every val# of
/// the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                            bool TrySplit, SlotIndex index, SlotIndex end,
                            MachineInstr *MI,
                            MachineInstr *ReMatOrigDefMI,
                            MachineInstr *ReMatDefMI,
                            unsigned Slot, int LdSlot,
                            bool isLoad, bool isLoadSS, bool DefIsReMat,
                            bool CanDelete,
                            VirtRegMap &vrm,
                            const TargetRegisterClass* rc,
                            SmallVector<int, 4> &ReMatIds,
                            const MachineLoopInfo *loopInfo,
                            unsigned &NewVReg, unsigned ImpUse,
                            bool &HasDef, bool &HasUse,
                            DenseMap<unsigned,unsigned> &MBBVRegsMap,
                            std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    // 1. If the instr reads the same spilled vreg multiple times, we
    //    want to reuse the NewVReg.
    // 2. If the instr is a two-addr instruction, we are required to
    //    keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
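
/// anyKillInMBBAfterIdx - Return true if the given value number has a
/// non-PHI kill inside MBB after Idx (up to and including the block end).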
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHI())
      continue;

    SlotIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx <= End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                             LiveInterval::Ranges::const_iterator &I,
                             MachineInstr *ReMatOrigDefMI,
                             MachineInstr *ReMatDefMI,
                             unsigned Slot, int LdSlot,
                             bool isLoad, bool isLoadSS, bool DefIsReMat,
                             bool CanDelete,
                             VirtRegMap &vrm,
                             const TargetRegisterClass* rc,
                             SmallVector<int, 4> &ReMatIds,
                             const MachineLoopInfo *loopInfo,
                             BitVector &SpillMBBs,
                             DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                             BitVector &RestoreMBBs,
                             DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                             DenseMap<unsigned,unsigned> &MBBVRegsMap,
                             std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        // = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) follow the last def; it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg)
      Restores[i].index = SlotIndex();
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}

float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
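  // For example, lc is 1 at depth 0, about 10.1 at depth 1, 87 at depth 2
  // and 657 at depth 3, so a def plus a use at depth 3 weigh roughly 1300.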
1556 float lc = powf(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
1557
1558 return (isDef + isUse) * lc;
1559}
1560
1561void
1562LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1563 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1564 normalizeSpillWeight(*NewLIs[i]);
1565}
1566
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.dump();
      dbgs() << '\n';
    });

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // Create a new register for this spill.
      LiveInterval &nI = getOrCreateInterval(NewVReg);
      nI.markNotSpillable();

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
             E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      SlotIndex index = getInstructionIndex(MI);
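      // An instruction's index has load, use, def and store sub-slots (see
      // SlotIndex); the ranges below cover just the use or def portion of
      // this instruction.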
      if (HasUse) {
        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DEBUG({
          dbgs() << "\t\t\t\tadded new interval: ";
          nI.dump();
          dbgs() << '\n';
        });
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}

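/// addIntervalsForSpills - Spill the given interval: rewrite its defs and
/// uses, fold loads / stores where possible, and record spill / restore
/// points. Returns the new intervals created for the spill.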
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {

  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval: it cannot be split any further, and it
  // is guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                                     Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                     false, vrm, rc, ReMatIds, loopInfo,
                                     SpillMBBs, SpillIdxes, RestoreMBBs,
                                     RestoreIdxes, MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                                     Slot, 0, false, false, false,
                                     false, vrm, rc, ReMatIds, loopInfo,
                                     SpillMBBs, SpillIdxes, RestoreMBBs,
                                     RestoreIdxes, MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node; not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs,
                                 RestoreIdxes, MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores only if we are splitting; otherwise the new
  // intervals are done.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,
                                   VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and mark the
              // register interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg but are not sub-registers
    // of the chosen representative super register, assert, since we can't
    // handle that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        llvm_report_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}

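/// addLiveRangeToEndOfBlock - Extend the live range of the given register
/// from its def at startInst to the end of startInst's basic block.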
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
  LiveRange LR(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}
