//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

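// Illustrative example (not from this file; the indexes and opcodes are made
// up): on the instruction numbering used throughout this pass, a virtual
// register defined at index 4 and last used at index 12 gets the half-open
// live interval [4,12):
//
//   4   %reg1024 = MOV32ri 7                    ; def, interval opens
//   8   %reg1025 = ADD32rr %reg1024, %reg1026
//   12  %reg1027 = MUL32rr %reg1025, %reg1024   ; kill, interval closes
//
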
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
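
// Illustrative sketch (hypothetical client, not part of this file): a pass
// that consumes this analysis declares its dependency the same way, e.g.:
//
//   void MyAllocator::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<LiveIntervals>();
//     AU.addPreserved<LiveIntervals>();
//     MachineFunctionPass::getAnalysisUsage(AU);
//   }
//
// MyAllocator is an assumed name used only for illustration.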

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}
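
// Illustrative sketch (hypothetical caller): once this pass has run, clients
// typically look up intervals by register number using the existing query
// methods, e.g.:
//
//   LiveIntervals &LIS = getAnalysis<LiveIntervals>();
//   if (LIS.hasInterval(Reg)) {
//     LiveInterval &LI = LIS.getInterval(Reg);
//     SlotIndex Idx = LIS.getInstructionIndex(MI);
//     bool Live = LI.liveAt(Idx);
//   }
//
// Reg and MI stand for any register / instruction of interest.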

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
        OS << SlotIndex::getEmptyKey() << '\t' << *mii;
      else
        OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}

bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries.
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions.
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find the last instruction in the range.
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // The range cannot cross basic block boundaries or terminators.
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg.
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
        continue;

    // Check for operands using reg.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}
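
// Illustrative sketch (hypothetical caller): an allocator or coalescer would
// consult the check above before binding li to a candidate physical register:
//
//   if (!conflictsWithPhysReg(li, vrm, TargetReg))
//     vrm.assignVirt2Phys(li.reg, TargetReg);  // no overlapping defs / uses
//
// assignVirt2Phys is VirtRegMap's existing assignment hook; TargetReg is an
// assumed candidate register.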

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysReg except it can
/// check uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

#ifndef NDEBUG
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
#endif

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
      MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
      LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex killIdx =
        getInstructionIndex(Kill).getDefIndex();
      LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {

        VNInfo *VNI = interval.getValNumInfo(0);
        // Phi elimination may have reused the register for multiple identical
        // phi nodes. There will be a kill per phi. Remove the old ranges that
        // we now know have an incorrect number.
        for (unsigned ki=0, ke=vi.Kills.size(); ki != ke; ++ki) {
          MachineInstr *Killer = vi.Kills[ki];
          SlotIndex Start = getMBBStartIdx(Killer->getParent());
          SlotIndex End = getInstructionIndex(Killer).getDefIndex();
          DEBUG({
              dbgs() << "\n\t\trenaming [" << Start << "," << End << "] in: ";
              interval.print(dbgs(), tri_);
            });
          interval.removeRange(Start, End);

          // Replace the interval with one of a NEW value number. Note that
          // this value number isn't actually defined by an instruction, weird
          // huh? :)
          LiveRange LR(Start, End,
                       interval.getNextValue(SlotIndex(Start, true),
                                             0, false, VNInfoAllocator));
          LR.valno->setIsPHIDef(true);
          interval.addRange(LR);
          LR.valno->addKill(End);
        }

        MachineBasicBlock *killMBB = getMBBFromIndex(VNI->def);
        VNI->addKill(indexes_->getTerminatorGap(killMBB));
        VNI->setHasPHIKill(true);
        DEBUG({
            dbgs() << " RESULT: ";
            interval.print(dbgs(), tri_);
          });
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " +" << LR);
    }
  }

  DEBUG(dbgs() << '\n');
}
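
// Illustrative example (assumed indexes and opcodes) of the two-address
// redefinition case handled above:
//
//   4  %reg1024 = MOV %reg1025        ; becomes val#1 (defined by the copy)
//   8  %reg1024 = ADD %reg1024, 1     ; 2-addr redef, becomes val#0
//
// After the split, [4,8) belongs to the copy's value number and the range
// starting at 8 belongs to the value defined by the ADD.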

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case where we should have a dead physreg here without a killing
  // instruction is if it is live-in to the function and never used. Another
  // possible case is that the implicit use of the physical register has been
  // deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend the old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}
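
// Illustrative example (assumed indexes): a physical register defined at 4
// and killed at 12 gets the range [defSlot(4), useSlot(12)+1), i.e. it stays
// live through the use slot of the killing instruction:
//
//   4   %EAX = MOV32ri 7
//   8   %reg1023 = ADD32rr %reg1021, %reg1022   ; EAX untouched
//   12  %reg1024 = MOV32rr %EAX                 ; kill of EAX
//
// A dead def instead gets the one-slot range [defSlot(def), defSlot(def)+1).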

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
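
// Illustrative example: on X86, a def of a physical register also defines
// its sub-registers, so a write such as
//
//   %EAX = MOV32ri 0
//
// starts ranges for AX, AH and AL as well (the sub-register loop above),
// unless the instruction explicitly redefines one of them itself.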

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills; if the register reaches a def before it's killed, it
  // shouldn't be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  MachineBasicBlock::iterator E = MBB->end();
  while (mi != E) {
    if (mi->isDebugValue()) {
      ++mi;
      continue;
    }
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    ++mi;
    if (mi != E && !mi->isDebugValue()) {
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
    }
  }

  // The live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}
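
// Illustrative example (assumed indexes): for a register live-in to a block
// and first killed at index 8, the code above builds the range
// [block start, 8d) with a PHI-def value number (no defining instruction).
// If the block never reads or writes the register, the range instead runs to
// the last scanned index and is reported as "live through" in debug output.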

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << MBB->getName() << ":\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // Handle register defs - build intervals.
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def's that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->isExtractSubreg()) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
        return Reg;
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    }
    return Reg;
  } else if (VNI->getCopy()->isInsertSubreg() ||
             VNI->getCopy()->isSubregToReg())
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}
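
// Illustrative example (X86-flavored, assumed vreg numbers): a PIC-relative
// load is a remat candidate whose single virtual register operand is the PIC
// base:
//
//   %reg1030 = MOVPC32r ...              ; PIC base
//   %reg1040 = MOV32rm %reg1030, ...     ; remat candidate
//
// Here getReMatImplicitUse would return %reg1030, the register every
// rematerialized copy of the load implicitly depends on.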

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the defining instruction of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
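
// Illustrative example (assumed operands): for a two-address instruction
//
//   %reg1024<def> = ADD32rr %reg1024<tied>, %reg1025
//
// with Ops = {0, 1}, the tied use (operand 1) is filtered out, MRInfo ends
// up as isModRef, and FoldOps keeps only the def operand.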


/// tryFoldMemoryOperand - Attempts to fold either a spill or a restore
/// (to / from a stack slot) or a rematerialized load into the specified
/// operands of MI. If it is successful, MI is updated with the newly created
/// instruction and this returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
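
// Illustrative example (X86-flavored, assumed registers): folding a reload
// into its only use turns
//
//   %reg1024 = MOV32rm <fi#0>                ; reload from the spill slot
//   %reg1025 = ADD32rr %reg1026, %reg1024
//
// into a single memory-operand instruction:
//
//   %reg1025 = ADD32rm %reg1026, <fi#0>
//
// which is the kind of result tii_->foldMemoryOperand produces when the
// target supports the fold.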

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with the new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If the def for this use can't be rematerialized, then try folding.
      // If the def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    // 1. If the instr reads the same spilled vreg multiple times, we
    //    want to reuse the NewVReg.
    // 2. If the instr is a two-addr instruction, we are required to
    //    keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
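
// Note on the ranges created above (illustrative): for an instruction at
// index i, a reload before a use covers [i.load, i.def) and a spill after a
// def covers [i.def, i.store), so each new interval spans only the slots
// immediately around the rewritten instruction.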

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHI())
      continue;

    SlotIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx <= End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update the weight of the spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def; it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                       DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                       DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = SlotIndex();
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.dump();
      dbgs() << '\n';
    });

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // Create a new register for this spill.
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
             E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      SlotIndex index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DEBUG({
          dbgs() << "\t\t\t\tadded new interval: ";
          nI.dump();
          dbgs() << '\n';
        });
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}
1655
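/// addIntervalsForSpills - Spill li, rematerializing values where legal and
/// folding memory operands where possible. If the interval spans more than
/// one basic block it is also split, and explicit spill / restore points are
/// recorded for the spiller. Returns the new intervals created.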
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {

  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                                     Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                     false, vrm, rc, ReMatIds, loopInfo,
                                     SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                     MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                                     Slot, 0, false, false, false,
                                     false, vrm, rc, ReMatIds, loopInfo,
                                     SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                     MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
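  // Scan the val#s: remember how to rematerialize each one whose def allows
  // it, and decide whether a stack slot is needed at all.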
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // The kill is a phi node, so not all of its uses can be
        // rematerialized. It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                 MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with a virtual register use. Add
              // the register as an implicit use on the use MI and update the
              // register interval's spill weight to HUGE_VALF to prevent it
              // from being spilled.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.weight = HUGE_VALF;
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg but are not sub-registers
    // of the chosen representative super register, assert, since we can't
    // handle them yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

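  // Visit each instruction that reads or writes li.reg exactly once, and
  // spill every conflicting physical register around it.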
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        llvm_report_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}

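/// addLiveRangeToEndOfBlock - Create a live range for reg from startInst's
/// def slot to the end of its basic block, add it to reg's interval, and
/// return the new range.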
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
  LiveRange LR(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}