//===-- SimpleRegisterCoalescing.cpp - Register Coalescing ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a simple register coalescing pass that attempts to
// aggressively coalesce every register copy that it can.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regcoalescing"
#include "SimpleRegisterCoalescing.h"
#include "VirtRegMap.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/Value.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterCoalescer.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

STATISTIC(numJoins  , "Number of interval joins performed");
STATISTIC(numPeep   , "Number of identity moves eliminated after coalescing");
STATISTIC(numAborts , "Number of times interval joining aborted");

char SimpleRegisterCoalescing::ID = 0;
namespace {
  static cl::opt<bool>
  EnableJoining("join-liveintervals",
                cl::desc("Coalesce copies (default=true)"),
                cl::init(true));

  static cl::opt<bool>
  NewHeuristic("new-coalescer-heuristic",
               cl::desc("Use new coalescer heuristic"),
               cl::init(false));

  RegisterPass<SimpleRegisterCoalescing>
  X("simple-register-coalescing", "Simple Register Coalescing");

  // Declare that we implement the RegisterCoalescer interface
  RegisterAnalysisGroup<RegisterCoalescer, true/*The Default*/> V(X);
}

const PassInfo *llvm::SimpleRegisterCoalescingID = X.getPassInfo();

void SimpleRegisterCoalescing::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addPreserved<LiveIntervals>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addPreservedID(TwoAddressInstructionPassID);
  AU.addRequired<LiveVariables>();
  AU.addRequired<LiveIntervals>();
  AU.addRequired<MachineLoopInfo>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

/// AdjustCopiesBackFrom - We found a non-trivially-coalescable copy with IntA
/// being the source and IntB being the dest, thus this defines a value number
/// in IntB. If the source value number (in IntA) is defined by a copy from B,
/// see if we can merge these two pieces of B into a single value number,
/// eliminating a copy. For example:
///
///  A3 = B0
///    ...
///  B1 = A3      <- this copy
///
/// In this case, B0 can be extended to where the B1 copy lives, allowing the B1
/// value number to be replaced with B0 (which simplifies the B liveinterval).
///
/// This returns true if an interval was modified.
///
bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
                                                    LiveInterval &IntB,
                                                    MachineInstr *CopyMI) {
  unsigned CopyIdx = li_->getDefIndex(li_->getInstructionIndex(CopyMI));

  // BValNo is a value number in B that is defined by a copy from A. 'B1' in
  // the example above.
  LiveInterval::iterator BLR = IntB.FindLiveRangeContaining(CopyIdx);
  VNInfo *BValNo = BLR->valno;

  // Get the location that B is defined at. Two options: either this value has
  // an unknown definition point or it is defined at CopyIdx. If unknown, we
  // can't process it.
  if (!BValNo->reg) return false;
  assert(BValNo->def == CopyIdx &&
         "Copy doesn't define the value?");

  // AValNo is the value number in A that defines the copy, A3 in the example.
  LiveInterval::iterator AValLR = IntA.FindLiveRangeContaining(CopyIdx-1);
  VNInfo *AValNo = AValLR->valno;

  // If AValNo is defined as a copy from IntB, we can potentially process this.

  // Get the instruction that defines this value number.
  unsigned SrcReg = AValNo->reg;
  if (!SrcReg) return false;  // Not defined by a copy.

  // If the value number is not defined by a copy instruction, ignore it.

  // If the source register comes from an interval other than IntB, we can't
  // handle this.
  if (rep(SrcReg) != IntB.reg) return false;

  // Get the LiveRange in IntB that this value number starts with.
  LiveInterval::iterator ValLR = IntB.FindLiveRangeContaining(AValNo->def-1);

  // Make sure that the end of the live range is inside the same block as
  // CopyMI.
  MachineInstr *ValLREndInst = li_->getInstructionFromIndex(ValLR->end-1);
  if (!ValLREndInst ||
      ValLREndInst->getParent() != CopyMI->getParent()) return false;

  // Okay, we now know that ValLR ends in the same block that the CopyMI
  // live-range starts. If there are no intervening live ranges between them in
  // IntB, we can merge them.
  if (ValLR+1 != BLR) return false;

  // If a live interval is a physical register, conservatively check if any
  // of its sub-registers is overlapping the live interval of the virtual
  // register. If so, do not coalesce.
  if (MRegisterInfo::isPhysicalRegister(IntB.reg) &&
      *mri_->getSubRegisters(IntB.reg)) {
    for (const unsigned* SR = mri_->getSubRegisters(IntB.reg); *SR; ++SR)
      if (li_->hasInterval(*SR) && IntA.overlaps(li_->getInterval(*SR))) {
        DOUT << "Interfere with sub-register ";
        DEBUG(li_->getInterval(*SR).print(DOUT, mri_));
        return false;
      }
  }

  DOUT << "\nExtending: "; IntB.print(DOUT, mri_);

  unsigned FillerStart = ValLR->end, FillerEnd = BLR->start;
  // We are about to delete CopyMI, so need to remove it as the 'instruction
  // that defines this value #'. Update the valnum with the new defining
  // instruction #.
  BValNo->def = FillerStart;
  BValNo->reg = 0;

  // Okay, we can merge them. We need to insert a new liverange:
  // [ValLR.end, BLR.begin) of either value number, then we merge the
  // two value numbers.
  IntB.addRange(LiveRange(FillerStart, FillerEnd, BValNo));

  // If the IntB live range is assigned to a physical register, and if that
  // physreg has aliases,
  if (MRegisterInfo::isPhysicalRegister(IntB.reg)) {
    // Update the liveintervals of sub-registers.
    for (const unsigned *AS = mri_->getSubRegisters(IntB.reg); *AS; ++AS) {
      LiveInterval &AliasLI = li_->getInterval(*AS);
      AliasLI.addRange(LiveRange(FillerStart, FillerEnd,
                AliasLI.getNextValue(FillerStart, 0, li_->getVNInfoAllocator())));
    }
  }

  // Okay, merge "B1" into the same value number as "B0".
  if (BValNo != ValLR->valno)
    IntB.MergeValueNumberInto(BValNo, ValLR->valno);
  DOUT << " result = "; IntB.print(DOUT, mri_);
  DOUT << "\n";

  // If the source instruction was killing the source register before the
  // merge, unset the isKill marker given the live range has been extended.
  int UIdx = ValLREndInst->findRegisterUseOperandIdx(IntB.reg, true);
  if (UIdx != -1)
    ValLREndInst->getOperand(UIdx).setIsKill(false);

  ++numPeep;
  return true;
}
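
// For illustration only (the instruction indices below are hypothetical): if
// the B0 value is live over [4,12) and the B1 value defined by the copy is
// live over [16,20), AdjustCopiesBackFrom inserts the filler range [12,16)
// and merges B1 into B0, leaving a single B value live over [4,20) and making
// the copy an identity move that can be deleted.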

/// AddSubRegIdxPairs - Recursively mark all the registers represented by the
/// specified register as sub-registers. The recursion level is expected to be
/// shallow.
void SimpleRegisterCoalescing::AddSubRegIdxPairs(unsigned Reg, unsigned SubIdx) {
  std::vector<unsigned> &JoinedRegs = r2rRevMap_[Reg];
  for (unsigned i = 0, e = JoinedRegs.size(); i != e; ++i) {
    SubRegIdxes.push_back(std::make_pair(JoinedRegs[i], SubIdx));
    AddSubRegIdxPairs(JoinedRegs[i], SubIdx);
  }
}

/// isBackEdgeCopy - Returns true if CopyMI is a back edge copy.
///
bool SimpleRegisterCoalescing::isBackEdgeCopy(MachineInstr *CopyMI,
                                              unsigned DstReg) {
  MachineBasicBlock *MBB = CopyMI->getParent();
  const MachineLoop *L = loopInfo->getLoopFor(MBB);
  if (!L)
    return false;
  if (MBB != L->getLoopLatch())
    return false;

  DstReg = rep(DstReg);
  LiveInterval &LI = li_->getInterval(DstReg);
  unsigned DefIdx = li_->getInstructionIndex(CopyMI);
  LiveInterval::const_iterator DstLR =
    LI.FindLiveRangeContaining(li_->getDefIndex(DefIdx));
  if (DstLR == LI.end())
    return false;
  unsigned KillIdx = li_->getInstructionIndex(&MBB->back()) + InstrSlots::NUM-1;
  if (DstLR->valno->kills.size() == 1 && DstLR->valno->kills[0] == KillIdx)
    return true;
  return false;
}
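
// Illustrative sketch only (block layout and register numbers are made up):
// a back edge copy is a copy sitting in the loop latch whose destination
// value is killed exactly at the end of that block, e.g.
//   bb.2 (latch):
//     r1026 = MOV r1024        ; typically feeds a PHI in the loop header
//     JMP bb.1                 ; back edge
// The heuristics in JoinCopy below give such copies a larger coalescing
// threshold.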

/// JoinCopy - Attempt to join intervals corresponding to SrcReg/DstReg,
/// which are the src/dst of the copy instruction CopyMI. This returns true
/// if the copy was successfully coalesced away. If it is not currently
/// possible to coalesce this interval, but it may be possible if other
/// things get coalesced, then it returns true by reference in 'Again'.
bool SimpleRegisterCoalescing::JoinCopy(CopyRec TheCopy, bool &Again) {
  MachineInstr *CopyMI = TheCopy.MI;

  Again = false;
  if (JoinedCopies.count(CopyMI))
    return false; // Already done.

  DOUT << li_->getInstructionIndex(CopyMI) << '\t' << *CopyMI;

  // Get representative registers.
  unsigned SrcReg = TheCopy.SrcReg;
  unsigned DstReg = TheCopy.DstReg;
  unsigned repSrcReg = rep(SrcReg);
  unsigned repDstReg = rep(DstReg);

  // If they are already joined we continue.
  if (repSrcReg == repDstReg) {
    DOUT << "\tCopy already coalesced.\n";
    return false;  // Not coalescable.
  }

  bool SrcIsPhys = MRegisterInfo::isPhysicalRegister(repSrcReg);
  bool DstIsPhys = MRegisterInfo::isPhysicalRegister(repDstReg);

  // If they are both physical registers, we cannot join them.
  if (SrcIsPhys && DstIsPhys) {
    DOUT << "\tCan not coalesce physregs.\n";
    return false;  // Not coalescable.
  }

  // We only join virtual registers with allocatable physical registers.
  if (SrcIsPhys && !allocatableRegs_[repSrcReg]) {
    DOUT << "\tSrc reg is unallocatable physreg.\n";
    return false;  // Not coalescable.
  }
  if (DstIsPhys && !allocatableRegs_[repDstReg]) {
    DOUT << "\tDst reg is unallocatable physreg.\n";
    return false;  // Not coalescable.
  }

  bool isExtSubReg = CopyMI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG;
  unsigned RealDstReg = 0;
  if (isExtSubReg) {
    unsigned SubIdx = CopyMI->getOperand(2).getImm();
    if (SrcIsPhys)
      // r1024 = EXTRACT_SUBREG EAX, 0 then r1024 is really going to be
      // coalesced with AX.
      repSrcReg = mri_->getSubReg(repSrcReg, SubIdx);
    else if (DstIsPhys) {
      // If this is an extract_subreg where dst is a physical register, e.g.
      // cl = EXTRACT_SUBREG reg1024, 1
      // then create and update the actual physical register allocated to RHS.
      const TargetRegisterClass *RC = mf_->getRegInfo().getRegClass(repSrcReg);
      for (const unsigned *SRs = mri_->getSuperRegisters(repDstReg);
           unsigned SR = *SRs; ++SRs) {
        if (repDstReg == mri_->getSubReg(SR, SubIdx) &&
            RC->contains(SR)) {
          RealDstReg = SR;
          break;
        }
      }
      assert(RealDstReg && "Invalid extract_subreg instruction!");

      // For this type of EXTRACT_SUBREG, conservatively
      // check if the live interval of the source register interferes with the
      // actual super physical register we are trying to coalesce with.
      LiveInterval &RHS = li_->getInterval(repSrcReg);
      if (li_->hasInterval(RealDstReg) &&
          RHS.overlaps(li_->getInterval(RealDstReg))) {
        DOUT << "Interfere with register ";
        DEBUG(li_->getInterval(RealDstReg).print(DOUT, mri_));
        return false; // Not coalescable
      }
      for (const unsigned* SR = mri_->getSubRegisters(RealDstReg); *SR; ++SR)
        if (li_->hasInterval(*SR) && RHS.overlaps(li_->getInterval(*SR))) {
          DOUT << "Interfere with sub-register ";
          DEBUG(li_->getInterval(*SR).print(DOUT, mri_));
          return false; // Not coalescable
        }
    } else {
      unsigned SrcSize = li_->getInterval(repSrcReg).getSize() / InstrSlots::NUM;
      unsigned DstSize = li_->getInterval(repDstReg).getSize() / InstrSlots::NUM;
      const TargetRegisterClass *RC = mf_->getRegInfo().getRegClass(repDstReg);
      unsigned Threshold = allocatableRCRegs_[RC].count();
      // Be conservative. If both sides are virtual registers, do not coalesce
      // if this will cause a high use density interval to target a smaller set
      // of registers.
      if (DstSize > Threshold || SrcSize > Threshold) {
        LiveVariables::VarInfo &svi = lv_->getVarInfo(repSrcReg);
        LiveVariables::VarInfo &dvi = lv_->getVarInfo(repDstReg);
        if ((float)dvi.NumUses / DstSize < (float)svi.NumUses / SrcSize) {
          Again = true;  // May be possible to coalesce later.
          return false;
        }
      }
    }
  } else if (differingRegisterClasses(repSrcReg, repDstReg)) {
    // If they are not of the same register class, we cannot join them.
    DOUT << "\tSrc/Dest are different register classes.\n";
    // Allow the coalescer to try again in case either side gets coalesced to
    // a physical register that's compatible with the other side. e.g.
    // r1024 = MOV32to32_ r1025
    // but later r1024 is assigned EAX then r1025 may be coalesced with EAX.
    Again = true;  // May be possible to coalesce later.
    return false;
  }

  LiveInterval &SrcInt = li_->getInterval(repSrcReg);
  LiveInterval &DstInt = li_->getInterval(repDstReg);
  assert(SrcInt.reg == repSrcReg && DstInt.reg == repDstReg &&
         "Register mapping is horribly broken!");

  DOUT << "\t\tInspecting "; SrcInt.print(DOUT, mri_);
  DOUT << " and "; DstInt.print(DOUT, mri_);
  DOUT << ": ";

  // Check if it is necessary to propagate "isDead" property before intervals
  // are joined.
  MachineOperand *mopd = CopyMI->findRegisterDefOperand(DstReg);
  bool isDead = mopd->isDead();
  bool isShorten = false;
  unsigned SrcStart = 0, RemoveStart = 0;
  unsigned SrcEnd = 0, RemoveEnd = 0;
  if (isDead) {
    unsigned CopyIdx = li_->getInstructionIndex(CopyMI);
    LiveInterval::iterator SrcLR =
      SrcInt.FindLiveRangeContaining(li_->getUseIndex(CopyIdx));
    RemoveStart = SrcStart = SrcLR->start;
    RemoveEnd   = SrcEnd   = SrcLR->end;
    // The instruction which defines the src is only truly dead if there are
    // no intermediate uses and there isn't a use beyond the copy.
    // FIXME: find the last use, mark it kill and shorten the live range.
    if (SrcEnd > li_->getDefIndex(CopyIdx)) {
      isDead = false;
    } else {
      MachineOperand *MOU;
      MachineInstr *LastUse = lastRegisterUse(SrcStart, CopyIdx, repSrcReg, MOU);
      if (LastUse) {
        // Shorten the liveinterval to the end of last use.
        MOU->setIsKill();
        isDead = false;
        isShorten = true;
        RemoveStart = li_->getDefIndex(li_->getInstructionIndex(LastUse));
        RemoveEnd   = SrcEnd;
      } else {
        MachineInstr *SrcMI = li_->getInstructionFromIndex(SrcStart);
        if (SrcMI) {
          MachineOperand *mops = findDefOperand(SrcMI, repSrcReg);
          if (mops)
            // A dead def should have a single cycle interval.
            ++RemoveStart;
        }
      }
    }
  }

  // We need to be careful about coalescing a source physical register with a
  // virtual register. Once the coalescing is done, it cannot be broken and
  // these are not spillable! If the destination interval uses are far away,
  // think twice about coalescing them!
  if (!mopd->isDead() && (SrcIsPhys || DstIsPhys) && !isExtSubReg) {
    LiveInterval &JoinVInt = SrcIsPhys ? DstInt : SrcInt;
    unsigned JoinVReg = SrcIsPhys ? repDstReg : repSrcReg;
    unsigned JoinPReg = SrcIsPhys ? repSrcReg : repDstReg;
    const TargetRegisterClass *RC = mf_->getRegInfo().getRegClass(JoinVReg);
    unsigned Threshold = allocatableRCRegs_[RC].count() * 2;
    if (TheCopy.isBackEdge)
      Threshold *= 2; // Favors back edge copies.

    // If the virtual register live interval is long but it has low use
    // density, do not join them; instead mark the physical register as its
    // allocation preference.
    unsigned Length = JoinVInt.getSize() / InstrSlots::NUM;
    LiveVariables::VarInfo &vi = lv_->getVarInfo(JoinVReg);
    if (Length > Threshold &&
        (((float)vi.NumUses / Length) < (1.0 / Threshold))) {
      JoinVInt.preference = JoinPReg;
      ++numAborts;
      DOUT << "\tMay tie down a physical register, abort!\n";
      Again = true;  // May be possible to coalesce later.
      return false;
    }
  }

  // Okay, attempt to join these two intervals. On failure, this returns false.
  // Otherwise, if one of the intervals being joined is a physreg, this method
  // always canonicalizes DstInt to be it. The output "SrcInt" will not have
  // been modified, so we can use this information below to update aliases.
  bool Swapped = false;
  if (JoinIntervals(DstInt, SrcInt, Swapped)) {
    if (isDead) {
      // Result of the copy is dead. Propagate this property.
      if (SrcStart == 0) {
        assert(MRegisterInfo::isPhysicalRegister(repSrcReg) &&
               "Live-in must be a physical register!");
        // Live-in to the function but dead. Remove it from entry live-in set.
        // JoinIntervals may end up swapping the two intervals.
        mf_->begin()->removeLiveIn(repSrcReg);
      } else {
        MachineInstr *SrcMI = li_->getInstructionFromIndex(SrcStart);
        if (SrcMI) {
          MachineOperand *mops = findDefOperand(SrcMI, repSrcReg);
          if (mops)
            mops->setIsDead();
        }
      }
    }

    if (isShorten || isDead) {
      // Shorten the destination live interval.
      if (Swapped)
        SrcInt.removeRange(RemoveStart, RemoveEnd);
    }
  } else {
    // Coalescing failed.

    // If we can eliminate the copy without merging the live ranges, do so now.
    if (!isExtSubReg && AdjustCopiesBackFrom(SrcInt, DstInt, CopyMI)) {
      JoinedCopies.insert(CopyMI);
      return true;
    }

    // Otherwise, we are unable to join the intervals.
    DOUT << "Interference!\n";
    Again = true;  // May be possible to coalesce later.
    return false;
  }

  LiveInterval *ResSrcInt = &SrcInt;
  LiveInterval *ResDstInt = &DstInt;
  if (Swapped) {
    std::swap(repSrcReg, repDstReg);
    std::swap(ResSrcInt, ResDstInt);
  }
  assert(MRegisterInfo::isVirtualRegister(repSrcReg) &&
         "LiveInterval::join didn't work right!");

  // If we're about to merge live ranges into a physical register live range,
  // we have to update any aliased register's live ranges to indicate that they
  // have clobbered values for this range.
  if (MRegisterInfo::isPhysicalRegister(repDstReg)) {
    // Unset unnecessary kills.
    if (!ResDstInt->containsOneValue()) {
      for (LiveInterval::Ranges::const_iterator I = ResSrcInt->begin(),
             E = ResSrcInt->end(); I != E; ++I)
        unsetRegisterKills(I->start, I->end, repDstReg);
    }

    // If this is an extract_subreg where dst is a physical register, e.g.
    // cl = EXTRACT_SUBREG reg1024, 1
    // then create and update the actual physical register allocated to RHS.
    if (RealDstReg) {
      LiveInterval &RealDstInt = li_->getOrCreateInterval(RealDstReg);
      SmallSet<const VNInfo*, 4> CopiedValNos;
      for (LiveInterval::Ranges::const_iterator I = ResSrcInt->ranges.begin(),
             E = ResSrcInt->ranges.end(); I != E; ++I) {
        LiveInterval::const_iterator DstLR =
          ResDstInt->FindLiveRangeContaining(I->start);
        assert(DstLR != ResDstInt->end() && "Invalid joined interval!");
        const VNInfo *DstValNo = DstLR->valno;
        if (CopiedValNos.insert(DstValNo)) {
          VNInfo *ValNo = RealDstInt.getNextValue(DstValNo->def, DstValNo->reg,
                                                  li_->getVNInfoAllocator());
          ValNo->hasPHIKill = DstValNo->hasPHIKill;
          RealDstInt.addKills(ValNo, DstValNo->kills);
          RealDstInt.MergeValueInAsValue(*ResDstInt, DstValNo, ValNo);
        }
      }
      repDstReg = RealDstReg;
    }

    // Update the liveintervals of sub-registers.
    for (const unsigned *AS = mri_->getSubRegisters(repDstReg); *AS; ++AS)
      li_->getOrCreateInterval(*AS).MergeInClobberRanges(*ResSrcInt,
                                                    li_->getVNInfoAllocator());
  } else {
    // Merge use info if the destination is a virtual register.
    LiveVariables::VarInfo& dVI = lv_->getVarInfo(repDstReg);
    LiveVariables::VarInfo& sVI = lv_->getVarInfo(repSrcReg);
    dVI.NumUses += sVI.NumUses;
  }

  // Remember these liveintervals have been joined.
  JoinedLIs.set(repSrcReg - MRegisterInfo::FirstVirtualRegister);
  if (MRegisterInfo::isVirtualRegister(repDstReg))
    JoinedLIs.set(repDstReg - MRegisterInfo::FirstVirtualRegister);

  if (isExtSubReg && !SrcIsPhys && !DstIsPhys) {
    if (!Swapped) {
      // Make sure we allocate the larger super-register.
      ResSrcInt->Copy(*ResDstInt, li_->getVNInfoAllocator());
      std::swap(repSrcReg, repDstReg);
      std::swap(ResSrcInt, ResDstInt);
    }
    unsigned SubIdx = CopyMI->getOperand(2).getImm();
    SubRegIdxes.push_back(std::make_pair(repSrcReg, SubIdx));
    AddSubRegIdxPairs(repSrcReg, SubIdx);
  }

  if (NewHeuristic) {
    for (LiveInterval::const_vni_iterator i = ResSrcInt->vni_begin(),
           e = ResSrcInt->vni_end(); i != e; ++i) {
      const VNInfo *vni = *i;
      if (vni->def && vni->def != ~1U && vni->def != ~0U) {
        MachineInstr *CopyMI = li_->getInstructionFromIndex(vni->def);
        unsigned SrcReg, DstReg;
        if (CopyMI && tii_->isMoveInstr(*CopyMI, SrcReg, DstReg) &&
            JoinedCopies.count(CopyMI) == 0) {
          unsigned LoopDepth = loopInfo->getLoopDepth(CopyMI->getParent());
          JoinQueue->push(CopyRec(CopyMI, SrcReg, DstReg, LoopDepth,
                                  isBackEdgeCopy(CopyMI, DstReg)));
        }
      }
    }
  }

  DOUT << "\n\t\tJoined. Result = "; ResDstInt->print(DOUT, mri_);
  DOUT << "\n";

  // repSrcReg is guaranteed to be the register whose live interval is being
  // merged.
  li_->removeInterval(repSrcReg);
  r2rMap_[repSrcReg] = repDstReg;
  r2rRevMap_[repDstReg].push_back(repSrcReg);

  // Finally, delete the copy instruction.
  JoinedCopies.insert(CopyMI);
  ++numPeep;
  ++numJoins;
  return true;
}
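
// A minimal sketch of the overall effect, with made-up registers (MOV32rr is
// used purely as an example opcode): for
//   r1025 = MOV32rr r1024
// a successful join merges the two live intervals, records the surviving
// register as the representative of the eliminated one in r2rMap_, removes
// the eliminated register's interval, and adds the instruction to
// JoinedCopies so the now-identity move can be erased later.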

/// ComputeUltimateVN - Assuming we are going to join two live intervals,
/// compute what the resultant value numbers for each value in the input two
/// ranges will be. This is complicated by copies between the two which can
/// and will commonly cause multiple value numbers to be merged into one.
///
/// VN is the value number that we're trying to resolve. NewVNInfo keeps track
/// of the value number assignments for the result LiveInterval.
/// ThisFromOther/OtherFromThis are sets that keep track of whether a value in
/// this or other is a copy from the opposite set.
/// ThisValNoAssignments/OtherValNoAssignments keep track of value #'s that
/// have already been assigned.
///
/// ThisFromOther[x] - If x is defined as a copy from the other interval, this
/// contains the value number the copy is from.
///
static unsigned ComputeUltimateVN(VNInfo *VNI,
                                  SmallVector<VNInfo*, 16> &NewVNInfo,
                                  DenseMap<VNInfo*, VNInfo*> &ThisFromOther,
                                  DenseMap<VNInfo*, VNInfo*> &OtherFromThis,
                                  SmallVector<int, 16> &ThisValNoAssignments,
                                  SmallVector<int, 16> &OtherValNoAssignments) {
  unsigned VN = VNI->id;

  // If the VN has already been computed, just return it.
  if (ThisValNoAssignments[VN] >= 0)
    return ThisValNoAssignments[VN];
//  assert(ThisValNoAssignments[VN] != -2 && "Cyclic case?");

  // If this val is not a copy from the other val, then it must be a new value
  // number in the destination.
  DenseMap<VNInfo*, VNInfo*>::iterator I = ThisFromOther.find(VNI);
  if (I == ThisFromOther.end()) {
    NewVNInfo.push_back(VNI);
    return ThisValNoAssignments[VN] = NewVNInfo.size()-1;
  }
  VNInfo *OtherValNo = I->second;

  // Otherwise, this *is* a copy from the RHS. If the other side has already
  // been computed, return it.
  if (OtherValNoAssignments[OtherValNo->id] >= 0)
    return ThisValNoAssignments[VN] = OtherValNoAssignments[OtherValNo->id];

  // Mark this value number as currently being computed, then ask what the
  // ultimate value # of the other value is.
  ThisValNoAssignments[VN] = -2;
  unsigned UltimateVN =
    ComputeUltimateVN(OtherValNo, NewVNInfo, OtherFromThis, ThisFromOther,
                      OtherValNoAssignments, ThisValNoAssignments);
  return ThisValNoAssignments[VN] = UltimateVN;
}
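
// A hypothetical example of the resolution above (value numbers invented for
// illustration): suppose LHS value L1 is recorded as a copy from RHS value
// R0, and R0 is itself recorded as a copy from LHS value L0. Resolving L1
// marks it in-progress (-2) and recurses into R0, which recurses into L0;
// L0 is not a copy from the other side, so it receives a fresh slot in
// NewVNInfo, and that same slot is then assigned to R0 and L1 on the way
// back up, merging all three into one value number in the joined interval.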

static bool InVector(VNInfo *Val, const SmallVector<VNInfo*, 8> &V) {
  return std::find(V.begin(), V.end(), Val) != V.end();
}

/// SimpleJoin - Attempt to join the specified interval into this one. The
/// caller of this method must guarantee that the RHS only contains a single
/// value number and that the RHS is not defined by a copy from this
/// interval. This returns false if the intervals are not joinable, or it
/// joins them and returns true.
bool SimpleRegisterCoalescing::SimpleJoin(LiveInterval &LHS, LiveInterval &RHS) {
  assert(RHS.containsOneValue());

  // Some number (potentially more than one) of the value numbers in the
  // current interval may be defined as copies from the RHS. Scan the
  // overlapping portions of the LHS and RHS, keeping track of this and
  // looking for overlapping live ranges that are NOT defined as copies. If
  // these exist, we cannot coalesce.

  LiveInterval::iterator LHSIt = LHS.begin(), LHSEnd = LHS.end();
  LiveInterval::iterator RHSIt = RHS.begin(), RHSEnd = RHS.end();

  if (LHSIt->start < RHSIt->start) {
    LHSIt = std::upper_bound(LHSIt, LHSEnd, RHSIt->start);
    if (LHSIt != LHS.begin()) --LHSIt;
  } else if (RHSIt->start < LHSIt->start) {
    RHSIt = std::upper_bound(RHSIt, RHSEnd, LHSIt->start);
    if (RHSIt != RHS.begin()) --RHSIt;
  }

  SmallVector<VNInfo*, 8> EliminatedLHSVals;

  while (1) {
    // Determine if these live intervals overlap.
    bool Overlaps = false;
    if (LHSIt->start <= RHSIt->start)
      Overlaps = LHSIt->end > RHSIt->start;
    else
      Overlaps = RHSIt->end > LHSIt->start;

    // If the live intervals overlap, there are two interesting cases: if the
    // LHS interval is defined by a copy from the RHS, it's ok and we record
    // that the LHS value # is the same as the RHS. If it's not, then we cannot
    // coalesce these live ranges and we bail out.
    if (Overlaps) {
      // If we haven't already recorded that this value # is safe, check it.
      if (!InVector(LHSIt->valno, EliminatedLHSVals)) {
        // Copy from the RHS?
        unsigned SrcReg = LHSIt->valno->reg;
        if (rep(SrcReg) != RHS.reg)
          return false;    // Nope, bail out.

        EliminatedLHSVals.push_back(LHSIt->valno);
      }

      // We know this entire LHS live range is okay, so skip it now.
      if (++LHSIt == LHSEnd) break;
      continue;
    }

    if (LHSIt->end < RHSIt->end) {
      if (++LHSIt == LHSEnd) break;
    } else {
      // One interesting case to check here. It's possible that we have
      // something like "X3 = Y" which defines a new value number in the LHS,
      // and is the last use of this liverange of the RHS. In this case, we
      // want to notice this copy (so that it gets coalesced away) even though
      // the live ranges don't actually overlap.
      if (LHSIt->start == RHSIt->end) {
        if (InVector(LHSIt->valno, EliminatedLHSVals)) {
          // We already know that this value number is going to be merged in
          // if coalescing succeeds. Just skip the liverange.
          if (++LHSIt == LHSEnd) break;
        } else {
          // Otherwise, if this is a copy from the RHS, mark it as being merged
          // in.
          if (rep(LHSIt->valno->reg) == RHS.reg) {
            EliminatedLHSVals.push_back(LHSIt->valno);

            // We know this entire LHS live range is okay, so skip it now.
            if (++LHSIt == LHSEnd) break;
          }
        }
      }

      if (++RHSIt == RHSEnd) break;
    }
  }

  // If we got here, we know that the coalescing will be successful and that
  // the value numbers in EliminatedLHSVals will all be merged together. Since
  // the most common case is that EliminatedLHSVals has a single number, we
  // optimize for it: if there is more than one value, we merge them all into
  // the lowest numbered one, then handle the interval as if we were merging
  // with one value number.
  VNInfo *LHSValNo;
  if (EliminatedLHSVals.size() > 1) {
    // Loop through all the equal value numbers merging them into the smallest
    // one.
    VNInfo *Smallest = EliminatedLHSVals[0];
    for (unsigned i = 1, e = EliminatedLHSVals.size(); i != e; ++i) {
      if (EliminatedLHSVals[i]->id < Smallest->id) {
        // Merge the current notion of the smallest into the smaller one.
        LHS.MergeValueNumberInto(Smallest, EliminatedLHSVals[i]);
        Smallest = EliminatedLHSVals[i];
      } else {
        // Merge into the smallest.
        LHS.MergeValueNumberInto(EliminatedLHSVals[i], Smallest);
      }
    }
    LHSValNo = Smallest;
  } else {
    assert(!EliminatedLHSVals.empty() && "No copies from the RHS?");
    LHSValNo = EliminatedLHSVals[0];
  }

  // Okay, now that there is a single LHS value number that we're merging the
  // RHS into, update the value number info for the LHS to indicate that the
  // value number is defined where the RHS value number was.
  const VNInfo *VNI = RHS.getValNumInfo(0);
  LHSValNo->def = VNI->def;
  LHSValNo->reg = VNI->reg;

  // Okay, the final step is to loop over the RHS live intervals, adding them to
  // the LHS.
  LHSValNo->hasPHIKill |= VNI->hasPHIKill;
  LHS.addKills(LHSValNo, VNI->kills);
  LHS.MergeRangesInAsValue(RHS, LHSValNo);
  LHS.weight += RHS.weight;
  if (RHS.preference && !LHS.preference)
    LHS.preference = RHS.preference;

  return true;
}

/// JoinIntervals - Attempt to join these two intervals. On failure, this
/// returns false. Otherwise, if one of the intervals being joined is a
/// physreg, this method always canonicalizes LHS to be it. The output
/// "RHS" will not have been modified, so we can use this information
/// below to update aliases.
bool SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS,
                                             LiveInterval &RHS, bool &Swapped) {
  // Compute the final value assignment, assuming that the live ranges can be
  // coalesced.
  SmallVector<int, 16> LHSValNoAssignments;
  SmallVector<int, 16> RHSValNoAssignments;
  DenseMap<VNInfo*, VNInfo*> LHSValsDefinedFromRHS;
  DenseMap<VNInfo*, VNInfo*> RHSValsDefinedFromLHS;
  SmallVector<VNInfo*, 16> NewVNInfo;

  // If a live interval is a physical register, conservatively check if any
  // of its sub-registers is overlapping the live interval of the virtual
  // register. If so, do not coalesce.
  if (MRegisterInfo::isPhysicalRegister(LHS.reg) &&
      *mri_->getSubRegisters(LHS.reg)) {
    for (const unsigned* SR = mri_->getSubRegisters(LHS.reg); *SR; ++SR)
      if (li_->hasInterval(*SR) && RHS.overlaps(li_->getInterval(*SR))) {
        DOUT << "Interfere with sub-register ";
        DEBUG(li_->getInterval(*SR).print(DOUT, mri_));
        return false;
      }
  } else if (MRegisterInfo::isPhysicalRegister(RHS.reg) &&
             *mri_->getSubRegisters(RHS.reg)) {
    for (const unsigned* SR = mri_->getSubRegisters(RHS.reg); *SR; ++SR)
      if (li_->hasInterval(*SR) && LHS.overlaps(li_->getInterval(*SR))) {
        DOUT << "Interfere with sub-register ";
        DEBUG(li_->getInterval(*SR).print(DOUT, mri_));
        return false;
      }
  }

  // Compute ultimate value numbers for the LHS and RHS values.
  if (RHS.containsOneValue()) {
    // Copies from a liveinterval with a single value are simple to handle and
    // very common, handle the special case here. This is important, because
    // often RHS is small and LHS is large (e.g. a physreg).

    // Find out if the RHS is defined as a copy from some value in the LHS.
    int RHSVal0DefinedFromLHS = -1;
    int RHSValID = -1;
    VNInfo *RHSValNoInfo = NULL;
    VNInfo *RHSValNoInfo0 = RHS.getValNumInfo(0);
    unsigned RHSSrcReg = RHSValNoInfo0->reg;
    if ((RHSSrcReg == 0 || rep(RHSSrcReg) != LHS.reg)) {
      // If RHS is not defined as a copy from the LHS, we can use simpler and
      // faster checks to see if the live ranges are coalescable. This joiner
      // can't swap the LHS/RHS intervals though.
      if (!MRegisterInfo::isPhysicalRegister(RHS.reg)) {
        return SimpleJoin(LHS, RHS);
      } else {
        RHSValNoInfo = RHSValNoInfo0;
      }
    } else {
      // It was defined as a copy from the LHS, find out what value # it is.
      RHSValNoInfo = LHS.getLiveRangeContaining(RHSValNoInfo0->def-1)->valno;
      RHSValID = RHSValNoInfo->id;
      RHSVal0DefinedFromLHS = RHSValID;
    }

    LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
    RHSValNoAssignments.resize(RHS.getNumValNums(), -1);
    NewVNInfo.resize(LHS.getNumValNums(), NULL);

    // Okay, *all* of the values in LHS that are defined as a copy from RHS
    // should now get updated.
    for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
         i != e; ++i) {
      VNInfo *VNI = *i;
      unsigned VN = VNI->id;
      if (unsigned LHSSrcReg = VNI->reg) {
        if (rep(LHSSrcReg) != RHS.reg) {
          // If this is not a copy from the RHS, its value number will be
          // unmodified by the coalescing.
          NewVNInfo[VN] = VNI;
          LHSValNoAssignments[VN] = VN;
        } else if (RHSValID == -1) {
          // Otherwise, it is a copy from the RHS, and we don't already have a
          // value# for it. Keep the current value number, but remember it.
          LHSValNoAssignments[VN] = RHSValID = VN;
          NewVNInfo[VN] = RHSValNoInfo;
          LHSValsDefinedFromRHS[VNI] = RHSValNoInfo0;
        } else {
          // Otherwise, use the specified value #.
          LHSValNoAssignments[VN] = RHSValID;
          if (VN == (unsigned)RHSValID) {  // Else this val# is dead.
            NewVNInfo[VN] = RHSValNoInfo;
            LHSValsDefinedFromRHS[VNI] = RHSValNoInfo0;
          }
        }
      } else {
        NewVNInfo[VN] = VNI;
        LHSValNoAssignments[VN] = VN;
      }
    }

    assert(RHSValID != -1 && "Didn't find value #?");
    RHSValNoAssignments[0] = RHSValID;
    if (RHSVal0DefinedFromLHS != -1) {
      // This path doesn't go through ComputeUltimateVN so just set
      // it to anything.
      RHSValsDefinedFromLHS[RHSValNoInfo0] = (VNInfo*)1;
    }
  } else {
    // Loop over the value numbers of the LHS, seeing if any are defined from
    // the RHS.
    for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
         i != e; ++i) {
      VNInfo *VNI = *i;
      unsigned ValSrcReg = VNI->reg;
      if (VNI->def == ~1U || ValSrcReg == 0)  // Src not defined by a copy?
        continue;

      // DstReg is known to be a register in the LHS interval. If the src is
      // from the RHS interval, we can use its value #.
      if (rep(ValSrcReg) != RHS.reg)
        continue;

      // Figure out the value # from the RHS.
      LHSValsDefinedFromRHS[VNI] = RHS.getLiveRangeContaining(VNI->def-1)->valno;
    }

    // Loop over the value numbers of the RHS, seeing if any are defined from
    // the LHS.
    for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
         i != e; ++i) {
      VNInfo *VNI = *i;
      unsigned ValSrcReg = VNI->reg;
      if (VNI->def == ~1U || ValSrcReg == 0)  // Src not defined by a copy?
        continue;

      // DstReg is known to be a register in the RHS interval. If the src is
      // from the LHS interval, we can use its value #.
      if (rep(ValSrcReg) != LHS.reg)
        continue;

      // Figure out the value # from the LHS.
      RHSValsDefinedFromLHS[VNI] = LHS.getLiveRangeContaining(VNI->def-1)->valno;
    }

    LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
    RHSValNoAssignments.resize(RHS.getNumValNums(), -1);
    NewVNInfo.reserve(LHS.getNumValNums() + RHS.getNumValNums());

    for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
         i != e; ++i) {
      VNInfo *VNI = *i;
      unsigned VN = VNI->id;
      if (LHSValNoAssignments[VN] >= 0 || VNI->def == ~1U)
        continue;
      ComputeUltimateVN(VNI, NewVNInfo,
                        LHSValsDefinedFromRHS, RHSValsDefinedFromLHS,
                        LHSValNoAssignments, RHSValNoAssignments);
    }
    for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
         i != e; ++i) {
      VNInfo *VNI = *i;
      unsigned VN = VNI->id;
      if (RHSValNoAssignments[VN] >= 0 || VNI->def == ~1U)
        continue;
      // If this value number isn't a copy from the LHS, it's a new number.
      if (RHSValsDefinedFromLHS.find(VNI) == RHSValsDefinedFromLHS.end()) {
        NewVNInfo.push_back(VNI);
        RHSValNoAssignments[VN] = NewVNInfo.size()-1;
        continue;
      }

      ComputeUltimateVN(VNI, NewVNInfo,
                        RHSValsDefinedFromLHS, LHSValsDefinedFromRHS,
                        RHSValNoAssignments, LHSValNoAssignments);
    }
  }

  // Armed with the mappings of LHS/RHS values to ultimate values, walk the
  // interval lists to see if these intervals are coalescable.
  LiveInterval::const_iterator I = LHS.begin();
  LiveInterval::const_iterator IE = LHS.end();
  LiveInterval::const_iterator J = RHS.begin();
  LiveInterval::const_iterator JE = RHS.end();

  // Skip ahead until the first place of potential sharing.
  if (I->start < J->start) {
    I = std::upper_bound(I, IE, J->start);
    if (I != LHS.begin()) --I;
  } else if (J->start < I->start) {
    J = std::upper_bound(J, JE, I->start);
    if (J != RHS.begin()) --J;
  }

  while (1) {
    // Determine if these two live ranges overlap.
    bool Overlaps;
    if (I->start < J->start) {
      Overlaps = I->end > J->start;
    } else {
      Overlaps = J->end > I->start;
    }

    // If so, check value # info to determine if they are really different.
    if (Overlaps) {
      // If the live range overlap will map to the same value number in the
      // result liverange, we can still coalesce them. If not, we can't.
      if (LHSValNoAssignments[I->valno->id] !=
          RHSValNoAssignments[J->valno->id])
        return false;
    }

    if (I->end < J->end) {
      ++I;
      if (I == IE) break;
    } else {
      ++J;
      if (J == JE) break;
    }
  }

  // Update kill info. Some live ranges are extended due to copy coalescing.
  for (DenseMap<VNInfo*, VNInfo*>::iterator I = LHSValsDefinedFromRHS.begin(),
         E = LHSValsDefinedFromRHS.end(); I != E; ++I) {
    VNInfo *VNI = I->first;
    unsigned LHSValID = LHSValNoAssignments[VNI->id];
    LiveInterval::removeKill(NewVNInfo[LHSValID], VNI->def);
    NewVNInfo[LHSValID]->hasPHIKill |= VNI->hasPHIKill;
    RHS.addKills(NewVNInfo[LHSValID], VNI->kills);
  }

  // Update kill info. Some live ranges are extended due to copy coalescing.
  for (DenseMap<VNInfo*, VNInfo*>::iterator I = RHSValsDefinedFromLHS.begin(),
         E = RHSValsDefinedFromLHS.end(); I != E; ++I) {
    VNInfo *VNI = I->first;
    unsigned RHSValID = RHSValNoAssignments[VNI->id];
    LiveInterval::removeKill(NewVNInfo[RHSValID], VNI->def);
    NewVNInfo[RHSValID]->hasPHIKill |= VNI->hasPHIKill;
    LHS.addKills(NewVNInfo[RHSValID], VNI->kills);
  }

  // If we get here, we know that we can coalesce the live ranges. Ask the
  // intervals to coalesce themselves now.
  if ((RHS.ranges.size() > LHS.ranges.size() &&
       MRegisterInfo::isVirtualRegister(LHS.reg)) ||
      MRegisterInfo::isPhysicalRegister(RHS.reg)) {
    RHS.join(LHS, &RHSValNoAssignments[0], &LHSValNoAssignments[0], NewVNInfo);
    Swapped = true;
  } else {
    LHS.join(RHS, &LHSValNoAssignments[0], &RHSValNoAssignments[0], NewVNInfo);
    Swapped = false;
  }
  return true;
}

namespace {
  // DepthMBBCompare - Comparison predicate that sorts first based on the loop
  // depth of the basic block (the unsigned), and then on the MBB number.
  struct DepthMBBCompare {
    typedef std::pair<unsigned, MachineBasicBlock*> DepthMBBPair;
    bool operator()(const DepthMBBPair &LHS, const DepthMBBPair &RHS) const {
      if (LHS.first > RHS.first) return true;   // Deeper loops first
      return LHS.first == RHS.first &&
        LHS.second->getNumber() < RHS.second->getNumber();
    }
  };
}
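
// For example (depths and block numbers are made up): under DepthMBBCompare,
// (2, MBB#7) sorts before (1, MBB#3), and (1, MBB#2) sorts before (1, MBB#5),
// so blocks in deeper loops are visited first, with the block number as the
// tie breaker.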

/// getRepIntervalSize - Returns the size of the interval that represents the
/// specified register.
template<class SF>
unsigned JoinPriorityQueue<SF>::getRepIntervalSize(unsigned Reg) {
  return Rc->getRepIntervalSize(Reg);
}

/// CopyRecSort::operator - Join priority queue sorting function.
///
bool CopyRecSort::operator()(CopyRec left, CopyRec right) const {
  // Inner loops first.
  if (left.LoopDepth > right.LoopDepth)
    return false;
  else if (left.LoopDepth == right.LoopDepth) {
    if (left.isBackEdge && !right.isBackEdge)
      return false;
    else if (left.isBackEdge == right.isBackEdge) {
      // Join virtuals to physical registers first.
      bool LDstIsPhys = MRegisterInfo::isPhysicalRegister(left.DstReg);
      bool LSrcIsPhys = MRegisterInfo::isPhysicalRegister(left.SrcReg);
      bool LIsPhys = LDstIsPhys || LSrcIsPhys;
      bool RDstIsPhys = MRegisterInfo::isPhysicalRegister(right.DstReg);
      bool RSrcIsPhys = MRegisterInfo::isPhysicalRegister(right.SrcReg);
      bool RIsPhys = RDstIsPhys || RSrcIsPhys;
      if (LIsPhys && !RIsPhys)
        return false;
      else if (LIsPhys == RIsPhys) {
        // Join shorter intervals first.
        unsigned LSize = 0;
        unsigned RSize = 0;
        if (LIsPhys) {
          LSize =  LDstIsPhys ? 0 : JPQ->getRepIntervalSize(left.DstReg);
          LSize += LSrcIsPhys ? 0 : JPQ->getRepIntervalSize(left.SrcReg);
          RSize =  RDstIsPhys ? 0 : JPQ->getRepIntervalSize(right.DstReg);
          RSize += RSrcIsPhys ? 0 : JPQ->getRepIntervalSize(right.SrcReg);
        } else {
          LSize = std::min(JPQ->getRepIntervalSize(left.DstReg),
                           JPQ->getRepIntervalSize(left.SrcReg));
          RSize = std::min(JPQ->getRepIntervalSize(right.DstReg),
                           JPQ->getRepIntervalSize(right.SrcReg));
        }
        if (LSize < RSize)
          return false;
      }
    }
  }
  return true;
}
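
// Illustration only (the copies are hypothetical): because this is the
// "less-than" predicate of the join priority queue, returning false keeps
// 'left' ahead of 'right'. So a copy at loop depth 2 is popped before one at
// depth 1; at equal depth a back-edge copy wins, then a copy involving a
// physical register, then the copy with the shorter representative intervals.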
1063
Gabor Greife510b3a2007-07-09 12:00:59 +00001064void SimpleRegisterCoalescing::CopyCoalesceInMBB(MachineBasicBlock *MBB,
Evan Cheng8b0b8742007-10-16 08:04:24 +00001065 std::vector<CopyRec> &TryAgain) {
David Greene25133302007-06-08 17:18:56 +00001066 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
Evan Cheng8fc9a102007-11-06 08:52:21 +00001067
Evan Cheng8b0b8742007-10-16 08:04:24 +00001068 std::vector<CopyRec> VirtCopies;
1069 std::vector<CopyRec> PhysCopies;
Evan Cheng22f07ff2007-12-11 02:09:15 +00001070 unsigned LoopDepth = loopInfo->getLoopDepth(MBB);
David Greene25133302007-06-08 17:18:56 +00001071 for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
1072 MII != E;) {
1073 MachineInstr *Inst = MII++;
1074
Evan Cheng32dfbea2007-10-12 08:50:34 +00001075 // If this isn't a copy nor a extract_subreg, we can't join intervals.
David Greene25133302007-06-08 17:18:56 +00001076 unsigned SrcReg, DstReg;
Evan Cheng32dfbea2007-10-12 08:50:34 +00001077 if (Inst->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
1078 DstReg = Inst->getOperand(0).getReg();
1079 SrcReg = Inst->getOperand(1).getReg();
1080 } else if (!tii_->isMoveInstr(*Inst, SrcReg, DstReg))
1081 continue;

    unsigned repSrcReg = rep(SrcReg);
    unsigned repDstReg = rep(DstReg);
    bool SrcIsPhys = MRegisterInfo::isPhysicalRegister(repSrcReg);
    bool DstIsPhys = MRegisterInfo::isPhysicalRegister(repDstReg);
    if (NewHeuristic) {
      JoinQueue->push(CopyRec(Inst, SrcReg, DstReg, LoopDepth,
                              isBackEdgeCopy(Inst, DstReg)));
    } else {
      if (SrcIsPhys || DstIsPhys)
        PhysCopies.push_back(CopyRec(Inst, SrcReg, DstReg, 0, false));
      else
        VirtCopies.push_back(CopyRec(Inst, SrcReg, DstReg, 0, false));
    }
  }

  if (NewHeuristic)
    return;

  // Try coalescing physical register + virtual register first.
  for (unsigned i = 0, e = PhysCopies.size(); i != e; ++i) {
    CopyRec &TheCopy = PhysCopies[i];
    bool Again = false;
    if (!JoinCopy(TheCopy, Again))
      if (Again)
        TryAgain.push_back(TheCopy);
  }
  for (unsigned i = 0, e = VirtCopies.size(); i != e; ++i) {
    CopyRec &TheCopy = VirtCopies[i];
    bool Again = false;
    if (!JoinCopy(TheCopy, Again))
      if (Again)
        TryAgain.push_back(TheCopy);
  }
}

void SimpleRegisterCoalescing::joinIntervals() {
  DOUT << "********** JOINING INTERVALS ***********\n";

  if (NewHeuristic)
    JoinQueue = new JoinPriorityQueue<CopyRecSort>(this);

  JoinedLIs.resize(li_->getNumIntervals());
  JoinedLIs.reset();
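  // JoinedLIs tracks which virtual registers were merged into another
  // interval; it is walked at the end of joining to clear kill flags made
  // stale by the lengthened intervals.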

  std::vector<CopyRec> TryAgainList;
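  // Copies that cannot be coalesced yet, but might become joinable once other
  // joins are performed, are collected here and retried below.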
  if (loopInfo->begin() == loopInfo->end()) {
    // If there are no loops in the function, join intervals in function order.
    for (MachineFunction::iterator I = mf_->begin(), E = mf_->end();
         I != E; ++I)
      CopyCoalesceInMBB(I, TryAgainList);
  } else {
    // Otherwise, join intervals in inner loops before other intervals.
    // Unfortunately we can't just iterate over the loop hierarchy here because
    // there may be more MBB's than BB's.  Collect MBB's for sorting.

    // Join intervals in the function prolog first. We want to join physical
    // registers with virtual registers before the intervals get too long.
    std::vector<std::pair<unsigned, MachineBasicBlock*> > MBBs;
    for (MachineFunction::iterator I = mf_->begin(), E = mf_->end(); I != E; ++I) {
      MachineBasicBlock *MBB = I;
      MBBs.push_back(std::make_pair(loopInfo->getLoopDepth(MBB), I));
    }

    // Sort by loop depth.
    std::sort(MBBs.begin(), MBBs.end(), DepthMBBCompare());
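    // DepthMBBCompare is assumed to order blocks from the deepest loop
    // outward, so copies in inner loops are coalesced first.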

    // Finally, join intervals in loop nest order.
    for (unsigned i = 0, e = MBBs.size(); i != e; ++i)
      CopyCoalesceInMBB(MBBs[i].second, TryAgainList);
  }

  // Joining intervals can allow other intervals to be joined.  Iteratively join
  // until we make no progress.
  if (NewHeuristic) {
    SmallVector<CopyRec, 16> TryAgain;
    bool ProgressMade = true;
    while (ProgressMade) {
      ProgressMade = false;
      while (!JoinQueue->empty()) {
        CopyRec R = JoinQueue->pop();
        bool Again = false;
        bool Success = JoinCopy(R, Again);
        if (Success)
          ProgressMade = true;
        else if (Again)
          TryAgain.push_back(R);
      }

      if (ProgressMade) {
        while (!TryAgain.empty()) {
          JoinQueue->push(TryAgain.back());
          TryAgain.pop_back();
        }
      }
    }
  } else {
    bool ProgressMade = true;
    while (ProgressMade) {
      ProgressMade = false;

      for (unsigned i = 0, e = TryAgainList.size(); i != e; ++i) {
        CopyRec &TheCopy = TryAgainList[i];
        if (TheCopy.MI) {
          bool Again = false;
          bool Success = JoinCopy(TheCopy, Again);
          if (Success || !Again) {
            TheCopy.MI = 0;   // Mark this one as done.
            ProgressMade = true;
          }
        }
      }
    }
  }

  // Some live ranges have been lengthened due to coalescing; eliminate the
  // unnecessary kills.
  int RegNum = JoinedLIs.find_first();
  while (RegNum != -1) {
    unsigned Reg = RegNum + MRegisterInfo::FirstVirtualRegister;
    unsigned repReg = rep(Reg);
    LiveInterval &LI = li_->getInterval(repReg);
    LiveVariables::VarInfo& svi = lv_->getVarInfo(Reg);
    for (unsigned i = 0, e = svi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = svi.Kills[i];
      // Suppose vr1 = op vr2, x
      // and vr1 and vr2 are coalesced. vr2 should still be marked kill
      // unless it is a two-address operand.
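      // Also skip kills on instructions that were themselves removed during
      // coalescing; their kill flags no longer matter.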
      if (li_->isRemoved(Kill) || hasRegisterDef(Kill, repReg))
        continue;
      if (LI.liveAt(li_->getInstructionIndex(Kill) + InstrSlots::NUM))
        unsetRegisterKill(Kill, repReg);
    }
    RegNum = JoinedLIs.find_next(RegNum);
  }

  if (NewHeuristic)
    delete JoinQueue;

  DOUT << "*** Register mapping ***\n";
  for (unsigned i = 0, e = r2rMap_.size(); i != e; ++i)
    if (r2rMap_[i]) {
      DOUT << " reg " << i << " -> ";
      DEBUG(printRegName(r2rMap_[i]));
      DOUT << "\n";
    }
}

/// Return true if the two specified registers belong to different register
/// classes.  The registers may be either phys or virt regs.
bool SimpleRegisterCoalescing::differingRegisterClasses(unsigned RegA,
                                                        unsigned RegB) const {

  // Get the register classes for the first reg.
  if (MRegisterInfo::isPhysicalRegister(RegA)) {
    assert(MRegisterInfo::isVirtualRegister(RegB) &&
           "Shouldn't consider two physregs!");
    return !mf_->getRegInfo().getRegClass(RegB)->contains(RegA);
  }

  // Compare against the regclass for the second reg.
  const TargetRegisterClass *RegClass = mf_->getRegInfo().getRegClass(RegA);
  if (MRegisterInfo::isVirtualRegister(RegB))
    return RegClass != mf_->getRegInfo().getRegClass(RegB);
  else
    return !RegClass->contains(RegB);
}

/// lastRegisterUse - Returns the last use of the specific register between
/// cycles Start and End. It also returns the use operand by reference. It
/// returns NULL if there are no uses.
MachineInstr *
SimpleRegisterCoalescing::lastRegisterUse(unsigned Start, unsigned End,
                                          unsigned Reg, MachineOperand *&MOU) {
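  // Walk backward one instruction slot at a time, starting at the last slot
  // boundary before End, skipping indices that no longer map to an
  // instruction.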
  int e = (End-1) / InstrSlots::NUM * InstrSlots::NUM;
  int s = Start;
  while (e >= s) {
    // Skip deleted instructions
    MachineInstr *MI = li_->getInstructionFromIndex(e);
    while ((e - InstrSlots::NUM) >= s && !MI) {
      e -= InstrSlots::NUM;
      MI = li_->getInstructionFromIndex(e);
    }
    if (e < s || MI == NULL)
      return NULL;

    for (unsigned i = 0, NumOps = MI->getNumOperands(); i != NumOps; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isRegister() && MO.isUse() && MO.getReg() &&
          mri_->regsOverlap(rep(MO.getReg()), Reg)) {
        MOU = &MO;
        return MI;
      }
    }

    e -= InstrSlots::NUM;
  }

  return NULL;
}


/// findDefOperand - Returns the MachineOperand that is a def of the specific
/// register. It returns NULL if the def is not found.
MachineOperand *SimpleRegisterCoalescing::findDefOperand(MachineInstr *MI,
                                                         unsigned Reg) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isRegister() && MO.isDef() &&
        mri_->regsOverlap(rep(MO.getReg()), Reg))
      return &MO;
  }
  return NULL;
}

/// unsetRegisterKill - Unset the IsKill property of all uses of the specified
/// register in the given instruction.
void SimpleRegisterCoalescing::unsetRegisterKill(MachineInstr *MI,
                                                 unsigned Reg) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isRegister() && MO.isKill() && MO.getReg() &&
        mri_->regsOverlap(rep(MO.getReg()), Reg))
      MO.setIsKill(false);
  }
}

/// unsetRegisterKills - Unset the IsKill property of all uses of the specified
/// register between cycles Start and End.
void SimpleRegisterCoalescing::unsetRegisterKills(unsigned Start, unsigned End,
                                                  unsigned Reg) {
  int e = (End-1) / InstrSlots::NUM * InstrSlots::NUM;
  int s = Start;
  while (e >= s) {
    // Skip deleted instructions
    MachineInstr *MI = li_->getInstructionFromIndex(e);
    while ((e - InstrSlots::NUM) >= s && !MI) {
      e -= InstrSlots::NUM;
      MI = li_->getInstructionFromIndex(e);
    }
    if (e < s || MI == NULL)
      return;

    for (unsigned i = 0, NumOps = MI->getNumOperands(); i != NumOps; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isRegister() && MO.isKill() && MO.getReg() &&
          mri_->regsOverlap(rep(MO.getReg()), Reg)) {
        MO.setIsKill(false);
      }
    }

    e -= InstrSlots::NUM;
  }
}

/// hasRegisterDef - True if the instruction defines the specific register.
///
bool SimpleRegisterCoalescing::hasRegisterDef(MachineInstr *MI, unsigned Reg) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isRegister() && MO.isDef() &&
        mri_->regsOverlap(rep(MO.getReg()), Reg))
      return true;
  }
  return false;
}

void SimpleRegisterCoalescing::printRegName(unsigned reg) const {
  if (MRegisterInfo::isPhysicalRegister(reg))
    cerr << mri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void SimpleRegisterCoalescing::releaseMemory() {
  for (unsigned i = 0, e = r2rMap_.size(); i != e; ++i)
    r2rRevMap_[i].clear();
  r2rRevMap_.clear();
  r2rMap_.clear();
  JoinedLIs.clear();
  SubRegIdxes.clear();
  JoinedCopies.clear();
}

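/// isZeroLengthInterval - Return true if every range of the interval is no
/// longer than one instruction, i.e. each use immediately follows its def.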
static bool isZeroLengthInterval(LiveInterval *li) {
  for (LiveInterval::Ranges::const_iterator
         i = li->ranges.begin(), e = li->ranges.end(); i != e; ++i)
    if (i->end - i->start > LiveIntervals::InstrSlots::NUM)
      return false;
  return true;
}

bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  tm_ = &fn.getTarget();
  mri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  li_ = &getAnalysis<LiveIntervals>();
  lv_ = &getAnalysis<LiveVariables>();
  loopInfo = &getAnalysis<MachineLoopInfo>();

  DOUT << "********** SIMPLE REGISTER COALESCING **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  allocatableRegs_ = mri_->getAllocatableSet(fn);
  for (MRegisterInfo::regclass_iterator I = mri_->regclass_begin(),
         E = mri_->regclass_end(); I != E; ++I)
    allocatableRCRegs_.insert(std::make_pair(*I,
                                             mri_->getAllocatableSet(fn, *I)));

  MachineRegisterInfo &RegInfo = mf_->getRegInfo();
  r2rMap_.grow(RegInfo.getLastVirtReg());
  r2rRevMap_.grow(RegInfo.getLastVirtReg());

  // Join (coalesce) intervals if requested.
  IndexedMap<unsigned, VirtReg2IndexFunctor> RegSubIdxMap;
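  // For registers coalesced away through extract_subreg, RegSubIdxMap records
  // which sub-register index each original virtual register stood for; it is
  // consulted when operands are rewritten in the final pass below.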
  if (EnableJoining) {
    joinIntervals();
    DOUT << "********** INTERVALS POST JOINING **********\n";
    for (LiveIntervals::iterator I = li_->begin(), E = li_->end(); I != E; ++I){
      I->second.print(DOUT, mri_);
      DOUT << "\n";
    }

    // Delete all coalesced copies.
    for (SmallPtrSet<MachineInstr*,32>::iterator I = JoinedCopies.begin(),
           E = JoinedCopies.end(); I != E; ++I) {
      li_->RemoveMachineInstrFromMaps(*I);
      (*I)->eraseFromParent();
    }

    // Transfer the sub-register index info into RegSubIdxMap now that
    // coalescing information is complete.
    RegSubIdxMap.grow(RegInfo.getLastVirtReg()+1);
    while (!SubRegIdxes.empty()) {
      std::pair<unsigned, unsigned> RI = SubRegIdxes.back();
      SubRegIdxes.pop_back();
      RegSubIdxMap[RI.first] = RI.second;
    }
  }

  // Perform a final pass over the instructions: compute spill weights,
  // coalesce virtual registers, and remove identity moves.
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    MachineBasicBlock* mbb = mbbi;
    unsigned loopDepth = loopInfo->getLoopDepth(mbb);

    for (MachineBasicBlock::iterator mii = mbb->begin(), mie = mbb->end();
         mii != mie; ) {
      // If the move will be an identity move, delete it.
      unsigned srcReg, dstReg, RegRep;
      if (tii_->isMoveInstr(*mii, srcReg, dstReg) &&
          (RegRep = rep(srcReg)) == rep(dstReg)) {
        // Remove from def list.
        LiveInterval &RegInt = li_->getOrCreateInterval(RegRep);
        MachineOperand *MO = mii->findRegisterDefOperand(dstReg);
        // If the def of this move instruction is dead, remove its live range
        // from the destination register's live interval.
        if (MO->isDead()) {
          unsigned MoveIdx = li_->getDefIndex(li_->getInstructionIndex(mii));
          LiveInterval::iterator MLR = RegInt.FindLiveRangeContaining(MoveIdx);
          RegInt.removeRange(MLR->start, MoveIdx+1);
          if (RegInt.empty())
            li_->removeInterval(RegRep);
        }
        li_->RemoveMachineInstrFromMaps(mii);
        mii = mbbi->erase(mii);
        ++numPeep;
      } else {
        SmallSet<unsigned, 4> UniqueUses;
        for (unsigned i = 0, e = mii->getNumOperands(); i != e; ++i) {
          const MachineOperand &mop = mii->getOperand(i);
          if (mop.isRegister() && mop.getReg() &&
              MRegisterInfo::isVirtualRegister(mop.getReg())) {
            // Replace the register with its representative register.
            unsigned OrigReg = mop.getReg();
            unsigned reg = rep(OrigReg);
            unsigned SubIdx = RegSubIdxMap[OrigReg];
            if (SubIdx && MRegisterInfo::isPhysicalRegister(reg))
              mii->getOperand(i).setReg(mri_->getSubReg(reg, SubIdx));
            else {
              mii->getOperand(i).setReg(reg);
              mii->getOperand(i).setSubReg(SubIdx);
            }

            // Multiple uses of reg by the same instruction. It should not
            // contribute to spill weight again.
            if (UniqueUses.count(reg) != 0)
              continue;
            LiveInterval &RegInt = li_->getInterval(reg);
            RegInt.weight +=
              li_->getSpillWeight(mop.isDef(), mop.isUse(), loopDepth);
            UniqueUses.insert(reg);
          }
        }
        ++mii;
      }
    }
  }

  for (LiveIntervals::iterator I = li_->begin(), E = li_->end(); I != E; ++I) {
    LiveInterval &LI = I->second;
    if (MRegisterInfo::isVirtualRegister(LI.reg)) {
      // If the live interval length is essentially zero, i.e. in every live
      // range the use follows def immediately, it doesn't make sense to spill
      // it and hope it will be easier to allocate for this li.
      if (isZeroLengthInterval(&LI))
        LI.weight = HUGE_VALF;
      else {
        bool isLoad = false;
        if (li_->isReMaterializable(LI, isLoad)) {
          // If all of the definitions of the interval are re-materializable,
          // it is a preferred candidate for spilling. If none of the defs are
          // loads, then it's potentially very cheap to re-materialize.
          // FIXME: this gets much more complicated once we support non-trivial
          // re-materialization.
          if (isLoad)
            LI.weight *= 0.9F;
          else
            LI.weight *= 0.5F;
        }
      }

      // Slightly prefer live intervals that have been assigned a preferred
      // register.
      if (LI.preference)
        LI.weight *= 1.01F;

      // Divide the weight of the interval by its size.  This encourages
      // spilling of intervals that are large and have few uses, and
      // discourages spilling of small intervals with many uses.
      LI.weight /= LI.getSize();
    }
  }

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void SimpleRegisterCoalescing::print(std::ostream &O, const Module* m) const {
  li_->print(O, m);
}

RegisterCoalescer* llvm::createSimpleRegisterCoalescer() {
  return new SimpleRegisterCoalescing();
}

// Make sure that anything that uses RegisterCoalescer pulls in this file...
DEFINING_FILE_FOR(SimpleRegisterCoalescing)