//===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "AllocationOrder.h"
#include "LiveIntervalUnion.h"
#include "LiveRangeEdit.h"
#include "RegAllocBase.h"
#include "Spiller.h"
#include "SpillPlacement.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Function.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineLoopRanges.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/RegisterCoalescer.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Timer.h"

#include <queue>

using namespace llvm;

STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumReassigned,   "Number of interferences reassigned");
STATISTIC(NumEvicted,      "Number of interferences evicted");

static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);

namespace {
class RAGreedy : public MachineFunctionPass, public RegAllocBase {
  // context
  MachineFunction *MF;
  BitVector ReservedRegs;

  // analyses
  SlotIndexes *Indexes;
  LiveStacks *LS;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  MachineLoopRanges *LoopRanges;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;

  // state
  std::auto_ptr<Spiller> SpillerInstance;
  std::auto_ptr<SplitAnalysis> SA;
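  // The priority queue holds (priority, virtual register number) pairs, and
  // Generation counts how many times each register has been queued so that
  // repeat offenders can be given a lower priority (see enqueue()).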
  std::priority_queue<std::pair<unsigned, unsigned> > Queue;
  IndexedMap<unsigned, VirtReg2IndexFunctor> Generation;

  // splitting state.

  /// All basic blocks where the current register is live.
  SmallVector<SpillPlacement::BlockConstraint, 8> SpillConstraints;

  /// For every instruction in SA->UseSlots, store the previous non-copy
  /// instruction.
  SmallVector<SlotIndex, 8> PrevSlot;

public:
  RAGreedy();

  /// Return the pass name.
  virtual const char* getPassName() const {
    return "Greedy Register Allocator";
  }

  /// RAGreedy analysis usage.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual void releaseMemory();
  virtual Spiller &spiller() { return *SpillerInstance; }
  virtual void enqueue(LiveInterval *LI);
  virtual LiveInterval *dequeue();
  virtual unsigned selectOrSplit(LiveInterval&,
                                 SmallVectorImpl<LiveInterval*>&);

  /// Perform register allocation.
  virtual bool runOnMachineFunction(MachineFunction &mf);

  static char ID;

private:
  bool checkUncachedInterference(LiveInterval&, unsigned);
  LiveInterval *getSingleInterference(LiveInterval&, unsigned);
  bool reassignVReg(LiveInterval &InterferingVReg, unsigned OldPhysReg);
  float calcInterferenceWeight(LiveInterval&, unsigned);
  float calcInterferenceInfo(LiveInterval&, unsigned);
  float calcGlobalSplitCost(const BitVector&);
  void splitAroundRegion(LiveInterval&, unsigned, const BitVector&,
                         SmallVectorImpl<LiveInterval*>&);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  SlotIndex getPrevMappedIndex(const MachineInstr*);
  void calcPrevSlots();
  unsigned nextSplitPoint(unsigned);
  bool canEvictInterference(LiveInterval&, unsigned, unsigned, float&);

  unsigned tryReassign(LiveInterval&, AllocationOrder&,
                       SmallVectorImpl<LiveInterval*>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<LiveInterval*>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&);
  unsigned trySpillInterferences(LiveInterval&, AllocationOrder&,
                                 SmallVectorImpl<LiveInterval*>&);
};
} // end anonymous namespace

char RAGreedy::ID = 0;

FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
  initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
  initializeRegisterCoalescerAnalysisGroup(*PassRegistry::getPassRegistry());
  initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
  initializeLiveStacksPass(*PassRegistry::getPassRegistry());
  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
  initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
  initializeMachineLoopRangesPass(*PassRegistry::getPassRegistry());
  initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
  initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
  initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  if (StrongPHIElim)
    AU.addRequiredID(StrongPHIEliminationID);
  AU.addRequiredTransitive<RegisterCoalescer>();
  AU.addRequired<CalculateSpillWeights>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<MachineLoopRanges>();
  AU.addPreserved<MachineLoopRanges>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset(0);
  Generation.clear();
  RegAllocBase::releaseMemory();
}

void RAGreedy::enqueue(LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (priority, reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  const unsigned Hint = VRM->getRegAllocPref(Reg);
  unsigned Prio;

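  // The priority queue is a max-heap, so a larger Prio is dequeued first.
  // Bit 31 separates first-time ranges from repeat offenders, and the live
  // range size fills the low bits so that long first-generation ranges win.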
  Generation.grow(Reg);
  if (++Generation[Reg] == 1)
    // 1st generation ranges are handled first, long -> short.
    Prio = (1u << 31) + Size;
  else
    // Repeat offenders are handled second, short -> long.
    Prio = (1u << 30) - Size;

  // Boost ranges that have a physical register hint.
  if (TargetRegisterInfo::isPhysicalRegister(Hint))
    Prio |= (1u << 30);

  Queue.push(std::make_pair(Prio, Reg));
}

LiveInterval *RAGreedy::dequeue() {
  if (Queue.empty())
    return 0;
  LiveInterval *LI = &LIS->getInterval(Queue.top().second);
  Queue.pop();
  return LI;
}

//===----------------------------------------------------------------------===//
//                           Register Reassignment
//===----------------------------------------------------------------------===//

// Check interference without using the cache.
bool RAGreedy::checkUncachedInterference(LiveInterval &VirtReg,
                                         unsigned PhysReg) {
  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query subQ(&VirtReg, &PhysReg2LiveUnion[*AliasI]);
    if (subQ.checkInterference())
      return true;
  }
  return false;
}

/// getSingleInterference - Return the single interfering virtual register
/// assigned to PhysReg. Return 0 if more than one virtual register is
/// interfering.
LiveInterval *RAGreedy::getSingleInterference(LiveInterval &VirtReg,
                                              unsigned PhysReg) {
  // Check physreg and aliases.
  LiveInterval *Interference = 0;
  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    if (Q.checkInterference()) {
      if (Interference)
        return 0;
      if (Q.collectInterferingVRegs(2) > 1)
        return 0;
      Interference = Q.interferingVRegs().front();
    }
  }
  return Interference;
}

// Attempt to reassign this virtual register to a different physical register.
//
// FIXME: we are not yet caching these "second-level" interferences discovered
// in the sub-queries. These interferences can change with each call to
// selectOrSplit. However, we could implement a "may-interfere" cache that
// could be conservatively dirtied when we reassign or split.
//
// FIXME: This may result in a lot of alias queries. We could summarize alias
// live intervals in their parent register's live union, but it's messy.
bool RAGreedy::reassignVReg(LiveInterval &InterferingVReg,
                            unsigned WantedPhysReg) {
  assert(TargetRegisterInfo::isVirtualRegister(InterferingVReg.reg) &&
         "Can only reassign virtual registers");
  assert(TRI->regsOverlap(WantedPhysReg, VRM->getPhys(InterferingVReg.reg)) &&
         "inconsistent phys reg assignment");

  AllocationOrder Order(InterferingVReg.reg, *VRM, ReservedRegs);
  while (unsigned PhysReg = Order.next()) {
    // Don't reassign to a WantedPhysReg alias.
    if (TRI->regsOverlap(PhysReg, WantedPhysReg))
      continue;

    if (checkUncachedInterference(InterferingVReg, PhysReg))
      continue;

    // Reassign the interfering virtual reg to this physical reg.
    unsigned OldAssign = VRM->getPhys(InterferingVReg.reg);
    DEBUG(dbgs() << "reassigning: " << InterferingVReg << " from " <<
          TRI->getName(OldAssign) << " to " << TRI->getName(PhysReg) << '\n');
    unassign(InterferingVReg, OldAssign);
    assign(InterferingVReg, PhysReg);
    ++NumReassigned;
    return true;
  }
  return false;
}

/// tryReassign - Try to reassign a single interference to a different physreg.
/// @param VirtReg Currently unassigned virtual register.
/// @param Order   Physregs to try.
/// @return        Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryReassign(LiveInterval &VirtReg, AllocationOrder &Order,
                               SmallVectorImpl<LiveInterval*> &NewVRegs){
  NamedRegionTimer T("Reassign", TimerGroupName, TimePassesIsEnabled);

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    LiveInterval *InterferingVReg = getSingleInterference(VirtReg, PhysReg);
    if (!InterferingVReg)
      continue;
    if (TargetRegisterInfo::isPhysicalRegister(InterferingVReg->reg))
      continue;
    if (reassignVReg(*InterferingVReg, PhysReg))
      return PhysReg;
  }
  return 0;
}


//===----------------------------------------------------------------------===//
//                           Interference eviction
//===----------------------------------------------------------------------===//

/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted. Set MaxWeight to the maximal spill weight of an
/// interference.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    unsigned Size, float &MaxWeight) {
  float Weight = 0;
  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    // If there are 10 or more interferences, chances are one is smaller.
    if (Q.collectInterferingVRegs(10) >= 10)
      return false;

    // Check if any interfering live range is shorter than VirtReg.
    for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
      LiveInterval *Intf = Q.interferingVRegs()[i];
      if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
        return false;
      if (Intf->getSize() <= Size)
        return false;
      Weight = std::max(Weight, Intf->weight);
    }
  }
  MaxWeight = Weight;
  return true;
}

/// tryEvict - Try to evict all interferences for a physreg.
/// @param VirtReg Currently unassigned virtual register.
/// @param Order   Physregs to try.
/// @return        Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs){
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);

  // We can only evict interference if all interfering registers are virtual
  // and longer than VirtReg.
  const unsigned Size = VirtReg.getSize();

  // Keep track of the lightest single interference seen so far.
  float BestWeight = 0;
  unsigned BestPhys = 0;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    float Weight = 0;
    if (!canEvictInterference(VirtReg, PhysReg, Size, Weight))
      continue;

    // This is an eviction candidate.
    DEBUG(dbgs() << "max " << PrintReg(PhysReg, TRI) << " interference = "
                 << Weight << '\n');
    if (BestPhys && Weight >= BestWeight)
      continue;

    // Best so far.
    BestPhys = PhysReg;
    BestWeight = Weight;
  }

  if (!BestPhys)
    return 0;

  DEBUG(dbgs() << "evicting " << PrintReg(BestPhys, TRI) << " interference\n");
  for (const unsigned *AliasI = TRI->getOverlaps(BestPhys); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
    for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
      LiveInterval *Intf = Q.interferingVRegs()[i];
      unassign(*Intf, VRM->getPhys(Intf->reg));
      ++NumEvicted;
      NewVRegs.push_back(Intf);
    }
  }
  return BestPhys;
}


//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//

/// calcInterferenceInfo - Compute per-block outgoing and ingoing constraints
/// when considering interference from PhysReg. Also compute an optimistic
/// local cost of this interference pattern.
///
/// The final cost of a split is the local cost + global cost of preferences
/// broken by SpillPlacement.
///
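/// Each block boundary gets one of the SpillPlacement constraints: PrefReg
/// when the block has uses and the value is live across that boundary,
/// PrefSpill when interference makes keeping the value in PhysReg there
/// undesirable, and MustSpill when interference is live through the boundary
/// (or past the last split point), so the value cannot be in PhysReg at all.
///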
float RAGreedy::calcInterferenceInfo(LiveInterval &VirtReg, unsigned PhysReg) {
  // Reset interference dependent info.
  SpillConstraints.resize(SA->LiveBlocks.size());
  for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
    SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
    SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
    BC.Number = BI.MBB->getNumber();
    BC.Entry = (BI.Uses && BI.LiveIn) ?
      SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = (BI.Uses && BI.LiveOut) ?
      SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BI.OverlapEntry = BI.OverlapExit = false;
  }

  // Add interference info from each PhysReg alias.
  for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
    if (!query(VirtReg, *AI).checkInterference())
      continue;
    LiveIntervalUnion::SegmentIter IntI =
      PhysReg2LiveUnion[*AI].find(VirtReg.beginIndex());
    if (!IntI.valid())
      continue;

    // Determine which blocks have interference live in or after the last
    // split point.
    for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
      SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
      SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
      SlotIndex Start, Stop;
      tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);

      // Skip interference-free blocks.
      if (IntI.start() >= Stop)
        continue;

      // Is the interference live-in?
      if (BI.LiveIn) {
        IntI.advanceTo(Start);
        if (!IntI.valid())
          break;
        if (IntI.start() <= Start)
          BC.Entry = SpillPlacement::MustSpill;
      }

      // Is the interference overlapping the last split point?
      if (BI.LiveOut) {
        if (IntI.stop() < BI.LastSplitPoint)
          IntI.advanceTo(BI.LastSplitPoint.getPrevSlot());
        if (!IntI.valid())
          break;
        if (IntI.start() < Stop)
          BC.Exit = SpillPlacement::MustSpill;
      }
    }

    // Rewind iterator and check other interferences.
    IntI.find(VirtReg.beginIndex());
    for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
      SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
      SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
      SlotIndex Start, Stop;
      tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);

      // Skip interference-free blocks.
      if (IntI.start() >= Stop)
        continue;

      // Handle transparent blocks with interference separately.
      // Transparent blocks never incur any fixed cost.
      if (BI.LiveThrough && !BI.Uses) {
        IntI.advanceTo(Start);
        if (!IntI.valid())
          break;
        if (IntI.start() >= Stop)
          continue;

        if (BC.Entry != SpillPlacement::MustSpill)
          BC.Entry = SpillPlacement::PrefSpill;
        if (BC.Exit != SpillPlacement::MustSpill)
          BC.Exit = SpillPlacement::PrefSpill;
        continue;
      }

      // Now we only have blocks with uses left.
      // Check if the interference overlaps the uses.
      assert(BI.Uses && "Non-transparent block without any uses");

      // Check interference on entry.
      if (BI.LiveIn && BC.Entry != SpillPlacement::MustSpill) {
        IntI.advanceTo(Start);
        if (!IntI.valid())
          break;
        // Not live in, but before the first use.
        if (IntI.start() < BI.FirstUse) {
          BC.Entry = SpillPlacement::PrefSpill;
          // If the block contains a kill from an earlier split, never split
          // again in the same block.
          if (!BI.LiveThrough && !SA->isOriginalEndpoint(BI.Kill))
            BC.Entry = SpillPlacement::MustSpill;
        }
      }

      // Does interference overlap the uses in the entry segment
      // [FirstUse;Kill)?
      if (BI.LiveIn && !BI.OverlapEntry) {
        IntI.advanceTo(BI.FirstUse);
        if (!IntI.valid())
          break;
        // A live-through interval has no kill.
        // Check [FirstUse;LastUse) instead.
        if (IntI.start() < (BI.LiveThrough ? BI.LastUse : BI.Kill))
          BI.OverlapEntry = true;
      }

      // Does interference overlap the uses in the exit segment [Def;LastUse)?
      if (BI.LiveOut && !BI.LiveThrough && !BI.OverlapExit) {
        IntI.advanceTo(BI.Def);
        if (!IntI.valid())
          break;
        if (IntI.start() < BI.LastUse)
          BI.OverlapExit = true;
      }

      // Check interference on exit.
      if (BI.LiveOut && BC.Exit != SpillPlacement::MustSpill) {
        // Check interference between LastUse and Stop.
        if (BC.Exit != SpillPlacement::PrefSpill) {
          IntI.advanceTo(BI.LastUse);
          if (!IntI.valid())
            break;
          if (IntI.start() < Stop) {
            BC.Exit = SpillPlacement::PrefSpill;
            // Avoid splitting twice in the same block.
            if (!BI.LiveThrough && !SA->isOriginalEndpoint(BI.Def))
              BC.Exit = SpillPlacement::MustSpill;
          }
        }
      }
    }
  }

  // Accumulate a local cost of this interference pattern.
  float LocalCost = 0;
  for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
    SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
    if (!BI.Uses)
      continue;
    SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
    unsigned Inserts = 0;

    // Do we need spill code for the entry segment?
    if (BI.LiveIn)
      Inserts += BI.OverlapEntry || BC.Entry != SpillPlacement::PrefReg;

    // For the exit segment?
    if (BI.LiveOut)
      Inserts += BI.OverlapExit || BC.Exit != SpillPlacement::PrefReg;

    // The local cost of spill code in this block is the block frequency times
    // the number of spill instructions inserted.
    if (Inserts)
      LocalCost += Inserts * SpillPlacer->getBlockFrequency(BI.MBB);
  }
  DEBUG(dbgs() << "Local cost of " << PrintReg(PhysReg, TRI) << " = "
               << LocalCost << '\n');
  return LocalCost;
}

/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SpillConstraints.
///
float RAGreedy::calcGlobalSplitCost(const BitVector &LiveBundles) {
  float GlobalCost = 0;
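  // A preference is broken when SpillPlacement put the bundle containing a
  // block boundary in a register (bit set in LiveBundles) while the boundary
  // prefers a spill, or vice versa. Each broken preference costs one spill or
  // reload at that boundary, weighted by the block frequency.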
  for (unsigned i = 0, e = SpillConstraints.size(); i != e; ++i) {
    SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
    unsigned Inserts = 0;
    // Broken entry preference?
    Inserts += LiveBundles[Bundles->getBundle(BC.Number, 0)] !=
                 (BC.Entry == SpillPlacement::PrefReg);
    // Broken exit preference?
    Inserts += LiveBundles[Bundles->getBundle(BC.Number, 1)] !=
                 (BC.Exit == SpillPlacement::PrefReg);
    if (Inserts)
      GlobalCost +=
        Inserts * SpillPlacer->getBlockFrequency(SA->LiveBlocks[i].MBB);
  }
  DEBUG(dbgs() << "Global cost = " << GlobalCost << '\n');
  return GlobalCost;
}

/// splitAroundRegion - Split VirtReg around the region determined by
/// LiveBundles. Make an effort to avoid interference from PhysReg.
///
/// The 'register' interval is going to contain as many uses as possible while
/// avoiding interference. The 'stack' interval is the complement constructed
/// by SplitEditor. It will contain the rest.
///
void RAGreedy::splitAroundRegion(LiveInterval &VirtReg, unsigned PhysReg,
                                 const BitVector &LiveBundles,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  DEBUG({
    dbgs() << "Splitting around region for " << PrintReg(PhysReg, TRI)
           << " with bundles";
    for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });

  // First compute interference ranges in the live blocks.
  typedef std::pair<SlotIndex, SlotIndex> IndexPair;
  SmallVector<IndexPair, 8> InterferenceRanges;
  InterferenceRanges.resize(SA->LiveBlocks.size());
  for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
    if (!query(VirtReg, *AI).checkInterference())
      continue;
    LiveIntervalUnion::SegmentIter IntI =
      PhysReg2LiveUnion[*AI].find(VirtReg.beginIndex());
    if (!IntI.valid())
      continue;
    for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
      const SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
      IndexPair &IP = InterferenceRanges[i];
      SlotIndex Start, Stop;
      tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
      // Skip interference-free blocks.
      if (IntI.start() >= Stop)
        continue;

      // First interference in block.
      if (BI.LiveIn) {
        IntI.advanceTo(Start);
        if (!IntI.valid())
          break;
        if (IntI.start() >= Stop)
          continue;
        if (!IP.first.isValid() || IntI.start() < IP.first)
          IP.first = IntI.start();
      }

      // Last interference in block.
      if (BI.LiveOut) {
        IntI.advanceTo(Stop);
        if (!IntI.valid() || IntI.start() >= Stop)
          --IntI;
        if (IntI.stop() <= Start)
          continue;
        if (!IP.second.isValid() || IntI.stop() > IP.second)
          IP.second = IntI.stop();
      }
    }
  }

  SmallVector<LiveInterval*, 4> SpillRegs;
  LiveRangeEdit LREdit(VirtReg, NewVRegs, SpillRegs);
  SplitEditor SE(*SA, *LIS, *VRM, *DomTree, LREdit);

  // Create the main cross-block interval.
  SE.openIntv();

  // First add all defs that are live out of a block.
  for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
    SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
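    // RegIn/RegOut say whether the spill placer wants the value in a register
    // on entry to and exit from this block, i.e. whether the corresponding
    // edge bundle was selected in LiveBundles.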
    bool RegIn  = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];

    // Should the register be live out?
    if (!BI.LiveOut || !RegOut)
      continue;

    IndexPair &IP = InterferenceRanges[i];
    SlotIndex Start, Stop;
    tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);

    DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " -> EB#"
                 << Bundles->getBundle(BI.MBB->getNumber(), 1)
                 << " intf [" << IP.first << ';' << IP.second << ')');

    // The interference interval should either be invalid or overlap MBB.
    assert((!IP.first.isValid() || IP.first < Stop) && "Bad interference");
    assert((!IP.second.isValid() || IP.second > Start) && "Bad interference");

    // Check interference leaving the block.
    if (!IP.second.isValid()) {
      // Block is interference-free.
      DEBUG(dbgs() << ", no interference");
      if (!BI.Uses) {
        assert(BI.LiveThrough && "No uses, but not live through block?");
        // Block is live-through without interference.
        DEBUG(dbgs() << ", no uses"
                     << (RegIn ? ", live-through.\n" : ", stack in.\n"));
        if (!RegIn)
          SE.enterIntvAtEnd(*BI.MBB);
        continue;
      }
      if (!BI.LiveThrough) {
        DEBUG(dbgs() << ", not live-through.\n");
        SE.useIntv(SE.enterIntvBefore(BI.Def), Stop);
        continue;
      }
      if (!RegIn) {
        // Block is live-through, but entry bundle is on the stack.
        // Reload just before the first use.
        DEBUG(dbgs() << ", not live-in, enter before first use.\n");
        SE.useIntv(SE.enterIntvBefore(BI.FirstUse), Stop);
        continue;
      }
      DEBUG(dbgs() << ", live-through.\n");
      continue;
    }

    // Block has interference.
    DEBUG(dbgs() << ", interference to " << IP.second);

    if (!BI.LiveThrough && IP.second <= BI.Def) {
      // The interference doesn't reach the outgoing segment.
      DEBUG(dbgs() << " doesn't affect def from " << BI.Def << '\n');
      SE.useIntv(BI.Def, Stop);
      continue;
    }

    if (!BI.Uses) {
      // No uses in block, avoid interference by reloading as late as possible.
      DEBUG(dbgs() << ", no uses.\n");
      SlotIndex SegStart = SE.enterIntvAtEnd(*BI.MBB);
      assert(SegStart >= IP.second && "Couldn't avoid interference");
      continue;
    }

    if (IP.second.getBoundaryIndex() < BI.LastUse) {
      // There are interference-free uses at the end of the block.
      // Find the first use that can get the live-out register.
      SmallVectorImpl<SlotIndex>::const_iterator UI =
        std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
                         IP.second.getBoundaryIndex());
      assert(UI != SA->UseSlots.end() && "Couldn't find last use");
      SlotIndex Use = *UI;
      assert(Use <= BI.LastUse && "Couldn't find last use");
      // Only attempt a split before the last split point.
      if (Use.getBaseIndex() <= BI.LastSplitPoint) {
        DEBUG(dbgs() << ", free use at " << Use << ".\n");
        SlotIndex SegStart = SE.enterIntvBefore(Use);
        assert(SegStart >= IP.second && "Couldn't avoid interference");
        assert(SegStart < BI.LastSplitPoint && "Impossible split point");
        SE.useIntv(SegStart, Stop);
        continue;
      }
    }

    // Interference is after the last use.
    DEBUG(dbgs() << " after last use.\n");
    SlotIndex SegStart = SE.enterIntvAtEnd(*BI.MBB);
    assert(SegStart >= IP.second && "Couldn't avoid interference");
  }

  // Now all defs leading to live bundles are handled, do everything else.
  for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
    SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];

    // Is the register live-in?
    if (!BI.LiveIn || !RegIn)
      continue;

    // We have an incoming register. Check for interference.
    IndexPair &IP = InterferenceRanges[i];
    SlotIndex Start, Stop;
    tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);

    DEBUG(dbgs() << "EB#" << Bundles->getBundle(BI.MBB->getNumber(), 0)
                 << " -> BB#" << BI.MBB->getNumber());

    // Check interference entering the block.
    if (!IP.first.isValid()) {
      // Block is interference-free.
      DEBUG(dbgs() << ", no interference");
      if (!BI.Uses) {
        assert(BI.LiveThrough && "No uses, but not live through block?");
        // Block is live-through without interference.
        if (RegOut) {
          DEBUG(dbgs() << ", no uses, live-through.\n");
          SE.useIntv(Start, Stop);
        } else {
          DEBUG(dbgs() << ", no uses, stack-out.\n");
          SE.leaveIntvAtTop(*BI.MBB);
        }
        continue;
      }
      if (!BI.LiveThrough) {
        DEBUG(dbgs() << ", killed in block.\n");
        SE.useIntv(Start, SE.leaveIntvAfter(BI.Kill));
        continue;
      }
      if (!RegOut) {
        // Block is live-through, but exit bundle is on the stack.
        // Spill immediately after the last use.
        if (BI.LastUse < BI.LastSplitPoint) {
          DEBUG(dbgs() << ", uses, stack-out.\n");
          SE.useIntv(Start, SE.leaveIntvAfter(BI.LastUse));
          continue;
        }
        // The last use is after the last split point, it is probably an
        // indirect jump.
        DEBUG(dbgs() << ", uses at " << BI.LastUse << " after split point "
                     << BI.LastSplitPoint << ", stack-out.\n");
        SlotIndex SegEnd = SE.leaveIntvBefore(BI.LastSplitPoint);
        SE.useIntv(Start, SegEnd);
        // Run a double interval from the split to the last use.
        // This makes it possible to spill the complement without affecting the
        // indirect branch.
        SE.overlapIntv(SegEnd, BI.LastUse);
        continue;
      }
      // Register is live-through.
      DEBUG(dbgs() << ", uses, live-through.\n");
      SE.useIntv(Start, Stop);
      continue;
    }

    // Block has interference.
    DEBUG(dbgs() << ", interference from " << IP.first);

    if (!BI.LiveThrough && IP.first >= BI.Kill) {
      // The interference doesn't reach the outgoing segment.
      DEBUG(dbgs() << " doesn't affect kill at " << BI.Kill << '\n');
      SE.useIntv(Start, BI.Kill);
      continue;
    }

    if (!BI.Uses) {
      // No uses in block, avoid interference by spilling as soon as possible.
      DEBUG(dbgs() << ", no uses.\n");
      SlotIndex SegEnd = SE.leaveIntvAtTop(*BI.MBB);
      assert(SegEnd <= IP.first && "Couldn't avoid interference");
      continue;
    }
    if (IP.first.getBaseIndex() > BI.FirstUse) {
      // There are interference-free uses at the beginning of the block.
      // Find the last use that can get the register.
      SmallVectorImpl<SlotIndex>::const_iterator UI =
        std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
                         IP.first.getBaseIndex());
      assert(UI != SA->UseSlots.begin() && "Couldn't find first use");
      SlotIndex Use = (--UI)->getBoundaryIndex();
      DEBUG(dbgs() << ", free use at " << *UI << ".\n");
      SlotIndex SegEnd = SE.leaveIntvAfter(Use);
      assert(SegEnd <= IP.first && "Couldn't avoid interference");
      SE.useIntv(Start, SegEnd);
      continue;
    }

    // Interference is before the first use.
    DEBUG(dbgs() << " before first use.\n");
    SlotIndex SegEnd = SE.leaveIntvAtTop(*BI.MBB);
    assert(SegEnd <= IP.first && "Couldn't avoid interference");
  }

  SE.closeIntv();

  // FIXME: Should we be more aggressive about splitting the stack region into
  // per-block segments? The current approach allows the stack region to
  // separate into connected components. Some components may be allocatable.
  SE.finish();
  ++NumGlobalSplits;

  if (VerifyEnabled) {
    MF->verify(this, "After splitting live range around region");

#ifndef NDEBUG
    // Make sure that at least one of the new intervals can allocate to PhysReg.
    // That was the whole point of splitting the live range.
    bool found = false;
    for (LiveRangeEdit::iterator I = LREdit.begin(), E = LREdit.end(); I != E;
         ++I)
      if (!checkUncachedInterference(**I, PhysReg)) {
        found = true;
        break;
      }
    assert(found && "No allocatable intervals after pointless splitting");
#endif
  }
}

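/// tryRegionSplit - Try to split VirtReg around a region spanning several
/// basic blocks. For every candidate PhysReg, the local interference cost is
/// computed, SpillPlacement picks the edge bundles that should stay in a
/// register, and the cheapest (local + global) candidate is used to drive
/// splitAroundRegion. Always returns 0; the new ranges go into NewVRegs.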
unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<LiveInterval*> &NewVRegs) {
  BitVector LiveBundles, BestBundles;
  float BestCost = 0;
  unsigned BestReg = 0;
  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    float Cost = calcInterferenceInfo(VirtReg, PhysReg);
    if (BestReg && Cost >= BestCost)
      continue;

    SpillPlacer->placeSpills(SpillConstraints, LiveBundles);
    // No live bundles, defer to splitSingleBlocks().
    if (!LiveBundles.any())
      continue;

    Cost += calcGlobalSplitCost(LiveBundles);
    if (!BestReg || Cost < BestCost) {
      BestReg = PhysReg;
      BestCost = Cost;
      BestBundles.swap(LiveBundles);
    }
  }

  if (!BestReg)
    return 0;

  splitAroundRegion(VirtReg, BestReg, BestBundles, NewVRegs);
  return 0;
}


//===----------------------------------------------------------------------===//
//                               Local Splitting
//===----------------------------------------------------------------------===//


/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->LiveBlocks.size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->LiveBlocks.front();
  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx = BI.LiveIn ? BI.FirstUse.getBaseIndex() : BI.FirstUse;
  SlotIndex StopIdx = BI.LiveOut ? BI.LastUse.getBoundaryIndex() : BI.LastUse;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
    if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
           .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstUse to LastUse,
    // so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI = PhysReg2LiveUnion[*AI].find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}

/// getPrevMappedIndex - Return the slot index of the last non-copy instruction
/// before MI that has a slot index. If MI is the first mapped instruction in
/// its block, return the block start index instead.
///
SlotIndex RAGreedy::getPrevMappedIndex(const MachineInstr *MI) {
  assert(MI && "Missing MachineInstr");
  const MachineBasicBlock *MBB = MI->getParent();
  MachineBasicBlock::const_iterator B = MBB->begin(), I = MI;
  while (I != B)
    if (!(--I)->isDebugValue() && !I->isCopy())
      return Indexes->getInstructionIndex(I);
  return Indexes->getMBBStartIdx(MBB);
}

/// calcPrevSlots - Fill in the PrevSlot array with the index of the previous
/// real non-copy instruction for each instruction in SA->UseSlots.
///
void RAGreedy::calcPrevSlots() {
  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  PrevSlot.clear();
  PrevSlot.reserve(Uses.size());
  for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
    const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]);
    PrevSlot.push_back(getPrevMappedIndex(MI).getDefIndex());
  }
}

/// nextSplitPoint - Find the next index into SA->UseSlots > i such that it may
/// be beneficial to split before UseSlots[i].
///
/// 0 is always a valid split point
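/// Splitting between two uses that are adjacent (no other mapped instruction
/// between them) is assumed to be pointless, since the copy inserted by the
/// split would occupy that same gap.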
unsigned RAGreedy::nextSplitPoint(unsigned i) {
  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  const unsigned Size = Uses.size();
  assert(i != Size && "No split points after the end");
  // Allow split before i when Uses[i] is not adjacent to the previous use.
  while (++i != Size && PrevSlot[i].getBaseIndex() <= Uses[i-1].getBaseIndex())
    ;
  return i;
}

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(SA->LiveBlocks.size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->LiveBlocks.front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - A phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case; we simply assume
  // that the interval is continuous from FirstUse to LastUse. We should make
  // sure that we don't do anything illegal to such an interval, though.

  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << SA->UseSlots[i];
    dbgs() << '\n';
  });

  // For every use, find the previous mapped non-copy instruction.
  // We use this to detect valid split points, and to estimate new interval
  // sizes.
  calcPrevSlots();

  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB);
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = nextSplitPoint(1) - 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];
    for (unsigned i = 1; i != SplitAfter; ++i)
      MaxGap = std::max(MaxGap, GapWeight[i]);

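    // Search candidate windows [SplitBefore, SplitAfter]: shrink the window
    // when the interference inside it is too expensive to evict, extend it
    // otherwise, and remember the window with the best weight margin.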
    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;
      if (MaxGap < HUGE_VALF) {
        // Estimate the new spill weight.
        //
        // Each instruction reads and writes the register, except the first
        // instr doesn't read when !FirstLive, and the last instr doesn't write
        // when !LastLive.
        //
        // We will be inserting copies before and after, so the total number of
        // reads and writes is 2 * EstUses.
        //
        const unsigned EstUses = 2*(SplitAfter - SplitBefore) +
                                 2*(LiveBefore + LiveAfter);

        // Try to guess the size of the new interval. This should be trivial,
        // but the slot index of an inserted copy can be a lot smaller than the
        // instruction it is inserted before if there are many dead indexes
        // between them.
        //
        // We measure the distance from the instruction before SplitBefore to
        // get a conservative estimate.
        //
        // The final distance can still be different if inserting copies
        // triggers a slot index renumbering.
        //
        const float EstWeight = normalizeSpillWeight(blockFreq * EstUses,
                              PrevSlot[SplitBefore].distance(Uses[SplitAfter]));
        // Would this split be possible to allocate?
        // Never allocate all gaps, we wouldn't be making progress.
        float Diff = EstWeight - MaxGap;
        DEBUG(dbgs() << " w=" << EstWeight << " d=" << Diff);
        if (Diff > 0) {
          Shrink = false;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        SplitBefore = nextSplitPoint(SplitBefore);
        if (SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      for (unsigned e = nextSplitPoint(SplitAfter + 1) - 1;
           SplitAfter != e; ++SplitAfter)
        MaxGap = std::max(MaxGap, GapWeight[SplitAfter]);
      continue;
    }
  }

  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  SmallVector<LiveInterval*, 4> SpillRegs;
  LiveRangeEdit LREdit(VirtReg, NewVRegs, SpillRegs);
  SplitEditor SE(*SA, *LIS, *VRM, *DomTree, LREdit);

  SE.openIntv();
  SlotIndex SegStart = SE.enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop  = SE.leaveIntvAfter(Uses[BestAfter]);
  SE.useIntv(SegStart, SegStop);
  SE.closeIntv();
  SE.finish();
  ++NumLocalSplits;

  return 0;
}

//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new live ranges in
/// NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*>&NewVRegs) {
  SA->analyze(&VirtReg);

  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    return tryLocalSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  // First try to split around a region spanning multiple blocks.
  unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Then isolate blocks with multiple uses.
  SplitAnalysis::BlockPtrSet Blocks;
  if (SA->getMultiUseBlocks(Blocks)) {
    SmallVector<LiveInterval*, 4> SpillRegs;
    LiveRangeEdit LREdit(VirtReg, NewVRegs, SpillRegs);
    SplitEditor(*SA, *LIS, *VRM, *DomTree, LREdit).splitSingleBlocks(Blocks);
    if (VerifyEnabled)
      MF->verify(this, "After splitting live range around basic blocks");
  }

  // Don't assign any physregs.
  return 0;
}
1233
1234
//===----------------------------------------------------------------------===//
//                                 Spilling
//===----------------------------------------------------------------------===//

/// calcInterferenceWeight - Calculate the combined spill weight of
/// interferences when assigning VirtReg to PhysReg.
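/// Returns HUGE_VALF when any of the interfering registers cannot be spilled.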
float RAGreedy::calcInterferenceWeight(LiveInterval &VirtReg, unsigned PhysReg){
  float Sum = 0;
  for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AI);
    Q.collectInterferingVRegs();
    if (Q.seenUnspillableVReg())
      return HUGE_VALF;
    for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i)
      Sum += Q.interferingVRegs()[i]->weight;
  }
  return Sum;
}

/// trySpillInterferences - Try to spill interfering registers instead of the
/// current one. Only do it if the accumulated spill weight is smaller than the
/// current spill weight.
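/// @return The physreg whose interferences were spilled, or 0 when no
/// profitable candidate was found.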
unsigned RAGreedy::trySpillInterferences(LiveInterval &VirtReg,
                                         AllocationOrder &Order,
                                     SmallVectorImpl<LiveInterval*> &NewVRegs) {
  NamedRegionTimer T("Spill Interference", TimerGroupName, TimePassesIsEnabled);
  unsigned BestPhys = 0;
  float BestWeight = 0;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    float Weight = calcInterferenceWeight(VirtReg, PhysReg);
    if (Weight == HUGE_VALF || Weight >= VirtReg.weight)
      continue;
    if (!BestPhys || Weight < BestWeight)
      BestPhys = PhysReg, BestWeight = Weight;
  }

  // No candidates found.
  if (!BestPhys)
    return 0;

  // Collect all interfering registers.
  SmallVector<LiveInterval*, 8> Spills;
  for (const unsigned *AI = TRI->getOverlaps(BestPhys); *AI; ++AI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AI);
    Spills.append(Q.interferingVRegs().begin(), Q.interferingVRegs().end());
    for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
      LiveInterval *VReg = Q.interferingVRegs()[i];
      unassign(*VReg, *AI);
    }
  }

  // Spill them all.
  DEBUG(dbgs() << "spilling " << Spills.size() << " interferences with weight "
               << BestWeight << '\n');
  for (unsigned i = 0, e = Spills.size(); i != e; ++i)
    spiller().spill(Spills[i], NewVRegs, Spills);
  return BestPhys;
}


//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//

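/// selectOrSplit - Allocation strategies are tried in this order: assign a
/// free register, reassign or evict interfering registers, defer a first-time
/// range to a later round, split the range, spill cheaper interferences, and
/// finally spill VirtReg itself.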
unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, ReservedRegs);
  while (unsigned PhysReg = Order.next()) {
    if (!checkPhysRegInterference(VirtReg, PhysReg))
      return PhysReg;
  }

  if (unsigned PhysReg = tryReassign(VirtReg, Order, NewVRegs))
    return PhysReg;

  if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
    return PhysReg;

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Generation[VirtReg.reg] == 1) {
    NewVRegs.push_back(&VirtReg);
    return 0;
  }

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Try to spill another interfering reg with less spill weight.
  PhysReg = trySpillInterferences(VirtReg, Order, NewVRegs);
  if (PhysReg)
    return PhysReg;

  // Finally spill VirtReg itself.
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  SmallVector<LiveInterval*, 1> pendingSpills;
  spiller().spill(&VirtReg, NewVRegs, pendingSpills);

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}

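/// runOnMachineFunction - Top-level driver. As a sketch of the flow (assuming
/// the usual RegAllocBase behavior): gather the required analyses, run the
/// main allocation loop in allocatePhysRegs, which hands each virtual
/// register to selectOrSplit above, and finally rewrite the function through
/// VirtRegMap.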
bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: "
               << ((Value*)mf.getFunction())->getName() << '\n');

  MF = &mf;
  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  ReservedRegs = TRI->getReservedRegs(*MF);
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  LoopRanges = &getAnalysis<MachineLoopRanges>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));

  allocatePhysRegs();
  addMBBLiveIns(MF);
  LIS->addKillFlags();

  // Run rewriter
  {
    NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
    VRM->rewrite(Indexes);
  }

  // The pass output is in VirtRegMap. Release all the transient data.
  releaseMemory();

  return true;
}