//===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "llvm/CodeGen/Passes.h"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "RegAllocBase.h"
#include "SpillPlacement.h"
#include "Spiller.h"
#include "SplitKit.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include <queue>

using namespace llvm;

STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");

static cl::opt<SplitEditor::ComplementSpillMode>
SplitSpillMode("split-spill-mode", cl::Hidden,
  cl::desc("Spill mode for splitting live ranges"),
  cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
             clEnumValN(SplitEditor::SM_Size,  "size",  "Optimize for size"),
             clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed"),
             clEnumValEnd),
  cl::init(SplitEditor::SM_Partition));

static cl::opt<unsigned>
LastChanceRecoloringMaxDepth("lcr-max-depth", cl::Hidden,
                             cl::desc("Last chance recoloring max depth"),
                             cl::init(5));

static cl::opt<unsigned> LastChanceRecoloringMaxInterference(
    "lcr-max-interf", cl::Hidden,
    cl::desc("Last chance recoloring maximum number of interferences"
             " considered at a time"),
    cl::init(8));

// FIXME: Find a good default for this flag and remove the flag.
static cl::opt<unsigned>
CSRFirstTimeCost("regalloc-csr-first-time-cost",
                 cl::desc("Cost for first time use of callee-saved register."),
                 cl::init(0), cl::Hidden);

static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);

namespace {
class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {
  // Convenient shortcuts.
  typedef std::priority_queue<std::pair<unsigned, unsigned> > PQueue;
  typedef SmallPtrSet<LiveInterval *, 4> SmallLISet;
  typedef SmallSet<unsigned, 16> SmallVirtRegSet;
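  // Note: PQueue is a max-heap ordered on the first pair element (the
  // priority); ties are broken by the second element, which stores ~vreg so
  // that lower virtual register numbers are dequeued first (see enqueue()).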

  // context
  MachineFunction *MF;

  // Shortcuts to some useful interfaces.
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  RegisterClassInfo RCI;

  // analyses
  SlotIndexes *Indexes;
  MachineBlockFrequencyInfo *MBFI;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;

  // state
  std::unique_ptr<Spiller> SpillerInstance;
  PQueue Queue;
  unsigned NextCascade;

  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  // - Local splitting.
  // - Spilling.
  //
  // Ranges produced by one of the stages skip the previous stages when they are
  // dequeued. This improves performance because we can skip interference checks
  // that are unlikely to give any results. It also guarantees that the live
  // range splitting algorithm terminates, something that is otherwise hard to
  // ensure.
  enum LiveRangeStage {
    /// Newly created live range that has never been queued.
    RS_New,

    /// Only attempt assignment and eviction. Then requeue as RS_Split.
    RS_Assign,

    /// Attempt live range splitting if assignment is impossible.
    RS_Split,

    /// Attempt more aggressive live range splitting that is guaranteed to make
    /// progress. This is used for split products that may not be making
    /// progress.
    RS_Split2,

    /// Live range will be spilled. No more splitting will be attempted.
    RS_Spill,

    /// There is nothing more we can do to this live range. Abort compilation
    /// if it can't be assigned.
    RS_Done
  };

  // Enum CutOffStage keeps track of whether register allocation failed
  // because of the cutoffs encountered in last chance recoloring.
  // Note: This is used as a bitmask; any new value should be the next power
  // of 2.
  enum CutOffStage {
    // No cutoffs encountered
    CO_None = 0,

    // lcr-max-depth cutoff encountered
    CO_Depth = 1,

    // lcr-max-interf cutoff encountered
    CO_Interf = 2
  };

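  // Bitmask of the CutOffStage values encountered during allocation.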
  uint8_t CutOffInfo;

#ifndef NDEBUG
  static const char *const StageName[];
#endif

  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade;

    RegInfo() : Stage(RS_New), Cascade(0) {}
  };

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg].Stage = Stage;
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (; Begin != End; ++Begin) {
      unsigned Reg = *Begin;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }

  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints; ///< Total number of broken hints.
    float MaxWeight;      ///< Maximum spill weight evicted.

    EvictionCost(): BrokenHints(0), MaxWeight(0) {}

    bool isMax() const { return BrokenHints == ~0u; }

    void setMax() { BrokenHints = ~0u; }

    void setBrokenHints(unsigned NHints) { BrokenHints = NHints; }

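    // Lexicographic order: any number of broken hints outweighs spill weight.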
    bool operator<(const EvictionCost &O) const {
      return std::tie(BrokenHints, MaxWeight) <
             std::tie(O.BrokenHints, O.MaxWeight);
    }
  };

  // splitting state.
  std::unique_ptr<SplitAnalysis> SA;
  std::unique_ptr<SplitEditor> SE;

  /// Cached per-block interference maps
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    // Register intended for assignment, or 0.
    unsigned PhysReg;

    // SplitKit interval index for this candidate.
    unsigned IntvIdx;

    // Interference for PhysReg.
    InterferenceCache::Cursor Intf;

    // Bundles where this candidate should be live.
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(InterferenceCache &Cache, unsigned Reg) {
      PhysReg = Reg;
      IntvIdx = 0;
      Intf.setPhysReg(Cache, Reg);
      LiveBundles.clear();
      ActiveBlocks.clear();
    }

    // Set B[i] = C for every live bundle where B[i] was NoCand.
    unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
      unsigned Count = 0;
      for (int i = LiveBundles.find_first(); i >= 0;
           i = LiveBundles.find_next(i))
        if (B[i] == NoCand) {
          B[i] = C;
          Count++;
        }
      return Count;
    }
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

  enum : unsigned { NoCand = ~0u };

  /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
  /// NoCand which indicates the stack interval.
  SmallVector<unsigned, 32> BundleCand;

  /// Callee-save register cost, calculated once per machine function.
  BlockFrequency CSRCost;

public:
  RAGreedy();

  /// Return the pass name.
  const char* getPassName() const override {
    return "Greedy Register Allocator";
  }

  /// RAGreedy analysis usage.
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  void releaseMemory() override;
  Spiller &spiller() override { return *SpillerInstance; }
  void enqueue(LiveInterval *LI) override;
  LiveInterval *dequeue() override;
  unsigned selectOrSplit(LiveInterval&, SmallVectorImpl<unsigned>&) override;

  /// Perform register allocation.
  bool runOnMachineFunction(MachineFunction &mf) override;

  static char ID;

private:
  unsigned selectOrSplitImpl(LiveInterval &, SmallVectorImpl<unsigned> &,
                             SmallVirtRegSet &, unsigned = 0);

  bool LRE_CanEraseVirtReg(unsigned) override;
  void LRE_WillShrinkVirtReg(unsigned) override;
  void LRE_DidCloneVirtReg(unsigned, unsigned) override;
  void enqueue(PQueue &CurQueue, LiveInterval *LI);
  LiveInterval *dequeue(PQueue &CurQueue);

  BlockFrequency calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, BlockFrequency&);
  void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  void growRegion(GlobalSplitCandidate &Cand);
  BlockFrequency calcGlobalSplitCost(GlobalSplitCandidate&);
  bool calcCompactRegion(GlobalSplitCandidate&);
  void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  unsigned canReassign(LiveInterval &VirtReg, unsigned PhysReg);
  bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
  bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
  void evictInterference(LiveInterval&, unsigned,
                         SmallVectorImpl<unsigned>&);
  bool mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
                                  SmallLISet &RecoloringCandidates,
                                  const SmallVirtRegSet &FixedRegisters);

  unsigned tryAssign(LiveInterval&, AllocationOrder&,
                     SmallVectorImpl<unsigned>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<unsigned>&, unsigned = ~0u);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<unsigned>&);
  /// Calculate cost of region splitting.
  unsigned calculateRegionSplitCost(LiveInterval &VirtReg,
                                    AllocationOrder &Order,
                                    BlockFrequency &BestCost,
                                    unsigned &NumCands, bool IgnoreCSR);
  /// Perform region splitting.
  unsigned doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
                         bool HasCompact,
                         SmallVectorImpl<unsigned> &NewVRegs);
  /// Check other options before using a callee-saved register for the first
  /// time.
  unsigned tryAssignCSRFirstTime(LiveInterval &VirtReg, AllocationOrder &Order,
                                 unsigned PhysReg, unsigned &CostPerUseLimit,
                                 SmallVectorImpl<unsigned> &NewVRegs);
  void initializeCSRCost();
  unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<unsigned>&);
  unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&,
                               SmallVectorImpl<unsigned>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<unsigned>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<unsigned>&);
  unsigned tryLastChanceRecoloring(LiveInterval &, AllocationOrder &,
                                   SmallVectorImpl<unsigned> &,
                                   SmallVirtRegSet &, unsigned);
  bool tryRecoloringCandidates(PQueue &, SmallVectorImpl<unsigned> &,
                               SmallVirtRegSet &, unsigned);
};
} // end anonymous namespace

char RAGreedy::ID = 0;

#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
    "RS_New",
    "RS_Assign",
    "RS_Split",
    "RS_Split2",
    "RS_Spill",
    "RS_Done"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
const float Hysteresis = (2007 / 2048.0f); // 0.97998046875


FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
  initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
  initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
  initializeLiveStacksPass(*PassRegistry::getPassRegistry());
  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
  initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
  initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
  initializeLiveRegMatrixPass(*PassRegistry::getPassRegistry());
  initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
  initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineBlockFrequencyInfo>();
  AU.addPreserved<MachineBlockFrequencyInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<LiveRegMatrix>();
  AU.addPreserved<LiveRegMatrix>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  MachineFunctionPass::getAnalysisUsage(AU);
}


//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  if (VRM->hasPhys(VirtReg)) {
    Matrix->unassign(LIS->getInterval(VirtReg));
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  if (!VRM->hasPhys(VirtReg))
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  Matrix->unassign(LI);
  enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // Cloning a register we haven't even heard about yet? Just ignore it.
  if (!ExtraRegInfo.inBounds(Old))
    return;

  // LRE may clone a virtual register because dead code elimination causes it
  // to be split into connected components. The new components are much smaller
  // than the original, so they should get a new chance at being assigned.
  ExtraRegInfo[Old].Stage = RS_Assign;
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset(0);
  ExtraRegInfo.clear();
  GlobalCand.clear();
}

void RAGreedy::enqueue(LiveInterval *LI) { enqueue(Queue, LI); }

void RAGreedy::enqueue(PQueue &CurQueue, LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (priority, ~reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  unsigned Prio;

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_Assign;

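  // Priority word layout: bit 31 is set for all ranges past RS_Split, bit 30
  // marks ranges with a known physreg preference, bit 29 orders global ranges
  // above locals, and the low bits hold the size or linear position.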
  if (ExtraRegInfo[Reg].Stage == RS_Split) {
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated.
    Prio = Size;
  } else {
    // Giant live ranges fall back to the global assignment heuristic, which
    // prevents excessive spilling in pathological cases.
    bool ReverseLocal = TRI->reverseLocalAssignment();
    bool ForceGlobal = !ReverseLocal && TRI->mayOverrideLocalAssignment() &&
      (Size / SlotIndex::InstrDist) > (2 * MRI->getRegClass(Reg)->getNumRegs());

    if (ExtraRegInfo[Reg].Stage == RS_Assign && !ForceGlobal && !LI->empty() &&
        LIS->intervalIsInOneMBB(*LI)) {
      // Allocate original local ranges in linear instruction order. Since they
      // are singly defined, this produces optimal coloring in the absence of
      // global interference and other constraints.
      if (!ReverseLocal)
        Prio = LI->beginIndex().getInstrDistance(Indexes->getLastIndex());
      else {
        // Allocating bottom up may allow many short live ranges to be assigned
        // first to one of the cheap registers. This could be much faster for
        // very large blocks on targets with many physical registers.
        Prio = Indexes->getZeroIndex().getInstrDistance(LI->beginIndex());
      }
    } else {
      // Allocate global and split ranges in long->short order. Long ranges
      // that don't fit should be spilled (or split) ASAP so they don't create
      // interference. Mark a bit to prioritize global above local ranges.
      Prio = (1u << 29) + Size;
    }
    // Mark a higher bit to prioritize global and local above RS_Split.
    Prio |= (1u << 31);

    // Boost ranges that have a physical register hint.
    if (VRM->hasKnownPreference(Reg))
      Prio |= (1u << 30);
  }
  // The virtual register number is a tie breaker for same-sized ranges.
  // Give lower vreg numbers higher priority to assign them first.
  CurQueue.push(std::make_pair(Prio, ~Reg));
}

LiveInterval *RAGreedy::dequeue() { return dequeue(Queue); }

LiveInterval *RAGreedy::dequeue(PQueue &CurQueue) {
  if (CurQueue.empty())
    return 0;
  LiveInterval *LI = &LIS->getInterval(~CurQueue.top().second);
  CurQueue.pop();
  return LI;
}


//===----------------------------------------------------------------------===//
//                            Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<unsigned> &NewVRegs) {
  Order.rewind();
  unsigned PhysReg;
  while ((PhysReg = Order.next()))
    if (!Matrix->checkInterference(VirtReg, PhysReg))
      break;
  if (!PhysReg || Order.isHint())
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
    if (Order.isHint(Hint)) {
      DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
      EvictionCost MaxCost;
      MaxCost.setBrokenHints(1);
      if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
        evictInterference(VirtReg, Hint, NewVRegs);
        return Hint;
      }
    }

  // Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
               << '\n');
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
  return CheapReg ? CheapReg : PhysReg;
}


//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//

unsigned RAGreedy::canReassign(LiveInterval &VirtReg, unsigned PrevReg) {
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  unsigned PhysReg;
  while ((PhysReg = Order.next())) {
    if (PhysReg == PrevReg)
      continue;

    MCRegUnitIterator Units(PhysReg, TRI);
    for (; Units.isValid(); ++Units) {
      // Instantiate a "subquery", not to be confused with the Queries array.
      LiveIntervalUnion::Query subQ(&VirtReg, &Matrix->getLiveUnions()[*Units]);
      if (subQ.checkInterference())
        break;
    }
    // If no units have interference, break out with the current PhysReg.
    if (!Units.isValid())
      break;
  }
  if (PhysReg)
    DEBUG(dbgs() << "can reassign: " << VirtReg << " from "
          << PrintReg(PrevReg, TRI) << " to " << PrintReg(PhysReg, TRI)
          << '\n');
  return PhysReg;
}

/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
///
/// Cascade numbers are used to prevent infinite loops in case this eviction
/// relation is cyclic.
///
/// @param A          The live range to be assigned.
/// @param IsHint     True when A is about to be assigned to its preferred
///                   register.
/// @param B          The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
                           LiveInterval &B, bool BreaksHint) {
  bool CanSplit = getStage(B) < RS_Spill;

  // Be fairly aggressive about following hints as long as the evictee can be
  // split.
  if (CanSplit && IsHint && !BreaksHint)
    return true;

  if (A.weight > B.weight) {
    DEBUG(dbgs() << "should evict: " << B << " w= " << B.weight << '\n');
    return true;
  }
  return false;
}

/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    bool IsHint, EvictionCost &MaxCost) {
  // It is only possible to evict virtual register interference.
  if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg)
    return false;

  bool IsLocal = LIS->intervalIsInOneMBB(VirtReg);

  // Find VirtReg's cascade number. This will be unassigned if VirtReg was never
  // involved in an eviction before. If a cascade number was assigned, deny
  // evicting anything with the same or a newer cascade number. This prevents
  // infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = NextCascade;
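  // Note: NextCascade is only borrowed here for the comparisons below; a real
  // cascade number is assigned (and NextCascade consumed) in
  // evictInterference().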

  EvictionCost Cost;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      assert(TargetRegisterInfo::isVirtualRegister(Intf->reg) &&
             "Only expecting virtual register interference from query");
      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Done)
        return false;
      // Once a live range becomes small enough, it is urgent that we find a
      // register for it. This is indicated by an infinite spill weight. These
      // urgent live ranges get to evict almost anything.
      //
      // Also allow urgent evictions of unspillable ranges from a strictly
      // larger allocation order.
      bool Urgent = !VirtReg.isSpillable() &&
        (Intf->isSpillable() ||
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg)) <
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(Intf->reg)));
      // Only evict older cascades or live ranges without a cascade.
      unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
      if (Cascade <= IntfCascade) {
        if (!Urgent)
          return false;
        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      }
      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
      if (Urgent)
        continue;
      // Apply the eviction policy for non-urgent evictions.
      if (!shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
      // If !MaxCost.isMax(), then we're just looking for a cheap register.
      // Evicting another local live range in this case could lead to suboptimal
      // coloring.
      if (!MaxCost.isMax() && IsLocal && LIS->intervalIsInOneMBB(*Intf) &&
          !canReassign(*Intf, PhysReg)) {
        return false;
      }
    }
  }
  MaxCost = Cost;
  return true;
}

/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to PhysReg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;

  DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
               << " interference: Cascade " << Cascade << '\n');

  // Collect all interfering virtregs first.
  SmallVector<LiveInterval*, 8> Intfs;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
    ArrayRef<LiveInterval*> IVR = Q.interferingVRegs();
    Intfs.append(IVR.begin(), IVR.end());
  }

  // Evict them second. This will invalidate the queries.
  for (unsigned i = 0, e = Intfs.size(); i != e; ++i) {
    LiveInterval *Intf = Intfs[i];
    // The same VirtReg may be present in multiple RegUnits. Skip duplicates.
    if (!VRM->hasPhys(Intf->reg))
      continue;
    Matrix->unassign(*Intf);
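    // The bool comparison below encodes the urgent-eviction exception: an
    // unspillable VirtReg (false < true) may evict a spillable Intf even
    // without a smaller cascade number.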
    assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
            VirtReg.isSpillable() < Intf->isSpillable()) &&
           "Cannot decrease cascade number, illegal eviction");
    ExtraRegInfo[Intf->reg].Cascade = Cascade;
    ++NumEvicted;
    NewVRegs.push_back(Intf->reg);
  }
}

/// tryEvict - Try to evict all interferences for a physreg.
/// @param  VirtReg Currently unassigned virtual register.
/// @param  Order   Physregs to try.
/// @return         Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<unsigned> &NewVRegs,
                            unsigned CostPerUseLimit) {
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
  EvictionCost BestCost;
  BestCost.setMax();
  unsigned BestPhys = 0;
  unsigned OrderLimit = Order.getOrder().size();

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
  if (CostPerUseLimit < ~0u) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight;

    // Check if any registers in RC are below CostPerUseLimit.
    const TargetRegisterClass *RC = MRI->getRegClass(VirtReg.reg);
    unsigned MinCost = RegClassInfo.getMinCost(RC);
    if (MinCost >= CostPerUseLimit) {
      DEBUG(dbgs() << RC->getName() << " minimum cost = " << MinCost
                   << ", no cheaper registers to be found.\n");
      return 0;
    }

    // It is normal for register classes to have a long tail of registers with
    // the same cost. We don't need to look at them if they're too expensive.
    if (TRI->getCostPerUse(Order.getOrder().back()) >= CostPerUseLimit) {
      OrderLimit = RegClassInfo.getLastCostChange(RC);
      DEBUG(dbgs() << "Only trying the first " << OrderLimit << " regs.\n");
    }
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next(OrderLimit)) {
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1)
      if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
        if (!MRI->isPhysRegUsed(CSR)) {
          DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
                       << PrintReg(CSR, TRI) << '\n');
          continue;
        }

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
      continue;

    // Best so far.
    BestPhys = PhysReg;

    // Stop if the hint can be used.
    if (Order.isHint())
      break;
  }

  if (!BestPhys)
    return 0;

  evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}


//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in PhysReg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   BlockFrequency &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  BlockFrequency StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.ChangesValue = BI.FirstDef.isValid();

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

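    // The checks below classify the interference: overlap reaching the block
    // entry (or the last split point) forces MustSpill, overlap before the
    // first use or after the last use makes spilling preferable, and overlap
    // strictly between the uses just costs one spill code instruction (++Ins).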
    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
        BC.Entry = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.first() < BI.FirstInstr)
        BC.Entry = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.first() < BI.LastInstr)
        ++Ins;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
        BC.Exit = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.last() > BI.LastInstr)
        BC.Exit = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.last() > BI.FirstInstr)
        ++Ins;
    }

    // Accumulate the total frequency of inserted spill code.
    while (Ins--)
      StaticCost += SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias, it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}


/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

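  // Blocks with interference are batched in BCS and flushed to SpillPlacer as
  // constraints; interference-free blocks are batched in TBS and flushed as
  // plain links, in groups of GroupSize.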
  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(makeArrayRef(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
      SpillPlacer->addConstraints(Array);
      B = 0;
    }
  }

  ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
  SpillPlacer->addConstraints(Array);
  SpillPlacer->addLinks(makeArrayRef(TBS, T));
}

void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

Jakob Stoklund Olesened47ed42011-04-09 02:59:09 +0000973 for (;;) {
974 ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
Jakob Stoklund Olesened47ed42011-04-09 02:59:09 +0000975 // Find new through blocks in the periphery of PrefRegBundles.
976 for (int i = 0, e = NewBundles.size(); i != e; ++i) {
977 unsigned Bundle = NewBundles[i];
978 // Look at all blocks connected to Bundle in the full graph.
979 ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
980 for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
981 I != E; ++I) {
982 unsigned Block = *I;
Jakob Stoklund Olesenc49df2c2011-04-12 21:30:53 +0000983 if (!Todo.test(Block))
Jakob Stoklund Olesened47ed42011-04-09 02:59:09 +0000984 continue;
Jakob Stoklund Olesenc49df2c2011-04-12 21:30:53 +0000985 Todo.reset(Block);
Jakob Stoklund Olesened47ed42011-04-09 02:59:09 +0000986 // This is a new through block. Add it to SpillPlacer later.
Jakob Stoklund Olesenc49df2c2011-04-12 21:30:53 +0000987 ActiveBlocks.push_back(Block);
Jakob Stoklund Olesened47ed42011-04-09 02:59:09 +0000988#ifndef NDEBUG
989 ++Visited;
990#endif
991 }
992 }
993 // Any new blocks to add?
Jakob Stoklund Olesen91f3a302011-07-05 18:46:42 +0000994 if (ActiveBlocks.size() == AddedTo)
995 break;
Jakob Stoklund Olesena953bf12011-07-23 03:22:33 +0000996
997 // Compute through constraints from the interference, or assume that all
998 // through blocks prefer spilling when forming compact regions.
999 ArrayRef<unsigned> NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
1000 if (Cand.PhysReg)
1001 addThroughConstraints(Cand.Intf, NewBlocks);
1002 else
Jakob Stoklund Olesen86954522011-08-03 23:09:38 +00001003 // Provide a strong negative bias on through blocks to prevent unwanted
1004 // liveness on loop backedges.
1005 SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
Jakob Stoklund Olesen91f3a302011-07-05 18:46:42 +00001006 AddedTo = ActiveBlocks.size();
1007
Jakob Stoklund Olesened47ed42011-04-09 02:59:09 +00001008 // Perhaps iterating can enable more bundles?
1009 SpillPlacer->iterate();
1010 }
Jakob Stoklund Olesened47ed42011-04-09 02:59:09 +00001011 DEBUG(dbgs() << ", v=" << Visited);
1012}
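// A hypothetical trace of the fixed-point loop above: pass 1 sees bundles
// {3, 7} turn positive and pulls their 4 untouched peripheral blocks into
// ActiveBlocks; addThroughConstraints() biases them, and iterate() then
// flips bundle 9 positive. Pass 2 adds bundle 9's 2 new blocks; pass 3 finds
// ActiveBlocks.size() == AddedTo and exits with Visited = 6.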
Jakob Stoklund Olesen4b598e12011-03-05 01:10:31 +00001013
Jakob Stoklund Olesenecad62f2011-07-23 03:41:57 +00001014/// calcCompactRegion - Compute the set of edge bundles that should be live
1015/// when splitting the current live range into compact regions. Compact
1016/// regions can be computed without looking at interference. They are the
1017/// regions formed by removing all the live-through blocks from the live range.
1018///
1019/// Returns false if the current live range is already compact, or if the
1020/// compact regions would form single block regions anyway.
1021bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
1022 // Without any through blocks, the live range is already compact.
1023 if (!SA->getNumThroughBlocks())
1024 return false;
1025
1026 // Compact regions don't correspond to any physreg.
1027 Cand.reset(IntfCache, 0);
1028
1029 DEBUG(dbgs() << "Compact region bundles");
1030
1031 // Use the spill placer to determine the live bundles. growRegion pretends
1032 // that all the through blocks have interference when PhysReg is unset.
1033 SpillPlacer->prepare(Cand.LiveBundles);
1034
1035 // The static split cost will be zero since Cand.Intf reports no interference.
Jakob Stoklund Olesenefeb3a12013-07-16 18:26:18 +00001036 BlockFrequency Cost;
Jakob Stoklund Olesenecad62f2011-07-23 03:41:57 +00001037 if (!addSplitConstraints(Cand.Intf, Cost)) {
1038 DEBUG(dbgs() << ", none.\n");
1039 return false;
1040 }
1041
1042 growRegion(Cand);
1043 SpillPlacer->finish();
1044
1045 if (!Cand.LiveBundles.any()) {
1046 DEBUG(dbgs() << ", none.\n");
1047 return false;
1048 }
1049
1050 DEBUG({
1051 for (int i = Cand.LiveBundles.find_first(); i>=0;
1052 i = Cand.LiveBundles.find_next(i))
1053 dbgs() << " EB#" << i;
1054 dbgs() << ".\n";
1055 });
1056 return true;
1057}
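// A made-up illustration of compact regions: a register live through blocks
// A-B-C-D-E with uses only in A and E has B, C, and D as live-through
// blocks. Removing them leaves the compact regions {A} and {E}; the spill
// placer may still keep a through block live where its frequency makes the
// connecting copies cheaper than the stack traffic.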
1058
Jakob Stoklund Olesen032891b2011-04-22 22:47:40 +00001059/// calcSpillCost - Compute how expensive it would be to split the live range in
1060/// SA around all use blocks instead of forming bundle regions.
Jakob Stoklund Olesenefeb3a12013-07-16 18:26:18 +00001061BlockFrequency RAGreedy::calcSpillCost() {
1062 BlockFrequency Cost = 0;
Jakob Stoklund Olesen032891b2011-04-22 22:47:40 +00001063 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
1064 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
1065 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
1066 unsigned Number = BI.MBB->getNumber();
1067 // We normally only need one spill instruction - a load or a store.
1068 Cost += SpillPlacer->getBlockFrequency(Number);
1069
1070 // Unless the value is redefined in the block.
Jakob Stoklund Olesen3c145052011-08-02 23:04:08 +00001071 if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
1072 Cost += SpillPlacer->getBlockFrequency(Number);
Jakob Stoklund Olesen032891b2011-04-22 22:47:40 +00001073 }
1074 return Cost;
1075}
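// A worked example with made-up frequencies: three use blocks with
// frequencies 10, 40, and 40, where the last block is live-in, live-out, and
// redefines the value (BI.FirstDef is set). The estimate is
// 10 + 40 + (40 + 40) = 130: one reload or store per use block, plus an
// extra store for the block that redefines a live-through value.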
1076
Jakob Stoklund Olesen267f6c12011-01-18 21:13:27 +00001077/// calcGlobalSplitCost - Return the global split cost of following the split
1078/// pattern in LiveBundles. This cost should be added to the local cost of the
Jakob Stoklund Olesen4b598e12011-03-05 01:10:31 +00001079/// interference pattern in SplitConstraints.
Jakob Stoklund Olesen267f6c12011-01-18 21:13:27 +00001080///
Jakob Stoklund Olesenefeb3a12013-07-16 18:26:18 +00001081BlockFrequency RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) {
1082 BlockFrequency GlobalCost = 0;
Jakob Stoklund Olesenc49df2c2011-04-12 21:30:53 +00001083 const BitVector &LiveBundles = Cand.LiveBundles;
Jakob Stoklund Olesenbf91c4e2011-04-06 03:57:00 +00001084 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
1085 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
1086 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
Jakob Stoklund Olesen4b598e12011-03-05 01:10:31 +00001087 SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
Jakob Stoklund Olesen1a9b66c2011-03-05 03:28:51 +00001088 bool RegIn = LiveBundles[Bundles->getBundle(BC.Number, 0)];
1089 bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
1090 unsigned Ins = 0;
1091
Jakob Stoklund Olesenbf91c4e2011-04-06 03:57:00 +00001092 if (BI.LiveIn)
1093 Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
1094 if (BI.LiveOut)
1095 Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
Jakob Stoklund Olesenefeb3a12013-07-16 18:26:18 +00001096 while (Ins--)
1097 GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
Jakob Stoklund Olesen267f6c12011-01-18 21:13:27 +00001098 }
Jakob Stoklund Olesenbf91c4e2011-04-06 03:57:00 +00001099
Jakob Stoklund Olesenc49df2c2011-04-12 21:30:53 +00001100 for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
1101 unsigned Number = Cand.ActiveBlocks[i];
Jakob Stoklund Olesenbf91c4e2011-04-06 03:57:00 +00001102 bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)];
1103 bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
Jakob Stoklund Olesen8ce2f432011-04-06 21:32:41 +00001104 if (!RegIn && !RegOut)
1105 continue;
1106 if (RegIn && RegOut) {
1107 // We need double spill code if this block has interference.
Jakob Stoklund Olesend7e99372011-07-14 00:17:10 +00001108 Cand.Intf.moveToBlock(Number);
Jakob Stoklund Olesenefeb3a12013-07-16 18:26:18 +00001109 if (Cand.Intf.hasInterference()) {
1110 GlobalCost += SpillPlacer->getBlockFrequency(Number);
1111 GlobalCost += SpillPlacer->getBlockFrequency(Number);
1112 }
Jakob Stoklund Olesen8ce2f432011-04-06 21:32:41 +00001113 continue;
1114 }
1115 // live-in / stack-out or stack-in live-out.
1116 GlobalCost += SpillPlacer->getBlockFrequency(Number);
Jakob Stoklund Olesenbf91c4e2011-04-06 03:57:00 +00001117 }
Jakob Stoklund Olesen267f6c12011-01-18 21:13:27 +00001118 return GlobalCost;
1119}
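// Reading the accounting above on a hypothetical block: a use block whose
// entry side matches the placement but whose exit side does not gets
// Ins == 1, i.e. one transition weighted by the block's frequency. A
// live-through ActiveBlock in a register on both sides with interference
// pays its frequency twice (a spill plus a reload around the interference);
// a register/stack transition block pays it once.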
1120
Jakob Stoklund Olesendab4b9a2011-07-26 23:41:46 +00001121/// splitAroundRegion - Split the current live range around the regions
1122/// determined by BundleCand and GlobalCand.
Jakob Stoklund Olesen9fb04012011-01-19 22:11:48 +00001123///
Jakob Stoklund Olesendab4b9a2011-07-26 23:41:46 +00001124/// Before calling this function, GlobalCand and BundleCand must be initialized
1125/// so each bundle is assigned to a valid candidate, or NoCand for the
1126/// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor
1127/// objects must be initialized for the current live range, and intervals
1128/// created for the used candidates.
Jakob Stoklund Olesen9fb04012011-01-19 22:11:48 +00001129///
Jakob Stoklund Olesendab4b9a2011-07-26 23:41:46 +00001130/// @param LREdit The LiveRangeEdit object handling the current split.
1131/// @param UsedCands List of used GlobalCand entries. Every BundleCand value
1132/// must appear in this list.
1133void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
1134 ArrayRef<unsigned> UsedCands) {
1135 // These are the intervals created for new global ranges. We may create more
1136 // intervals for local ranges.
1137 const unsigned NumGlobalIntvs = LREdit.size();
1138 DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs << " globals.\n");
1139 assert(NumGlobalIntvs && "No global intervals configured");
Jakob Stoklund Olesen9fb04012011-01-19 22:11:48 +00001140
Jakob Stoklund Olesen8627ea92011-08-05 22:20:45 +00001141 // Isolate even single instructions when dealing with a proper sub-class.
Jakob Stoklund Olesen22f37a12011-08-06 18:20:24 +00001142 // That guarantees register class inflation for the stack interval because it
Jakob Stoklund Olesen8627ea92011-08-05 22:20:45 +00001143 // is all copies.
1144 unsigned Reg = SA->getParent().reg;
1145 bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
1146
Jakob Stoklund Olesenadc6a4c2011-06-30 01:30:39 +00001147 // First handle all the blocks with uses.
Jakob Stoklund Olesenbf91c4e2011-04-06 03:57:00 +00001148 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
1149 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
1150 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
Jakob Stoklund Olesendab4b9a2011-07-26 23:41:46 +00001151 unsigned Number = BI.MBB->getNumber();
1152 unsigned IntvIn = 0, IntvOut = 0;
1153 SlotIndex IntfIn, IntfOut;
1154 if (BI.LiveIn) {
1155 unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
1156 if (CandIn != NoCand) {
1157 GlobalSplitCandidate &Cand = GlobalCand[CandIn];
1158 IntvIn = Cand.IntvIdx;
1159 Cand.Intf.moveToBlock(Number);
1160 IntfIn = Cand.Intf.first();
1161 }
1162 }
1163 if (BI.LiveOut) {
1164 unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
1165 if (CandOut != NoCand) {
1166 GlobalSplitCandidate &Cand = GlobalCand[CandOut];
1167 IntvOut = Cand.IntvIdx;
1168 Cand.Intf.moveToBlock(Number);
1169 IntfOut = Cand.Intf.last();
1170 }
1171 }
Jakob Stoklund Olesen9fb04012011-01-19 22:11:48 +00001172
Jakob Stoklund Olesenc70b6972011-04-12 19:32:53 +00001173 // Create separate intervals for isolated blocks with multiple uses.
Jakob Stoklund Olesendab4b9a2011-07-26 23:41:46 +00001174 if (!IntvIn && !IntvOut) {
Jakob Stoklund Olesenc70b6972011-04-12 19:32:53 +00001175 DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
Jakob Stoklund Olesen8627ea92011-08-05 22:20:45 +00001176 if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
Jakob Stoklund Olesenadc6a4c2011-06-30 01:30:39 +00001177 SE->splitSingleBlock(BI);
Jakob Stoklund Olesenc70b6972011-04-12 19:32:53 +00001178 continue;
1179 }
1180
Jakob Stoklund Olesendab4b9a2011-07-26 23:41:46 +00001181 if (IntvIn && IntvOut)
1182 SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
1183 else if (IntvIn)
1184 SE->splitRegInBlock(BI, IntvIn, IntfIn);
Jakob Stoklund Olesen795da1c2011-07-15 21:47:57 +00001185 else
Jakob Stoklund Olesendab4b9a2011-07-26 23:41:46 +00001186 SE->splitRegOutBlock(BI, IntvOut, IntfOut);
Jakob Stoklund Olesen9fb04012011-01-19 22:11:48 +00001187 }
1188
Jakob Stoklund Olesendab4b9a2011-07-26 23:41:46 +00001189 // Handle live-through blocks. The relevant live-through blocks are stored in
1190 // the ActiveBlocks list with each candidate. We need to filter out
1191 // duplicates.
1192 BitVector Todo = SA->getThroughBlocks();
1193 for (unsigned c = 0; c != UsedCands.size(); ++c) {
1194 ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks;
1195 for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
1196 unsigned Number = Blocks[i];
1197 if (!Todo.test(Number))
1198 continue;
1199 Todo.reset(Number);
1200
1201 unsigned IntvIn = 0, IntvOut = 0;
1202 SlotIndex IntfIn, IntfOut;
1203
1204 unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
1205 if (CandIn != NoCand) {
1206 GlobalSplitCandidate &Cand = GlobalCand[CandIn];
1207 IntvIn = Cand.IntvIdx;
1208 Cand.Intf.moveToBlock(Number);
1209 IntfIn = Cand.Intf.first();
1210 }
1211
1212 unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
1213 if (CandOut != NoCand) {
1214 GlobalSplitCandidate &Cand = GlobalCand[CandOut];
1215 IntvOut = Cand.IntvIdx;
1216 Cand.Intf.moveToBlock(Number);
1217 IntfOut = Cand.Intf.last();
1218 }
1219 if (!IntvIn && !IntvOut)
1220 continue;
1221 SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
1222 }
Jakob Stoklund Olesenbf91c4e2011-04-06 03:57:00 +00001223 }
1224
Jakob Stoklund Olesen99827e82011-02-17 22:53:48 +00001225 ++NumGlobalSplits;
Jakob Stoklund Olesen9fb04012011-01-19 22:11:48 +00001226
Jakob Stoklund Olesen6a663b82011-04-21 18:38:15 +00001227 SmallVector<unsigned, 8> IntvMap;
1228 SE->finish(&IntvMap);
Mark Laceyf9ea8852013-08-14 23:50:04 +00001229 DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);
Jakob Stoklund Olesenf8da0282011-05-06 18:00:02 +00001230
Jakob Stoklund Olesen30a85632011-07-02 01:37:09 +00001231 ExtraRegInfo.resize(MRI->getNumVirtRegs());
Jakob Stoklund Olesen5cc91b22011-05-28 02:32:57 +00001232 unsigned OrigBlocks = SA->getNumLiveBlocks();
Jakob Stoklund Olesen6a663b82011-04-21 18:38:15 +00001233
1234 // Sort out the new intervals created by splitting. We get four kinds:
1235 // - Remainder intervals should not be split again.
1236 // - Candidate intervals can be assigned to Cand.PhysReg.
1237 // - Block-local splits are candidates for local splitting.
1238 // - DCE leftovers should go back on the queue.
1239 for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
Mark Laceyf9ea8852013-08-14 23:50:04 +00001240 LiveInterval &Reg = LIS->getInterval(LREdit.get(i));
Jakob Stoklund Olesen6a663b82011-04-21 18:38:15 +00001241
1242 // Ignore old intervals from DCE.
Jakob Stoklund Olesen30a85632011-07-02 01:37:09 +00001243 if (getStage(Reg) != RS_New)
Jakob Stoklund Olesen6a663b82011-04-21 18:38:15 +00001244 continue;
1245
1246 // Remainder interval. Don't try splitting again; spill if it doesn't
1247 // allocate.
1248 if (IntvMap[i] == 0) {
Jakob Stoklund Olesen3ef8cf12011-07-25 15:25:41 +00001249 setStage(Reg, RS_Spill);
Jakob Stoklund Olesen6a663b82011-04-21 18:38:15 +00001250 continue;
1251 }
1252
Jakob Stoklund Olesendab4b9a2011-07-26 23:41:46 +00001253 // Global intervals. Allow repeated splitting as long as the number of live
1254 // blocks is strictly decreasing.
1255 if (IntvMap[i] < NumGlobalIntvs) {
Jakob Stoklund Olesen30a85632011-07-02 01:37:09 +00001256 if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
Jakob Stoklund Oleseneef23272011-04-26 22:33:12 +00001257 DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
1258 << " blocks as original.\n");
1259 // Don't allow repeated splitting as a safe guard against looping.
Jakob Stoklund Olesen45011172011-07-25 15:25:43 +00001260 setStage(Reg, RS_Split2);
Jakob Stoklund Oleseneef23272011-04-26 22:33:12 +00001261 }
1262 continue;
1263 }
1264
1265 // Other intervals are treated as new. This includes local intervals created
1266 // for blocks with multiple uses, and anything created by DCE.
Jakob Stoklund Olesen6a663b82011-04-21 18:38:15 +00001267 }
1268
Jakob Stoklund Olesen28d79cd2011-03-27 22:49:21 +00001269 if (VerifyEnabled)
Jakob Stoklund Olesen9fb04012011-01-19 22:11:48 +00001270 MF->verify(this, "After splitting live range around region");
1271}
1272
Jakob Stoklund Olesen267f6c12011-01-18 21:13:27 +00001273unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
Mark Laceyf9ea8852013-08-14 23:50:04 +00001274 SmallVectorImpl<unsigned> &NewVRegs) {
Jakob Stoklund Olesend7e99372011-07-14 00:17:10 +00001275 unsigned NumCands = 0;
Jakob Stoklund Olesenefeb3a12013-07-16 18:26:18 +00001276 BlockFrequency BestCost;
Jakob Stoklund Olesendab4b9a2011-07-26 23:41:46 +00001277
1278 // Check if we can split this live range around a compact region.
Jakob Stoklund Olesen45df7e02011-09-12 16:54:42 +00001279 bool HasCompact = calcCompactRegion(GlobalCand.front());
Jakob Stoklund Olesendab4b9a2011-07-26 23:41:46 +00001280 if (HasCompact) {
1281 // Yes, keep GlobalCand[0] as the compact region candidate.
1282 NumCands = 1;
Jakob Stoklund Olesenefeb3a12013-07-16 18:26:18 +00001283 BestCost = BlockFrequency::getMaxFrequency();
Jakob Stoklund Olesendab4b9a2011-07-26 23:41:46 +00001284 } else {
1285 // No benefit from the compact region; our fallback will be per-block
1286 // splitting. Make sure we find a solution that is cheaper than spilling.
Jakob Stoklund Olesenefeb3a12013-07-16 18:26:18 +00001287 BestCost = calcSpillCost();
Michael Gottesmanb78dec82013-12-14 00:25:45 +00001288 DEBUG(dbgs() << "Cost of isolating all blocks = ";
1289 MBFI->printBlockFreq(dbgs(), BestCost) << '\n');
Jakob Stoklund Olesendab4b9a2011-07-26 23:41:46 +00001290 }
Jakob Stoklund Olesen4b598e12011-03-05 01:10:31 +00001291
Manman Ren9db66b32014-03-24 23:23:42 +00001292 unsigned BestCand =
Manman Ren78cf02a2014-03-25 00:16:25 +00001293 calculateRegionSplitCost(VirtReg, Order, BestCost, NumCands,
1294 false/*IgnoreCSR*/);
Manman Ren9db66b32014-03-24 23:23:42 +00001295
1296 // No solutions found, fall back to single block splitting.
1297 if (!HasCompact && BestCand == NoCand)
1298 return 0;
1299
1300 return doRegionSplit(VirtReg, BestCand, HasCompact, NewVRegs);
1301}
1302
1303unsigned RAGreedy::calculateRegionSplitCost(LiveInterval &VirtReg,
1304 AllocationOrder &Order,
1305 BlockFrequency &BestCost,
Manman Ren78cf02a2014-03-25 00:16:25 +00001306 unsigned &NumCands,
1307 bool IgnoreCSR) {
Manman Ren9db66b32014-03-24 23:23:42 +00001308 unsigned BestCand = NoCand;
Jakob Stoklund Olesen267f6c12011-01-18 21:13:27 +00001309 Order.rewind();
Jakob Stoklund Olesend7e99372011-07-14 00:17:10 +00001310 while (unsigned PhysReg = Order.next()) {
Manman Ren78cf02a2014-03-25 00:16:25 +00001311 if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
1312 if (IgnoreCSR && !MRI->isPhysRegUsed(CSR))
1313 continue;
1314
Jakob Stoklund Olesena153ca52011-07-14 05:35:11 +00001315 // Discard bad candidates before we run out of interference cache cursors.
1316 // This will only affect register classes with a lot of registers (>32).
1317 if (NumCands == IntfCache.getMaxCursors()) {
1318 unsigned WorstCount = ~0u;
1319 unsigned Worst = 0;
1320 for (unsigned i = 0; i != NumCands; ++i) {
Jakob Stoklund Olesendab4b9a2011-07-26 23:41:46 +00001321 if (i == BestCand || !GlobalCand[i].PhysReg)
Jakob Stoklund Olesena153ca52011-07-14 05:35:11 +00001322 continue;
1323 unsigned Count = GlobalCand[i].LiveBundles.count();
1324 if (Count < WorstCount)
1325 Worst = i, WorstCount = Count;
1326 }
1327 --NumCands;
1328 GlobalCand[Worst] = GlobalCand[NumCands];
Jakob Stoklund Olesen559d4dc2011-11-01 00:02:31 +00001329 if (BestCand == NumCands)
1330 BestCand = Worst;
Jakob Stoklund Olesena153ca52011-07-14 05:35:11 +00001331 }
1332
Jakob Stoklund Olesend7e99372011-07-14 00:17:10 +00001333 if (GlobalCand.size() <= NumCands)
1334 GlobalCand.resize(NumCands+1);
1335 GlobalSplitCandidate &Cand = GlobalCand[NumCands];
1336 Cand.reset(IntfCache, PhysReg);
Jakob Stoklund Olesen4b598e12011-03-05 01:10:31 +00001337
Jakob Stoklund Olesend7e99372011-07-14 00:17:10 +00001338 SpillPlacer->prepare(Cand.LiveBundles);
Jakob Stoklund Olesenefeb3a12013-07-16 18:26:18 +00001339 BlockFrequency Cost;
Jakob Stoklund Olesend7e99372011-07-14 00:17:10 +00001340 if (!addSplitConstraints(Cand.Intf, Cost)) {
Jakob Stoklund Olesened47ed42011-04-09 02:59:09 +00001341 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
Jakob Stoklund Olesen81439a82011-04-06 21:32:38 +00001342 continue;
1343 }
Michael Gottesmanb78dec82013-12-14 00:25:45 +00001344 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = ";
1345 MBFI->printBlockFreq(dbgs(), Cost));
Jakob Stoklund Olesen032891b2011-04-22 22:47:40 +00001346 if (Cost >= BestCost) {
1347 DEBUG({
1348 if (BestCand == NoCand)
1349 dbgs() << " worse than no bundles\n";
1350 else
1351 dbgs() << " worse than "
1352 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
1353 });
Jakob Stoklund Olesen267f6c12011-01-18 21:13:27 +00001354 continue;
Jakob Stoklund Olesen1a9b66c2011-03-05 03:28:51 +00001355 }
Jakob Stoklund Olesend7e99372011-07-14 00:17:10 +00001356 growRegion(Cand);
Jakob Stoklund Olesen9fb04012011-01-19 22:11:48 +00001357
Jakob Stoklund Olesen36b5d8a2011-04-06 19:13:57 +00001358 SpillPlacer->finish();
1359
Jakob Stoklund Olesen9fb04012011-01-19 22:11:48 +00001360 // No live bundles, defer to splitSingleBlocks().
Jakob Stoklund Olesend7e99372011-07-14 00:17:10 +00001361 if (!Cand.LiveBundles.any()) {
Jakob Stoklund Olesen1a9b66c2011-03-05 03:28:51 +00001362 DEBUG(dbgs() << " no bundles.\n");
Jakob Stoklund Olesen9fb04012011-01-19 22:11:48 +00001363 continue;
Jakob Stoklund Olesen1a9b66c2011-03-05 03:28:51 +00001364 }
Jakob Stoklund Olesen9fb04012011-01-19 22:11:48 +00001365
Jakob Stoklund Olesend7e99372011-07-14 00:17:10 +00001366 Cost += calcGlobalSplitCost(Cand);
Jakob Stoklund Olesen1a9b66c2011-03-05 03:28:51 +00001367 DEBUG({
Michael Gottesmanb78dec82013-12-14 00:25:45 +00001368 dbgs() << ", total = "; MBFI->printBlockFreq(dbgs(), Cost)
1369 << " with bundles";
Jakob Stoklund Olesend7e99372011-07-14 00:17:10 +00001370 for (int i = Cand.LiveBundles.find_first(); i>=0;
1371 i = Cand.LiveBundles.find_next(i))
Jakob Stoklund Olesen1a9b66c2011-03-05 03:28:51 +00001372 dbgs() << " EB#" << i;
1373 dbgs() << ".\n";
1374 });
Jakob Stoklund Olesen032891b2011-04-22 22:47:40 +00001375 if (Cost < BestCost) {
Jakob Stoklund Olesend7e99372011-07-14 00:17:10 +00001376 BestCand = NumCands;
Jakob Stoklund Olesenefeb3a12013-07-16 18:26:18 +00001377 BestCost = Cost;
Jakob Stoklund Olesen267f6c12011-01-18 21:13:27 +00001378 }
Jakob Stoklund Olesend7e99372011-07-14 00:17:10 +00001379 ++NumCands;
Jakob Stoklund Olesen267f6c12011-01-18 21:13:27 +00001380 }
Manman Ren9db66b32014-03-24 23:23:42 +00001381 return BestCand;
1382}
Jakob Stoklund Olesen9fb04012011-01-19 22:11:48 +00001383
Manman Ren9db66b32014-03-24 23:23:42 +00001384unsigned RAGreedy::doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
1385 bool HasCompact,
1386 SmallVectorImpl<unsigned> &NewVRegs) {
1387 SmallVector<unsigned, 8> UsedCands;
Jakob Stoklund Olesendab4b9a2011-07-26 23:41:46 +00001388 // Prepare split editor.
Jakob Stoklund Olesene5bbe372012-05-19 05:25:46 +00001389 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
Jakob Stoklund Oleseneecb2fb2011-09-12 16:49:21 +00001390 SE->reset(LREdit, SplitSpillMode);
Jakob Stoklund Olesendab4b9a2011-07-26 23:41:46 +00001391
1392 // Assign all edge bundles to the preferred candidate, or NoCand.
1393 BundleCand.assign(Bundles->getNumBundles(), NoCand);
1394
1395 // Assign bundles for the best candidate region.
1396 if (BestCand != NoCand) {
1397 GlobalSplitCandidate &Cand = GlobalCand[BestCand];
1398 if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
1399 UsedCands.push_back(BestCand);
1400 Cand.IntvIdx = SE->openIntv();
1401 DEBUG(dbgs() << "Split for " << PrintReg(Cand.PhysReg, TRI) << " in "
1402 << B << " bundles, intv " << Cand.IntvIdx << ".\n");
Chandler Carruth77eb5a02011-08-03 23:07:27 +00001403 (void)B;
Jakob Stoklund Olesendab4b9a2011-07-26 23:41:46 +00001404 }
1405 }
1406
1407 // Assign bundles for the compact region.
1408 if (HasCompact) {
1409 GlobalSplitCandidate &Cand = GlobalCand.front();
1410 assert(!Cand.PhysReg && "Compact region has no physreg");
1411 if (unsigned B = Cand.getBundles(BundleCand, 0)) {
1412 UsedCands.push_back(0);
1413 Cand.IntvIdx = SE->openIntv();
1414 DEBUG(dbgs() << "Split for compact region in " << B << " bundles, intv "
1415 << Cand.IntvIdx << ".\n");
Chandler Carruth77eb5a02011-08-03 23:07:27 +00001416 (void)B;
Jakob Stoklund Olesendab4b9a2011-07-26 23:41:46 +00001417 }
1418 }
1419
1420 splitAroundRegion(LREdit, UsedCands);
Jakob Stoklund Olesen267f6c12011-01-18 21:13:27 +00001421 return 0;
1422}
1423
Jakob Stoklund Olesen9fb04012011-01-19 22:11:48 +00001424
1425//===----------------------------------------------------------------------===//
Jakob Stoklund Olesencef5d8f2011-08-05 23:04:18 +00001426// Per-Block Splitting
1427//===----------------------------------------------------------------------===//
1428
1429/// tryBlockSplit - Split a global live range around every block with uses. This
1430 /// creates a lot of local live ranges that will be split by tryLocalSplit if
1431/// they don't allocate.
1432unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
Mark Laceyf9ea8852013-08-14 23:50:04 +00001433 SmallVectorImpl<unsigned> &NewVRegs) {
Jakob Stoklund Olesencef5d8f2011-08-05 23:04:18 +00001434 assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
1435 unsigned Reg = VirtReg.reg;
1436 bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
Jakob Stoklund Olesene5bbe372012-05-19 05:25:46 +00001437 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
Jakob Stoklund Oleseneecb2fb2011-09-12 16:49:21 +00001438 SE->reset(LREdit, SplitSpillMode);
Jakob Stoklund Olesencef5d8f2011-08-05 23:04:18 +00001439 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
1440 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
1441 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
1442 if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
1443 SE->splitSingleBlock(BI);
1444 }
1445 // No blocks were split.
1446 if (LREdit.empty())
1447 return 0;
1448
1449 // We did split for some blocks.
Jakob Stoklund Olesen02cf10b2011-08-05 23:50:31 +00001450 SmallVector<unsigned, 8> IntvMap;
1451 SE->finish(&IntvMap);
Jakob Stoklund Olesen0de95ef2011-08-05 23:10:40 +00001452
1453 // Tell LiveDebugVariables about the new ranges.
Mark Laceyf9ea8852013-08-14 23:50:04 +00001454 DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);
Jakob Stoklund Olesen0de95ef2011-08-05 23:10:40 +00001455
Jakob Stoklund Olesen02cf10b2011-08-05 23:50:31 +00001456 ExtraRegInfo.resize(MRI->getNumVirtRegs());
1457
1458 // Sort out the new intervals created by splitting. The remainder interval
1459 // goes straight to spilling; the new local ranges get to stay RS_New.
1460 for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
Mark Laceyf9ea8852013-08-14 23:50:04 +00001461 LiveInterval &LI = LIS->getInterval(LREdit.get(i));
Jakob Stoklund Olesen02cf10b2011-08-05 23:50:31 +00001462 if (getStage(LI) == RS_New && IntvMap[i] == 0)
1463 setStage(LI, RS_Spill);
1464 }
1465
Jakob Stoklund Olesencef5d8f2011-08-05 23:04:18 +00001466 if (VerifyEnabled)
1467 MF->verify(this, "After splitting live range around basic blocks");
1468 return 0;
1469}
1470
Jakob Stoklund Olesen0ce90492012-05-23 22:37:27 +00001471
1472//===----------------------------------------------------------------------===//
1473// Per-Instruction Splitting
1474//===----------------------------------------------------------------------===//
1475
Quentin Colombet1fb3362a2014-01-02 22:47:22 +00001476/// Get the number of allocatable registers that match the constraints of \p Reg
1477/// on \p MI and that are also in \p SuperRC.
1478static unsigned getNumAllocatableRegsForConstraints(
1479 const MachineInstr *MI, unsigned Reg, const TargetRegisterClass *SuperRC,
1480 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
1481 const RegisterClassInfo &RCI) {
1482 assert(SuperRC && "Invalid register class");
1483
1484 const TargetRegisterClass *ConstrainedRC =
1485 MI->getRegClassConstraintEffectForVReg(Reg, SuperRC, TII, TRI,
1486 /* ExploreBundle */ true);
1487 if (!ConstrainedRC)
1488 return 0;
1489 return RCI.getNumAllocatableRegs(ConstrainedRC);
1490}
1491
Jakob Stoklund Olesen0ce90492012-05-23 22:37:27 +00001492/// tryInstructionSplit - Split a live range around individual instructions.
1493/// This is normally not worthwhile since the spiller is doing essentially the
1494/// same thing. However, when the live range is in a constrained register
1495/// class, it may help to insert copies such that parts of the live range can
1496/// be moved to a larger register class.
1497///
1498/// This is similar to spilling to a larger register class.
1499unsigned
1500RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
Mark Laceyf9ea8852013-08-14 23:50:04 +00001501 SmallVectorImpl<unsigned> &NewVRegs) {
Quentin Colombet1fb3362a2014-01-02 22:47:22 +00001502 const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg);
Jakob Stoklund Olesen0ce90492012-05-23 22:37:27 +00001503 // There is no point to this if there are no larger sub-classes.
Quentin Colombet1fb3362a2014-01-02 22:47:22 +00001504 if (!RegClassInfo.isProperSubClass(CurRC))
Jakob Stoklund Olesen0ce90492012-05-23 22:37:27 +00001505 return 0;
1506
1507 // Always enable split spill mode, since we're effectively spilling to a
1508 // register.
1509 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
1510 SE->reset(LREdit, SplitEditor::SM_Size);
1511
1512 ArrayRef<SlotIndex> Uses = SA->getUseSlots();
1513 if (Uses.size() <= 1)
1514 return 0;
1515
1516 DEBUG(dbgs() << "Split around " << Uses.size() << " individual instrs.\n");
1517
Quentin Colombet1fb3362a2014-01-02 22:47:22 +00001518 const TargetRegisterClass *SuperRC = TRI->getLargestLegalSuperClass(CurRC);
1519 unsigned SuperRCNumAllocatableRegs = RCI.getNumAllocatableRegs(SuperRC);
1520 // Split around every non-copy instruction if this split will relax
1521 // the constraints on the virtual register.
1522 // Otherwise, splitting just inserts uncoalescable copies that do not help
1523 // the allocation.
Jakob Stoklund Olesen0ce90492012-05-23 22:37:27 +00001524 for (unsigned i = 0; i != Uses.size(); ++i) {
1525 if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]))
Quentin Colombet1fb3362a2014-01-02 22:47:22 +00001526 if (MI->isFullCopy() ||
1527 SuperRCNumAllocatableRegs ==
1528 getNumAllocatableRegsForConstraints(MI, VirtReg.reg, SuperRC, TII,
1529 TRI, RCI)) {
Jakob Stoklund Olesen0ce90492012-05-23 22:37:27 +00001530 DEBUG(dbgs() << " skip:\t" << Uses[i] << '\t' << *MI);
1531 continue;
1532 }
1533 SE->openIntv();
1534 SlotIndex SegStart = SE->enterIntvBefore(Uses[i]);
1535 SlotIndex SegStop = SE->leaveIntvAfter(Uses[i]);
1536 SE->useIntv(SegStart, SegStop);
1537 }
1538
1539 if (LREdit.empty()) {
1540 DEBUG(dbgs() << "All uses were copies.\n");
1541 return 0;
1542 }
1543
1544 SmallVector<unsigned, 8> IntvMap;
1545 SE->finish(&IntvMap);
Mark Laceyf9ea8852013-08-14 23:50:04 +00001546 DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);
Jakob Stoklund Olesen0ce90492012-05-23 22:37:27 +00001547 ExtraRegInfo.resize(MRI->getNumVirtRegs());
1548
1549 // Assign all new registers to RS_Spill. This was the last chance.
1550 setStage(LREdit.begin(), LREdit.end(), RS_Spill);
1551 return 0;
1552}
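// A sketch of the scenario this targets (hypothetical, x86-flavored): a
// virtual register constrained to GR32_ABCD because a single instruction
// reads an 8-bit subregister. Splitting around that one instruction lets the
// rest of the live range inflate to GR32, where far more registers are
// allocatable; getNumAllocatableRegsForConstraints() above detects when such
// a split would actually relax the constraint.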
1553
1554
Jakob Stoklund Olesencef5d8f2011-08-05 23:04:18 +00001555//===----------------------------------------------------------------------===//
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001556// Local Splitting
1557//===----------------------------------------------------------------------===//
1558
1559
1560/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
1561/// in order to use PhysReg between two entries in SA->UseSlots.
1562///
1563/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
1564///
1565void RAGreedy::calcGapWeights(unsigned PhysReg,
1566 SmallVectorImpl<float> &GapWeight) {
Jakob Stoklund Olesenbf91c4e2011-04-06 03:57:00 +00001567 assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
1568 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
Jakob Stoklund Olesen994fed62012-01-12 17:53:44 +00001569 ArrayRef<SlotIndex> Uses = SA->getUseSlots();
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001570 const unsigned NumGaps = Uses.size()-1;
1571
1572 // Start and end points for the interference check.
Jakob Stoklund Olesen43859a62011-08-02 22:54:14 +00001573 SlotIndex StartIdx =
1574 BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
1575 SlotIndex StopIdx =
1576 BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001577
1578 GapWeight.assign(NumGaps, 0.0f);
1579
1580 // Add interference from each overlapping register.
Jakob Stoklund Olesen96eebf02012-06-20 22:52:26 +00001581 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
1582 if (!Matrix->query(const_cast<LiveInterval&>(SA->getParent()), *Units)
1583 .checkInterference())
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001584 continue;
1585
Jakob Stoklund Olesen43859a62011-08-02 22:54:14 +00001586 // We know that VirtReg is a continuous interval from FirstInstr to
1587 // LastInstr, so we don't need InterferenceQuery.
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001588 //
1589 // Interference that overlaps an instruction is counted in both gaps
1590 // surrounding the instruction. The exception is interference before
1591 // StartIdx and after StopIdx.
1592 //
Jakob Stoklund Olesen96eebf02012-06-20 22:52:26 +00001593 LiveIntervalUnion::SegmentIter IntI =
1594 Matrix->getLiveUnions()[*Units].find(StartIdx);
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001595 for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
1596 // Skip the gaps before IntI.
1597 while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
1598 if (++Gap == NumGaps)
1599 break;
1600 if (Gap == NumGaps)
1601 break;
1602
1603 // Update the gaps covered by IntI.
1604 const float weight = IntI.value()->weight;
1605 for (; Gap != NumGaps; ++Gap) {
1606 GapWeight[Gap] = std::max(GapWeight[Gap], weight);
1607 if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
1608 break;
1609 }
1610 if (Gap == NumGaps)
1611 break;
1612 }
1613 }
Jakob Stoklund Olesen96eebf02012-06-20 22:52:26 +00001614
1615 // Add fixed interference.
1616 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
Matthias Braun34e1be92013-10-10 21:29:02 +00001617 const LiveRange &LR = LIS->getRegUnit(*Units);
1618 LiveRange::const_iterator I = LR.find(StartIdx);
1619 LiveRange::const_iterator E = LR.end();
Jakob Stoklund Olesen96eebf02012-06-20 22:52:26 +00001620
1621 // Same loop as above. Mark any overlapped gaps as HUGE_VALF.
1622 for (unsigned Gap = 0; I != E && I->start < StopIdx; ++I) {
1623 while (Uses[Gap+1].getBoundaryIndex() < I->start)
1624 if (++Gap == NumGaps)
1625 break;
1626 if (Gap == NumGaps)
1627 break;
1628
1629 for (; Gap != NumGaps; ++Gap) {
Aaron Ballman04999042013-11-13 00:15:44 +00001630 GapWeight[Gap] = llvm::huge_valf;
Jakob Stoklund Olesen96eebf02012-06-20 22:52:26 +00001631 if (Uses[Gap+1].getBaseIndex() >= I->end)
1632 break;
1633 }
1634 if (Gap == NumGaps)
1635 break;
1636 }
1637 }
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001638}
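// A hypothetical example: with Uses = {u0, u1, u2, u3} there are 3 gaps. An
// interfering segment of weight 2.0 overlapping instruction u1 raises both
// GapWeight[0] and GapWeight[1] to 2.0, since interference over an
// instruction counts in the gaps on both sides. Fixed regunit interference
// over the last gap sets GapWeight[2] to huge_valf, which no split can
// evict.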
1639
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001640/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
1641/// basic block.
1642///
1643unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
Mark Laceyf9ea8852013-08-14 23:50:04 +00001644 SmallVectorImpl<unsigned> &NewVRegs) {
Jakob Stoklund Olesenbf91c4e2011-04-06 03:57:00 +00001645 assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
1646 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001647
1648 // Note that it is possible to have an interval that is live-in or live-out
1649 // while only covering a single block - a phi-def can use undef values from
1650 // predecessors, and the block could be a single-block loop.
1651 // We don't bother doing anything clever about such a case; we simply assume
Jakob Stoklund Olesen43859a62011-08-02 22:54:14 +00001652 // that the interval is continuous from FirstInstr to LastInstr. We should
1653 // make sure that we don't do anything illegal to such an interval, though.
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001654
Jakob Stoklund Olesen994fed62012-01-12 17:53:44 +00001655 ArrayRef<SlotIndex> Uses = SA->getUseSlots();
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001656 if (Uses.size() <= 2)
1657 return 0;
1658 const unsigned NumGaps = Uses.size()-1;
1659
1660 DEBUG({
1661 dbgs() << "tryLocalSplit: ";
1662 for (unsigned i = 0, e = Uses.size(); i != e; ++i)
Jakob Stoklund Olesen994fed62012-01-12 17:53:44 +00001663 dbgs() << ' ' << Uses[i];
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001664 dbgs() << '\n';
1665 });
1666
Jakob Stoklund Olesen17402e32012-02-11 00:42:18 +00001667 // If VirtReg is live across any register mask operands, compute a list of
1668 // gaps with register masks.
1669 SmallVector<unsigned, 8> RegMaskGaps;
Jakob Stoklund Olesen96eebf02012-06-20 22:52:26 +00001670 if (Matrix->checkRegMaskInterference(VirtReg)) {
Jakob Stoklund Olesen17402e32012-02-11 00:42:18 +00001671 // Get regmask slots for the whole block.
1672 ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
Jakob Stoklund Olesenb0c0d342012-02-14 23:51:27 +00001673 DEBUG(dbgs() << RMS.size() << " regmasks in block:");
Jakob Stoklund Olesen17402e32012-02-11 00:42:18 +00001674 // Constrain to VirtReg's live range.
Jakob Stoklund Olesenb0c0d342012-02-14 23:51:27 +00001675 unsigned ri = std::lower_bound(RMS.begin(), RMS.end(),
1676 Uses.front().getRegSlot()) - RMS.begin();
Jakob Stoklund Olesen17402e32012-02-11 00:42:18 +00001677 unsigned re = RMS.size();
1678 for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
Jakob Stoklund Olesenb0c0d342012-02-14 23:51:27 +00001679 // Look for Uses[i] <= RMS <= Uses[i+1].
1680 assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i]));
1681 if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri]))
Jakob Stoklund Olesen17402e32012-02-11 00:42:18 +00001682 continue;
Jakob Stoklund Olesenb0c0d342012-02-14 23:51:27 +00001683 // Skip a regmask on the same instruction as the last use. It doesn't
1684 // overlap the live range.
1685 if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
1686 break;
1687 DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-' << Uses[i+1]);
Jakob Stoklund Olesen17402e32012-02-11 00:42:18 +00001688 RegMaskGaps.push_back(i);
Jakob Stoklund Olesenb0c0d342012-02-14 23:51:27 +00001689 // Advance ri to the next gap. A regmask on one of the uses counts in
1690 // both gaps.
1691 while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
1692 ++ri;
Jakob Stoklund Olesen17402e32012-02-11 00:42:18 +00001693 }
Jakob Stoklund Olesenb0c0d342012-02-14 23:51:27 +00001694 DEBUG(dbgs() << '\n');
Jakob Stoklund Olesen17402e32012-02-11 00:42:18 +00001695 }
1696
Jakob Stoklund Olesendf476272011-06-06 23:55:20 +00001697 // Since we allow local split results to be split again, there is a risk of
1698 // creating infinite loops. It is tempting to require that the new live
1699 // ranges have fewer instructions than the original. That would guarantee
1700 // convergence, but it is too strict. A live range with 3 instructions can be
1701 // split 2+3 (including the COPY), and we want to allow that.
1702 //
1703 // Instead we use these rules:
1704 //
Jakob Stoklund Olesen45011172011-07-25 15:25:43 +00001705 // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
Jakob Stoklund Olesendf476272011-06-06 23:55:20 +00001706 // noop split, of course).
Jakob Stoklund Olesen45011172011-07-25 15:25:43 +00001707 // 2. Require progress be made for ranges with getStage() == RS_Split2. All
Jakob Stoklund Olesendf476272011-06-06 23:55:20 +00001708 // the new ranges must have fewer instructions than before the split.
Jakob Stoklund Olesen45011172011-07-25 15:25:43 +00001709 // 3. New ranges with the same number of instructions are marked RS_Split2,
Jakob Stoklund Olesendf476272011-06-06 23:55:20 +00001710 // smaller ranges are marked RS_New.
1711 //
1712 // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
1713 // excessive splitting and infinite loops.
1714 //
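// A worked example of the rules: a 3-instruction range in a stage below
// RS_Split2 may take the 3 -> 2+3 split once. The 3-instruction result has
// not shrunk, so rule 3 marks it RS_Split2, and rule 2 then requires any
// further local split of it to produce strictly fewer instructions or give
// up (and eventually spill).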
Jakob Stoklund Olesen45011172011-07-25 15:25:43 +00001715 bool ProgressRequired = getStage(VirtReg) >= RS_Split2;
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001716
Jakob Stoklund Olesendf476272011-06-06 23:55:20 +00001717 // Best split candidate.
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001718 unsigned BestBefore = NumGaps;
1719 unsigned BestAfter = 0;
1720 float BestDiff = 0;
1721
Jakob Stoklund Olesenefeb3a12013-07-16 18:26:18 +00001722 const float blockFreq =
1723 SpillPlacer->getBlockFrequency(BI.MBB->getNumber()).getFrequency() *
Michael Gottesman5e985ee2013-12-14 02:37:38 +00001724 (1.0f / MBFI->getEntryFreq());
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001725 SmallVector<float, 8> GapWeight;
1726
1727 Order.rewind();
1728 while (unsigned PhysReg = Order.next()) {
1729 // Keep track of the largest spill weight that would need to be evicted in
1730 // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
1731 calcGapWeights(PhysReg, GapWeight);
1732
Jakob Stoklund Olesen17402e32012-02-11 00:42:18 +00001733 // Remove any gaps with regmask clobbers.
Jakob Stoklund Olesen96eebf02012-06-20 22:52:26 +00001734 if (Matrix->checkRegMaskInterference(VirtReg, PhysReg))
Jakob Stoklund Olesen17402e32012-02-11 00:42:18 +00001735 for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
Aaron Ballman04999042013-11-13 00:15:44 +00001736 GapWeight[RegMaskGaps[i]] = llvm::huge_valf;
Jakob Stoklund Olesen17402e32012-02-11 00:42:18 +00001737
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001738 // Try to find the best sequence of gaps to close.
1739 // The new spill weight must be larger than any gap interference.
1740
1741 // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
Jakob Stoklund Olesendf476272011-06-06 23:55:20 +00001742 unsigned SplitBefore = 0, SplitAfter = 1;
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001743
1744 // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
1745 // It is the spill weight that needs to be evicted.
1746 float MaxGap = GapWeight[0];
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001747
1748 for (;;) {
1749 // Live before/after split?
1750 const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
1751 const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;
1752
1753 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
1754 << Uses[SplitBefore] << '-' << Uses[SplitAfter]
1755 << " i=" << MaxGap);
1756
1757 // Stop before the interval gets so big we wouldn't be making progress.
1758 if (!LiveBefore && !LiveAfter) {
1759 DEBUG(dbgs() << " all\n");
1760 break;
1761 }
1762 // Should the interval be extended or shrunk?
1763 bool Shrink = true;
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001764
Jakob Stoklund Olesendf476272011-06-06 23:55:20 +00001765 // How many gaps would the new range have?
1766 unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;
1767
1768 // Legally, without causing looping?
1769 bool Legal = !ProgressRequired || NewGaps < NumGaps;
1770
Aaron Ballman04999042013-11-13 00:15:44 +00001771 if (Legal && MaxGap < llvm::huge_valf) {
Jakob Stoklund Olesendf476272011-06-06 23:55:20 +00001772 // Estimate the new spill weight. Each instruction reads or writes the
1773 // register. Conservatively assume there are no read-modify-write
1774 // instructions.
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001775 //
Jakob Stoklund Olesendf476272011-06-06 23:55:20 +00001776 // Try to guess the size of the new interval.
1777 const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
1778 Uses[SplitBefore].distance(Uses[SplitAfter]) +
1779 (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
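// Reading the estimate with hypothetical values: NewGaps = 2 means the new
// range has 3 uses, each assumed to read or write the register once, so the
// weight scales with blockFreq * 3, normalized by the estimated interval
// size: the distance between the boundary uses plus one InstrDist per
// live-in/live-out end. The Hysteresis factor just below biases near-tie
// comparisons so marginally different candidates do not alternate.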
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001780 // Would this split be possible to allocate?
1781 // Never allocate all gaps; we wouldn't be making progress.
Jakob Stoklund Olesen357dd362011-04-30 05:07:46 +00001782 DEBUG(dbgs() << " w=" << EstWeight);
1783 if (EstWeight * Hysteresis >= MaxGap) {
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001784 Shrink = false;
Jakob Stoklund Olesen357dd362011-04-30 05:07:46 +00001785 float Diff = EstWeight - MaxGap;
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001786 if (Diff > BestDiff) {
1787 DEBUG(dbgs() << " (best)");
Jakob Stoklund Olesen357dd362011-04-30 05:07:46 +00001788 BestDiff = Hysteresis * Diff;
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001789 BestBefore = SplitBefore;
1790 BestAfter = SplitAfter;
1791 }
1792 }
1793 }
1794
1795 // Try to shrink.
1796 if (Shrink) {
Jakob Stoklund Olesendf476272011-06-06 23:55:20 +00001797 if (++SplitBefore < SplitAfter) {
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001798 DEBUG(dbgs() << " shrink\n");
1799 // Recompute the max when necessary.
1800 if (GapWeight[SplitBefore - 1] >= MaxGap) {
1801 MaxGap = GapWeight[SplitBefore];
1802 for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
1803 MaxGap = std::max(MaxGap, GapWeight[i]);
1804 }
1805 continue;
1806 }
1807 MaxGap = 0;
1808 }
1809
1810 // Try to extend the interval.
1811 if (SplitAfter >= NumGaps) {
1812 DEBUG(dbgs() << " end\n");
1813 break;
1814 }
1815
1816 DEBUG(dbgs() << " extend\n");
Jakob Stoklund Olesendf476272011-06-06 23:55:20 +00001817 MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001818 }
1819 }
1820
1821 // Didn't find any candidates?
1822 if (BestBefore == NumGaps)
1823 return 0;
1824
1825 DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
1826 << '-' << Uses[BestAfter] << ", " << BestDiff
1827 << ", " << (BestAfter - BestBefore + 1) << " instrs\n");
1828
Jakob Stoklund Olesene5bbe372012-05-19 05:25:46 +00001829 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
Jakob Stoklund Olesenc9601982011-03-03 01:29:13 +00001830 SE->reset(LREdit);
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001831
Jakob Stoklund Olesenc9601982011-03-03 01:29:13 +00001832 SE->openIntv();
1833 SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
1834 SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
1835 SE->useIntv(SegStart, SegStop);
Jakob Stoklund Olesendf476272011-06-06 23:55:20 +00001836 SmallVector<unsigned, 8> IntvMap;
1837 SE->finish(&IntvMap);
Mark Laceyf9ea8852013-08-14 23:50:04 +00001838 DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);
Jakob Stoklund Olesendf476272011-06-06 23:55:20 +00001839
1840 // If the new range has the same number of instructions as before, mark it as
Jakob Stoklund Olesen45011172011-07-25 15:25:43 +00001841 // RS_Split2 so the next split will be forced to make progress. Otherwise,
Jakob Stoklund Olesendf476272011-06-06 23:55:20 +00001842 // leave the new intervals as RS_New so they can compete.
1843 bool LiveBefore = BestBefore != 0 || BI.LiveIn;
1844 bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
1845 unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
1846 if (NewGaps >= NumGaps) {
1847 DEBUG(dbgs() << "Tagging non-progress ranges: ");
1848 assert(!ProgressRequired && "Didn't make progress when it was required.");
Jakob Stoklund Olesendf476272011-06-06 23:55:20 +00001849 for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
1850 if (IntvMap[i] == 1) {
Mark Laceyf9ea8852013-08-14 23:50:04 +00001851 setStage(LIS->getInterval(LREdit.get(i)), RS_Split2);
1852 DEBUG(dbgs() << PrintReg(LREdit.get(i)));
Jakob Stoklund Olesendf476272011-06-06 23:55:20 +00001853 }
1854 DEBUG(dbgs() << '\n');
1855 }
Jakob Stoklund Olesen99827e82011-02-17 22:53:48 +00001856 ++NumLocalSplits;
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001857
1858 return 0;
1859}
1860
1861//===----------------------------------------------------------------------===//
Jakob Stoklund Olesen9fb04012011-01-19 22:11:48 +00001862// Live Range Splitting
1863//===----------------------------------------------------------------------===//
1864
1865/// trySplit - Try to split VirtReg or one of its interferences, making it
1866/// assignable.
1867/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
1868unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
Mark Laceyf9ea8852013-08-14 23:50:04 +00001869 SmallVectorImpl<unsigned>&NewVRegs) {
Jakob Stoklund Olesend4bb1d42011-08-05 23:50:33 +00001870 // Ranges must be Split2 or less.
1871 if (getStage(VirtReg) >= RS_Spill)
1872 return 0;
1873
Jakob Stoklund Olesen93c87362011-02-17 19:13:53 +00001874 // Local intervals are handled separately.
Jakob Stoklund Olesen609bc442011-02-19 00:38:40 +00001875 if (LIS->intervalIsInOneMBB(VirtReg)) {
1876 NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
Jakob Stoklund Olesen5f9f0812011-03-01 21:10:07 +00001877 SA->analyze(&VirtReg);
Jakob Stoklund Olesen0ce90492012-05-23 22:37:27 +00001878 unsigned PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
1879 if (PhysReg || !NewVRegs.empty())
1880 return PhysReg;
1881 return tryInstructionSplit(VirtReg, Order, NewVRegs);
Jakob Stoklund Olesen609bc442011-02-19 00:38:40 +00001882 }
1883
1884 NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);
Jakob Stoklund Olesen9fb04012011-01-19 22:11:48 +00001885
Jakob Stoklund Olesen5f9f0812011-03-01 21:10:07 +00001886 SA->analyze(&VirtReg);
1887
Jakob Stoklund Oleseneaa6ed12011-05-03 20:42:13 +00001888 // FIXME: SplitAnalysis may repair broken live ranges coming from the
1889 // coalescer. That may cause the range to become allocatable which means that
1890 // tryRegionSplit won't be making progress. This check should be replaced with
1891 // an assertion when the coalescer is fixed.
1892 if (SA->didRepairRange()) {
1893 // VirtReg has changed, so all cached queries are invalid.
Jakob Stoklund Olesen96eebf02012-06-20 22:52:26 +00001894 Matrix->invalidateVirtRegs();
Jakob Stoklund Oleseneaa6ed12011-05-03 20:42:13 +00001895 if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
1896 return PhysReg;
1897 }
1898
Jakob Stoklund Olesen45011172011-07-25 15:25:43 +00001899 // First try to split around a region spanning multiple blocks. RS_Split2
1900 // ranges already made dubious progress with region splitting, so they go
1901 // straight to single block splitting.
1902 if (getStage(VirtReg) < RS_Split2) {
1903 unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
1904 if (PhysReg || !NewVRegs.empty())
1905 return PhysReg;
1906 }
Jakob Stoklund Olesen9fb04012011-01-19 22:11:48 +00001907
Jakob Stoklund Olesencef5d8f2011-08-05 23:04:18 +00001908 // Then isolate blocks.
1909 return tryBlockSplit(VirtReg, Order, NewVRegs);
Jakob Stoklund Olesen9fb04012011-01-19 22:11:48 +00001910}
1911
Quentin Colombet87769712014-02-05 22:13:59 +00001912//===----------------------------------------------------------------------===//
1913// Last Chance Recoloring
1914//===----------------------------------------------------------------------===//
1915
1916/// mayRecolorAllInterferences - Check if the virtual registers that
1917/// interfere with \p VirtReg on \p PhysReg (or one of its aliases) may be
1918/// recolored to free \p PhysReg.
1919/// When true is returned, \p RecoloringCandidates has been augmented with all
1920/// the live intervals that need to be recolored in order to free \p PhysReg
1921/// for \p VirtReg.
1922/// \p FixedRegisters contains all the virtual registers that cannot be
1923/// recolored.
1924bool
1925RAGreedy::mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
1926 SmallLISet &RecoloringCandidates,
1927 const SmallVirtRegSet &FixedRegisters) {
1928 const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg);
1929
1930 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
1931 LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
1932 // If there are LastChanceRecoloringMaxInterference or more interferences,
1933 // chances are one would not be recolorable.
1934 if (Q.collectInterferingVRegs(LastChanceRecoloringMaxInterference) >=
1935 LastChanceRecoloringMaxInterference) {
1936 DEBUG(dbgs() << "Early abort: too many interferences.\n");
Quentin Colombet96bd2a12014-04-04 02:05:21 +00001937 CutOffInfo |= CO_Interf;
Quentin Colombet87769712014-02-05 22:13:59 +00001938 return false;
1939 }
1940 for (unsigned i = Q.interferingVRegs().size(); i; --i) {
1941 LiveInterval *Intf = Q.interferingVRegs()[i - 1];
1942 // If Intf is done and sits on the same register class as VirtReg,
1943 // it would not be recolorable as it is in the same state as VirtReg.
1944 if ((getStage(*Intf) == RS_Done &&
1945 MRI->getRegClass(Intf->reg) == CurRC) ||
1946 FixedRegisters.count(Intf->reg)) {
1947 DEBUG(dbgs() << "Early abort: the interference is not recolorable.\n");
1948 return false;
1949 }
1950 RecoloringCandidates.insert(Intf);
1951 }
1952 }
1953 return true;
1954}
1955
1956/// tryLastChanceRecoloring - Try to assign a color to \p VirtReg by recoloring
1957/// its interferences.
1958/// Last chance recoloring chooses a color for \p VirtReg and recolors every
1959/// virtual register that was using it. The recoloring process may recursively
1960/// use the last chance recoloring. Therefore, when a virtual register has been
1961/// assigned a color by this mechanism, it is marked as Fixed, i.e., it cannot
1962/// be last-chance-recolored again during this recoloring "session".
1963/// E.g.,
1964/// Let
1965/// vA can use {R1, R2 }
1966/// vB can use { R2, R3}
1967/// vC can use {R1 }
1968/// Where vA, vB, and vC cannot be split anymore (they are reloads for
1969/// instance) and they all interfere.
1970///
1971/// vA is assigned R1
1972/// vB is assigned R2
1973/// vC tries to evict vA but vA is already done.
1974/// Regular register allocation fails.
1975///
1976/// Last chance recoloring kicks in:
1977/// vC does as if vA was evicted => vC uses R1.
1978/// vC is marked as fixed.
1979/// vA needs to find a color.
1980/// None are available.
1981/// vA cannot evict vC: vC is a fixed virtual register now.
1982/// vA does as if vB was evicted => vA uses R2.
1983/// vB needs to find a color.
1984/// R3 is available.
1985/// Recoloring => vC = R1, vA = R2, vB = R3
1986///
Alp Toker70b36992014-02-25 04:21:15 +00001987/// \p Order defines the preferred allocation order for \p VirtReg.
Quentin Colombet87769712014-02-05 22:13:59 +00001988/// \p NewRegs will contain any new virtual register that have been created
1989/// (split, spill) during the process and that must be assigned.
1990/// \p FixedRegisters contains all the virtual registers that cannot be
1991/// recolored.
1992/// \p Depth gives the current depth of the last chance recoloring.
1993/// \return a physical register that can be used for VirtReg or ~0u if none
1994/// exists.
1995unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
1996 AllocationOrder &Order,
1997 SmallVectorImpl<unsigned> &NewVRegs,
1998 SmallVirtRegSet &FixedRegisters,
1999 unsigned Depth) {
2000 DEBUG(dbgs() << "Try last chance recoloring for " << VirtReg << '\n');
2001 // Ranges must be Done.
Quentin Colombet0e3b5e02014-02-13 05:17:37 +00002002 assert((getStage(VirtReg) >= RS_Done || !VirtReg.isSpillable()) &&
Quentin Colombet87769712014-02-05 22:13:59 +00002003 "Last chance recoloring should really be last chance");
2004 // Set the max depth to LastChanceRecoloringMaxDepth.
2005 // We may want to reconsider that if we end up with a too large search space
2006 // for targets with hundreds of registers.
2007 // Indeed, in that case we may want to cut the search space earlier.
2008 if (Depth >= LastChanceRecoloringMaxDepth) {
2009 DEBUG(dbgs() << "Abort because max depth has been reached.\n");
Quentin Colombet96bd2a12014-04-04 02:05:21 +00002010 CutOffInfo |= CO_Depth;
Quentin Colombet87769712014-02-05 22:13:59 +00002011 return ~0u;
2012 }
2013
2014 // Set of live intervals that will need to be recolored.
2015 SmallLISet RecoloringCandidates;
2016 // Record the original mapping virtual register to physical register in case
2017 // the recoloring fails.
2018 DenseMap<unsigned, unsigned> VirtRegToPhysReg;
2019  // Mark VirtReg as fixed, i.e., it will not be recolored past this point in
2020 // this recoloring "session".
2021 FixedRegisters.insert(VirtReg.reg);
2022
2023 Order.rewind();
2024 while (unsigned PhysReg = Order.next()) {
2025 DEBUG(dbgs() << "Try to assign: " << VirtReg << " to "
2026 << PrintReg(PhysReg, TRI) << '\n');
2027 RecoloringCandidates.clear();
2028 VirtRegToPhysReg.clear();
2029
2030 // It is only possible to recolor virtual register interference.
2031 if (Matrix->checkInterference(VirtReg, PhysReg) >
2032 LiveRegMatrix::IK_VirtReg) {
2033      DEBUG(dbgs() << "Some interferences are not with virtual registers.\n");
2034
2035 continue;
2036 }
2037
2038    // Give up early on this PhysReg if it is obvious we cannot recolor all
2039 // the interferences.
2040 if (!mayRecolorAllInterferences(PhysReg, VirtReg, RecoloringCandidates,
2041 FixedRegisters)) {
2042      DEBUG(dbgs() << "Some interferences cannot be recolored.\n");
2043 continue;
2044 }
2045
2046    // RecoloringCandidates contains all the virtual registers that interfere
2047 // with VirtReg on PhysReg (or one of its aliases).
2048 // Enqueue them for recoloring and perform the actual recoloring.
2049 PQueue RecoloringQueue;
2050 for (SmallLISet::iterator It = RecoloringCandidates.begin(),
2051 EndIt = RecoloringCandidates.end();
2052 It != EndIt; ++It) {
2053 unsigned ItVirtReg = (*It)->reg;
2054 enqueue(RecoloringQueue, *It);
2055 assert(VRM->hasPhys(ItVirtReg) &&
2056             "Interferences are supposed to be with allocated variables");
2057
2058 // Record the current allocation.
2059 VirtRegToPhysReg[ItVirtReg] = VRM->getPhys(ItVirtReg);
2060      // Unassign it from the live register matrix.
2061 Matrix->unassign(**It);
2062 }
2063
2064    // Act as if VirtReg was assigned to PhysReg so that the underlying
2065    // recoloring has the right information about the interferences and
2066 // available colors.
2067 Matrix->assign(VirtReg, PhysReg);
2068
2069 // Save the current recoloring state.
2070 // If we cannot recolor all the interferences, we will have to start again
2071 // at this point for the next physical register.
2072 SmallVirtRegSet SaveFixedRegisters(FixedRegisters);
2073 if (tryRecoloringCandidates(RecoloringQueue, NewVRegs, FixedRegisters,
2074 Depth)) {
2075      // Do not mess up the global assignment process.
2076 // I.e., VirtReg must be unassigned.
2077 Matrix->unassign(VirtReg);
2078 return PhysReg;
2079 }
2080
2081    DEBUG(dbgs() << "Failed to assign: " << VirtReg << " to "
2082 << PrintReg(PhysReg, TRI) << '\n');
2083
2084 // The recoloring attempt failed, undo the changes.
2085 FixedRegisters = SaveFixedRegisters;
2086 Matrix->unassign(VirtReg);
2087
2088 for (SmallLISet::iterator It = RecoloringCandidates.begin(),
2089 EndIt = RecoloringCandidates.end();
2090 It != EndIt; ++It) {
2091 unsigned ItVirtReg = (*It)->reg;
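      // The failed attempt may have left this candidate assigned to some
      // register; unassign it first if so, then restore the original mapping
      // saved in VirtRegToPhysReg.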
2092 if (VRM->hasPhys(ItVirtReg))
2093 Matrix->unassign(**It);
2094 Matrix->assign(**It, VirtRegToPhysReg[ItVirtReg]);
2095 }
2096 }
2097
2098  // Last chance recoloring did not work either; give up.
2099 return ~0u;
2100}
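
// To make the flow above concrete, here is how the vA/vB/vC example from the
// function comment plays out in this implementation (an illustrative trace;
// the register names come from that example, not from any target):
//   tryLastChanceRecoloring(vC): FixedRegisters = {vC}.
//     Order yields R1; the only interference is vA, a virtual register, and
//     mayRecolorAllInterferences accepts it. vA is unassigned and enqueued,
//     vC is tentatively assigned R1, and tryRecoloringCandidates recurses.
//       selectOrSplitImpl(vA, Depth = 1) cannot evict vC (it is fixed), so
//       it reaches last chance recoloring again: FixedRegisters = {vC, vA},
//       vA takes R2, and vB is enqueued and recolored to the free R3.
//     The queue drains successfully, so vC keeps R1.
//   Final assignment: vC = R1, vA = R2, vB = R3, as in the comment above.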
2101
2102/// tryRecoloringCandidates - Try to assign a new color to every register
2103/// in \p RecoloringQueue.
2104/// \p NewVRegs will contain any new virtual registers created during the
2105/// recoloring process.
2106/// \p FixedRegisters[in/out] contains all the registers that have been
2107/// recolored.
2108/// \return true if all virtual registers in RecoloringQueue were successfully
2109/// recolored, false otherwise.
2110bool RAGreedy::tryRecoloringCandidates(PQueue &RecoloringQueue,
2111 SmallVectorImpl<unsigned> &NewVRegs,
2112 SmallVirtRegSet &FixedRegisters,
2113 unsigned Depth) {
2114 while (!RecoloringQueue.empty()) {
2115 LiveInterval *LI = dequeue(RecoloringQueue);
2116 DEBUG(dbgs() << "Try to recolor: " << *LI << '\n');
2117    unsigned PhysReg =
2118        selectOrSplitImpl(*LI, NewVRegs, FixedRegisters, Depth + 1);
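    // selectOrSplitImpl signals failure in two ways: ~0u means even last
    // chance recoloring failed, while 0 means the register was deferred or
    // spilled and new virtual registers were queued instead. Either way,
    // this candidate set cannot be fully recolored right now.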
2119 if (PhysReg == ~0u || !PhysReg)
2120 return false;
2121 DEBUG(dbgs() << "Recoloring of " << *LI
2122 << " succeeded with: " << PrintReg(PhysReg, TRI) << '\n');
2123 Matrix->assign(*LI, PhysReg);
2124 FixedRegisters.insert(LI->reg);
2125 }
2126 return true;
2127}
Jakob Stoklund Olesen9fb04012011-01-19 22:11:48 +00002128
Jakob Stoklund Olesen267f6c12011-01-18 21:13:27 +00002129//===----------------------------------------------------------------------===//
Jakob Stoklund Olesen0acb69d2010-12-22 22:01:30 +00002130// Main Entry Point
2131//===----------------------------------------------------------------------===//
2132
2133unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
Mark Laceyf9ea8852013-08-14 23:50:04 +00002134 SmallVectorImpl<unsigned> &NewVRegs) {
Quentin Colombet96bd2a12014-04-04 02:05:21 +00002135 CutOffInfo = CO_None;
2136 LLVMContext &Ctx = MF->getFunction()->getContext();
Quentin Colombet87769712014-02-05 22:13:59 +00002137 SmallVirtRegSet FixedRegisters;
Quentin Colombet96bd2a12014-04-04 02:05:21 +00002138 unsigned Reg = selectOrSplitImpl(VirtReg, NewVRegs, FixedRegisters);
2139 if (Reg == ~0U && (CutOffInfo != CO_None)) {
2140 uint8_t CutOffEncountered = CutOffInfo & (CO_Depth | CO_Interf);
2141 if (CutOffEncountered == CO_Depth)
2142 Ctx.emitError(
2143 "register allocation failed: maximum depth for recoloring reached");
2144 else if (CutOffEncountered == CO_Interf)
2145 Ctx.emitError("register allocation failed: maximum interference for "
2146 "recoloring reached");
2147 else if (CutOffEncountered == (CO_Depth | CO_Interf))
2148 Ctx.emitError("register allocation failed: maximum interference and "
2149 "depth for recoloring reached");
2150 }
2151 return Reg;
Quentin Colombet87769712014-02-05 22:13:59 +00002152}
2153
Manman Ren9dee4492014-03-27 21:21:57 +00002154/// Using a CSR for the first time has a cost because it causes push|pop
2155/// to be added to prologue|epilogue. Splitting a cold section of the live
2156/// range can have lower cost than using the CSR for the first time;
2157/// spilling a live range in the cold path can have lower cost than using
2158/// the CSR for the first time. Returns the physical register if we decide
2159/// to use the CSR; otherwise returns 0.
2160unsigned RAGreedy::tryAssignCSRFirstTime(LiveInterval &VirtReg,
2161 AllocationOrder &Order,
2162 unsigned PhysReg,
2163 unsigned &CostPerUseLimit,
2164 SmallVectorImpl<unsigned> &NewVRegs) {
Manman Ren9dee4492014-03-27 21:21:57 +00002165 if (getStage(VirtReg) == RS_Spill && VirtReg.isSpillable()) {
2166 // We choose spill over using the CSR for the first time if the spill cost
2167 // is lower than CSRCost.
2168 SA->analyze(&VirtReg);
2169 if (calcSpillCost() >= CSRCost)
2170 return PhysReg;
2171
2172 // We are going to spill, set CostPerUseLimit to 1 to make sure that
2173 // we will not use a callee-saved register in tryEvict.
2174 CostPerUseLimit = 1;
2175 return 0;
2176 }
2177 if (getStage(VirtReg) < RS_Split) {
2178 // We choose pre-splitting over using the CSR for the first time if
2179 // the cost of splitting is lower than CSRCost.
2180 SA->analyze(&VirtReg);
2181 unsigned NumCands = 0;
Duncan P. N. Exon Smitha5df8132014-04-08 19:18:56 +00002182 BlockFrequency BestCost = CSRCost; // Don't modify CSRCost.
2183 unsigned BestCand = calculateRegionSplitCost(VirtReg, Order, BestCost,
2184 NumCands, true /*IgnoreCSR*/);
Manman Ren9dee4492014-03-27 21:21:57 +00002185 if (BestCand == NoCand)
2186 // Use the CSR if we can't find a region split below CSRCost.
2187 return PhysReg;
2188
2189 // Perform the actual pre-splitting.
2190 doRegionSplit(VirtReg, BestCand, false/*HasCompact*/, NewVRegs);
2191 return 0;
2192 }
2193 return PhysReg;
2194}
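
// In sketch form, the policy above is (a compressed restatement of the code,
// not an addition to it):
//   RS_Spill && spillable : spill cost  < CSRCost ? prepare to spill : CSR
//   stage < RS_Split      : region split cost < CSRCost ? pre-split  : CSR
//   otherwise             : use the CSR (return PhysReg)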
2195
Duncan P. N. Exon Smitha5df8132014-04-08 19:18:56 +00002196void RAGreedy::initializeCSRCost() {
2197  // We use the larger of the command-line option and the value reported
2198  // by TRI.
2199 CSRCost = BlockFrequency(
2200 std::max((unsigned)CSRFirstTimeCost, TRI->getCSRFirstUseCost()));
2201 if (!CSRCost.getFrequency())
2202 return;
2203
2204 // Raw cost is relative to Entry == 2^14; scale it appropriately.
2205 uint64_t ActualEntry = MBFI->getEntryFreq();
2206 if (!ActualEntry) {
2207 CSRCost = 0;
2208 return;
2209 }
2210 uint64_t FixedEntry = 1 << 14;
2211 if (ActualEntry < FixedEntry)
2212 CSRCost *= BranchProbability(ActualEntry, FixedEntry);
2213 else if (ActualEntry <= UINT32_MAX)
2214 // Invert the fraction and divide.
2215 CSRCost /= BranchProbability(FixedEntry, ActualEntry);
2216 else
2217 // Can't use BranchProbability in general, since it takes 32-bit numbers.
2218 CSRCost = CSRCost.getFrequency() * (ActualEntry / FixedEntry);
2219}
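
// An illustrative restatement of the scaling rule above as plain integer
// arithmetic (hypothetical helper, not called anywhere; it ignores the
// rounding that BlockFrequency and BranchProbability perform):
//
//   static uint64_t scaleCSRCost(uint64_t RawCost, uint64_t ActualEntry) {
//     const uint64_t FixedEntry = 1 << 14; // Raw cost is relative to 2^14.
//     if (!ActualEntry)
//       return 0;                                  // Unknown entry freq.
//     if (ActualEntry <= UINT32_MAX)
//       return RawCost * ActualEntry / FixedEntry; // Exact 64-bit ratio.
//     return RawCost * (ActualEntry / FixedEntry); // Coarse ratio.
//   }
//
// For example, RawCost == 100 with ActualEntry == 2^13 yields 50, matching
// CSRCost *= BranchProbability(2^13, 2^14); with ActualEntry == 2^15 the
// code instead divides by BranchProbability(2^14, 2^15), yielding 200.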
2220
Quentin Colombet87769712014-02-05 22:13:59 +00002221unsigned RAGreedy::selectOrSplitImpl(LiveInterval &VirtReg,
2222 SmallVectorImpl<unsigned> &NewVRegs,
2223 SmallVirtRegSet &FixedRegisters,
2224 unsigned Depth) {
Manman Ren78cf02a2014-03-25 00:16:25 +00002225 unsigned CostPerUseLimit = ~0u;
Jakob Stoklund Olesen0acb69d2010-12-22 22:01:30 +00002226 // First try assigning a free register.
Jakob Stoklund Olesenb8bf3c02011-06-03 20:34:53 +00002227 AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
Manman Ren78cf02a2014-03-25 00:16:25 +00002228 if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs)) {
2229 // We check other options if we are using a CSR for the first time.
2230 bool CSRFirstUse = false;
2231 if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
2232 if (!MRI->isPhysRegUsed(CSR))
2233 CSRFirstUse = true;
2234
Manman Ren9dee4492014-03-27 21:21:57 +00002235 // When NewVRegs is not empty, we may have made decisions such as evicting
2236    // a virtual register; go with the earlier decisions and use the physical
2237 // register.
Duncan P. N. Exon Smitha5df8132014-04-08 19:18:56 +00002238 if (CSRCost.getFrequency() && CSRFirstUse && NewVRegs.empty()) {
Manman Ren9dee4492014-03-27 21:21:57 +00002239 unsigned CSRReg = tryAssignCSRFirstTime(VirtReg, Order, PhysReg,
2240 CostPerUseLimit, NewVRegs);
2241 if (CSRReg || !NewVRegs.empty())
2242 // Return now if we decide to use a CSR or create new vregs due to
2243 // pre-splitting.
2244 return CSRReg;
Manman Ren78cf02a2014-03-25 00:16:25 +00002245 } else
2246 return PhysReg;
2247 }
Andrew Trickccef0982010-12-09 18:15:21 +00002248
Jakob Stoklund Olesen25d57452011-05-25 23:58:36 +00002249 LiveRangeStage Stage = getStage(VirtReg);
Jakob Stoklund Olesen30a85632011-07-02 01:37:09 +00002250 DEBUG(dbgs() << StageName[Stage]
2251 << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');
Jakob Stoklund Olesen25d57452011-05-25 23:58:36 +00002252
Jakob Stoklund Olesene9cc8e92011-06-01 18:45:02 +00002253 // Try to evict a less worthy live range, but only for ranges from the primary
Jakob Stoklund Olesen3ef8cf12011-07-25 15:25:41 +00002254 // queue. The RS_Split ranges already failed to do this, and they should not
Jakob Stoklund Olesene9cc8e92011-06-01 18:45:02 +00002255 // get a second chance until they have been split.
Jakob Stoklund Olesen3ef8cf12011-07-25 15:25:41 +00002256 if (Stage != RS_Split)
Manman Ren78cf02a2014-03-25 00:16:25 +00002257 if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs, CostPerUseLimit))
Jakob Stoklund Olesene9cc8e92011-06-01 18:45:02 +00002258 return PhysReg;
Andrew Trickccef0982010-12-09 18:15:21 +00002259
Jakob Stoklund Olesen9fb04012011-01-19 22:11:48 +00002260 assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");
2261
Jakob Stoklund Olesene68a27e2011-02-24 23:21:36 +00002262 // The first time we see a live range, don't try to split or spill.
2263 // Wait until the second time, when all smaller ranges have been allocated.
2264 // This gives a better picture of the interference to split around.
Jakob Stoklund Olesen3ef8cf12011-07-25 15:25:41 +00002265 if (Stage < RS_Split) {
2266 setStage(VirtReg, RS_Split);
Jakob Stoklund Olesen86985072011-03-19 23:02:47 +00002267 DEBUG(dbgs() << "wait for second round\n");
Mark Laceyf9ea8852013-08-14 23:50:04 +00002268 NewVRegs.push_back(VirtReg.reg);
Jakob Stoklund Olesene68a27e2011-02-24 23:21:36 +00002269 return 0;
2270 }
2271
Jakob Stoklund Olesena5c88992011-05-06 21:58:30 +00002272 // If we couldn't allocate a register from spilling, there is probably some
2273  // invalid inline assembly. The base class will report it.
Jakob Stoklund Olesen3ef8cf12011-07-25 15:25:41 +00002274 if (Stage >= RS_Done || !VirtReg.isSpillable())
Quentin Colombet87769712014-02-05 22:13:59 +00002275 return tryLastChanceRecoloring(VirtReg, Order, NewVRegs, FixedRegisters,
2276 Depth);
Jakob Stoklund Olesen5f9f0812011-03-01 21:10:07 +00002277
Jakob Stoklund Olesen903b6d32010-12-14 00:37:49 +00002278 // Try splitting VirtReg or interferences.
Jakob Stoklund Olesen9fb04012011-01-19 22:11:48 +00002279 unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
2280 if (PhysReg || !NewVRegs.empty())
Jakob Stoklund Olesen3d7b8062010-12-14 00:37:44 +00002281 return PhysReg;
2282
Jakob Stoklund Olesen0acb69d2010-12-22 22:01:30 +00002283 // Finally spill VirtReg itself.
Jakob Stoklund Olesen92da7052010-12-11 00:19:56 +00002284 NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
Jakob Stoklund Olesene5bbe372012-05-19 05:25:46 +00002285 LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
Jakob Stoklund Olesen4d6eafa2011-03-10 01:51:42 +00002286 spiller().spill(LRE);
Jakob Stoklund Olesen3ef8cf12011-07-25 15:25:41 +00002287 setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);
Jakob Stoklund Olesenb8812a12010-12-08 03:26:16 +00002288
Jakob Stoklund Olesen557a82c2011-03-16 22:56:08 +00002289 if (VerifyEnabled)
2290 MF->verify(this, "After spilling");
2291
Jakob Stoklund Olesenb8812a12010-12-08 03:26:16 +00002292 // The live virtual register requesting allocation was spilled, so tell
2293 // the caller not to allocate anything during this round.
2294 return 0;
2295}
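
// Summarizing the ladder above: try a free register (with special handling
// the first time a CSR would be used), then eviction, then deferral of
// first-round ranges to the second round, then last chance recoloring for
// RS_Done or unspillable ranges, then splitting, and finally spilling
// VirtReg itself.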
2296
2297bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
2298 DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
David Blaikiec8c29202012-08-22 17:18:53 +00002299 << "********** Function: " << mf.getName() << '\n');
Jakob Stoklund Olesenb8812a12010-12-08 03:26:16 +00002300
2301 MF = &mf;
Quentin Colombet1fb3362a2014-01-02 22:47:22 +00002302 TRI = MF->getTarget().getRegisterInfo();
2303 TII = MF->getTarget().getInstrInfo();
2304 RCI.runOnMachineFunction(mf);
Jakob Stoklund Olesen2e98ee32010-12-17 23:16:35 +00002305 if (VerifyEnabled)
Jakob Stoklund Olesenbf4550e2010-12-18 00:06:56 +00002306 MF->verify(this, "Before greedy register allocator");
Jakob Stoklund Olesen2e98ee32010-12-17 23:16:35 +00002307
Jakob Stoklund Olesen2d2dec92012-06-20 22:52:29 +00002308 RegAllocBase::init(getAnalysis<VirtRegMap>(),
2309 getAnalysis<LiveIntervals>(),
2310 getAnalysis<LiveRegMatrix>());
Jakob Stoklund Olesen267f6c12011-01-18 21:13:27 +00002311 Indexes = &getAnalysis<SlotIndexes>();
Benjamin Kramere2a1d892013-06-17 19:00:36 +00002312 MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
Jakob Stoklund Olesen1740e002010-12-17 23:16:32 +00002313 DomTree = &getAnalysis<MachineDominatorTree>();
Jakob Stoklund Olesenadecb5e2010-12-10 22:54:44 +00002314 SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
Jakob Stoklund Olesene7601e92010-12-15 23:46:13 +00002315 Loops = &getAnalysis<MachineLoopInfo>();
Jakob Stoklund Olesen267f6c12011-01-18 21:13:27 +00002316 Bundles = &getAnalysis<EdgeBundles>();
2317 SpillPlacer = &getAnalysis<SpillPlacement>();
Jakob Stoklund Olesenf8da0282011-05-06 18:00:02 +00002318 DebugVars = &getAnalysis<LiveDebugVariables>();
Jakob Stoklund Olesen267f6c12011-01-18 21:13:27 +00002319
Duncan P. N. Exon Smitha5df8132014-04-08 19:18:56 +00002320 initializeCSRCost();
2321
Arnaud A. de Grandmaisonea3ac162013-11-11 19:04:45 +00002322 calculateSpillWeightsAndHints(*LIS, mf, *Loops, *MBFI);
Arnaud A. de Grandmaison760c1e02013-11-10 17:46:31 +00002323
Andrew Trick97064962013-07-25 07:26:26 +00002324 DEBUG(LIS->dump());
2325
Jakob Stoklund Olesenf1a60a62011-02-19 00:53:42 +00002326 SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
Benjamin Kramere2a1d892013-06-17 19:00:36 +00002327 SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree, *MBFI));
Jakob Stoklund Olesen30a85632011-07-02 01:37:09 +00002328 ExtraRegInfo.clear();
2329 ExtraRegInfo.resize(MRI->getNumVirtRegs());
2330 NextCascade = 1;
Jakob Stoklund Olesen96eebf02012-06-20 22:52:26 +00002331 IntfCache.init(MF, Matrix->getLiveUnions(), Indexes, LIS, TRI);
Jakob Stoklund Olesendab4b9a2011-07-26 23:41:46 +00002332 GlobalCand.resize(32); // This will grow as needed.
Jakob Stoklund Olesene7601e92010-12-15 23:46:13 +00002333
Jakob Stoklund Olesenb8812a12010-12-08 03:26:16 +00002334 allocatePhysRegs();
Jakob Stoklund Olesenb8812a12010-12-08 03:26:16 +00002335 releaseMemory();
Jakob Stoklund Olesenb8812a12010-12-08 03:26:16 +00002336 return true;
2337}