blob: 8852cd5b5cf8291c0467e0699cbbfe5c9538760e [file] [log] [blame]
Andrew Trickd06df962012-02-01 22:13:57 +00001//===- ResourcePriorityQueue.cpp - A DFA-oriented priority queue -*- C++ -*-==//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the ResourcePriorityQueue class, which is a
11// SchedulingPriorityQueue that prioritizes instructions using DFA state to
12// reduce the length of the critical path through the basic block
13// on VLIW platforms.
14// The scheduler is basically a top-down adaptable list scheduler with DFA
15// resource tracking added to the cost function.
16// DFA is queried as a state machine to model "packets/bundles" during
17// schedule. Currently packets/bundles are discarded at the end of
18// scheduling, affecting only order of instructions.
19//
20//===----------------------------------------------------------------------===//
21
Andrew Trickd06df962012-02-01 22:13:57 +000022#include "llvm/CodeGen/ResourcePriorityQueue.h"
Chandler Carruthed0881b2012-12-03 16:50:05 +000023#include "llvm/CodeGen/MachineInstr.h"
24#include "llvm/CodeGen/SelectionDAGNodes.h"
Andrew Trickd06df962012-02-01 22:13:57 +000025#include "llvm/Support/CommandLine.h"
26#include "llvm/Support/Debug.h"
27#include "llvm/Support/raw_ostream.h"
Andrew Trickd06df962012-02-01 22:13:57 +000028#include "llvm/Target/TargetLowering.h"
Chandler Carruthed0881b2012-12-03 16:50:05 +000029#include "llvm/Target/TargetMachine.h"
Eric Christopherd9134482014-08-04 21:25:23 +000030#include "llvm/Target/TargetSubtargetInfo.h"
Andrew Trickd06df962012-02-01 22:13:57 +000031
32using namespace llvm;
33
Chandler Carruth1b9dde02014-04-22 02:02:50 +000034#define DEBUG_TYPE "scheduler"
35
Andrew Trickd06df962012-02-01 22:13:57 +000036static cl::opt<bool> DisableDFASched("disable-dfa-sched", cl::Hidden,
37 cl::ZeroOrMore, cl::init(false),
38 cl::desc("Disable use of DFA during scheduling"));
39
40static cl::opt<signed> RegPressureThreshold(
41 "dfa-sched-reg-pressure-threshold", cl::Hidden, cl::ZeroOrMore, cl::init(5),
42 cl::desc("Track reg pressure and switch priority to in-depth"));
43
Eric Christopher6d0e40b2014-07-23 22:27:10 +000044ResourcePriorityQueue::ResourcePriorityQueue(SelectionDAGISel *IS)
Eric Christopherd9134482014-08-04 21:25:23 +000045 : Picker(this), InstrItins(IS->getTargetLowering()
46 ->getTargetMachine()
47 .getSubtargetImpl()
48 ->getInstrItineraryData()) {
Eric Christopher6d0e40b2014-07-23 22:27:10 +000049 const TargetMachine &TM = (*IS->MF).getTarget();
Eric Christopherd9134482014-08-04 21:25:23 +000050 TRI = TM.getSubtargetImpl()->getRegisterInfo();
Eric Christopher6d0e40b2014-07-23 22:27:10 +000051 TLI = IS->getTargetLowering();
Eric Christopherd9134482014-08-04 21:25:23 +000052 TII = TM.getSubtargetImpl()->getInstrInfo();
Eric Christopher6d0e40b2014-07-23 22:27:10 +000053 ResourcesModel = TII->CreateTargetScheduleState(&TM, nullptr);
54 // This hard requirement could be relaxed, but for now
55 // do not let it procede.
Eric Christopherf19d12b2014-07-23 22:34:13 +000056 assert(ResourcesModel && "Unimplemented CreateTargetScheduleState.");
Andrew Trickd06df962012-02-01 22:13:57 +000057
Eric Christopherf19d12b2014-07-23 22:34:13 +000058 unsigned NumRC = TRI->getNumRegClasses();
59 RegLimit.resize(NumRC);
60 RegPressure.resize(NumRC);
61 std::fill(RegLimit.begin(), RegLimit.end(), 0);
62 std::fill(RegPressure.begin(), RegPressure.end(), 0);
63 for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
64 E = TRI->regclass_end();
65 I != E; ++I)
66 RegLimit[(*I)->getID()] = TRI->getRegPressureLimit(*I, *IS->MF);
Andrew Trickd06df962012-02-01 22:13:57 +000067
Eric Christopherf19d12b2014-07-23 22:34:13 +000068 ParallelLiveRanges = 0;
69 HorizontalVerticalBalance = 0;
Andrew Trickd06df962012-02-01 22:13:57 +000070}
71
72unsigned
73ResourcePriorityQueue::numberRCValPredInSU(SUnit *SU, unsigned RCId) {
74 unsigned NumberDeps = 0;
75 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
76 I != E; ++I) {
77 if (I->isCtrl())
78 continue;
79
80 SUnit *PredSU = I->getSUnit();
81 const SDNode *ScegN = PredSU->getNode();
82
83 if (!ScegN)
84 continue;
85
86 // If value is passed to CopyToReg, it is probably
87 // live outside BB.
88 switch (ScegN->getOpcode()) {
89 default: break;
90 case ISD::TokenFactor: break;
91 case ISD::CopyFromReg: NumberDeps++; break;
92 case ISD::CopyToReg: break;
93 case ISD::INLINEASM: break;
94 }
95 if (!ScegN->isMachineOpcode())
96 continue;
97
98 for (unsigned i = 0, e = ScegN->getNumValues(); i != e; ++i) {
Patrik Hagglund5e6c3612012-12-13 06:34:11 +000099 MVT VT = ScegN->getSimpleValueType(i);
Andrew Trickd06df962012-02-01 22:13:57 +0000100 if (TLI->isTypeLegal(VT)
Patrik Hagglund5e6c3612012-12-13 06:34:11 +0000101 && (TLI->getRegClassFor(VT)->getID() == RCId)) {
Andrew Trickd06df962012-02-01 22:13:57 +0000102 NumberDeps++;
103 break;
104 }
105 }
106 }
107 return NumberDeps;
108}
109
110unsigned ResourcePriorityQueue::numberRCValSuccInSU(SUnit *SU,
111 unsigned RCId) {
112 unsigned NumberDeps = 0;
113 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
114 I != E; ++I) {
115 if (I->isCtrl())
116 continue;
117
118 SUnit *SuccSU = I->getSUnit();
119 const SDNode *ScegN = SuccSU->getNode();
120 if (!ScegN)
121 continue;
122
123 // If value is passed to CopyToReg, it is probably
124 // live outside BB.
125 switch (ScegN->getOpcode()) {
126 default: break;
127 case ISD::TokenFactor: break;
128 case ISD::CopyFromReg: break;
129 case ISD::CopyToReg: NumberDeps++; break;
130 case ISD::INLINEASM: break;
131 }
132 if (!ScegN->isMachineOpcode())
133 continue;
134
135 for (unsigned i = 0, e = ScegN->getNumOperands(); i != e; ++i) {
136 const SDValue &Op = ScegN->getOperand(i);
Patrik Hagglund5e6c3612012-12-13 06:34:11 +0000137 MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());
Andrew Trickd06df962012-02-01 22:13:57 +0000138 if (TLI->isTypeLegal(VT)
Patrik Hagglund5e6c3612012-12-13 06:34:11 +0000139 && (TLI->getRegClassFor(VT)->getID() == RCId)) {
Andrew Trickd06df962012-02-01 22:13:57 +0000140 NumberDeps++;
141 break;
142 }
143 }
144 }
145 return NumberDeps;
146}
147
148static unsigned numberCtrlDepsInSU(SUnit *SU) {
149 unsigned NumberDeps = 0;
150 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
151 I != E; ++I)
152 if (I->isCtrl())
153 NumberDeps++;
154
155 return NumberDeps;
156}
157
158static unsigned numberCtrlPredInSU(SUnit *SU) {
159 unsigned NumberDeps = 0;
160 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
161 I != E; ++I)
162 if (I->isCtrl())
163 NumberDeps++;
164
165 return NumberDeps;
166}
167
168///
169/// Initialize nodes.
170///
171void ResourcePriorityQueue::initNodes(std::vector<SUnit> &sunits) {
172 SUnits = &sunits;
173 NumNodesSolelyBlocking.resize(SUnits->size(), 0);
174
175 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
176 SUnit *SU = &(*SUnits)[i];
177 initNumRegDefsLeft(SU);
178 SU->NodeQueueId = 0;
179 }
180}
181
182/// This heuristic is used if DFA scheduling is not desired
183/// for some VLIW platform.
184bool resource_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
185 // The isScheduleHigh flag allows nodes with wraparound dependencies that
186 // cannot easily be modeled as edges with latencies to be scheduled as
187 // soon as possible in a top-down schedule.
188 if (LHS->isScheduleHigh && !RHS->isScheduleHigh)
189 return false;
190
191 if (!LHS->isScheduleHigh && RHS->isScheduleHigh)
192 return true;
193
194 unsigned LHSNum = LHS->NodeNum;
195 unsigned RHSNum = RHS->NodeNum;
196
197 // The most important heuristic is scheduling the critical path.
198 unsigned LHSLatency = PQ->getLatency(LHSNum);
199 unsigned RHSLatency = PQ->getLatency(RHSNum);
200 if (LHSLatency < RHSLatency) return true;
201 if (LHSLatency > RHSLatency) return false;
202
203 // After that, if two nodes have identical latencies, look to see if one will
204 // unblock more other nodes than the other.
205 unsigned LHSBlocked = PQ->getNumSolelyBlockNodes(LHSNum);
206 unsigned RHSBlocked = PQ->getNumSolelyBlockNodes(RHSNum);
207 if (LHSBlocked < RHSBlocked) return true;
208 if (LHSBlocked > RHSBlocked) return false;
209
210 // Finally, just to provide a stable ordering, use the node number as a
211 // deciding factor.
212 return LHSNum < RHSNum;
213}
214
215
216/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
217/// of SU, return it, otherwise return null.
218SUnit *ResourcePriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
Craig Topperc0196b12014-04-14 00:51:57 +0000219 SUnit *OnlyAvailablePred = nullptr;
Andrew Trickd06df962012-02-01 22:13:57 +0000220 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
221 I != E; ++I) {
222 SUnit &Pred = *I->getSUnit();
223 if (!Pred.isScheduled) {
224 // We found an available, but not scheduled, predecessor. If it's the
225 // only one we have found, keep track of it... otherwise give up.
226 if (OnlyAvailablePred && OnlyAvailablePred != &Pred)
Craig Topperc0196b12014-04-14 00:51:57 +0000227 return nullptr;
Andrew Trickd06df962012-02-01 22:13:57 +0000228 OnlyAvailablePred = &Pred;
229 }
230 }
231 return OnlyAvailablePred;
232}
233
234void ResourcePriorityQueue::push(SUnit *SU) {
235 // Look at all of the successors of this node. Count the number of nodes that
236 // this node is the sole unscheduled node for.
237 unsigned NumNodesBlocking = 0;
238 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
239 I != E; ++I)
240 if (getSingleUnscheduledPred(I->getSUnit()) == SU)
241 ++NumNodesBlocking;
242
243 NumNodesSolelyBlocking[SU->NodeNum] = NumNodesBlocking;
244 Queue.push_back(SU);
245}
246
247/// Check if scheduling of this SU is possible
248/// in the current packet.
249bool ResourcePriorityQueue::isResourceAvailable(SUnit *SU) {
250 if (!SU || !SU->getNode())
251 return false;
252
253 // If this is a compound instruction,
254 // it is likely to be a call. Do not delay it.
255 if (SU->getNode()->getGluedNode())
256 return true;
257
258 // First see if the pipeline could receive this instruction
259 // in the current cycle.
260 if (SU->getNode()->isMachineOpcode())
261 switch (SU->getNode()->getMachineOpcode()) {
262 default:
263 if (!ResourcesModel->canReserveResources(&TII->get(
264 SU->getNode()->getMachineOpcode())))
265 return false;
266 case TargetOpcode::EXTRACT_SUBREG:
267 case TargetOpcode::INSERT_SUBREG:
268 case TargetOpcode::SUBREG_TO_REG:
269 case TargetOpcode::REG_SEQUENCE:
270 case TargetOpcode::IMPLICIT_DEF:
271 break;
272 }
273
274 // Now see if there are no other dependencies
275 // to instructions alredy in the packet.
276 for (unsigned i = 0, e = Packet.size(); i != e; ++i)
277 for (SUnit::const_succ_iterator I = Packet[i]->Succs.begin(),
278 E = Packet[i]->Succs.end(); I != E; ++I) {
279 // Since we do not add pseudos to packets, might as well
280 // ignor order deps.
281 if (I->isCtrl())
282 continue;
283
284 if (I->getSUnit() == SU)
285 return false;
286 }
287
288 return true;
289}
290
/// Keep track of available resources.
/// Commits SU to the current packet: restarts the packet if SU does not
/// fit, reserves DFA resources for real machine instructions, and resets
/// the packet when the issue width is reached.
void ResourcePriorityQueue::reserveResources(SUnit *SU) {
  // If this SU does not fit in the packet
  // start a new one.
  if (!isResourceAvailable(SU) || SU->getNode()->getGluedNode()) {
    ResourcesModel->clearResources();
    Packet.clear();
  }

  if (SU->getNode() && SU->getNode()->isMachineOpcode()) {
    switch (SU->getNode()->getMachineOpcode()) {
    default:
      ResourcesModel->reserveResources(&TII->get(
        SU->getNode()->getMachineOpcode()));
      break;
    // Pseudo-ops reserve nothing in the DFA.
    case TargetOpcode::EXTRACT_SUBREG:
    case TargetOpcode::INSERT_SUBREG:
    case TargetOpcode::SUBREG_TO_REG:
    case TargetOpcode::REG_SEQUENCE:
    case TargetOpcode::IMPLICIT_DEF:
      break;
    }
    Packet.push_back(SU);
  }
  // Forcefully end packet for PseudoOps.
  else {
    ResourcesModel->clearResources();
    Packet.clear();
  }

  // If packet is now full, reset the state so in the next cycle
  // we start fresh.
  if (Packet.size() >= InstrItins->SchedModel->IssueWidth) {
    ResourcesModel->clearResources();
    Packet.clear();
  }
}
328
329signed ResourcePriorityQueue::rawRegPressureDelta(SUnit *SU, unsigned RCId) {
330 signed RegBalance = 0;
331
332 if (!SU || !SU->getNode() || !SU->getNode()->isMachineOpcode())
333 return RegBalance;
334
335 // Gen estimate.
336 for (unsigned i = 0, e = SU->getNode()->getNumValues(); i != e; ++i) {
Patrik Hagglund5e6c3612012-12-13 06:34:11 +0000337 MVT VT = SU->getNode()->getSimpleValueType(i);
Andrew Trickd06df962012-02-01 22:13:57 +0000338 if (TLI->isTypeLegal(VT)
339 && TLI->getRegClassFor(VT)
340 && TLI->getRegClassFor(VT)->getID() == RCId)
341 RegBalance += numberRCValSuccInSU(SU, RCId);
342 }
343 // Kill estimate.
344 for (unsigned i = 0, e = SU->getNode()->getNumOperands(); i != e; ++i) {
345 const SDValue &Op = SU->getNode()->getOperand(i);
Patrik Hagglund5e6c3612012-12-13 06:34:11 +0000346 MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());
Andrew Trickd06df962012-02-01 22:13:57 +0000347 if (isa<ConstantSDNode>(Op.getNode()))
348 continue;
349
350 if (TLI->isTypeLegal(VT) && TLI->getRegClassFor(VT)
351 && TLI->getRegClassFor(VT)->getID() == RCId)
352 RegBalance -= numberRCValPredInSU(SU, RCId);
353 }
354 return RegBalance;
355}
356
357/// Estimates change in reg pressure from this SU.
Benjamin Kramerbde91762012-06-02 10:20:22 +0000358/// It is achieved by trivial tracking of defined
Andrew Trickd06df962012-02-01 22:13:57 +0000359/// and used vregs in dependent instructions.
360/// The RawPressure flag makes this function to ignore
361/// existing reg file sizes, and report raw def/use
362/// balance.
363signed ResourcePriorityQueue::regPressureDelta(SUnit *SU, bool RawPressure) {
364 signed RegBalance = 0;
365
366 if (!SU || !SU->getNode() || !SU->getNode()->isMachineOpcode())
367 return RegBalance;
368
369 if (RawPressure) {
370 for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
371 E = TRI->regclass_end(); I != E; ++I) {
372 const TargetRegisterClass *RC = *I;
373 RegBalance += rawRegPressureDelta(SU, RC->getID());
374 }
375 }
376 else {
377 for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
378 E = TRI->regclass_end(); I != E; ++I) {
379 const TargetRegisterClass *RC = *I;
380 if ((RegPressure[RC->getID()] +
381 rawRegPressureDelta(SU, RC->getID()) > 0) &&
382 (RegPressure[RC->getID()] +
383 rawRegPressureDelta(SU, RC->getID()) >= RegLimit[RC->getID()]))
384 RegBalance += rawRegPressureDelta(SU, RC->getID());
385 }
386 }
387
388 return RegBalance;
389}
390
// Constants used to denote relative importance of
// heuristic components for cost computation.
static const unsigned PriorityOne = 200;  // Forced (isScheduleHigh) nodes.
static const unsigned PriorityTwo = 50;   // Base bonus for calls.
static const unsigned PriorityThree = 15; // INLINEASM nodes.
static const unsigned PriorityFour = 5;   // TokenFactor/CopyFromReg/CopyToReg.
static const unsigned ScaleOne = 20;      // Reg-pressure delta (pressure mode).
static const unsigned ScaleTwo = 10;      // Height/blocked-node weighting.
static const unsigned ScaleThree = 5;     // Per-value bonus for calls.
static const unsigned FactorOne = 2;      // Shift when resources available.
401
/// Returns single number reflecting benefit of scheduling SU
/// in the current cycle. Larger is better; pop() selects the maximum.
signed ResourcePriorityQueue::SUSchedulingCost(SUnit *SU) {
  // Initial trivial priority.
  signed ResCount = 1;

  // Do not waste time on a node that is already scheduled.
  if (SU->isScheduled)
    return ResCount;

  // Forced priority is high.
  if (SU->isScheduleHigh)
    ResCount += PriorityOne;

  // Adaptable scheduling
  // A small, but very parallel
  // region, where reg pressure is an issue.
  if (HorizontalVerticalBalance > RegPressureThreshold) {
    // Critical path first
    ResCount += (SU->getHeight() * ScaleTwo);
    // If resources are available for it, multiply the
    // chance of scheduling.
    if (isResourceAvailable(SU))
      ResCount <<= FactorOne;

    // Consider change to reg pressure from scheduling
    // this SU (raw def/use balance, since pressure already matters here).
    ResCount -= (regPressureDelta(SU,true) * ScaleOne);
  }
  // Default heuristic, greedy and
  // critical path driven.
  else {
    // Critical path first.
    ResCount += (SU->getHeight() * ScaleTwo);
    // Now see how many instructions is blocked by this SU.
    ResCount += (NumNodesSolelyBlocking[SU->NodeNum] * ScaleTwo);
    // If resources are available for it, multiply the
    // chance of scheduling.
    if (isResourceAvailable(SU))
      ResCount <<= FactorOne;

    ResCount -= (regPressureDelta(SU) * ScaleTwo);
  }

  // These are platform-specific things.
  // Will need to go into the back end
  // and accessed from here via a hook.
  // Walk the glued chain: calls get a bonus proportional to the number of
  // values they produce; selected pseudo opcodes get small fixed bonuses.
  for (SDNode *N = SU->getNode(); N; N = N->getGluedNode()) {
    if (N->isMachineOpcode()) {
      const MCInstrDesc &TID = TII->get(N->getMachineOpcode());
      if (TID.isCall())
        ResCount += (PriorityTwo + (ScaleThree*N->getNumValues()));
    }
    else
      switch (N->getOpcode()) {
      default: break;
      case ISD::TokenFactor:
      case ISD::CopyFromReg:
      case ISD::CopyToReg:
        ResCount += PriorityFour;
        break;

      case ISD::INLINEASM:
        ResCount += PriorityThree;
        break;
      }
  }
  return ResCount;
}
471
472
/// Main resource tracking point.
/// Called once per scheduled unit: updates per-class register pressure,
/// decrements predecessors' remaining def counts, reserves packet
/// resources, and maintains the parallelism balance counters.
void ResourcePriorityQueue::scheduledNode(SUnit *SU) {
  // Use NULL entry as an event marker to reset
  // the DFA state.
  if (!SU) {
    ResourcesModel->clearResources();
    Packet.clear();
    return;
  }

  const SDNode *ScegN = SU->getNode();
  // Update reg pressure tracking.
  // First update current node.
  if (ScegN->isMachineOpcode()) {
    // Estimate generated regs: each legal-typed result adds pressure for
    // every successor that consumes a value of that class.
    for (unsigned i = 0, e = ScegN->getNumValues(); i != e; ++i) {
      MVT VT = ScegN->getSimpleValueType(i);

      if (TLI->isTypeLegal(VT)) {
        const TargetRegisterClass *RC = TLI->getRegClassFor(VT);
        if (RC)
          RegPressure[RC->getID()] += numberRCValSuccInSU(SU, RC->getID());
      }
    }
    // Estimate killed regs: consuming predecessor values lowers pressure,
    // clamped at zero (counters are unsigned).
    for (unsigned i = 0, e = ScegN->getNumOperands(); i != e; ++i) {
      const SDValue &Op = ScegN->getOperand(i);
      MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());

      if (TLI->isTypeLegal(VT)) {
        const TargetRegisterClass *RC = TLI->getRegClassFor(VT);
        if (RC) {
          if (RegPressure[RC->getID()] >
            (numberRCValPredInSU(SU, RC->getID())))
            RegPressure[RC->getID()] -= numberRCValPredInSU(SU, RC->getID());
          else RegPressure[RC->getID()] = 0;
        }
      }
    }
    // Each data predecessor has one fewer outstanding def to supply.
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
                              I != E; ++I) {
      if (I->isCtrl() || (I->getSUnit()->NumRegDefsLeft == 0))
        continue;
      --I->getSUnit()->NumRegDefsLeft;
    }
  }

  // Reserve resources for this SU.
  reserveResources(SU);

  // Adjust number of parallel live ranges.
  // Heuristic is simple - node with no data successors reduces
  // number of live ranges. All others, increase it.
  unsigned NumberNonControlDeps = 0;

  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
                                  I != E; ++I) {
    adjustPriorityOfUnscheduledPreds(I->getSUnit());
    if (!I->isCtrl())
      NumberNonControlDeps++;
  }

  if (!NumberNonControlDeps) {
    if (ParallelLiveRanges >= SU->NumPreds)
      ParallelLiveRanges -= SU->NumPreds;
    else
      ParallelLiveRanges = 0;

  }
  else
    ParallelLiveRanges += SU->NumRegDefsLeft;

  // Track parallel live chains: data successors widen the schedule,
  // data predecessors narrow it.
  HorizontalVerticalBalance += (SU->Succs.size() - numberCtrlDepsInSU(SU));
  HorizontalVerticalBalance -= (SU->Preds.size() - numberCtrlPredInSU(SU));
}
549
/// Seed SU->NumRegDefsLeft by walking the glued node chain: machine
/// opcodes contribute min(results, declared defs); IMPLICIT_DEF forces
/// zero; CopyFromReg and INLINEASM each add one.
void ResourcePriorityQueue::initNumRegDefsLeft(SUnit *SU) {
  unsigned NodeNumDefs = 0;
  for (SDNode *N = SU->getNode(); N; N = N->getGluedNode())
    if (N->isMachineOpcode()) {
      const MCInstrDesc &TID = TII->get(N->getMachineOpcode());
      // No register need be allocated for this.
      if (N->getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
        NodeNumDefs = 0;
        break;
      }
      NodeNumDefs = std::min(N->getNumValues(), TID.getNumDefs());
    }
    else
      switch(N->getOpcode()) {
      default: break;
      case ISD::CopyFromReg:
        NodeNumDefs++;
        break;
      case ISD::INLINEASM:
        NodeNumDefs++;
        break;
      }

  SU->NumRegDefsLeft = NodeNumDefs;
}
575
576/// adjustPriorityOfUnscheduledPreds - One of the predecessors of SU was just
577/// scheduled. If SU is not itself available, then there is at least one
578/// predecessor node that has not been scheduled yet. If SU has exactly ONE
579/// unscheduled predecessor, we want to increase its priority: it getting
580/// scheduled will make this node available, so it is better than some other
581/// node of the same priority that will not make a node available.
582void ResourcePriorityQueue::adjustPriorityOfUnscheduledPreds(SUnit *SU) {
583 if (SU->isAvailable) return; // All preds scheduled.
584
585 SUnit *OnlyAvailablePred = getSingleUnscheduledPred(SU);
Craig Topperc0196b12014-04-14 00:51:57 +0000586 if (!OnlyAvailablePred || !OnlyAvailablePred->isAvailable)
Andrew Trickd06df962012-02-01 22:13:57 +0000587 return;
588
589 // Okay, we found a single predecessor that is available, but not scheduled.
590 // Since it is available, it must be in the priority queue. First remove it.
591 remove(OnlyAvailablePred);
592
593 // Reinsert the node into the priority queue, which recomputes its
594 // NumNodesSolelyBlocking value.
595 push(OnlyAvailablePred);
596}
597
598
599/// Main access point - returns next instructions
600/// to be placed in scheduling sequence.
601SUnit *ResourcePriorityQueue::pop() {
602 if (empty())
Craig Topperc0196b12014-04-14 00:51:57 +0000603 return nullptr;
Andrew Trickd06df962012-02-01 22:13:57 +0000604
605 std::vector<SUnit *>::iterator Best = Queue.begin();
606 if (!DisableDFASched) {
607 signed BestCost = SUSchedulingCost(*Best);
Benjamin Kramerb6d0bd42014-03-02 12:27:27 +0000608 for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
Andrew Trickd06df962012-02-01 22:13:57 +0000609 E = Queue.end(); I != E; ++I) {
Andrew Trickd06df962012-02-01 22:13:57 +0000610
611 if (SUSchedulingCost(*I) > BestCost) {
612 BestCost = SUSchedulingCost(*I);
613 Best = I;
614 }
615 }
616 }
617 // Use default TD scheduling mechanism.
618 else {
Benjamin Kramerb6d0bd42014-03-02 12:27:27 +0000619 for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
Andrew Trickd06df962012-02-01 22:13:57 +0000620 E = Queue.end(); I != E; ++I)
621 if (Picker(*Best, *I))
622 Best = I;
623 }
624
625 SUnit *V = *Best;
Benjamin Kramerb6d0bd42014-03-02 12:27:27 +0000626 if (Best != std::prev(Queue.end()))
Andrew Trickd06df962012-02-01 22:13:57 +0000627 std::swap(*Best, Queue.back());
628
629 Queue.pop_back();
630
631 return V;
632}
633
634
635void ResourcePriorityQueue::remove(SUnit *SU) {
636 assert(!Queue.empty() && "Queue is empty!");
637 std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(), SU);
Benjamin Kramerb6d0bd42014-03-02 12:27:27 +0000638 if (I != std::prev(Queue.end()))
Andrew Trickd06df962012-02-01 22:13:57 +0000639 std::swap(*I, Queue.back());
640
641 Queue.pop_back();
642}
643
644
#ifdef NDEBUG
// Dumping is compiled out in release (NDEBUG) builds.
void ResourcePriorityQueue::dump(ScheduleDAG *DAG) const {}
#else
// Debug dump: pops every unit from a *copy* of the queue so the printed
// order matches the priority order without disturbing this queue.
void ResourcePriorityQueue::dump(ScheduleDAG *DAG) const {
  ResourcePriorityQueue q = *this;
  while (!q.empty()) {
    SUnit *su = q.pop();
    dbgs() << "Height " << su->getHeight() << ": ";
    su->dump(DAG);
  }
}
#endif