blob: 3e4b5b6eccd6312797a8e7e5854234b4057b9111 [file] [log] [blame]
//===- HexagonMachineScheduler.cpp - MI Scheduler for Hexagon -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//
14
15#define DEBUG_TYPE "misched"
16
17#include "HexagonMachineScheduler.h"
18
19#include <queue>
20
21using namespace llvm;
22
/// Check if scheduling of this SU is possible in the current packet.
/// This check is _not_ precise (the model is stateful); it is more like
/// another heuristic. Many corner cases were figured out empirically.
bool VLIWResourceModel::isResourceAvailable(SUnit *SU) {
  if (!SU || !SU->getInstr())
    return false;

  // First see if the pipeline could receive this instruction
  // in the current cycle.
  switch (SU->getInstr()->getOpcode()) {
  default:
    if (!ResourcesModel->canReserveResources(SU->getInstr()))
      return false;
    // Falls through: a real instruction the DFA can accept continues to the
    // dependence check below, exactly like the pseudo opcodes listed here
    // (which are never checked against the DFA).
  case TargetOpcode::EXTRACT_SUBREG:
  case TargetOpcode::INSERT_SUBREG:
  case TargetOpcode::SUBREG_TO_REG:
  case TargetOpcode::REG_SEQUENCE:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::COPY:
  case TargetOpcode::INLINEASM:
    break;
  }

  // Now see if there are no other dependencies to instructions already
  // in the packet.
  for (unsigned i = 0, e = Packet.size(); i != e; ++i) {
    if (Packet[i]->Succs.size() == 0)
      continue;
    for (SUnit::const_succ_iterator I = Packet[i]->Succs.begin(),
         E = Packet[i]->Succs.end(); I != E; ++I) {
      // Since we do not add pseudos to packets, might as well
      // ignore order dependencies.
      if (I->isCtrl())
        continue;

      // SU depends on an instruction already placed in this packet, so it
      // cannot issue in the same cycle.
      if (I->getSUnit() == SU)
        return false;
    }
  }
  return true;
}
66
67/// Keep track of available resources.
Sergei Larin7ae51be2012-09-10 17:31:34 +000068bool VLIWResourceModel::reserveResources(SUnit *SU) {
69 bool startNewCycle = false;
Sergei Larin3e590402012-09-04 14:49:56 +000070 // If this SU does not fit in the packet
71 // start a new one.
72 if (!isResourceAvailable(SU)) {
73 ResourcesModel->clearResources();
74 Packet.clear();
75 TotalPackets++;
Sergei Larin7ae51be2012-09-10 17:31:34 +000076 startNewCycle = true;
Sergei Larin3e590402012-09-04 14:49:56 +000077 }
78
79 switch (SU->getInstr()->getOpcode()) {
80 default:
81 ResourcesModel->reserveResources(SU->getInstr());
82 break;
83 case TargetOpcode::EXTRACT_SUBREG:
84 case TargetOpcode::INSERT_SUBREG:
85 case TargetOpcode::SUBREG_TO_REG:
86 case TargetOpcode::REG_SEQUENCE:
87 case TargetOpcode::IMPLICIT_DEF:
88 case TargetOpcode::KILL:
89 case TargetOpcode::PROLOG_LABEL:
90 case TargetOpcode::EH_LABEL:
91 case TargetOpcode::COPY:
92 case TargetOpcode::INLINEASM:
93 break;
94 }
95 Packet.push_back(SU);
96
97#ifndef NDEBUG
98 DEBUG(dbgs() << "Packet[" << TotalPackets << "]:\n");
99 for (unsigned i = 0, e = Packet.size(); i != e; ++i) {
100 DEBUG(dbgs() << "\t[" << i << "] SU(");
Sergei Larin7ae51be2012-09-10 17:31:34 +0000101 DEBUG(dbgs() << Packet[i]->NodeNum << ")\t");
102 DEBUG(Packet[i]->getInstr()->dump());
Sergei Larin3e590402012-09-04 14:49:56 +0000103 }
104#endif
105
106 // If packet is now full, reset the state so in the next cycle
107 // we start fresh.
108 if (Packet.size() >= InstrItins->SchedModel->IssueWidth) {
109 ResourcesModel->clearResources();
110 Packet.clear();
111 TotalPackets++;
Sergei Larin7ae51be2012-09-10 17:31:34 +0000112 startNewCycle = true;
Sergei Larin3e590402012-09-04 14:49:56 +0000113 }
Sergei Larin7ae51be2012-09-10 17:31:34 +0000114
115 return startNewCycle;
Sergei Larin3e590402012-09-04 14:49:56 +0000116}
117
/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
void VLIWMachineScheduler::schedule() {
  DEBUG(dbgs()
        << "********** MI Converging Scheduling VLIW BB#" << BB->getNumber()
        << " " << BB->getName()
        << " in_func " << BB->getParent()->getFunction()->getName()
        << " at loop depth " << MLI.getLoopDepth(BB)
        << " \n");

  // Build the scheduling DAG for this region with register-pressure tracking.
  buildDAGWithRegPressure();

  // To view Height/Depth correctly, they should be accessed at least once
  // (presumably because they are computed lazily; these debug-only loops
  // force the evaluation).
  DEBUG(unsigned maxH = 0;
        for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          if (SUnits[su].getHeight() > maxH)
            maxH = SUnits[su].getHeight();
        dbgs() << "Max Height " << maxH << "\n";);
  DEBUG(unsigned maxD = 0;
        for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          if (SUnits[su].getDepth() > maxD)
            maxD = SUnits[su].getDepth();
        dbgs() << "Max Depth " << maxD << "\n";);
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  // Release the DAG roots into the strategy's ready queues.
  initQueues();

  // Main loop: let the strategy pick one node at a time until the region
  // is fully scheduled.
  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    if (!checkSchedLimit())
      break;

    // Move the instruction to its new scheduled position.
    scheduleMI(SU, IsTopNode);

    // Notify the strategy and release newly-ready successors/predecessors.
    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  // Re-insert DBG_VALUE instructions next to their anchors.
  placeDebugValues();
}
160
/// Bind this strategy to the DAG for the current region and construct the
/// per-boundary hazard recognizers and VLIW resource models.
void ConvergingVLIWScheduler::initialize(ScheduleDAGMI *dag) {
  DAG = static_cast<VLIWMachineScheduler*>(dag);
  TRI = DAG->TRI;
  Top.DAG = DAG;
  Bot.DAG = DAG;

  // Initialize the HazardRecognizers.
  const TargetMachine &TM = DAG->MF.getTarget();
  const InstrItineraryData *Itin = TM.getInstrItineraryData();
  Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
  Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);

  // NOTE(review): these assignments overwrite any previous recognizer/model
  // without freeing it here — assumes SchedBoundary owns and releases them,
  // or that initialize() runs once per strategy object; confirm elsewhere.
  Top.ResourceModel = new VLIWResourceModel(TM);
  Bot.ResourceModel = new VLIWResourceModel(TM);

  assert((!llvm::ForceTopDown || !llvm::ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
}
179
180void ConvergingVLIWScheduler::releaseTopNode(SUnit *SU) {
181 if (SU->isScheduled)
182 return;
183
184 for (SUnit::succ_iterator I = SU->Preds.begin(), E = SU->Preds.end();
185 I != E; ++I) {
186 unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
187 unsigned MinLatency = I->getMinLatency();
188#ifndef NDEBUG
189 Top.MaxMinLatency = std::max(MinLatency, Top.MaxMinLatency);
190#endif
191 if (SU->TopReadyCycle < PredReadyCycle + MinLatency)
192 SU->TopReadyCycle = PredReadyCycle + MinLatency;
193 }
194 Top.releaseNode(SU, SU->TopReadyCycle);
195}
196
197void ConvergingVLIWScheduler::releaseBottomNode(SUnit *SU) {
198 if (SU->isScheduled)
199 return;
200
201 assert(SU->getInstr() && "Scheduled SUnit must have instr");
202
203 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
204 I != E; ++I) {
205 unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
206 unsigned MinLatency = I->getMinLatency();
207#ifndef NDEBUG
208 Bot.MaxMinLatency = std::max(MinLatency, Bot.MaxMinLatency);
209#endif
210 if (SU->BotReadyCycle < SuccReadyCycle + MinLatency)
211 SU->BotReadyCycle = SuccReadyCycle + MinLatency;
212 }
213 Bot.releaseNode(SU, SU->BotReadyCycle);
214}
215
216/// Does this SU have a hazard within the current instruction group.
217///
218/// The scheduler supports two modes of hazard recognition. The first is the
219/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
220/// supports highly complicated in-order reservation tables
221/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
222///
223/// The second is a streamlined mechanism that checks for hazards based on
224/// simple counters that the scheduler itself maintains. It explicitly checks
225/// for instruction dispatch limitations, including the number of micro-ops that
226/// can dispatch per cycle.
227///
228/// TODO: Also check whether the SU must start a new group.
229bool ConvergingVLIWScheduler::SchedBoundary::checkHazard(SUnit *SU) {
230 if (HazardRec->isEnabled())
231 return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;
232
233 if (IssueCount + DAG->getNumMicroOps(SU->getInstr()) > DAG->getIssueWidth())
234 return true;
235
236 return false;
237}
238
239void ConvergingVLIWScheduler::SchedBoundary::releaseNode(SUnit *SU,
240 unsigned ReadyCycle) {
241 if (ReadyCycle < MinReadyCycle)
242 MinReadyCycle = ReadyCycle;
243
244 // Check for interlocks first. For the purpose of other heuristics, an
245 // instruction that cannot issue appears as if it's not in the ReadyQueue.
246 if (ReadyCycle > CurrCycle || checkHazard(SU))
247
248 Pending.push(SU);
249 else
250 Available.push(SU);
251}
252
/// Move the boundary of scheduled code by one cycle.
void ConvergingVLIWScheduler::SchedBoundary::bumpCycle() {
  unsigned Width = DAG->getIssueWidth();
  // Carry over any micro-ops issued beyond this cycle's width.
  IssueCount = (IssueCount <= Width) ? 0 : IssueCount - Width;

  assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
  // Advance at least one cycle, or jump ahead to the earliest ready cycle.
  unsigned NextCycle = std::max(CurrCycle + 1, MinReadyCycle);

  if (!HazardRec->isEnabled()) {
    // Bypass HazardRec virtual calls.
    CurrCycle = NextCycle;
  } else {
    // Bypass getHazardType calls in case of long latency.
    for (; CurrCycle != NextCycle; ++CurrCycle) {
      if (isTop())
        HazardRec->AdvanceCycle();
      else
        HazardRec->RecedeCycle();
    }
  }
  // Pending nodes may have become ready; force a recheck on the next pick.
  CheckPending = true;

  DEBUG(dbgs() << "*** " << Available.getName() << " cycle "
        << CurrCycle << '\n');
}
278
/// Move the boundary of scheduled code by one SUnit.
void ConvergingVLIWScheduler::SchedBoundary::bumpNode(SUnit *SU) {
  bool startNewCycle = false;

  // Update the reservation table.
  if (HazardRec->isEnabled()) {
    if (!isTop() && SU->isCall) {
      // Calls are scheduled with their preceding instructions. For bottom-up
      // scheduling, clear the pipeline state before emitting.
      HazardRec->Reset();
    }
    HazardRec->EmitInstruction(SU);
  }

  // Update DFA model. reserveResources() returns true when SU forced a new
  // packet (or filled the current one), which we mirror by bumping the cycle.
  startNewCycle = ResourceModel->reserveResources(SU);

  // Check the instruction group dispatch limit.
  // TODO: Check if this SU must end a dispatch group.
  IssueCount += DAG->getNumMicroOps(SU->getInstr());
  if (startNewCycle) {
    DEBUG(dbgs() << "*** Max instrs at cycle " << CurrCycle << '\n');
    bumpCycle();
  }
  else
    DEBUG(dbgs() << "*** IssueCount " << IssueCount
          << " at cycle " << CurrCycle << '\n');
}
307
/// Release pending ready nodes in to the available queue. This makes them
/// visible to heuristics.
void ConvergingVLIWScheduler::SchedBoundary::releasePending() {
  // If the available queue is empty, it is safe to reset MinReadyCycle.
  if (Available.empty())
    MinReadyCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
    SUnit *SU = *(Pending.begin()+i);
    unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;

    // Keep MinReadyCycle current while scanning.
    if (ReadyCycle < MinReadyCycle)
      MinReadyCycle = ReadyCycle;

    // Not ready in the current cycle: leave it in Pending.
    if (ReadyCycle > CurrCycle)
      continue;

    // Ready, but issuing it now would conflict with this cycle's group.
    if (checkHazard(SU))
      continue;

    Available.push(SU);
    Pending.remove(Pending.begin()+i);
    // remove() compacts the queue, so re-examine the element now at index i
    // and shrink the scan bound.
    --i; --e;
  }
  CheckPending = false;
}
336
337/// Remove SU from the ready set for this boundary.
338void ConvergingVLIWScheduler::SchedBoundary::removeReady(SUnit *SU) {
339 if (Available.isInQueue(SU))
340 Available.remove(Available.find(SU));
341 else {
342 assert(Pending.isInQueue(SU) && "bad ready count");
343 Pending.remove(Pending.find(SU));
344 }
345}
346
/// If this queue only has one ready candidate, return it. As a side effect,
/// advance the cycle until at least one node is ready. If multiple instructions
/// are ready, return NULL.
SUnit *ConvergingVLIWScheduler::SchedBoundary::pickOnlyChoice() {
  if (CheckPending)
    releasePending();

  // Advance cycles until something becomes available. The assert guards
  // against spinning forever on a hazard that can never clear.
  for (unsigned i = 0; Available.empty(); ++i) {
    assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) &&
           "permanent hazard"); (void)i;
    bumpCycle();
    releasePending();
  }
  // Exactly one candidate: the caller may schedule it without heuristics.
  if (Available.size() == 1)
    return *Available.begin();
  return NULL;
}
364
#ifndef NDEBUG
// Debug-only helper: print one trace line for a scheduling candidate —
// the label, the queue name, the pressure element (when valid), then the
// SUnit itself.
void ConvergingVLIWScheduler::traceCandidate(const char *Label,
                                             const ReadyQueue &Q,
                                             SUnit *SU, PressureElement P) {
  dbgs() << Label << " " << Q.getName() << " ";
  if (P.isValid())
    dbgs() << TRI->getRegPressureSetName(P.PSetID) << ":" << P.UnitIncrease
           << " ";
  else
    dbgs() << " ";
  SU->dump(DAG);
}
#endif
378
Sergei Larin7ae51be2012-09-10 17:31:34 +0000379/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
380/// of SU, return it, otherwise return null.
381static SUnit *getSingleUnscheduledPred(SUnit *SU) {
382 SUnit *OnlyAvailablePred = 0;
383 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
384 I != E; ++I) {
385 SUnit &Pred = *I->getSUnit();
386 if (!Pred.isScheduled) {
387 // We found an available, but not scheduled, predecessor. If it's the
388 // only one we have found, keep track of it... otherwise give up.
389 if (OnlyAvailablePred && OnlyAvailablePred != &Pred)
390 return 0;
391 OnlyAvailablePred = &Pred;
392 }
393 }
394 return OnlyAvailablePred;
395}
396
397/// getSingleUnscheduledSucc - If there is exactly one unscheduled successor
398/// of SU, return it, otherwise return null.
399static SUnit *getSingleUnscheduledSucc(SUnit *SU) {
400 SUnit *OnlyAvailableSucc = 0;
401 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
402 I != E; ++I) {
403 SUnit &Succ = *I->getSUnit();
404 if (!Succ.isScheduled) {
405 // We found an available, but not scheduled, successor. If it's the
406 // only one we have found, keep track of it... otherwise give up.
407 if (OnlyAvailableSucc && OnlyAvailableSucc != &Succ)
408 return 0;
409 OnlyAvailableSucc = &Succ;
410 }
411 }
412 return OnlyAvailableSucc;
413}
414
// Constants used to denote relative importance of
// heuristic components for cost computation. The values are empirically
// tuned weights; only their relative magnitudes matter.
static const unsigned PriorityOne = 200;   // Forced-high scheduling priority.
static const unsigned PriorityTwo = 100;
static const unsigned PriorityThree = 50;  // Register-pressure penalty weight.
static const unsigned PriorityFour = 20;
static const unsigned ScaleTwo = 10;       // Height/depth and blocked-node weight.
static const unsigned FactorOne = 2;       // Shift when resources are available.
423
/// Single point to compute overall scheduling cost.
/// TODO: More heuristics will be used soon.
/// \param Q         the ready queue (top or bottom) the candidate came from.
/// \param SU        candidate node; may be null or already scheduled.
/// \param Candidate unused in this function; kept for interface symmetry.
/// \param Delta     register pressure change if SU were scheduled now.
/// \param verbose   when true, print the computed total under DEBUG.
/// \return heuristic score; higher is better.
int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
                                            SchedCandidate &Candidate,
                                            RegPressureDelta &Delta,
                                            bool verbose) {
  // Initial trivial priority.
  int ResCount = 1;

  // Do not waste time on a node that is already scheduled.
  if (!SU || SU->isScheduled)
    return ResCount;

  // Forced priority is high.
  if (SU->isScheduleHigh)
    ResCount += PriorityOne;

  // Critical path first: top-down uses height, bottom-up uses depth.
  if (Q.getID() == TopQID) {
    ResCount += (SU->getHeight() * ScaleTwo);

    // If resources are available for it, multiply the
    // chance of scheduling.
    if (Top.ResourceModel->isResourceAvailable(SU))
      ResCount <<= FactorOne;
  } else {
    ResCount += (SU->getDepth() * ScaleTwo);

    // If resources are available for it, multiply the
    // chance of scheduling.
    if (Bot.ResourceModel->isResourceAvailable(SU))
      ResCount <<= FactorOne;
  }

  unsigned NumNodesBlocking = 0;
  if (Q.getID() == TopQID) {
    // How many SUs does it block from scheduling?
    // Look at all of the successors of this node.
    // Count the number of nodes that
    // this node is the sole unscheduled node for.
    for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I)
      if (getSingleUnscheduledPred(I->getSUnit()) == SU)
        ++NumNodesBlocking;
  } else {
    // How many unscheduled predecessors block this node?
    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I)
      if (getSingleUnscheduledSucc(I->getSUnit()) == SU)
        ++NumNodesBlocking;
  }
  ResCount += (NumNodesBlocking * ScaleTwo);

  // Factor in reg pressure as a heuristic.
  ResCount -= (Delta.Excess.UnitIncrease*PriorityThree);
  ResCount -= (Delta.CriticalMax.UnitIncrease*PriorityThree);

  DEBUG(if (verbose) dbgs() << " Total(" << ResCount << ")");

  return ResCount;
}
485
/// Pick the best candidate from the top queue.
///
/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
/// DAG building. To adjust for the current scheduling location we need to
/// maintain the number of vreg uses remaining to be top-scheduled.
///
/// \return how the winning candidate (left in \p Candidate) was chosen;
///         NoCand when \p Q is empty.
ConvergingVLIWScheduler::CandResult ConvergingVLIWScheduler::
pickNodeFromQueue(ReadyQueue &Q, const RegPressureTracker &RPTracker,
                  SchedCandidate &Candidate) {
  DEBUG(Q.dump());

  // getMaxPressureDelta temporarily modifies the tracker.
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  // BestSU remains NULL if no top candidates beat the best existing candidate.
  CandResult FoundCandidate = NoCand;
  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
    // Pressure change if this instruction were scheduled at the boundary.
    RegPressureDelta RPDelta;
    TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta,
                                    DAG->getRegionCriticalPSets(),
                                    DAG->getRegPressure().MaxSetPressure);

    int CurrentCost = SchedulingCost(Q, *I, Candidate, RPDelta, false);

    // Initialize the candidate if needed.
    if (!Candidate.SU) {
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      Candidate.SCost = CurrentCost;
      FoundCandidate = NodeOrder;
      continue;
    }

    // Best cost.
    if (CurrentCost > Candidate.SCost) {
      DEBUG(traceCandidate("CCAND", Q, *I));
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      Candidate.SCost = CurrentCost;
      FoundCandidate = BestCost;
      continue;
    }

    // Fall through to original instruction order.
    // Only consider node order if Candidate was chosen from this Q.
    if (FoundCandidate == NoCand)
      continue;
  }
  return FoundCandidate;
}
535
/// Pick the best candidate node from either the top or bottom queue,
/// setting \p IsTopNode to indicate which boundary the result came from.
SUnit *ConvergingVLIWScheduler::pickNodeBidrectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    return SU;
  }
  SchedCandidate BotCand;
  // Prefer bottom scheduling when heuristics are silent.
  CandResult BotResult = pickNodeFromQueue(Bot.Available,
                                           DAG->getBotRPTracker(), BotCand);
  assert(BotResult != NoCand && "failed to find the first candidate");

  // If either Q has a single candidate that provides the least increase in
  // Excess pressure, we can immediately schedule from that Q.
  //
  // RegionCriticalPSets summarizes the pressure within the scheduled region and
  // affects picking from either Q. If scheduling in one direction must
  // increase pressure for one of the excess PSets, then schedule in that
  // direction first to provide more freedom in the other direction.
  if (BotResult == SingleExcess || BotResult == SingleCritical) {
    IsTopNode = false;
    return BotCand.SU;
  }
  // Check if the top Q has a better candidate.
  SchedCandidate TopCand;
  CandResult TopResult = pickNodeFromQueue(Top.Available,
                                           DAG->getTopRPTracker(), TopCand);
  assert(TopResult != NoCand && "failed to find the first candidate");

  if (TopResult == SingleExcess || TopResult == SingleCritical) {
    IsTopNode = true;
    return TopCand.SU;
  }
  // If either Q has a single candidate that minimizes pressure above the
  // original region's pressure pick it.
  if (BotResult == SingleMax) {
    IsTopNode = false;
    return BotCand.SU;
  }
  if (TopResult == SingleMax) {
    IsTopNode = true;
    return TopCand.SU;
  }
  // Prefer the side with the strictly better heuristic score.
  if (TopCand.SCost > BotCand.SCost) {
    IsTopNode = true;
    return TopCand.SU;
  }
  // Otherwise prefer the bottom candidate in node order.
  IsTopNode = false;
  return BotCand.SU;
}
593
/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
/// Returns NULL when the region is fully scheduled; otherwise sets
/// \p IsTopNode for the caller.
SUnit *ConvergingVLIWScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return NULL;
  }
  SUnit *SU;
  if (llvm::ForceTopDown) {
    // -misched-topdown: consult only the top boundary.
    SU = Top.pickOnlyChoice();
    if (!SU) {
      SchedCandidate TopCand;
      CandResult TopResult =
        pickNodeFromQueue(Top.Available, DAG->getTopRPTracker(), TopCand);
      assert(TopResult != NoCand && "failed to find the first candidate");
      (void)TopResult;
      SU = TopCand.SU;
    }
    IsTopNode = true;
  } else if (llvm::ForceBottomUp) {
    // -misched-bottomup: consult only the bottom boundary.
    SU = Bot.pickOnlyChoice();
    if (!SU) {
      SchedCandidate BotCand;
      CandResult BotResult =
        pickNodeFromQueue(Bot.Available, DAG->getBotRPTracker(), BotCand);
      assert(BotResult != NoCand && "failed to find the first candidate");
      (void)BotResult;
      SU = BotCand.SU;
    }
    IsTopNode = false;
  } else {
    // Default: converge from both ends.
    SU = pickNodeBidrectional(IsTopNode);
  }
  // A node may be ready at both boundaries; remove it from each queue that
  // holds it.
  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom")
        << " Scheduling Instruction in cycle "
        << (IsTopNode ? Top.CurrCycle : Bot.CurrCycle) << '\n';
        SU->dump(DAG));
  return SU;
}
638
639/// Update the scheduler's state after scheduling a node. This is the same node
Sergei Larin7ae51be2012-09-10 17:31:34 +0000640/// that was just returned by pickNode(). However, VLIWMachineScheduler needs
641/// to update it's state based on the current cycle before MachineSchedStrategy
642/// does.
Sergei Larin3e590402012-09-04 14:49:56 +0000643void ConvergingVLIWScheduler::schedNode(SUnit *SU, bool IsTopNode) {
644 if (IsTopNode) {
645 SU->TopReadyCycle = Top.CurrCycle;
646 Top.bumpNode(SU);
Sergei Larin7ae51be2012-09-10 17:31:34 +0000647 } else {
Sergei Larin3e590402012-09-04 14:49:56 +0000648 SU->BotReadyCycle = Bot.CurrCycle;
649 Bot.bumpNode(SU);
650 }
651}
652