//===- HexagonMachineScheduler.cpp - MI Scheduler for Hexagon -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "misched"

#include "HexagonMachineScheduler.h"

#include <queue>

using namespace llvm;

/// Platform-specific modifications to the DAG.
void VLIWMachineScheduler::postprocessDAG() {
  SUnit* LastSequentialCall = NULL;
  // Currently we only catch the situation where a compare gets scheduled
  // before the preceding call.
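  // The barrier edge added below makes the compare a successor of that call,
  // which keeps the compare after the call in the final schedule.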
  for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
    // Remember the call.
    if (SUnits[su].getInstr()->isCall())
      LastSequentialCall = &(SUnits[su]);
    // Look for a compare that defines a predicate.
    else if (SUnits[su].getInstr()->isCompare() && LastSequentialCall)
      SUnits[su].addPred(SDep(LastSequentialCall, SDep::Barrier));
  }
}

/// Check if scheduling of this SU is possible
/// in the current packet.
/// This is _not_ a precise (stateful) check; it is more like
/// another heuristic, and many corner cases have been tuned
/// empirically.
bool VLIWResourceModel::isResourceAvailable(SUnit *SU) {
  if (!SU || !SU->getInstr())
    return false;

  // First see if the pipeline could receive this instruction
  // in the current cycle.
  switch (SU->getInstr()->getOpcode()) {
  default:
    if (!ResourcesModel->canReserveResources(SU->getInstr()))
      return false;
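  // The pseudo-operations listed below never occupy machine resources,
  // so they are always considered available; the default case above
  // falls through to the same break once the DFA check passes.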
  case TargetOpcode::EXTRACT_SUBREG:
  case TargetOpcode::INSERT_SUBREG:
  case TargetOpcode::SUBREG_TO_REG:
  case TargetOpcode::REG_SEQUENCE:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::COPY:
  case TargetOpcode::INLINEASM:
    break;
  }

  // Now make sure that this SU does not depend on any instruction
  // already placed in the packet.
  for (unsigned i = 0, e = Packet.size(); i != e; ++i) {
    if (Packet[i]->Succs.size() == 0)
      continue;
    for (SUnit::const_succ_iterator I = Packet[i]->Succs.begin(),
         E = Packet[i]->Succs.end(); I != E; ++I) {
      // Since we do not add pseudos to packets, might as well
      // ignore order dependencies.
      if (I->isCtrl())
        continue;

      if (I->getSUnit() == SU)
        return false;
    }
  }
  return true;
}

/// Keep track of available resources.
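/// Returns true when the scheduler should advance to a new cycle: either
/// this SU did not fit and a new packet had to be opened for it, or adding
/// it filled the current packet to the issue width.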
bool VLIWResourceModel::reserveResources(SUnit *SU) {
  bool startNewCycle = false;
  // Artificially reset state.
  if (!SU) {
    ResourcesModel->clearResources();
    Packet.clear();
    TotalPackets++;
    return false;
  }
  // If this SU does not fit in the packet,
  // start a new one.
  if (!isResourceAvailable(SU)) {
    ResourcesModel->clearResources();
    Packet.clear();
    TotalPackets++;
    startNewCycle = true;
  }

  switch (SU->getInstr()->getOpcode()) {
  default:
    ResourcesModel->reserveResources(SU->getInstr());
    break;
  case TargetOpcode::EXTRACT_SUBREG:
  case TargetOpcode::INSERT_SUBREG:
  case TargetOpcode::SUBREG_TO_REG:
  case TargetOpcode::REG_SEQUENCE:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::PROLOG_LABEL:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::COPY:
  case TargetOpcode::INLINEASM:
    break;
  }
  Packet.push_back(SU);

#ifndef NDEBUG
  DEBUG(dbgs() << "Packet[" << TotalPackets << "]:\n");
  for (unsigned i = 0, e = Packet.size(); i != e; ++i) {
    DEBUG(dbgs() << "\t[" << i << "] SU(");
    DEBUG(dbgs() << Packet[i]->NodeNum << ")\t");
    DEBUG(Packet[i]->getInstr()->dump());
  }
#endif

  // If packet is now full, reset the state so in the next cycle
  // we start fresh.
  if (Packet.size() >= SchedModel->getIssueWidth()) {
    ResourcesModel->clearResources();
    Packet.clear();
    TotalPackets++;
    startNewCycle = true;
  }

  return startNewCycle;
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
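/// The overall flow is: build the DAG with register pressure information,
/// add the target-specific artificial edges, initialize the top and bottom
/// scheduling boundaries, and then pick nodes until both zones meet.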
void VLIWMachineScheduler::schedule() {
  DEBUG(dbgs()
        << "********** MI Converging Scheduling VLIW BB#" << BB->getNumber()
        << " " << BB->getName()
        << " in_func " << BB->getParent()->getFunction()->getName()
        << " at loop depth " << MLI.getLoopDepth(BB)
        << " \n");

  buildDAGWithRegPressure();

  // Postprocess the DAG to add platform specific artificial dependencies.
  postprocessDAG();

  // To view Height/Depth correctly, they should be accessed at least once.
  DEBUG(unsigned maxH = 0;
        for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          if (SUnits[su].getHeight() > maxH)
            maxH = SUnits[su].getHeight();
        dbgs() << "Max Height " << maxH << "\n";);
  DEBUG(unsigned maxD = 0;
        for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          if (SUnits[su].getDepth() > maxD)
            maxD = SUnits[su].getDepth();
        dbgs() << "Max Depth " << maxD << "\n";);
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  initQueues();

  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();
}

void ConvergingVLIWScheduler::initialize(ScheduleDAGMI *dag) {
  DAG = static_cast<VLIWMachineScheduler*>(dag);
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;
  Top.init(DAG, SchedModel);
  Bot.init(DAG, SchedModel);

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
  // are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = DAG->getSchedModel()->getInstrItineraries();
  const TargetMachine &TM = DAG->MF.getTarget();
  Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
  Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);

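  // Each boundary keeps its own resource model, so packet formation is
  // tracked independently for the top-down and bottom-up zones.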
  Top.ResourceModel = new VLIWResourceModel(TM, DAG->getSchedModel());
  Bot.ResourceModel = new VLIWResourceModel(TM, DAG->getSchedModel());

  assert((!llvm::ForceTopDown || !llvm::ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
}

void ConvergingVLIWScheduler::releaseTopNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

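  // The node becomes ready at the top no earlier than every predecessor's
  // ready cycle plus the latency of the connecting edge.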
  for (SUnit::succ_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
    unsigned MinLatency = I->getMinLatency();
#ifndef NDEBUG
    Top.MaxMinLatency = std::max(MinLatency, Top.MaxMinLatency);
#endif
    if (SU->TopReadyCycle < PredReadyCycle + MinLatency)
      SU->TopReadyCycle = PredReadyCycle + MinLatency;
  }
  Top.releaseNode(SU, SU->TopReadyCycle);
}

void ConvergingVLIWScheduler::releaseBottomNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  assert(SU->getInstr() && "Scheduled SUnit must have instr");

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
    unsigned MinLatency = I->getMinLatency();
#ifndef NDEBUG
    Bot.MaxMinLatency = std::max(MinLatency, Bot.MaxMinLatency);
#endif
    if (SU->BotReadyCycle < SuccReadyCycle + MinLatency)
      SU->BotReadyCycle = SuccReadyCycle + MinLatency;
  }
  Bot.releaseNode(SU, SU->BotReadyCycle);
}

/// Does this SU have a hazard within the current instruction group.
///
/// The scheduler supports two modes of hazard recognition. The first is the
/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
/// supports highly complicated in-order reservation tables
/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
///
/// The second is a streamlined mechanism that checks for hazards based on
/// simple counters that the scheduler itself maintains. It explicitly checks
/// for instruction dispatch limitations, including the number of micro-ops that
/// can dispatch per cycle.
///
/// TODO: Also check whether the SU must start a new group.
bool ConvergingVLIWScheduler::SchedBoundary::checkHazard(SUnit *SU) {
  if (HazardRec->isEnabled())
    return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;

  unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
  if (IssueCount + uops > SchedModel->getIssueWidth())
    return true;

  return false;
}

void ConvergingVLIWScheduler::SchedBoundary::releaseNode(SUnit *SU,
                                                         unsigned ReadyCycle) {
  if (ReadyCycle < MinReadyCycle)
    MinReadyCycle = ReadyCycle;

  // Check for interlocks first. For the purpose of other heuristics, an
  // instruction that cannot issue appears as if it's not in the ReadyQueue.
  if (ReadyCycle > CurrCycle || checkHazard(SU))
    Pending.push(SU);
  else
    Available.push(SU);
}

/// Move the boundary of scheduled code by one cycle.
void ConvergingVLIWScheduler::SchedBoundary::bumpCycle() {
  unsigned Width = SchedModel->getIssueWidth();
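  // Carry any instructions issued beyond one packet's width over into the
  // next cycle; otherwise the count simply resets to zero.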
  IssueCount = (IssueCount <= Width) ? 0 : IssueCount - Width;

  assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
  unsigned NextCycle = std::max(CurrCycle + 1, MinReadyCycle);

  if (!HazardRec->isEnabled()) {
    // Bypass HazardRec virtual calls.
    CurrCycle = NextCycle;
  } else {
    // Bypass getHazardType calls in case of long latency.
    for (; CurrCycle != NextCycle; ++CurrCycle) {
      if (isTop())
        HazardRec->AdvanceCycle();
      else
        HazardRec->RecedeCycle();
    }
  }
  CheckPending = true;

  DEBUG(dbgs() << "*** " << Available.getName() << " cycle "
        << CurrCycle << '\n');
}

/// Move the boundary of scheduled code by one SUnit.
void ConvergingVLIWScheduler::SchedBoundary::bumpNode(SUnit *SU) {
  bool startNewCycle = false;

  // Update the reservation table.
  if (HazardRec->isEnabled()) {
    if (!isTop() && SU->isCall) {
      // Calls are scheduled with their preceding instructions. For bottom-up
      // scheduling, clear the pipeline state before emitting.
      HazardRec->Reset();
    }
    HazardRec->EmitInstruction(SU);
  }

  // Update DFA model.
  startNewCycle = ResourceModel->reserveResources(SU);

  // Check the instruction group dispatch limit.
  // TODO: Check if this SU must end a dispatch group.
  IssueCount += SchedModel->getNumMicroOps(SU->getInstr());
  if (startNewCycle) {
    DEBUG(dbgs() << "*** Max instrs at cycle " << CurrCycle << '\n');
    bumpCycle();
  }
  else
    DEBUG(dbgs() << "*** IssueCount " << IssueCount
          << " at cycle " << CurrCycle << '\n');
}

/// Release pending ready nodes into the available queue. This makes them
/// visible to heuristics.
void ConvergingVLIWScheduler::SchedBoundary::releasePending() {
  // If the available queue is empty, it is safe to reset MinReadyCycle.
  if (Available.empty())
    MinReadyCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
    SUnit *SU = *(Pending.begin()+i);
    unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;

    if (ReadyCycle < MinReadyCycle)
      MinReadyCycle = ReadyCycle;

    if (ReadyCycle > CurrCycle)
      continue;

    if (checkHazard(SU))
      continue;

    Available.push(SU);
    Pending.remove(Pending.begin()+i);
    --i; --e;
  }
  CheckPending = false;
}

/// Remove SU from the ready set for this boundary.
void ConvergingVLIWScheduler::SchedBoundary::removeReady(SUnit *SU) {
  if (Available.isInQueue(SU))
    Available.remove(Available.find(SU));
  else {
    assert(Pending.isInQueue(SU) && "bad ready count");
    Pending.remove(Pending.find(SU));
  }
}

/// If this queue only has one ready candidate, return it. As a side effect,
/// advance the cycle until at least one node is ready. If multiple instructions
/// are ready, return NULL.
SUnit *ConvergingVLIWScheduler::SchedBoundary::pickOnlyChoice() {
  if (CheckPending)
    releasePending();

  for (unsigned i = 0; Available.empty(); ++i) {
    assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) &&
           "permanent hazard"); (void)i;
    ResourceModel->reserveResources(0);
    bumpCycle();
    releasePending();
  }
  if (Available.size() == 1)
    return *Available.begin();
  return NULL;
}

#ifndef NDEBUG
void ConvergingVLIWScheduler::traceCandidate(const char *Label,
                                             const ReadyQueue &Q,
                                             SUnit *SU, PressureElement P) {
  dbgs() << Label << " " << Q.getName() << " ";
  if (P.isValid())
    dbgs() << TRI->getRegPressureSetName(P.PSetID) << ":" << P.UnitIncrease
           << " ";
  else
    dbgs() << " ";
  SU->dump(DAG);
}
#endif

/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
/// of SU, return it, otherwise return null.
static SUnit *getSingleUnscheduledPred(SUnit *SU) {
  SUnit *OnlyAvailablePred = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    SUnit &Pred = *I->getSUnit();
    if (!Pred.isScheduled) {
      // We found an available, but not scheduled, predecessor. If it's the
      // only one we have found, keep track of it... otherwise give up.
      if (OnlyAvailablePred && OnlyAvailablePred != &Pred)
        return 0;
      OnlyAvailablePred = &Pred;
    }
  }
  return OnlyAvailablePred;
}

/// getSingleUnscheduledSucc - If there is exactly one unscheduled successor
/// of SU, return it, otherwise return null.
static SUnit *getSingleUnscheduledSucc(SUnit *SU) {
  SUnit *OnlyAvailableSucc = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    SUnit &Succ = *I->getSUnit();
    if (!Succ.isScheduled) {
      // We found an available, but not scheduled, successor. If it's the
      // only one we have found, keep track of it... otherwise give up.
      if (OnlyAvailableSucc && OnlyAvailableSucc != &Succ)
        return 0;
      OnlyAvailableSucc = &Succ;
    }
  }
  return OnlyAvailableSucc;
}

// Constants used to denote relative importance of
// heuristic components for cost computation.
static const unsigned PriorityOne = 200;
static const unsigned PriorityTwo = 100;
static const unsigned PriorityThree = 50;
static const unsigned PriorityFour = 20;
static const unsigned ScaleTwo = 10;
static const unsigned FactorOne = 2;

/// Single point to compute overall scheduling cost.
/// TODO: More heuristics will be used soon.
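///
/// In outline, the code below computes:
///   cost  = 1
///         + PriorityOne          if the node is marked isScheduleHigh
///         + ScaleTwo * height    (top queue; depth for the bottom queue)
///   cost <<= FactorOne           if the DFA can fit SU in the current packet
///   cost += ScaleTwo * (number of nodes for which SU is the sole
///                       unscheduled predecessor or successor)
///   cost -= PriorityThree * (excess and critical-max pressure increases)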
int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
                                            SchedCandidate &Candidate,
                                            RegPressureDelta &Delta,
                                            bool verbose) {
  // Initial trivial priority.
  int ResCount = 1;

  // Do not waste time on a node that is already scheduled.
  if (!SU || SU->isScheduled)
    return ResCount;

  // Forced priority is high.
  if (SU->isScheduleHigh)
    ResCount += PriorityOne;

  // Critical path first.
  if (Q.getID() == TopQID) {
    ResCount += (SU->getHeight() * ScaleTwo);

    // If resources are available for it, multiply the
    // chance of scheduling.
    if (Top.ResourceModel->isResourceAvailable(SU))
      ResCount <<= FactorOne;
  } else {
    ResCount += (SU->getDepth() * ScaleTwo);

    // If resources are available for it, multiply the
    // chance of scheduling.
    if (Bot.ResourceModel->isResourceAvailable(SU))
      ResCount <<= FactorOne;
  }

  unsigned NumNodesBlocking = 0;
  if (Q.getID() == TopQID) {
    // How many SUs does it block from scheduling?
    // Look at all of the successors of this node and count
    // the number of nodes for which this node is the sole
    // unscheduled predecessor.
    for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I)
      if (getSingleUnscheduledPred(I->getSUnit()) == SU)
        ++NumNodesBlocking;
  } else {
    // How many unscheduled predecessors block this node?
    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I)
      if (getSingleUnscheduledSucc(I->getSUnit()) == SU)
        ++NumNodesBlocking;
  }
  ResCount += (NumNodesBlocking * ScaleTwo);

  // Factor in reg pressure as a heuristic.
  ResCount -= (Delta.Excess.UnitIncrease*PriorityThree);
  ResCount -= (Delta.CriticalMax.UnitIncrease*PriorityThree);

  DEBUG(if (verbose) dbgs() << " Total(" << ResCount << ")");

  return ResCount;
}

/// Pick the best candidate from the top queue.
///
/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
/// DAG building. To adjust for the current scheduling location we need to
/// maintain the number of vreg uses remaining to be top-scheduled.
ConvergingVLIWScheduler::CandResult ConvergingVLIWScheduler::
pickNodeFromQueue(ReadyQueue &Q, const RegPressureTracker &RPTracker,
                  SchedCandidate &Candidate) {
  DEBUG(Q.dump());

  // getMaxPressureDelta temporarily modifies the tracker.
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  // Candidate.SU remains NULL if no candidate from this queue beats the best
  // existing candidate.
  CandResult FoundCandidate = NoCand;
  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
    RegPressureDelta RPDelta;
    TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta,
                                    DAG->getRegionCriticalPSets(),
                                    DAG->getRegPressure().MaxSetPressure);

    int CurrentCost = SchedulingCost(Q, *I, Candidate, RPDelta, false);

    // Initialize the candidate if needed.
    if (!Candidate.SU) {
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      Candidate.SCost = CurrentCost;
      FoundCandidate = NodeOrder;
      continue;
    }

    // Best cost.
    if (CurrentCost > Candidate.SCost) {
      DEBUG(traceCandidate("CCAND", Q, *I));
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      Candidate.SCost = CurrentCost;
      FoundCandidate = BestCost;
      continue;
    }

    // Fall through to original instruction order.
    // Only consider node order if Candidate was chosen from this Q.
    if (FoundCandidate == NoCand)
      continue;
  }
  return FoundCandidate;
}

/// Pick the best candidate node from either the top or bottom queue.
SUnit *ConvergingVLIWScheduler::pickNodeBidrectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    return SU;
  }
  SchedCandidate BotCand;
  // Prefer bottom scheduling when heuristics are silent.
  CandResult BotResult = pickNodeFromQueue(Bot.Available,
                                           DAG->getBotRPTracker(), BotCand);
  assert(BotResult != NoCand && "failed to find the first candidate");

  // If either Q has a single candidate that provides the least increase in
  // Excess pressure, we can immediately schedule from that Q.
  //
  // RegionCriticalPSets summarizes the pressure within the scheduled region and
  // affects picking from either Q. If scheduling in one direction must
  // increase pressure for one of the excess PSets, then schedule in that
  // direction first to provide more freedom in the other direction.
  if (BotResult == SingleExcess || BotResult == SingleCritical) {
    IsTopNode = false;
    return BotCand.SU;
  }
  // Check if the top Q has a better candidate.
  SchedCandidate TopCand;
  CandResult TopResult = pickNodeFromQueue(Top.Available,
                                           DAG->getTopRPTracker(), TopCand);
  assert(TopResult != NoCand && "failed to find the first candidate");

  if (TopResult == SingleExcess || TopResult == SingleCritical) {
    IsTopNode = true;
    return TopCand.SU;
  }
  // If either Q has a single candidate that minimizes pressure above the
  // original region's pressure, pick it.
  if (BotResult == SingleMax) {
    IsTopNode = false;
    return BotCand.SU;
  }
  if (TopResult == SingleMax) {
    IsTopNode = true;
    return TopCand.SU;
  }
  if (TopCand.SCost > BotCand.SCost) {
    IsTopNode = true;
    return TopCand.SU;
  }
  // Otherwise prefer the bottom candidate in node order.
  IsTopNode = false;
  return BotCand.SU;
}

/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
SUnit *ConvergingVLIWScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return NULL;
  }
  SUnit *SU;
  if (llvm::ForceTopDown) {
    SU = Top.pickOnlyChoice();
    if (!SU) {
      SchedCandidate TopCand;
      CandResult TopResult =
        pickNodeFromQueue(Top.Available, DAG->getTopRPTracker(), TopCand);
      assert(TopResult != NoCand && "failed to find the first candidate");
      (void)TopResult;
      SU = TopCand.SU;
    }
    IsTopNode = true;
  } else if (llvm::ForceBottomUp) {
    SU = Bot.pickOnlyChoice();
    if (!SU) {
      SchedCandidate BotCand;
      CandResult BotResult =
        pickNodeFromQueue(Bot.Available, DAG->getBotRPTracker(), BotCand);
      assert(BotResult != NoCand && "failed to find the first candidate");
      (void)BotResult;
      SU = BotCand.SU;
    }
    IsTopNode = false;
  } else {
    SU = pickNodeBidrectional(IsTopNode);
  }
  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom")
        << " Scheduling Instruction in cycle "
        << (IsTopNode ? Top.CurrCycle : Bot.CurrCycle) << '\n';
        SU->dump(DAG));
  return SU;
}

/// Update the scheduler's state after scheduling a node. This is the same node
/// that was just returned by pickNode(). However, VLIWMachineScheduler needs
/// to update its state based on the current cycle before MachineSchedStrategy
/// does.
void ConvergingVLIWScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  if (IsTopNode) {
    SU->TopReadyCycle = Top.CurrCycle;
    Top.bumpNode(SU);
  } else {
    SU->BotReadyCycle = Bot.CurrCycle;
    Bot.bumpNode(SU);
  }
}
681