//===- HexagonMachineScheduler.cpp - MI Scheduler for Hexagon -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "misched"

#include "HexagonMachineScheduler.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/IR/Function.h"

using namespace llvm;

/// Platform-specific modifications to the DAG.
void VLIWMachineScheduler::postprocessDAG() {
  SUnit* LastSequentialCall = NULL;
  // Currently we only catch the situation when a compare gets scheduled
  // before the preceding call.
  for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
    // Remember the call.
    if (SUnits[su].getInstr()->isCall())
      LastSequentialCall = &(SUnits[su]);
    // Look for a compare that defines a predicate.
    else if (SUnits[su].getInstr()->isCompare() && LastSequentialCall)
      SUnits[su].addPred(SDep(LastSequentialCall, SDep::Barrier));
  }
}

/// Check if scheduling of this SU is possible
/// in the current packet.
/// It is _not_ precise (stateful); it is more like
/// another heuristic. Many corner cases are handled
/// empirically.
bool VLIWResourceModel::isResourceAvailable(SUnit *SU) {
  if (!SU || !SU->getInstr())
    return false;

  // First see if the pipeline could receive this instruction
  // in the current cycle.
  switch (SU->getInstr()->getOpcode()) {
  default:
    if (!ResourcesModel->canReserveResources(SU->getInstr()))
      return false;
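  // The pseudo-operations below are not checked against the DFA; they are
  // assumed to always fit in the current packet.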
  case TargetOpcode::EXTRACT_SUBREG:
  case TargetOpcode::INSERT_SUBREG:
  case TargetOpcode::SUBREG_TO_REG:
  case TargetOpcode::REG_SEQUENCE:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::COPY:
  case TargetOpcode::INLINEASM:
    break;
  }

  // Now check that there are no dependencies on instructions that are
  // already in the packet.
  for (unsigned i = 0, e = Packet.size(); i != e; ++i) {
    if (Packet[i]->Succs.size() == 0)
      continue;
    for (SUnit::const_succ_iterator I = Packet[i]->Succs.begin(),
         E = Packet[i]->Succs.end(); I != E; ++I) {
      // Since we do not add pseudos to packets, we might as well
      // ignore order dependencies.
      if (I->isCtrl())
        continue;

      if (I->getSUnit() == SU)
        return false;
    }
  }
  return true;
}

/// Keep track of available resources.
bool VLIWResourceModel::reserveResources(SUnit *SU) {
  bool startNewCycle = false;
  // Artificially reset state.
  if (!SU) {
    ResourcesModel->clearResources();
    Packet.clear();
    TotalPackets++;
    return false;
  }
  // If this SU does not fit in the packet,
  // start a new one.
  if (!isResourceAvailable(SU)) {
    ResourcesModel->clearResources();
    Packet.clear();
    TotalPackets++;
    startNewCycle = true;
  }

  switch (SU->getInstr()->getOpcode()) {
  default:
    ResourcesModel->reserveResources(SU->getInstr());
    break;
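  // The pseudo-operations below do not occupy any machine resources, so
  // nothing is reserved for them.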
  case TargetOpcode::EXTRACT_SUBREG:
  case TargetOpcode::INSERT_SUBREG:
  case TargetOpcode::SUBREG_TO_REG:
  case TargetOpcode::REG_SEQUENCE:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::PROLOG_LABEL:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::COPY:
  case TargetOpcode::INLINEASM:
    break;
  }
  Packet.push_back(SU);

#ifndef NDEBUG
  DEBUG(dbgs() << "Packet[" << TotalPackets << "]:\n");
  for (unsigned i = 0, e = Packet.size(); i != e; ++i) {
    DEBUG(dbgs() << "\t[" << i << "] SU(");
    DEBUG(dbgs() << Packet[i]->NodeNum << ")\t");
    DEBUG(Packet[i]->getInstr()->dump());
  }
#endif

  // If the packet is now full, reset the state so that in the next cycle
  // we start fresh.
  if (Packet.size() >= SchedModel->getIssueWidth()) {
    ResourcesModel->clearResources();
    Packet.clear();
    TotalPackets++;
    startNewCycle = true;
  }

  return startNewCycle;
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
void VLIWMachineScheduler::schedule() {
  DEBUG(dbgs()
        << "********** MI Converging Scheduling VLIW BB#" << BB->getNumber()
        << " " << BB->getName()
        << " in_func " << BB->getParent()->getFunction()->getName()
        << " at loop depth " << MLI.getLoopDepth(BB)
        << " \n");

  buildDAGWithRegPressure();

  // Postprocess the DAG to add platform-specific artificial dependencies.
  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  SchedImpl->initialize(this);

  // To view Height/Depth correctly, they should be accessed at least once.
  //
  // FIXME: SUnit::dumpAll always recomputes depth and height now. The max
  // depth/height could be computed directly from the roots and leaves.
  DEBUG(unsigned maxH = 0;
        for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          if (SUnits[su].getHeight() > maxH)
            maxH = SUnits[su].getHeight();
        dbgs() << "Max Height " << maxH << "\n";);
  DEBUG(unsigned maxD = 0;
        for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          if (SUnits[su].getDepth() > maxD)
            maxD = SUnits[su].getDepth();
        dbgs() << "Max Depth " << maxD << "\n";);
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) {
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    updateQueues(SU, IsTopNode);

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();
}

void ConvergingVLIWScheduler::initialize(ScheduleDAGMI *dag) {
  DAG = static_cast<VLIWMachineScheduler*>(dag);
  SchedModel = DAG->getSchedModel();

  Top.init(DAG, SchedModel);
  Bot.init(DAG, SchedModel);

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
  // or are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = DAG->getSchedModel()->getInstrItineraries();
  const TargetMachine &TM = DAG->MF.getTarget();
  delete Top.HazardRec;
  delete Bot.HazardRec;
  Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
  Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);

  delete Top.ResourceModel;
  delete Bot.ResourceModel;
  Top.ResourceModel = new VLIWResourceModel(TM, DAG->getSchedModel());
  Bot.ResourceModel = new VLIWResourceModel(TM, DAG->getSchedModel());

  assert((!llvm::ForceTopDown || !llvm::ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
}

void ConvergingVLIWScheduler::releaseTopNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

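  // A node is ready only after the latencies of all of its predecessors
  // have elapsed; record the latest such ready cycle.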
  for (SUnit::succ_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
    unsigned MinLatency = I->getLatency();
#ifndef NDEBUG
    Top.MaxMinLatency = std::max(MinLatency, Top.MaxMinLatency);
#endif
    if (SU->TopReadyCycle < PredReadyCycle + MinLatency)
      SU->TopReadyCycle = PredReadyCycle + MinLatency;
  }
  Top.releaseNode(SU, SU->TopReadyCycle);
}

void ConvergingVLIWScheduler::releaseBottomNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  assert(SU->getInstr() && "Scheduled SUnit must have instr");

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
    unsigned MinLatency = I->getLatency();
#ifndef NDEBUG
    Bot.MaxMinLatency = std::max(MinLatency, Bot.MaxMinLatency);
#endif
    if (SU->BotReadyCycle < SuccReadyCycle + MinLatency)
      SU->BotReadyCycle = SuccReadyCycle + MinLatency;
  }
  Bot.releaseNode(SU, SU->BotReadyCycle);
}

/// Does this SU have a hazard within the current instruction group.
///
/// The scheduler supports two modes of hazard recognition. The first is the
/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
/// supports highly complicated in-order reservation tables
/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
///
/// The second is a streamlined mechanism that checks for hazards based on
/// simple counters that the scheduler itself maintains. It explicitly checks
/// for instruction dispatch limitations, including the number of micro-ops that
/// can dispatch per cycle.
///
/// TODO: Also check whether the SU must start a new group.
bool ConvergingVLIWScheduler::VLIWSchedBoundary::checkHazard(SUnit *SU) {
  if (HazardRec->isEnabled())
    return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;

  unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
  if (IssueCount + uops > SchedModel->getIssueWidth())
    return true;

  return false;
}

void ConvergingVLIWScheduler::VLIWSchedBoundary::releaseNode(SUnit *SU,
                                                     unsigned ReadyCycle) {
  if (ReadyCycle < MinReadyCycle)
    MinReadyCycle = ReadyCycle;

  // Check for interlocks first. For the purpose of other heuristics, an
  // instruction that cannot issue appears as if it's not in the ReadyQueue.
  if (ReadyCycle > CurrCycle || checkHazard(SU))
    Pending.push(SU);
  else
    Available.push(SU);
}

/// Move the boundary of scheduled code by one cycle.
void ConvergingVLIWScheduler::VLIWSchedBoundary::bumpCycle() {
  unsigned Width = SchedModel->getIssueWidth();
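  // Micro-ops issued beyond this cycle's width carry over into the next cycle.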
  IssueCount = (IssueCount <= Width) ? 0 : IssueCount - Width;

  assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
  unsigned NextCycle = std::max(CurrCycle + 1, MinReadyCycle);

  if (!HazardRec->isEnabled()) {
    // Bypass HazardRec virtual calls.
    CurrCycle = NextCycle;
  } else {
    // Bypass getHazardType calls in case of long latency.
    for (; CurrCycle != NextCycle; ++CurrCycle) {
      if (isTop())
        HazardRec->AdvanceCycle();
      else
        HazardRec->RecedeCycle();
    }
  }
  CheckPending = true;

  DEBUG(dbgs() << "*** " << Available.getName() << " cycle "
               << CurrCycle << '\n');
}

/// Move the boundary of scheduled code by one SUnit.
void ConvergingVLIWScheduler::VLIWSchedBoundary::bumpNode(SUnit *SU) {
  bool startNewCycle = false;

  // Update the reservation table.
  if (HazardRec->isEnabled()) {
    if (!isTop() && SU->isCall) {
      // Calls are scheduled with their preceding instructions. For bottom-up
      // scheduling, clear the pipeline state before emitting.
      HazardRec->Reset();
    }
    HazardRec->EmitInstruction(SU);
  }

  // Update DFA model.
  startNewCycle = ResourceModel->reserveResources(SU);

  // Check the instruction group dispatch limit.
  // TODO: Check if this SU must end a dispatch group.
  IssueCount += SchedModel->getNumMicroOps(SU->getInstr());
  if (startNewCycle) {
    DEBUG(dbgs() << "*** Max instrs at cycle " << CurrCycle << '\n');
    bumpCycle();
  }
  else
    DEBUG(dbgs() << "*** IssueCount " << IssueCount
                 << " at cycle " << CurrCycle << '\n');
}

/// Release pending ready nodes into the available queue. This makes them
/// visible to heuristics.
void ConvergingVLIWScheduler::VLIWSchedBoundary::releasePending() {
  // If the available queue is empty, it is safe to reset MinReadyCycle.
  if (Available.empty())
    MinReadyCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
    SUnit *SU = *(Pending.begin()+i);
    unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;

    if (ReadyCycle < MinReadyCycle)
      MinReadyCycle = ReadyCycle;

    if (ReadyCycle > CurrCycle)
      continue;

    if (checkHazard(SU))
      continue;

    Available.push(SU);
    Pending.remove(Pending.begin()+i);
    --i; --e;
  }
  CheckPending = false;
}

/// Remove SU from the ready set for this boundary.
void ConvergingVLIWScheduler::VLIWSchedBoundary::removeReady(SUnit *SU) {
  if (Available.isInQueue(SU))
    Available.remove(Available.find(SU));
  else {
    assert(Pending.isInQueue(SU) && "bad ready count");
    Pending.remove(Pending.find(SU));
  }
}

/// If this queue only has one ready candidate, return it. As a side effect,
/// advance the cycle until at least one node is ready. If multiple instructions
/// are ready, return NULL.
SUnit *ConvergingVLIWScheduler::VLIWSchedBoundary::pickOnlyChoice() {
  if (CheckPending)
    releasePending();

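  // Nothing is ready yet: keep starting new packets and advancing the cycle
  // until at least one node becomes available. The assert below guards
  // against a permanent hazard.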
  for (unsigned i = 0; Available.empty(); ++i) {
    assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) &&
           "permanent hazard"); (void)i;
    ResourceModel->reserveResources(0);
    bumpCycle();
    releasePending();
  }
  if (Available.size() == 1)
    return *Available.begin();
  return NULL;
}

#ifndef NDEBUG
void ConvergingVLIWScheduler::traceCandidate(const char *Label,
                                             const ReadyQueue &Q,
                                             SUnit *SU, PressureChange P) {
  dbgs() << Label << " " << Q.getName() << " ";
  if (P.isValid())
    dbgs() << DAG->TRI->getRegPressureSetName(P.getPSet()) << ":"
           << P.getUnitInc() << " ";
  else
    dbgs() << " ";
  SU->dump(DAG);
}
#endif

/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
/// of SU, return it, otherwise return null.
static SUnit *getSingleUnscheduledPred(SUnit *SU) {
  SUnit *OnlyAvailablePred = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    SUnit &Pred = *I->getSUnit();
    if (!Pred.isScheduled) {
      // We found an available, but not scheduled, predecessor. If it's the
      // only one we have found, keep track of it... otherwise give up.
      if (OnlyAvailablePred && OnlyAvailablePred != &Pred)
        return 0;
      OnlyAvailablePred = &Pred;
    }
  }
  return OnlyAvailablePred;
}

/// getSingleUnscheduledSucc - If there is exactly one unscheduled successor
/// of SU, return it, otherwise return null.
static SUnit *getSingleUnscheduledSucc(SUnit *SU) {
  SUnit *OnlyAvailableSucc = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    SUnit &Succ = *I->getSUnit();
    if (!Succ.isScheduled) {
      // We found an available, but not scheduled, successor. If it's the
      // only one we have found, keep track of it... otherwise give up.
      if (OnlyAvailableSucc && OnlyAvailableSucc != &Succ)
        return 0;
      OnlyAvailableSucc = &Succ;
    }
  }
  return OnlyAvailableSucc;
}

// Constants used to denote relative importance of
// heuristic components for cost computation.
static const unsigned PriorityOne = 200;
static const unsigned PriorityTwo = 50;
static const unsigned ScaleTwo = 10;
static const unsigned FactorOne = 2;

/// Single point to compute overall scheduling cost.
/// TODO: More heuristics will be used soon.
int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
                                            SchedCandidate &Candidate,
                                            RegPressureDelta &Delta,
                                            bool verbose) {
  // Initial trivial priority.
  int ResCount = 1;

  // Do not waste time on a node that is already scheduled.
  if (!SU || SU->isScheduled)
    return ResCount;

  // Forced priority is high.
  if (SU->isScheduleHigh)
    ResCount += PriorityOne;

  // Critical path first.
  if (Q.getID() == TopQID) {
    ResCount += (SU->getHeight() * ScaleTwo);

    // If resources are available for it, multiply the
    // chance of scheduling.
    if (Top.ResourceModel->isResourceAvailable(SU))
      ResCount <<= FactorOne;
  } else {
    ResCount += (SU->getDepth() * ScaleTwo);

    // If resources are available for it, multiply the
    // chance of scheduling.
    if (Bot.ResourceModel->isResourceAvailable(SU))
      ResCount <<= FactorOne;
  }

  unsigned NumNodesBlocking = 0;
  if (Q.getID() == TopQID) {
    // How many SUs does it block from scheduling?
    // Look at all of the successors of this node and count
    // those for which this node is the sole unscheduled predecessor.
    for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I)
      if (getSingleUnscheduledPred(I->getSUnit()) == SU)
        ++NumNodesBlocking;
  } else {
    // Count the predecessors for which this node is the sole unscheduled
    // successor, i.e. the nodes it blocks from bottom-up scheduling.
    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I)
      if (getSingleUnscheduledSucc(I->getSUnit()) == SU)
        ++NumNodesBlocking;
  }
  ResCount += (NumNodesBlocking * ScaleTwo);

  // Factor in reg pressure as a heuristic.
  ResCount -= (Delta.Excess.getUnitInc()*PriorityTwo);
  ResCount -= (Delta.CriticalMax.getUnitInc()*PriorityTwo);

  DEBUG(if (verbose) dbgs() << " Total(" << ResCount << ")");

  return ResCount;
}

/// Pick the best candidate from the given ready queue.
///
/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
/// DAG building. To adjust for the current scheduling location we need to
/// maintain the number of vreg uses remaining to be top-scheduled.
ConvergingVLIWScheduler::CandResult ConvergingVLIWScheduler::
pickNodeFromQueue(ReadyQueue &Q, const RegPressureTracker &RPTracker,
                  SchedCandidate &Candidate) {
  DEBUG(Q.dump());

  // getMaxPressureDelta temporarily modifies the tracker.
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  // FoundCandidate remains NoCand if no candidate beats the existing best.
  CandResult FoundCandidate = NoCand;
  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
    RegPressureDelta RPDelta;
    TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta,
                                    DAG->getRegionCriticalPSets(),
                                    DAG->getRegPressure().MaxSetPressure);

    int CurrentCost = SchedulingCost(Q, *I, Candidate, RPDelta, false);

    // Initialize the candidate if needed.
    if (!Candidate.SU) {
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      Candidate.SCost = CurrentCost;
      FoundCandidate = NodeOrder;
      continue;
    }

    // Best cost.
    if (CurrentCost > Candidate.SCost) {
      DEBUG(traceCandidate("CCAND", Q, *I));
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      Candidate.SCost = CurrentCost;
      FoundCandidate = BestCost;
      continue;
    }

    // Fall through to original instruction order.
    // Only consider node order if Candidate was chosen from this Q.
    if (FoundCandidate == NoCand)
      continue;
  }
  return FoundCandidate;
}

/// Pick the best candidate node from either the top or bottom queue.
SUnit *ConvergingVLIWScheduler::pickNodeBidrectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    return SU;
  }
  SchedCandidate BotCand;
  // Prefer bottom scheduling when heuristics are silent.
  CandResult BotResult = pickNodeFromQueue(Bot.Available,
                                           DAG->getBotRPTracker(), BotCand);
  assert(BotResult != NoCand && "failed to find the first candidate");

  // If either Q has a single candidate that provides the least increase in
  // Excess pressure, we can immediately schedule from that Q.
  //
  // RegionCriticalPSets summarizes the pressure within the scheduled region and
  // affects picking from either Q. If scheduling in one direction must
  // increase pressure for one of the excess PSets, then schedule in that
  // direction first to provide more freedom in the other direction.
  if (BotResult == SingleExcess || BotResult == SingleCritical) {
    IsTopNode = false;
    return BotCand.SU;
  }
  // Check if the top Q has a better candidate.
  SchedCandidate TopCand;
  CandResult TopResult = pickNodeFromQueue(Top.Available,
                                           DAG->getTopRPTracker(), TopCand);
  assert(TopResult != NoCand && "failed to find the first candidate");

  if (TopResult == SingleExcess || TopResult == SingleCritical) {
    IsTopNode = true;
    return TopCand.SU;
  }
  // If either Q has a single candidate that minimizes pressure above the
  // original region's pressure, pick it.
  if (BotResult == SingleMax) {
    IsTopNode = false;
    return BotCand.SU;
  }
  if (TopResult == SingleMax) {
    IsTopNode = true;
    return TopCand.SU;
  }
  if (TopCand.SCost > BotCand.SCost) {
    IsTopNode = true;
    return TopCand.SU;
  }
  // Otherwise prefer the bottom candidate in node order.
  IsTopNode = false;
  return BotCand.SU;
}

/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
SUnit *ConvergingVLIWScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return NULL;
  }
  SUnit *SU;
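  // A forced scheduling direction (ForceTopDown / ForceBottomUp, set by the
  // -misched-topdown / -misched-bottomup options) takes precedence; otherwise
  // pick from whichever boundary currently looks better.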
  if (llvm::ForceTopDown) {
    SU = Top.pickOnlyChoice();
    if (!SU) {
      SchedCandidate TopCand;
      CandResult TopResult =
        pickNodeFromQueue(Top.Available, DAG->getTopRPTracker(), TopCand);
      assert(TopResult != NoCand && "failed to find the first candidate");
      (void)TopResult;
      SU = TopCand.SU;
    }
    IsTopNode = true;
  } else if (llvm::ForceBottomUp) {
    SU = Bot.pickOnlyChoice();
    if (!SU) {
      SchedCandidate BotCand;
      CandResult BotResult =
        pickNodeFromQueue(Bot.Available, DAG->getBotRPTracker(), BotCand);
      assert(BotResult != NoCand && "failed to find the first candidate");
      (void)BotResult;
      SU = BotCand.SU;
    }
    IsTopNode = false;
  } else {
    SU = pickNodeBidrectional(IsTopNode);
  }
  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom")
        << " Scheduling Instruction in cycle "
        << (IsTopNode ? Top.CurrCycle : Bot.CurrCycle) << '\n';
        SU->dump(DAG));
  return SU;
}

/// Update the scheduler's state after scheduling a node. This is the same node
/// that was just returned by pickNode(). However, VLIWMachineScheduler needs
/// to update its state based on the current cycle before MachineSchedStrategy
/// does.
void ConvergingVLIWScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  if (IsTopNode) {
    SU->TopReadyCycle = Top.CurrCycle;
    Top.bumpNode(SU);
  } else {
    SU->BotReadyCycle = Bot.CurrCycle;
    Bot.bumpNode(SU);
  }
694}