//===- HexagonMachineScheduler.cpp - MI Scheduler for Hexagon -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#include "HexagonMachineScheduler.h"
#include "HexagonInstrInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/DFAPacketizer.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iomanip>
#include <limits>
#include <memory>
#include <sstream>

using namespace llvm;

#define DEBUG_TYPE "machine-scheduler"

static cl::opt<bool> IgnoreBBRegPressure("ignore-bb-reg-pressure",
    cl::Hidden, cl::ZeroOrMore, cl::init(false));

static cl::opt<bool> UseNewerCandidate("use-newer-candidate",
    cl::Hidden, cl::ZeroOrMore, cl::init(true));

static cl::opt<unsigned> SchedDebugVerboseLevel("misched-verbose-level",
    cl::Hidden, cl::ZeroOrMore, cl::init(1));

// Check if the scheduler should penalize instructions that are available too
// early due to a zero-latency dependence.
static cl::opt<bool> CheckEarlyAvail("check-early-avail", cl::Hidden,
    cl::ZeroOrMore, cl::init(true));

// This value is used to determine if a register class is a high pressure set.
// We compute the maximum number of registers needed and divide it by the total
// available. Then, we compare the result to this value.
static cl::opt<float> RPThreshold("hexagon-reg-pressure", cl::Hidden,
    cl::init(0.75f), cl::desc("High register pressure threshold."));

/// Return true if there is a dependence between SUd and SUu.
static bool hasDependence(const SUnit *SUd, const SUnit *SUu,
                          const HexagonInstrInfo &QII) {
  if (SUd->Succs.size() == 0)
    return false;

  // Enable .cur formation.
  if (QII.mayBeCurLoad(*SUd->getInstr()))
    return false;

  if (QII.canExecuteInBundle(*SUd->getInstr(), *SUu->getInstr()))
    return false;

  for (const auto &S : SUd->Succs) {
    // Since we do not add pseudos to packets, might as well
    // ignore order dependencies.
    if (S.isCtrl())
      continue;

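    // Only a successor edge to SUu with non-zero latency counts as a blocking
    // dependence here; a zero-latency dependence still allows the two
    // instructions to share a packet.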
    if (S.getSUnit() == SUu && S.getLatency() > 0)
      return true;
  }
  return false;
}

/// Check if scheduling of this SU is possible
/// in the current packet.
/// It is _not_ precise (stateful); it is more like
/// another heuristic. Many corner cases have been
/// worked out empirically.
bool VLIWResourceModel::isResourceAvailable(SUnit *SU, bool IsTop) {
  if (!SU || !SU->getInstr())
    return false;

  // First see if the pipeline could receive this instruction
  // in the current cycle.
  switch (SU->getInstr()->getOpcode()) {
  default:
    if (!ResourcesModel->canReserveResources(*SU->getInstr()))
      return false;
    break;
  case TargetOpcode::EXTRACT_SUBREG:
  case TargetOpcode::INSERT_SUBREG:
  case TargetOpcode::SUBREG_TO_REG:
  case TargetOpcode::REG_SEQUENCE:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::COPY:
  case TargetOpcode::INLINEASM:
  case TargetOpcode::INLINEASM_BR:
    break;
  }

  MachineBasicBlock *MBB = SU->getInstr()->getParent();
  auto &QST = MBB->getParent()->getSubtarget<HexagonSubtarget>();
  const auto &QII = *QST.getInstrInfo();

  // Now check that there are no dependencies between this instruction and the
  // instructions already in the packet.
  if (IsTop) {
    for (unsigned i = 0, e = Packet.size(); i != e; ++i)
      if (hasDependence(Packet[i], SU, QII))
        return false;
  } else {
    for (unsigned i = 0, e = Packet.size(); i != e; ++i)
      if (hasDependence(SU, Packet[i], QII))
        return false;
  }
  return true;
}

/// Keep track of available resources.
bool VLIWResourceModel::reserveResources(SUnit *SU, bool IsTop) {
  bool startNewCycle = false;
  // Artificially reset state.
  if (!SU) {
    ResourcesModel->clearResources();
    Packet.clear();
    TotalPackets++;
    return false;
  }
  // If this SU does not fit in the packet or the packet is now full,
  // start a new one.
  if (!isResourceAvailable(SU, IsTop) ||
      Packet.size() >= SchedModel->getIssueWidth()) {
    ResourcesModel->clearResources();
    Packet.clear();
    TotalPackets++;
    startNewCycle = true;
  }

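  // Only real instructions reserve DFA resources; pseudo and bookkeeping
  // opcodes are added to the packet below without consuming functional units.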
  switch (SU->getInstr()->getOpcode()) {
  default:
    ResourcesModel->reserveResources(*SU->getInstr());
    break;
  case TargetOpcode::EXTRACT_SUBREG:
  case TargetOpcode::INSERT_SUBREG:
  case TargetOpcode::SUBREG_TO_REG:
  case TargetOpcode::REG_SEQUENCE:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::CFI_INSTRUCTION:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::COPY:
  case TargetOpcode::INLINEASM:
  case TargetOpcode::INLINEASM_BR:
    break;
  }
  Packet.push_back(SU);

#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "Packet[" << TotalPackets << "]:\n");
  for (unsigned i = 0, e = Packet.size(); i != e; ++i) {
    LLVM_DEBUG(dbgs() << "\t[" << i << "] SU(");
    LLVM_DEBUG(dbgs() << Packet[i]->NodeNum << ")\t");
    LLVM_DEBUG(Packet[i]->getInstr()->dump());
  }
#endif

  return startNewCycle;
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
void VLIWMachineScheduler::schedule() {
  LLVM_DEBUG(dbgs() << "********** MI Converging Scheduling VLIW "
                    << printMBBReference(*BB) << " " << BB->getName()
                    << " in_func " << BB->getParent()->getName()
                    << " at loop depth " << MLI->getLoopDepth(BB) << " \n");

  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  // Postprocess the DAG to add platform-specific artificial dependencies.
  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  SchedImpl->initialize(this);

  LLVM_DEBUG(unsigned maxH = 0;
             for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
               if (SUnits[su].getHeight() > maxH)
                 maxH = SUnits[su].getHeight();
             dbgs() << "Max Height " << maxH << "\n";);
  LLVM_DEBUG(unsigned maxD = 0;
             for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
               if (SUnits[su].getDepth() > maxD)
                 maxD = SUnits[su].getDepth();
             dbgs() << "Max Depth " << maxD << "\n";);
  LLVM_DEBUG(dump());

  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    LLVM_DEBUG(
        dbgs() << "** VLIWMachineScheduler::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  LLVM_DEBUG({
    dbgs() << "*** Final schedule for "
           << printMBBReference(*begin()->getParent()) << " ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}

void ConvergingVLIWScheduler::initialize(ScheduleDAGMI *dag) {
  DAG = static_cast<VLIWMachineScheduler*>(dag);
  SchedModel = DAG->getSchedModel();

  Top.init(DAG, SchedModel);
  Bot.init(DAG, SchedModel);

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
  // are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = DAG->getSchedModel()->getInstrItineraries();
  const TargetSubtargetInfo &STI = DAG->MF.getSubtarget();
  const TargetInstrInfo *TII = STI.getInstrInfo();
  delete Top.HazardRec;
  delete Bot.HazardRec;
  Top.HazardRec = TII->CreateTargetMIHazardRecognizer(Itin, DAG);
  Bot.HazardRec = TII->CreateTargetMIHazardRecognizer(Itin, DAG);

  delete Top.ResourceModel;
  delete Bot.ResourceModel;
  Top.ResourceModel = new VLIWResourceModel(STI, DAG->getSchedModel());
  Bot.ResourceModel = new VLIWResourceModel(STI, DAG->getSchedModel());

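  // Mark the register pressure sets whose maximum pressure in this region
  // exceeds RPThreshold of the set limit; pressureChange() consults this to
  // penalize candidates that would further increase pressure in those sets.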
  const std::vector<unsigned> &MaxPressure =
      DAG->getRegPressure().MaxSetPressure;
  HighPressureSets.assign(MaxPressure.size(), 0);
  for (unsigned i = 0, e = MaxPressure.size(); i < e; ++i) {
    unsigned Limit = DAG->getRegClassInfo()->getRegPressureSetLimit(i);
    HighPressureSets[i] =
        ((float) MaxPressure[i] > ((float) Limit * RPThreshold));
  }

  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
}

void ConvergingVLIWScheduler::releaseTopNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  for (const SDep &PI : SU->Preds) {
    unsigned PredReadyCycle = PI.getSUnit()->TopReadyCycle;
    unsigned MinLatency = PI.getLatency();
#ifndef NDEBUG
    Top.MaxMinLatency = std::max(MinLatency, Top.MaxMinLatency);
#endif
    if (SU->TopReadyCycle < PredReadyCycle + MinLatency)
      SU->TopReadyCycle = PredReadyCycle + MinLatency;
  }
  Top.releaseNode(SU, SU->TopReadyCycle);
}

void ConvergingVLIWScheduler::releaseBottomNode(SUnit *SU) {
  if (SU->isScheduled)
    return;

  assert(SU->getInstr() && "Scheduled SUnit must have instr");

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
    unsigned MinLatency = I->getLatency();
#ifndef NDEBUG
    Bot.MaxMinLatency = std::max(MinLatency, Bot.MaxMinLatency);
#endif
    if (SU->BotReadyCycle < SuccReadyCycle + MinLatency)
      SU->BotReadyCycle = SuccReadyCycle + MinLatency;
  }
  Bot.releaseNode(SU, SU->BotReadyCycle);
}

/// Does this SU have a hazard within the current instruction group.
///
/// The scheduler supports two modes of hazard recognition. The first is the
/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
/// supports highly complicated in-order reservation tables
/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
///
/// The second is a streamlined mechanism that checks for hazards based on
/// simple counters that the scheduler itself maintains. It explicitly checks
/// for instruction dispatch limitations, including the number of micro-ops that
/// can dispatch per cycle.
///
/// TODO: Also check whether the SU must start a new group.
bool ConvergingVLIWScheduler::VLIWSchedBoundary::checkHazard(SUnit *SU) {
  if (HazardRec->isEnabled())
    return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;

  unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
  if (IssueCount + uops > SchedModel->getIssueWidth())
    return true;

  return false;
}

void ConvergingVLIWScheduler::VLIWSchedBoundary::releaseNode(SUnit *SU,
                                                             unsigned ReadyCycle) {
  if (ReadyCycle < MinReadyCycle)
    MinReadyCycle = ReadyCycle;

  // Check for interlocks first. For the purpose of other heuristics, an
  // instruction that cannot issue appears as if it's not in the ReadyQueue.
  if (ReadyCycle > CurrCycle || checkHazard(SU))
    Pending.push(SU);
  else
    Available.push(SU);
}

/// Move the boundary of scheduled code by one cycle.
void ConvergingVLIWScheduler::VLIWSchedBoundary::bumpCycle() {
  unsigned Width = SchedModel->getIssueWidth();
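  // Carry any issue count beyond the issue width over into the next cycle.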
  IssueCount = (IssueCount <= Width) ? 0 : IssueCount - Width;

  assert(MinReadyCycle < std::numeric_limits<unsigned>::max() &&
         "MinReadyCycle uninitialized");
  unsigned NextCycle = std::max(CurrCycle + 1, MinReadyCycle);

  if (!HazardRec->isEnabled()) {
    // Bypass HazardRec virtual calls.
    CurrCycle = NextCycle;
  } else {
    // Bypass getHazardType calls in case of long latency.
    for (; CurrCycle != NextCycle; ++CurrCycle) {
      if (isTop())
        HazardRec->AdvanceCycle();
      else
        HazardRec->RecedeCycle();
    }
  }
  CheckPending = true;

  LLVM_DEBUG(dbgs() << "*** Next cycle " << Available.getName() << " cycle "
                    << CurrCycle << '\n');
}

/// Move the boundary of scheduled code by one SUnit.
void ConvergingVLIWScheduler::VLIWSchedBoundary::bumpNode(SUnit *SU) {
  bool startNewCycle = false;

  // Update the reservation table.
  if (HazardRec->isEnabled()) {
    if (!isTop() && SU->isCall) {
      // Calls are scheduled with their preceding instructions. For bottom-up
      // scheduling, clear the pipeline state before emitting.
      HazardRec->Reset();
    }
    HazardRec->EmitInstruction(SU);
  }

  // Update DFA model.
  startNewCycle = ResourceModel->reserveResources(SU, isTop());

  // Check the instruction group dispatch limit.
  // TODO: Check if this SU must end a dispatch group.
  IssueCount += SchedModel->getNumMicroOps(SU->getInstr());
  if (startNewCycle) {
    LLVM_DEBUG(dbgs() << "*** Max instrs at cycle " << CurrCycle << '\n');
    bumpCycle();
  } else
    LLVM_DEBUG(dbgs() << "*** IssueCount " << IssueCount << " at cycle "
                      << CurrCycle << '\n');
}

/// Release pending ready nodes into the available queue. This makes them
/// visible to heuristics.
void ConvergingVLIWScheduler::VLIWSchedBoundary::releasePending() {
  // If the available queue is empty, it is safe to reset MinReadyCycle.
  if (Available.empty())
    MinReadyCycle = std::numeric_limits<unsigned>::max();

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
    SUnit *SU = *(Pending.begin() + i);
    unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;

    if (ReadyCycle < MinReadyCycle)
      MinReadyCycle = ReadyCycle;

    if (ReadyCycle > CurrCycle)
      continue;

    if (checkHazard(SU))
      continue;

    Available.push(SU);
    Pending.remove(Pending.begin() + i);
    --i; --e;
  }
  CheckPending = false;
}

/// Remove SU from the ready set for this boundary.
void ConvergingVLIWScheduler::VLIWSchedBoundary::removeReady(SUnit *SU) {
  if (Available.isInQueue(SU))
    Available.remove(Available.find(SU));
  else {
    assert(Pending.isInQueue(SU) && "bad ready count");
    Pending.remove(Pending.find(SU));
  }
}

/// If this queue only has one ready candidate, return it. As a side effect,
/// advance the cycle until at least one node is ready. If multiple instructions
/// are ready, return NULL.
SUnit *ConvergingVLIWScheduler::VLIWSchedBoundary::pickOnlyChoice() {
  if (CheckPending)
    releasePending();

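  // Advance the cycle as long as nothing can issue: either no SUnit is
  // available, or the single available SUnit cannot be packetized now (no
  // resources, or it still has weak edges left) while others remain pending.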
  auto AdvanceCycle = [this]() {
    if (Available.empty())
      return true;
    if (Available.size() == 1 && Pending.size() > 0)
      return !ResourceModel->isResourceAvailable(*Available.begin(), isTop()) ||
             getWeakLeft(*Available.begin(), isTop()) != 0;
    return false;
  };
  for (unsigned i = 0; AdvanceCycle(); ++i) {
    assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) &&
           "permanent hazard"); (void)i;
    ResourceModel->reserveResources(nullptr, isTop());
    bumpCycle();
    releasePending();
  }
  if (Available.size() == 1)
    return *Available.begin();
  return nullptr;
}

#ifndef NDEBUG
void ConvergingVLIWScheduler::traceCandidate(const char *Label,
    const ReadyQueue &Q, SUnit *SU, int Cost, PressureChange P) {
  dbgs() << Label << " " << Q.getName() << " ";
  if (P.isValid())
    dbgs() << DAG->TRI->getRegPressureSetName(P.getPSet()) << ":"
           << P.getUnitInc() << " ";
  else
    dbgs() << " ";
  dbgs() << "cost(" << Cost << ")\t";
  DAG->dumpNode(*SU);
}

// Very detailed queue dump, to be used with higher verbosity levels.
void ConvergingVLIWScheduler::readyQueueVerboseDump(
    const RegPressureTracker &RPTracker, SchedCandidate &Candidate,
    ReadyQueue &Q) {
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker &>(RPTracker);

  dbgs() << ">>> " << Q.getName() << "\n";
  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
    RegPressureDelta RPDelta;
    TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta,
                                    DAG->getRegionCriticalPSets(),
                                    DAG->getRegPressure().MaxSetPressure);
    std::stringstream dbgstr;
    dbgstr << "SU(" << std::setw(3) << (*I)->NodeNum << ")";
    dbgs() << dbgstr.str();
    SchedulingCost(Q, *I, Candidate, RPDelta, true);
    dbgs() << "\t";
    (*I)->getInstr()->dump();
  }
  dbgs() << "\n";
}
#endif

/// isSingleUnscheduledPred - If SU2 is the only unscheduled predecessor
/// of SU, return true (we may have duplicates).
static inline bool isSingleUnscheduledPred(SUnit *SU, SUnit *SU2) {
  if (SU->NumPredsLeft == 0)
    return false;

  for (auto &Pred : SU->Preds) {
    // We found an available, but not scheduled, predecessor.
    if (!Pred.getSUnit()->isScheduled && (Pred.getSUnit() != SU2))
      return false;
  }

  return true;
}

/// isSingleUnscheduledSucc - If SU2 is the only unscheduled successor
/// of SU, return true (we may have duplicates).
static inline bool isSingleUnscheduledSucc(SUnit *SU, SUnit *SU2) {
  if (SU->NumSuccsLeft == 0)
    return false;

  for (auto &Succ : SU->Succs) {
    // We found an available, but not scheduled, successor.
    if (!Succ.getSUnit()->isScheduled && (Succ.getSUnit() != SU2))
      return false;
  }
  return true;
}

/// Check if the instruction changes the register pressure of a register in the
/// high pressure set. The function returns a negative value if the pressure
/// decreases and a positive value if the pressure increases. If the instruction
/// doesn't use a high pressure register or doesn't change the register
/// pressure, then return 0.
int ConvergingVLIWScheduler::pressureChange(const SUnit *SU, bool isBotUp) {
  PressureDiff &PD = DAG->getPressureDiff(SU);
  for (auto &P : PD) {
    if (!P.isValid())
      continue;
    // The pressure differences are computed bottom-up, so the comparison for
    // an increase is positive in the bottom direction, but negative in the
    // top-down direction.
    if (HighPressureSets[P.getPSet()])
      return (isBotUp ? P.getUnitInc() : -P.getUnitInc());
  }
  return 0;
}

// Constants used to denote relative importance of
// heuristic components for cost computation.
static const unsigned PriorityOne = 200;
static const unsigned PriorityTwo = 50;
static const unsigned PriorityThree = 75;
static const unsigned ScaleTwo = 10;

/// Single point to compute overall scheduling cost.
/// TODO: More heuristics will be used soon.
int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
                                            SchedCandidate &Candidate,
                                            RegPressureDelta &Delta,
                                            bool verbose) {
  // Initial trivial priority.
  int ResCount = 1;

  // Do not waste time on a node that is already scheduled.
  if (!SU || SU->isScheduled)
    return ResCount;

  LLVM_DEBUG(if (verbose) dbgs()
             << ((Q.getID() == TopQID) ? "(top|" : "(bot|"));
  // Forced priority is high.
  if (SU->isScheduleHigh) {
    ResCount += PriorityOne;
    LLVM_DEBUG(dbgs() << "H|");
  }

  unsigned IsAvailableAmt = 0;
  // Critical path first.
  if (Q.getID() == TopQID) {
    if (Top.isLatencyBound(SU)) {
      LLVM_DEBUG(if (verbose) dbgs() << "LB|");
      ResCount += (SU->getHeight() * ScaleTwo);
    }

    LLVM_DEBUG(if (verbose) {
      std::stringstream dbgstr;
      dbgstr << "h" << std::setw(3) << SU->getHeight() << "|";
      dbgs() << dbgstr.str();
    });

    // If resources are available for it, increase the
    // chance of scheduling.
    if (Top.ResourceModel->isResourceAvailable(SU, true)) {
      IsAvailableAmt = (PriorityTwo + PriorityThree);
      ResCount += IsAvailableAmt;
      LLVM_DEBUG(if (verbose) dbgs() << "A|");
    } else
      LLVM_DEBUG(if (verbose) dbgs() << " |");
  } else {
    if (Bot.isLatencyBound(SU)) {
      LLVM_DEBUG(if (verbose) dbgs() << "LB|");
      ResCount += (SU->getDepth() * ScaleTwo);
    }

    LLVM_DEBUG(if (verbose) {
      std::stringstream dbgstr;
      dbgstr << "d" << std::setw(3) << SU->getDepth() << "|";
      dbgs() << dbgstr.str();
    });

    // If resources are available for it, increase the
    // chance of scheduling.
    if (Bot.ResourceModel->isResourceAvailable(SU, false)) {
      IsAvailableAmt = (PriorityTwo + PriorityThree);
      ResCount += IsAvailableAmt;
      LLVM_DEBUG(if (verbose) dbgs() << "A|");
    } else
      LLVM_DEBUG(if (verbose) dbgs() << " |");
  }

  unsigned NumNodesBlocking = 0;
  if (Q.getID() == TopQID) {
    // How many SUs does it block from scheduling?
    // Look at all of the successors of this node.
    // Count the number of nodes that
    // this node is the sole unscheduled node for.
    if (Top.isLatencyBound(SU))
      for (const SDep &SI : SU->Succs)
        if (isSingleUnscheduledPred(SI.getSUnit(), SU))
          ++NumNodesBlocking;
  } else {
    // How many unscheduled predecessors block this node?
    if (Bot.isLatencyBound(SU))
      for (const SDep &PI : SU->Preds)
        if (isSingleUnscheduledSucc(PI.getSUnit(), SU))
          ++NumNodesBlocking;
  }
  ResCount += (NumNodesBlocking * ScaleTwo);

  LLVM_DEBUG(if (verbose) {
    std::stringstream dbgstr;
    dbgstr << "blk " << std::setw(2) << NumNodesBlocking << ")|";
    dbgs() << dbgstr.str();
  });

  // Factor in reg pressure as a heuristic.
  if (!IgnoreBBRegPressure) {
    // Decrease priority by the amount that register pressure exceeds the limit.
    ResCount -= (Delta.Excess.getUnitInc() * PriorityOne);
    // Decrease priority if register pressure exceeds the limit.
    ResCount -= (Delta.CriticalMax.getUnitInc() * PriorityOne);
    // Decrease priority slightly if register pressure would increase over the
    // current maximum.
    ResCount -= (Delta.CurrentMax.getUnitInc() * PriorityTwo);
    // If there are register pressure issues, then we remove the value added for
    // the instruction being available. The rationale is that we really don't
    // want to schedule an instruction that causes a spill.
    if (IsAvailableAmt && pressureChange(SU, Q.getID() != TopQID) > 0 &&
        (Delta.Excess.getUnitInc() || Delta.CriticalMax.getUnitInc() ||
         Delta.CurrentMax.getUnitInc()))
      ResCount -= IsAvailableAmt;
    LLVM_DEBUG(if (verbose) {
      dbgs() << "RP " << Delta.Excess.getUnitInc() << "/"
             << Delta.CriticalMax.getUnitInc() << "/"
             << Delta.CurrentMax.getUnitInc() << ")|";
    });
  }

  // Give a little extra priority to a .cur instruction if there is a resource
  // available for it.
  auto &QST = DAG->MF.getSubtarget<HexagonSubtarget>();
  auto &QII = *QST.getInstrInfo();
  if (SU->isInstr() && QII.mayBeCurLoad(*SU->getInstr())) {
    if (Q.getID() == TopQID &&
        Top.ResourceModel->isResourceAvailable(SU, true)) {
      ResCount += PriorityTwo;
      LLVM_DEBUG(if (verbose) dbgs() << "C|");
    } else if (Q.getID() == BotQID &&
               Bot.ResourceModel->isResourceAvailable(SU, false)) {
      ResCount += PriorityTwo;
      LLVM_DEBUG(if (verbose) dbgs() << "C|");
    }
  }

  // Give preference to a zero latency instruction if the dependent
  // instruction is in the current packet.
  if (Q.getID() == TopQID && getWeakLeft(SU, true) == 0) {
    for (const SDep &PI : SU->Preds) {
      if (!PI.getSUnit()->getInstr()->isPseudo() && PI.isAssignedRegDep() &&
          PI.getLatency() == 0 &&
          Top.ResourceModel->isInPacket(PI.getSUnit())) {
        ResCount += PriorityThree;
        LLVM_DEBUG(if (verbose) dbgs() << "Z|");
      }
    }
  } else if (Q.getID() == BotQID && getWeakLeft(SU, false) == 0) {
    for (const SDep &SI : SU->Succs) {
      if (!SI.getSUnit()->getInstr()->isPseudo() && SI.isAssignedRegDep() &&
          SI.getLatency() == 0 &&
          Bot.ResourceModel->isInPacket(SI.getSUnit())) {
        ResCount += PriorityThree;
        LLVM_DEBUG(if (verbose) dbgs() << "Z|");
      }
    }
  }

  // If the instruction has a non-zero latency dependence with an instruction in
  // the current packet, then it should not be scheduled yet. The case occurs
  // when the dependent instruction is scheduled in a new packet, so the
  // scheduler updates the current cycle and pending instructions become
  // available.
  if (CheckEarlyAvail) {
    if (Q.getID() == TopQID) {
      for (const auto &PI : SU->Preds) {
        if (PI.getLatency() > 0 &&
            Top.ResourceModel->isInPacket(PI.getSUnit())) {
          ResCount -= PriorityOne;
          LLVM_DEBUG(if (verbose) dbgs() << "D|");
        }
      }
    } else {
      for (const auto &SI : SU->Succs) {
        if (SI.getLatency() > 0 &&
            Bot.ResourceModel->isInPacket(SI.getSUnit())) {
          ResCount -= PriorityOne;
          LLVM_DEBUG(if (verbose) dbgs() << "D|");
        }
      }
    }
  }

  LLVM_DEBUG(if (verbose) {
    std::stringstream dbgstr;
    dbgstr << "Total " << std::setw(4) << ResCount << ")";
    dbgs() << dbgstr.str();
  });

  return ResCount;
}

/// Pick the best candidate from the given queue.
///
/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
/// DAG building. To adjust for the current scheduling location we need to
/// maintain the number of vreg uses remaining to be top-scheduled.
ConvergingVLIWScheduler::CandResult ConvergingVLIWScheduler::
pickNodeFromQueue(VLIWSchedBoundary &Zone, const RegPressureTracker &RPTracker,
                  SchedCandidate &Candidate) {
  ReadyQueue &Q = Zone.Available;
  LLVM_DEBUG(if (SchedDebugVerboseLevel > 1)
               readyQueueVerboseDump(RPTracker, Candidate, Q);
             else Q.dump(););

  // getMaxPressureDelta temporarily modifies the tracker.
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  // BestSU remains NULL if no top candidates beat the best existing candidate.
  CandResult FoundCandidate = NoCand;
  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
    RegPressureDelta RPDelta;
    TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta,
                                    DAG->getRegionCriticalPSets(),
                                    DAG->getRegPressure().MaxSetPressure);

    int CurrentCost = SchedulingCost(Q, *I, Candidate, RPDelta, false);

    // Initialize the candidate if needed.
    if (!Candidate.SU) {
      LLVM_DEBUG(traceCandidate("DCAND", Q, *I, CurrentCost));
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      Candidate.SCost = CurrentCost;
      FoundCandidate = NodeOrder;
      continue;
    }

    // Choose node order for negative cost candidates. There is no good
    // candidate in this case.
    if (CurrentCost < 0 && Candidate.SCost < 0) {
      if ((Q.getID() == TopQID && (*I)->NodeNum < Candidate.SU->NodeNum)
          || (Q.getID() == BotQID && (*I)->NodeNum > Candidate.SU->NodeNum)) {
        LLVM_DEBUG(traceCandidate("NCAND", Q, *I, CurrentCost));
        Candidate.SU = *I;
        Candidate.RPDelta = RPDelta;
        Candidate.SCost = CurrentCost;
        FoundCandidate = NodeOrder;
      }
      continue;
    }

    // Best cost.
    if (CurrentCost > Candidate.SCost) {
      LLVM_DEBUG(traceCandidate("CCAND", Q, *I, CurrentCost));
      Candidate.SU = *I;
      Candidate.RPDelta = RPDelta;
      Candidate.SCost = CurrentCost;
      FoundCandidate = BestCost;
      continue;
    }

    // Choose an instruction that does not depend on an artificial edge.
    unsigned CurrWeak = getWeakLeft(*I, (Q.getID() == TopQID));
    unsigned CandWeak = getWeakLeft(Candidate.SU, (Q.getID() == TopQID));
    if (CurrWeak != CandWeak) {
      if (CurrWeak < CandWeak) {
        LLVM_DEBUG(traceCandidate("WCAND", Q, *I, CurrentCost));
        Candidate.SU = *I;
        Candidate.RPDelta = RPDelta;
        Candidate.SCost = CurrentCost;
        FoundCandidate = Weak;
      }
      continue;
    }

    if (CurrentCost == Candidate.SCost && Zone.isLatencyBound(*I)) {
      unsigned CurrSize, CandSize;
      if (Q.getID() == TopQID) {
        CurrSize = (*I)->Succs.size();
        CandSize = Candidate.SU->Succs.size();
      } else {
        CurrSize = (*I)->Preds.size();
        CandSize = Candidate.SU->Preds.size();
      }
      if (CurrSize > CandSize) {
        LLVM_DEBUG(traceCandidate("SPCAND", Q, *I, CurrentCost));
        Candidate.SU = *I;
        Candidate.RPDelta = RPDelta;
        Candidate.SCost = CurrentCost;
        FoundCandidate = BestCost;
      }
      // Keep the old candidate if it's a better candidate. That is, don't use
      // the subsequent tie breaker.
      if (CurrSize != CandSize)
        continue;
    }

    // Tie breaker.
    // To avoid scheduling indeterminism, we need a tie breaker
    // for the case when cost is identical for two nodes.
    if (UseNewerCandidate && CurrentCost == Candidate.SCost) {
      if ((Q.getID() == TopQID && (*I)->NodeNum < Candidate.SU->NodeNum)
          || (Q.getID() == BotQID && (*I)->NodeNum > Candidate.SU->NodeNum)) {
        LLVM_DEBUG(traceCandidate("TCAND", Q, *I, CurrentCost));
        Candidate.SU = *I;
        Candidate.RPDelta = RPDelta;
        Candidate.SCost = CurrentCost;
        FoundCandidate = NodeOrder;
        continue;
      }
    }

    // Fall through to original instruction order.
    // Only consider node order if Candidate was chosen from this Q.
    if (FoundCandidate == NoCand)
      continue;
  }
  return FoundCandidate;
}

/// Pick the best candidate node from either the top or bottom queue.
SUnit *ConvergingVLIWScheduler::pickNodeBidrectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    LLVM_DEBUG(dbgs() << "Picked only Bottom\n");
    IsTopNode = false;
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    LLVM_DEBUG(dbgs() << "Picked only Top\n");
    IsTopNode = true;
    return SU;
  }
  SchedCandidate BotCand;
  // Prefer bottom scheduling when heuristics are silent.
  CandResult BotResult = pickNodeFromQueue(Bot,
                                           DAG->getBotRPTracker(), BotCand);
  assert(BotResult != NoCand && "failed to find the first candidate");

  // If either Q has a single candidate that provides the least increase in
  // Excess pressure, we can immediately schedule from that Q.
  //
  // RegionCriticalPSets summarizes the pressure within the scheduled region and
  // affects picking from either Q. If scheduling in one direction must
  // increase pressure for one of the excess PSets, then schedule in that
  // direction first to provide more freedom in the other direction.
  if (BotResult == SingleExcess || BotResult == SingleCritical) {
    LLVM_DEBUG(dbgs() << "Preferred Bottom Node\n");
    IsTopNode = false;
    return BotCand.SU;
  }
  // Check if the top Q has a better candidate.
  SchedCandidate TopCand;
  CandResult TopResult = pickNodeFromQueue(Top,
                                           DAG->getTopRPTracker(), TopCand);
  assert(TopResult != NoCand && "failed to find the first candidate");

  if (TopResult == SingleExcess || TopResult == SingleCritical) {
    LLVM_DEBUG(dbgs() << "Preferred Top Node\n");
    IsTopNode = true;
    return TopCand.SU;
  }
  // If either Q has a single candidate that minimizes pressure above the
  // original region's pressure, pick it.
  if (BotResult == SingleMax) {
    LLVM_DEBUG(dbgs() << "Preferred Bottom Node SingleMax\n");
    IsTopNode = false;
    return BotCand.SU;
  }
  if (TopResult == SingleMax) {
    LLVM_DEBUG(dbgs() << "Preferred Top Node SingleMax\n");
    IsTopNode = true;
    return TopCand.SU;
  }
  if (TopCand.SCost > BotCand.SCost) {
    LLVM_DEBUG(dbgs() << "Preferred Top Node Cost\n");
    IsTopNode = true;
    return TopCand.SU;
  }
  // Otherwise prefer the bottom candidate in node order.
  LLVM_DEBUG(dbgs() << "Preferred Bottom in Node order\n");
  IsTopNode = false;
  return BotCand.SU;
}

/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
SUnit *ConvergingVLIWScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
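  // The -misched-topdown / -misched-bottomup flags force a single scheduling
  // direction; otherwise pick bidirectionally from whichever zone looks better.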
  if (ForceTopDown) {
    SU = Top.pickOnlyChoice();
    if (!SU) {
      SchedCandidate TopCand;
      CandResult TopResult =
          pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
      assert(TopResult != NoCand && "failed to find the first candidate");
      (void)TopResult;
      SU = TopCand.SU;
    }
    IsTopNode = true;
  } else if (ForceBottomUp) {
    SU = Bot.pickOnlyChoice();
    if (!SU) {
      SchedCandidate BotCand;
      CandResult BotResult =
          pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
      assert(BotResult != NoCand && "failed to find the first candidate");
      (void)BotResult;
      SU = BotCand.SU;
    }
    IsTopNode = false;
  } else {
    SU = pickNodeBidrectional(IsTopNode);
  }
  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  LLVM_DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom")
                    << " Scheduling instruction in cycle "
                    << (IsTopNode ? Top.CurrCycle : Bot.CurrCycle) << " ("
                    << reportPackets() << ")\n";
             DAG->dumpNode(*SU));
  return SU;
}

/// Update the scheduler's state after scheduling a node. This is the same node
/// that was just returned by pickNode(). However, VLIWMachineScheduler needs
/// to update its state based on the current cycle before MachineSchedStrategy
/// does.
void ConvergingVLIWScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  if (IsTopNode) {
    Top.bumpNode(SU);
    SU->TopReadyCycle = Top.CurrCycle;
  } else {
    Bot.bumpNode(SU);
    SU->BotReadyCycle = Bot.CurrCycle;
  }
}