Factor out more code for computing register live-range information for
scheduling, and generalize it so that it preserves state across
scheduling regions. This fixes incorrect live-range information around
terminators and labels, which are effectively region boundaries.
In place of looking for terminators to anchor inter-block dependencies,
introduce special entry and exit scheduling units for this purpose.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@64254 91177308-0d34-0410-b5e6-96231b3b80d8
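For reference, a minimal self-contained sketch of the new region-carving
driver loop, with a plain std::vector<int> standing in for a
MachineBasicBlock and a toy isBoundary predicate standing in for
isSchedulingBoundary; the printf calls mark where Schedule/EmitSchedule
and Observe would run, and all names here are illustrative:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Stand-in for isSchedulingBoundary: treat negative values as
    // labels/terminators that scheduling may not cross.
    static bool isBoundary(int Instr) { return Instr < 0; }

    int main() {
      int Instrs[] = { 1, 2, -3, 4, 5, -6 };
      std::vector<int> Block(Instrs, Instrs + 6);

      // Walk bottom-up, carving maximal boundary-free regions, as
      // runOnMachineFunction now does with prior(I) and Observe(MI).
      size_t Current = Block.size();
      for (size_t I = Current; I != 0; ) {
        size_t MI = I - 1;
        if (isBoundary(Block[MI])) {
          if (I != Current) // Scheduler.Run + EmitSchedule on [I, Current)
            std::printf("schedule [%zu, %zu)\n", I, Current);
          std::printf("observe boundary at %zu\n", MI); // Scheduler.Observe
          Current = MI;
        }
        I = MI;
      }
      std::printf("schedule [0, %zu)\n", Current); // the final region
      return 0;
    }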
diff --git a/lib/CodeGen/PostRASchedulerList.cpp b/lib/CodeGen/PostRASchedulerList.cpp
index 94b6be1..617b4ac 100644
--- a/lib/CodeGen/PostRASchedulerList.cpp
+++ b/lib/CodeGen/PostRASchedulerList.cpp
@@ -94,6 +94,25 @@
/// HazardRec - The hazard recognizer to use.
ScheduleHazardRecognizer *HazardRec;
+ /// Classes - For live regs that are only used in one register class in a
+ /// live range, the register class. If the register is not live, the
+ /// corresponding value is null. If the register is live but used in
+ /// multiple register classes, the corresponding value is -1 casted to a
+ /// pointer.
+ const TargetRegisterClass *
+ Classes[TargetRegisterInfo::FirstVirtualRegister];
+
+ /// RegRefs - Map registers to all their references within a live range.
+ std::multimap<unsigned, MachineOperand *> RegRefs;
+
+ /// KillIndices - The index of the most recent kill (proceeding
+ /// bottom-up), or ~0u if the register is not live.
+ unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];
+
+ /// DefIndices - The index of the most recent complete def (proceeding
+ /// bottom-up), or ~0u if the register is live.
+ unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];
+
public:
SchedulePostRATDList(MachineFunction &MF,
const MachineLoopInfo &MLI,
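The Classes array above uses a tri-state pointer encoding: null for a
register that is not live, a real register-class pointer for a live
register used in one consistent class, and -1 cast to a pointer once the
uses conflict. A minimal sketch of that merge rule, where the empty RC
struct stands in for TargetRegisterClass and the names merge and
Conflict are illustrative, not from the patch:

    #include <cassert>

    struct RC {}; // stand-in for TargetRegisterClass

    static RC *const Conflict = reinterpret_cast<RC *>(-1);

    // Fold a newly seen register class into the running value, the way
    // PrescanInstruction and ScanInstruction update Classes[Reg].
    static RC *merge(RC *Cur, RC *NewRC) {
      if (!Cur && NewRC)
        return NewRC;    // first class seen for this register
      if (!NewRC || Cur != NewRC)
        return Conflict; // no class known, or classes disagree
      return Cur;        // consistent use
    }

    int main() {
      RC GPR, FPR;
      RC *C = 0;          // not live
      C = merge(C, &GPR); // first use: remember GPR
      assert(C == &GPR);
      C = merge(C, &GPR); // consistent second use
      assert(C == &GPR);
      C = merge(C, &FPR); // conflicting class: give up on renaming
      assert(C == Conflict);
      return 0;
    }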
@@ -107,10 +126,29 @@
delete HazardRec;
}
+ /// StartBlock - Initialize register live-range state for scheduling in
+ /// this block.
+ ///
+ void StartBlock(MachineBasicBlock *BB);
+
+ /// Schedule - Schedule the instruction range using list scheduling.
+ ///
void Schedule();
+ /// Observe - Update liveness information to account for the current
+ /// instruction, which will not be scheduled.
+ ///
+ void Observe(MachineInstr *MI);
+
+ /// FinishBlock - Clean up register live-range state.
+ ///
+ void FinishBlock();
+
private:
+ void PrescanInstruction(MachineInstr *MI);
+ void ScanInstruction(MachineInstr *MI, unsigned Count);
void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
+ void ReleaseSuccessors(SUnit *SU);
void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
void ListScheduleTopDown();
bool BreakAntiDependencies();
@@ -173,6 +211,19 @@
};
}
+/// isSchedulingBoundary - Test if the given instruction should be
+/// considered a scheduling boundary. This primarily includes labels
+/// and terminators.
+///
+static bool isSchedulingBoundary(const MachineInstr *MI,
+ const MachineFunction &MF) {
+ // Terminators and labels can't be scheduled around.
+ if (MI->getDesc().isTerminator() || MI->isLabel())
+ return true;
+
+ return false;
+}
+
bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
DOUT << "PostRAScheduler\n";
@@ -187,131 +238,51 @@
// Loop over all of the basic blocks
for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
MBB != MBBe; ++MBB) {
+ // Initialize register live-range state for scheduling in this block.
+ Scheduler.StartBlock(MBB);
+
// Schedule each sequence of instructions not interrupted by a label
// or anything else that effectively needs to shut down scheduling.
- MachineBasicBlock::iterator Current = MBB->end(), Top = MBB->begin();
- for (MachineBasicBlock::iterator I = Current; I != Top; ) {
- MachineInstr *MI = --I;
- if (MI->getDesc().isTerminator() || MI->isLabel()) {
- Scheduler.Run(0, MBB, next(I), Current);
- Scheduler.EmitSchedule();
- Current = I;
+ MachineBasicBlock::iterator Current = MBB->end();
+ for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
+ MachineInstr *MI = prior(I);
+ if (isSchedulingBoundary(MI, Fn)) {
+ if (I != Current) {
+ Scheduler.Run(0, MBB, I, Current);
+ Scheduler.EmitSchedule();
+ }
+ Scheduler.Observe(MI);
+ Current = MI;
}
+ I = MI;
}
-
- Scheduler.Run(0, MBB, Top, Current);
+ Scheduler.Run(0, MBB, MBB->begin(), Current);
Scheduler.EmitSchedule();
+
+ // Clean up register live-range state.
+ Scheduler.FinishBlock();
}
return true;
}
-/// Schedule - Schedule the DAG using list scheduling.
-void SchedulePostRATDList::Schedule() {
- DOUT << "********** List Scheduling **********\n";
-
- // Build the scheduling graph.
- BuildSchedGraph();
-
- if (EnableAntiDepBreaking) {
- if (BreakAntiDependencies()) {
- // We made changes. Update the dependency graph.
- // Theoretically we could update the graph in place:
- // When a live range is changed to use a different register, remove
- // the def's anti-dependence *and* output-dependence edges due to
- // that register, and add new anti-dependence and output-dependence
- // edges based on the next live range of the register.
- SUnits.clear();
- BuildSchedGraph();
- }
- }
-
- AvailableQueue.initNodes(SUnits);
-
- ListScheduleTopDown();
-
- AvailableQueue.releaseState();
-}
-
-/// getInstrOperandRegClass - Return register class of the operand of an
-/// instruction of the specified TargetInstrDesc.
-static const TargetRegisterClass*
-getInstrOperandRegClass(const TargetRegisterInfo *TRI,
- const TargetInstrDesc &II, unsigned Op) {
- if (Op >= II.getNumOperands())
- return NULL;
- if (II.OpInfo[Op].isLookupPtrRegClass())
- return TRI->getPointerRegClass();
- return TRI->getRegClass(II.OpInfo[Op].RegClass);
-}
-
-/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
-/// critical path.
-static SDep *CriticalPathStep(SUnit *SU) {
- SDep *Next = 0;
- unsigned NextDepth = 0;
- // Find the predecessor edge with the greatest depth.
- for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
- P != PE; ++P) {
- SUnit *PredSU = P->getSUnit();
- unsigned PredLatency = P->getLatency();
- unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
- // In the case of a latency tie, prefer an anti-dependency edge over
- // other types of edges.
- if (NextDepth < PredTotalLatency ||
- (NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
- NextDepth = PredTotalLatency;
- Next = &*P;
- }
- }
- return Next;
-}
-
-/// BreakAntiDependencies - Identifiy anti-dependencies along the critical path
-/// of the ScheduleDAG and break them by renaming registers.
+/// StartBlock - Initialize register live-range state for scheduling in
+/// this block.
///
-bool SchedulePostRATDList::BreakAntiDependencies() {
- // The code below assumes that there is at least one instruction,
- // so just duck out immediately if the block is empty.
- if (SUnits.empty()) return false;
+void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
+ // Call the superclass.
+ ScheduleDAGInstrs::StartBlock(BB);
- // Find the node at the bottom of the critical path.
- SUnit *Max = 0;
- for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
- SUnit *SU = &SUnits[i];
- if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
- Max = SU;
- }
+ // Clear out the register class data.
+ std::fill(Classes, array_endof(Classes),
+ static_cast<const TargetRegisterClass *>(0));
- DOUT << "Critical path has total latency "
- << (Max->getDepth() + Max->Latency) << "\n";
-
- // Track progress along the critical path through the SUnit graph as we walk
- // the instructions.
- SUnit *CriticalPathSU = Max;
- MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();
-
- // For live regs that are only used in one register class in a live range,
- // the register class. If the register is not live, the corresponding value
- // is null. If the register is live but used in multiple register classes,
- // the corresponding value is -1 casted to a pointer.
- const TargetRegisterClass *
- Classes[TargetRegisterInfo::FirstVirtualRegister] = {};
-
- // Map registers to all their references within a live range.
- std::multimap<unsigned, MachineOperand *> RegRefs;
-
- // The index of the most recent kill (proceding bottom-up), or ~0u if
- // the register is not live.
- unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];
+ // Initialize the indices to indicate that no registers are live.
std::fill(KillIndices, array_endof(KillIndices), ~0u);
- // The index of the most recent complete def (proceding bottom up), or ~0u if
- // the register is live.
- unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];
std::fill(DefIndices, array_endof(DefIndices), BB->size());
// Determine the live-out physregs for this block.
- if (BB->back().getDesc().isReturn())
+ if (!BB->empty() && BB->back().getDesc().isReturn())
// In a return block, examine the function live-out regs.
for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
E = MRI.liveout_end(); I != E; ++I) {
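The two index arrays implement a compact bottom-up liveness encoding: a
live register has a valid kill index and DefIndices == ~0u, while a dead
one has a valid def index and KillIndices == ~0u. A minimal sketch of
the invariant, with a small fixed register file in place of
TargetRegisterInfo::FirstVirtualRegister entries (NumRegs, isLive, and
the index values are illustrative):

    #include <algorithm>
    #include <cassert>

    enum { NumRegs = 4 };
    unsigned KillIndices[NumRegs];
    unsigned DefIndices[NumRegs];

    static bool isLive(unsigned Reg) {
      return KillIndices[Reg] != ~0u && DefIndices[Reg] == ~0u;
    }

    int main() {
      unsigned BlockSize = 10; // instructions in the block

      // StartBlock: nothing live; every def index is the block size,
      // i.e. "defined below everything we are about to scan".
      std::fill(KillIndices, KillIndices + NumRegs, ~0u);
      std::fill(DefIndices, DefIndices + NumRegs, BlockSize);
      assert(!isLive(2));

      // A use at bottom-up index 7 is a kill: reg 2 becomes live.
      KillIndices[2] = 7;
      DefIndices[2] = ~0u;
      assert(isLive(2));

      // A complete def at index 3 ends the live range.
      DefIndices[2] = 3;
      KillIndices[2] = ~0u;
      assert(!isLive(2));
      return 0;
    }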
@@ -368,6 +339,217 @@
DefIndices[AliasReg] = ~0u;
}
}
+}
+
+/// Schedule - Schedule the instruction range using list scheduling.
+///
+void SchedulePostRATDList::Schedule() {
+ DOUT << "********** List Scheduling **********\n";
+
+ // Build the scheduling graph.
+ BuildSchedGraph();
+
+ if (EnableAntiDepBreaking) {
+ if (BreakAntiDependencies()) {
+ // We made changes. Update the dependency graph.
+ // Theoretically we could update the graph in place:
+ // When a live range is changed to use a different register, remove
+ // the def's anti-dependence *and* output-dependence edges due to
+ // that register, and add new anti-dependence and output-dependence
+ // edges based on the next live range of the register.
+ SUnits.clear();
+ EntrySU = SUnit();
+ ExitSU = SUnit();
+ BuildSchedGraph();
+ }
+ }
+
+ AvailableQueue.initNodes(SUnits);
+
+ ListScheduleTopDown();
+
+ AvailableQueue.releaseState();
+}
+
+/// Observe - Update liveness information to account for the current
+/// instruction, which will not be scheduled.
+///
+void SchedulePostRATDList::Observe(MachineInstr *MI) {
+ PrescanInstruction(MI);
+ ScanInstruction(MI, 0);
+}
+
+/// FinishBlock - Clean up register live-range state.
+///
+void SchedulePostRATDList::FinishBlock() {
+ RegRefs.clear();
+
+ // Call the superclass.
+ ScheduleDAGInstrs::FinishBlock();
+}
+
+/// getInstrOperandRegClass - Return register class of the operand of an
+/// instruction of the specified TargetInstrDesc.
+static const TargetRegisterClass*
+getInstrOperandRegClass(const TargetRegisterInfo *TRI,
+ const TargetInstrDesc &II, unsigned Op) {
+ if (Op >= II.getNumOperands())
+ return NULL;
+ if (II.OpInfo[Op].isLookupPtrRegClass())
+ return TRI->getPointerRegClass();
+ return TRI->getRegClass(II.OpInfo[Op].RegClass);
+}
+
+/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
+/// critical path.
+static SDep *CriticalPathStep(SUnit *SU) {
+ SDep *Next = 0;
+ unsigned NextDepth = 0;
+ // Find the predecessor edge with the greatest depth.
+ for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
+ P != PE; ++P) {
+ SUnit *PredSU = P->getSUnit();
+ unsigned PredLatency = P->getLatency();
+ unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
+ // In the case of a latency tie, prefer an anti-dependency edge over
+ // other types of edges.
+ if (NextDepth < PredTotalLatency ||
+ (NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
+ NextDepth = PredTotalLatency;
+ Next = &*P;
+ }
+ }
+ return Next;
+}
+
+void SchedulePostRATDList::PrescanInstruction(MachineInstr *MI) {
+ // Scan the register operands for this instruction and update
+ // Classes and RegRefs.
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (Reg == 0) continue;
+ const TargetRegisterClass *NewRC =
+ getInstrOperandRegClass(TRI, MI->getDesc(), i);
+
+ // For now, only allow the register to be changed if its register
+ // class is consistent across all uses.
+ if (!Classes[Reg] && NewRC)
+ Classes[Reg] = NewRC;
+ else if (!NewRC || Classes[Reg] != NewRC)
+ Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
+
+ // Now check for aliases.
+ for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
+ // If an alias of the reg is used during the live range, give up.
+ // Note that this allows us to skip checking if AntiDepReg
+ // overlaps with any of the aliases, among other things.
+ unsigned AliasReg = *Alias;
+ if (Classes[AliasReg]) {
+ Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
+ Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
+ }
+ }
+
+ // If we're still willing to consider this register, note the reference.
+ if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
+ RegRefs.insert(std::make_pair(Reg, &MO));
+ }
+}
+
+void SchedulePostRATDList::ScanInstruction(MachineInstr *MI,
+ unsigned Count) {
+ // Update liveness.
+ // Proceeding upwards, registers that are defined but not used in this
+ // instruction are now dead.
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (Reg == 0) continue;
+ if (!MO.isDef()) continue;
+ // Ignore two-addr defs.
+ if (MI->isRegReDefinedByTwoAddr(i)) continue;
+
+ DefIndices[Reg] = Count;
+ KillIndices[Reg] = ~0u;
+ Classes[Reg] = 0;
+ RegRefs.erase(Reg);
+ // Repeat, for all subregs.
+ for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ *Subreg; ++Subreg) {
+ unsigned SubregReg = *Subreg;
+ DefIndices[SubregReg] = Count;
+ KillIndices[SubregReg] = ~0u;
+ Classes[SubregReg] = 0;
+ RegRefs.erase(SubregReg);
+ }
+ // Conservatively mark super-registers as unusable.
+ for (const unsigned *Super = TRI->getSuperRegisters(Reg);
+ *Super; ++Super) {
+ unsigned SuperReg = *Super;
+ Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
+ }
+ }
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (Reg == 0) continue;
+ if (!MO.isUse()) continue;
+
+ const TargetRegisterClass *NewRC =
+ getInstrOperandRegClass(TRI, MI->getDesc(), i);
+
+ // For now, only allow the register to be changed if its register
+ // class is consistent across all uses.
+ if (!Classes[Reg] && NewRC)
+ Classes[Reg] = NewRC;
+ else if (!NewRC || Classes[Reg] != NewRC)
+ Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
+
+ RegRefs.insert(std::make_pair(Reg, &MO));
+
+ // It wasn't previously live but now it is; this is a kill.
+ if (KillIndices[Reg] == ~0u) {
+ KillIndices[Reg] = Count;
+ DefIndices[Reg] = ~0u;
+ }
+ // Repeat, for all aliases.
+ for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
+ unsigned AliasReg = *Alias;
+ if (KillIndices[AliasReg] == ~0u) {
+ KillIndices[AliasReg] = Count;
+ DefIndices[AliasReg] = ~0u;
+ }
+ }
+ }
+}
+
+ /// BreakAntiDependencies - Identify anti-dependencies along the critical path
+/// of the ScheduleDAG and break them by renaming registers.
+///
+bool SchedulePostRATDList::BreakAntiDependencies() {
+ // The code below assumes that there is at least one instruction,
+ // so just duck out immediately if the block is empty.
+ if (SUnits.empty()) return false;
+
+ // Find the node at the bottom of the critical path.
+ SUnit *Max = 0;
+ for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
+ SUnit *SU = &SUnits[i];
+ if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
+ Max = SU;
+ }
+
+ DOUT << "Critical path has total latency "
+ << (Max->getDepth() + Max->Latency) << "\n";
+
+ // Track progress along the critical path through the SUnit graph as we walk
+ // the instructions.
+ SUnit *CriticalPathSU = Max;
+ MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();
// Consider this pattern:
// A = ...
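CriticalPathStep picks the predecessor edge that maximizes the
predecessor's depth plus the edge latency, preferring an anti-dependence
edge on a tie, since those are the edges renaming can remove. A minimal
sketch of that rule in isolation, with Edge as an illustrative stand-in
for SDep:

    #include <cassert>

    enum Kind { Data, Anti, Output };

    struct Edge {
      unsigned PredDepth; // getDepth() of the predecessor SUnit
      unsigned Latency;   // getLatency() of this edge
      Kind K;             // getKind()
    };

    static const Edge *criticalPathStep(const Edge *Preds, unsigned N) {
      const Edge *Next = 0;
      unsigned NextDepth = 0;
      for (unsigned i = 0; i != N; ++i) {
        unsigned Total = Preds[i].PredDepth + Preds[i].Latency;
        // On a latency tie, prefer the anti-dependence edge.
        if (NextDepth < Total ||
            (NextDepth == Total && Preds[i].K == Anti)) {
          NextDepth = Total;
          Next = &Preds[i];
        }
      }
      return Next;
    }

    int main() {
      const Edge Preds[] = {
        { 4, 1, Data },  // total depth 5
        { 3, 2, Anti },  // total depth 5: wins the tie
        { 2, 2, Output } // total depth 4
      };
      const Edge *E = criticalPathStep(Preds, 3);
      assert(E && E->K == Anti);
      return 0;
    }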
@@ -481,43 +663,19 @@
}
}
- // Scan the register operands for this instruction and update
- // Classes and RegRefs.
+ PrescanInstruction(MI);
+
+ // If this instruction has a use of AntiDepReg, breaking it
+ // is invalid.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
- const TargetRegisterClass *NewRC =
- getInstrOperandRegClass(TRI, MI->getDesc(), i);
-
- // If this instruction has a use of AntiDepReg, breaking it
- // is invalid.
- if (MO.isUse() && AntiDepReg == Reg)
+ if (MO.isUse() && AntiDepReg == Reg) {
AntiDepReg = 0;
-
- // For now, only allow the register to be changed if its register
- // class is consistent across all uses.
- if (!Classes[Reg] && NewRC)
- Classes[Reg] = NewRC;
- else if (!NewRC || Classes[Reg] != NewRC)
- Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
-
- // Now check for aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- // If an alias of the reg is used during the live range, give up.
- // Note that this allows us to skip checking if AntiDepReg
- // overlaps with any of the aliases, among other things.
- unsigned AliasReg = *Alias;
- if (Classes[AliasReg]) {
- Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
- Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
- }
+ break;
}
-
- // If we're still willing to consider this register, note the reference.
- if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
- RegRefs.insert(std::make_pair(Reg, &MO));
}
// Determine AntiDepReg's register class, if it is live and is
@@ -584,71 +742,7 @@
}
}
- // Update liveness.
- // Proceding upwards, registers that are defed but not used in this
- // instruction are now dead.
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg()) continue;
- unsigned Reg = MO.getReg();
- if (Reg == 0) continue;
- if (!MO.isDef()) continue;
- // Ignore two-addr defs.
- if (MI->isRegReDefinedByTwoAddr(i)) continue;
-
- DefIndices[Reg] = Count;
- KillIndices[Reg] = ~0u;
- Classes[Reg] = 0;
- RegRefs.erase(Reg);
- // Repeat, for all subregs.
- for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
- *Subreg; ++Subreg) {
- unsigned SubregReg = *Subreg;
- DefIndices[SubregReg] = Count;
- KillIndices[SubregReg] = ~0u;
- Classes[SubregReg] = 0;
- RegRefs.erase(SubregReg);
- }
- // Conservatively mark super-registers as unusable.
- for (const unsigned *Super = TRI->getSuperRegisters(Reg);
- *Super; ++Super) {
- unsigned SuperReg = *Super;
- Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
- }
- }
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg()) continue;
- unsigned Reg = MO.getReg();
- if (Reg == 0) continue;
- if (!MO.isUse()) continue;
-
- const TargetRegisterClass *NewRC =
- getInstrOperandRegClass(TRI, MI->getDesc(), i);
-
- // For now, only allow the register to be changed if its register
- // class is consistent across all uses.
- if (!Classes[Reg] && NewRC)
- Classes[Reg] = NewRC;
- else if (!NewRC || Classes[Reg] != NewRC)
- Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
-
- RegRefs.insert(std::make_pair(Reg, &MO));
-
- // It wasn't previously live but now it is, this is a kill.
- if (KillIndices[Reg] == ~0u) {
- KillIndices[Reg] = Count;
- DefIndices[Reg] = ~0u;
- }
- // Repeat, for all aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- if (KillIndices[AliasReg] == ~0u) {
- KillIndices[AliasReg] = Count;
- DefIndices[AliasReg] = ~0u;
- }
- }
- }
+ ScanInstruction(MI, Count);
}
assert(Count == ~0u && "Count mismatch!");
@@ -679,9 +773,17 @@
// their latencies.
SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
- if (SuccSU->NumPredsLeft == 0) {
+ // If all the node's predecessors are scheduled, this node is ready
+ // to be scheduled. Ignore the special ExitSU node.
+ if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
PendingQueue.push_back(SuccSU);
- }
+}
+
+/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
+void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
+ for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
+ I != E; ++I)
+ ReleaseSucc(SU, &*I);
}
/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
@@ -695,11 +797,7 @@
assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
SU->setDepthToAtLeast(CurCycle);
- // Top down: release successors.
- for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I)
- ReleaseSucc(SU, &*I);
-
+ ReleaseSuccessors(SU);
SU->isScheduled = true;
AvailableQueue.ScheduledNode(SU);
}
@@ -709,6 +807,9 @@
void SchedulePostRATDList::ListScheduleTopDown() {
unsigned CurCycle = 0;
+ // Release any successors of the special Entry node.
+ ReleaseSuccessors(&EntrySU);
+
// All leaves to Available queue.
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
// It is available if it has no predecessors.
@@ -717,7 +818,7 @@
SUnits[i].isAvailable = true;
}
}
-
+
// While Available queue is not empty, grab the node with the highest
// priority. If it is not ready put it back. Schedule the node.
std::vector<SUnit*> NotReady;
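The entry/exit sentinel handling can be exercised in isolation with the
following sketch; Node, releaseSucc, and releaseSuccessors are
simplified stand-ins for SUnit, ReleaseSucc, and ReleaseSuccessors, and
the single node "a" depends only on the sentinels:

    #include <cstdio>
    #include <vector>

    struct Node {
      const char *Name;
      unsigned NumPredsLeft;
      std::vector<Node *> Succs;
      Node(const char *N, unsigned P) : Name(N), NumPredsLeft(P) {}
    };

    static std::vector<Node *> PendingQueue;
    static Node EntrySU("entry", 0);
    static Node ExitSU("exit", 1); // one predecessor: node "a" below

    static void releaseSucc(Node *Succ) {
      --Succ->NumPredsLeft;
      // Ready nodes go on the pending queue, except the exit sentinel,
      // which exists only to anchor dependencies at the region boundary.
      if (Succ->NumPredsLeft == 0 && Succ != &ExitSU)
        PendingQueue.push_back(Succ);
    }

    static void releaseSuccessors(Node *SU) {
      for (unsigned i = 0, e = SU->Succs.size(); i != e; ++i)
        releaseSucc(SU->Succs[i]);
    }

    int main() {
      Node A("a", 1); // depends only on the entry sentinel
      EntrySU.Succs.push_back(&A);
      A.Succs.push_back(&ExitSU);

      // ListScheduleTopDown starts by releasing the entry successors.
      releaseSuccessors(&EntrySU); // makes "a" available
      while (!PendingQueue.empty()) {
        Node *SU = PendingQueue.back();
        PendingQueue.pop_back();
        std::printf("scheduling %s\n", SU->Name);
        releaseSuccessors(SU); // ExitSU's count drops; never queued
      }
      return 0;
    }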