Update V8 to version 4.1.0.21

This is a cherry-pick of all commits up to and including the
4.1.0.21 cherry-pick in Chromium.

Original commit message:

Version 4.1.0.21 (cherry-pick)

Merged 206e9136bde0f2b5ae8cb77afbb1e7833e5bd412

Unlink pages from the space page list after evacuation.

BUG=430201
LOG=N
R=jkummerow@chromium.org

Review URL: https://codereview.chromium.org/953813002

Cr-Commit-Position: refs/branch-heads/4.1@{#22}
Cr-Branched-From: 2e08d2a7aa9d65d269d8c57aba82eb38a8cb0a18-refs/heads/candidates@{#25353}

---

FPIIM-449

Change-Id: I8c23c7bbb70772b4858fe8a47b64fa97ee0d1f8c
diff --git a/src/compiler/scheduler.cc b/src/compiler/scheduler.cc
index 4029950..f12c631 100644
--- a/src/compiler/scheduler.cc
+++ b/src/compiler/scheduler.cc
@@ -7,12 +7,13 @@
 
 #include "src/compiler/scheduler.h"
 
+#include "src/bit-vector.h"
+#include "src/compiler/control-equivalence.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/graph-inl.h"
 #include "src/compiler/node.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/node-properties-inl.h"
-#include "src/data-flow.h"
 
 namespace v8 {
 namespace internal {
@@ -28,30 +29,225 @@
 }
 
 
-// Internal class to build a control flow graph (i.e the basic blocks and edges
-// between them within a Schedule) from the node graph.
-// Visits the control edges of the graph backwards from end in order to find
-// the connected control subgraph, needed for scheduling.
-class CFGBuilder {
- public:
-  Scheduler* scheduler_;
-  Schedule* schedule_;
-  ZoneQueue<Node*> queue_;
-  NodeVector control_;
+Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
+    : zone_(zone),
+      graph_(graph),
+      schedule_(schedule),
+      scheduled_nodes_(zone),
+      schedule_root_nodes_(zone),
+      schedule_queue_(zone),
+      node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone) {}
 
+
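+// Typical use (sketch): the caller provides a temporary zone for the
+// scheduler's own data structures, while the resulting Schedule is
+// allocated in the graph's zone, e.g.:
+//
+//   Zone tmp_zone(graph->zone()->isolate());
+//   Schedule* schedule = Scheduler::ComputeSchedule(&tmp_zone, graph);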
+Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph) {
+  Schedule* schedule = new (graph->zone())
+      Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
+  Scheduler scheduler(zone, graph, schedule);
+
+  scheduler.BuildCFG();
+  scheduler.ComputeSpecialRPONumbering();
+  scheduler.GenerateImmediateDominatorTree();
+
+  scheduler.PrepareUses();
+  scheduler.ScheduleEarly();
+  scheduler.ScheduleLate();
+
+  scheduler.SealFinalSchedule();
+
+  return schedule;
+}
+
+
+Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
+  SchedulerData def = {schedule_->start(), 0, kUnknown};
+  return def;
+}
+
+
+Scheduler::SchedulerData* Scheduler::GetData(Node* node) {
+  DCHECK(node->id() < static_cast<int>(node_data_.size()));
+  return &node_data_[node->id()];
+}
+
+
+Scheduler::Placement Scheduler::GetPlacement(Node* node) {
+  SchedulerData* data = GetData(node);
+  if (data->placement_ == kUnknown) {  // Compute placement, once, on demand.
+    switch (node->opcode()) {
+      case IrOpcode::kParameter:
+        // Parameters are always fixed to the start node.
+        data->placement_ = kFixed;
+        break;
+      case IrOpcode::kPhi:
+      case IrOpcode::kEffectPhi: {
+        // Phis and effect phis are fixed if their control inputs are, whereas
+        // otherwise they are coupled to a floating control node.
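+        // (Example: a Phi on a fixed Merge is itself fixed, whereas a Phi
+        // on a floating Merge becomes coupled and is only placed once that
+        // Merge is placed.)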
+        Placement p = GetPlacement(NodeProperties::GetControlInput(node));
+        data->placement_ = (p == kFixed ? kFixed : kCoupled);
+        break;
+      }
+#define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
+      CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
+#undef DEFINE_CONTROL_CASE
+      {
+        // Control nodes that were not control-reachable from end may float.
+        data->placement_ = kSchedulable;
+        break;
+      }
+      default:
+        data->placement_ = kSchedulable;
+        break;
+    }
+  }
+  return data->placement_;
+}
+
+
+void Scheduler::UpdatePlacement(Node* node, Placement placement) {
+  SchedulerData* data = GetData(node);
+  if (data->placement_ != kUnknown) {  // Trap on mutation, not initialization.
+    switch (node->opcode()) {
+      case IrOpcode::kParameter:
+        // Parameters are fixed once and for all.
+        UNREACHABLE();
+        break;
+      case IrOpcode::kPhi:
+      case IrOpcode::kEffectPhi: {
+        // Phis and effect phis are coupled to their respective blocks.
+        DCHECK_EQ(Scheduler::kCoupled, data->placement_);
+        DCHECK_EQ(Scheduler::kFixed, placement);
+        Node* control = NodeProperties::GetControlInput(node);
+        BasicBlock* block = schedule_->block(control);
+        schedule_->AddNode(block, node);
+        break;
+      }
+#define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
+      CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
+#undef DEFINE_CONTROL_CASE
+      {
+        // Control nodes force coupled uses to be placed.
+        Node::Uses uses = node->uses();
+        for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) {
+          if (GetPlacement(*i) == Scheduler::kCoupled) {
+            DCHECK_EQ(node, NodeProperties::GetControlInput(*i));
+            UpdatePlacement(*i, placement);
+          }
+        }
+        break;
+      }
+      default:
+        DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
+        DCHECK_EQ(Scheduler::kScheduled, placement);
+        break;
+    }
+    // Reduce the use count of the node's inputs to potentially make them
+    // schedulable. If all the uses of a node have been scheduled, then the node
+    // itself can be scheduled.
+    for (Edge const edge : node->input_edges()) {
+      DecrementUnscheduledUseCount(edge.to(), edge.index(), edge.from());
+    }
+  }
+  data->placement_ = placement;
+}
+
+
+bool Scheduler::IsCoupledControlEdge(Node* node, int index) {
+  return GetPlacement(node) == kCoupled &&
+         NodeProperties::FirstControlIndex(node) == index;
+}
+
+
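+// Note: for a coupled node (e.g. a floating Phi), the use count is
+// accumulated on its owning control node, so that control node only becomes
+// eligible for scheduling once the phi's uses have been scheduled as well.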
+void Scheduler::IncrementUnscheduledUseCount(Node* node, int index,
+                                             Node* from) {
+  // Make sure that control edges from coupled nodes are not counted.
+  if (IsCoupledControlEdge(from, index)) return;
+
+  // Tracking use counts for fixed nodes is useless.
+  if (GetPlacement(node) == kFixed) return;
+
+  // Use count for coupled nodes is summed up on their control.
+  if (GetPlacement(node) == kCoupled) {
+    Node* control = NodeProperties::GetControlInput(node);
+    return IncrementUnscheduledUseCount(control, index, from);
+  }
+
+  ++(GetData(node)->unscheduled_count_);
+  if (FLAG_trace_turbo_scheduler) {
+    Trace("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(),
+          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
+          GetData(node)->unscheduled_count_);
+  }
+}
+
+
+void Scheduler::DecrementUnscheduledUseCount(Node* node, int index,
+                                             Node* from) {
+  // Make sure that control edges from coupled nodes are not counted.
+  if (IsCoupledControlEdge(from, index)) return;
+
+  // Tracking use counts for fixed nodes is useless.
+  if (GetPlacement(node) == kFixed) return;
+
+  // Use count for coupled nodes is summed up on their control.
+  if (GetPlacement(node) == kCoupled) {
+    Node* control = NodeProperties::GetControlInput(node);
+    return DecrementUnscheduledUseCount(control, index, from);
+  }
+
+  DCHECK(GetData(node)->unscheduled_count_ > 0);
+  --(GetData(node)->unscheduled_count_);
+  if (FLAG_trace_turbo_scheduler) {
+    Trace("  Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
+          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
+          GetData(node)->unscheduled_count_);
+  }
+  if (GetData(node)->unscheduled_count_ == 0) {
+    Trace("    newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
+    schedule_queue_.push(node);
+  }
+}
+
+
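+// Walk the two blocks up the dominator tree, always ascending from the
+// deeper one, until they meet. (Sketch: in a diamond where B1 branches to
+// B2 and B3, which merge in B4, GetCommonDominator(B2, B3) returns B1.)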
+BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
+  while (b1 != b2) {
+    int32_t b1_depth = b1->dominator_depth();
+    int32_t b2_depth = b2->dominator_depth();
+    if (b1_depth < b2_depth) {
+      b2 = b2->dominator();
+    } else {
+      b1 = b1->dominator();
+    }
+  }
+  return b1;
+}
+
+
+// -----------------------------------------------------------------------------
+// Phase 1: Build control-flow graph.
+
+
+// Internal class to build a control flow graph (i.e. the basic blocks and edges
+// between them within a Schedule) from the node graph. Visits control edges of
+// the graph backwards from an end node in order to find the connected control
+// subgraph, needed for scheduling.
+class CFGBuilder : public ZoneObject {
+ public:
   CFGBuilder(Zone* zone, Scheduler* scheduler)
       : scheduler_(scheduler),
         schedule_(scheduler->schedule_),
+        queued_(scheduler->graph_, 2),
         queue_(zone),
-        control_(zone) {}
+        control_(zone),
+        component_entry_(NULL),
+        component_start_(NULL),
+        component_end_(NULL) {}
 
   // Run the control flow graph construction algorithm by walking the graph
   // backwards from end through control edges, building and connecting the
   // basic blocks for control nodes.
   void Run() {
-    Graph* graph = scheduler_->graph_;
-    FixNode(schedule_->start(), graph->start());
-    Queue(graph->end());
+    ResetDataStructures();
+    Queue(scheduler_->graph_->end());
 
     while (!queue_.empty()) {  // Breadth-first backwards traversal.
       Node* node = queue_.front();
@@ -65,33 +261,82 @@
     for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
       ConnectBlocks(*i);  // Connect block to its predecessor/successors.
     }
-
-    FixNode(schedule_->end(), graph->end());
   }
 
+  // Run the control flow graph construction for a minimal control-connected
+  // component ending in {exit} and merge that component into an existing
+  // control flow graph at the bottom of {block}.
+  void Run(BasicBlock* block, Node* exit) {
+    ResetDataStructures();
+    Queue(exit);
+
+    component_entry_ = NULL;
+    component_start_ = block;
+    component_end_ = schedule_->block(exit);
+    scheduler_->equivalence_->Run(exit);
+    while (!queue_.empty()) {  // Breadth-first backwards traversal.
+      Node* node = queue_.front();
+      queue_.pop();
+
+      // Use control dependence equivalence to find a canonical single-entry
+      // single-exit region that makes up a minimal component to be scheduled.
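+      // (For a floating if-diamond this finds the Branch: Branch and Merge
+      // are cycle-equivalent, so the Branch sharing {exit}'s equivalence
+      // class becomes the component entry.)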
+      if (IsSingleEntrySingleExitRegion(node, exit)) {
+        Trace("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic());
+        DCHECK_EQ(NULL, component_entry_);
+        component_entry_ = node;
+        continue;
+      }
+
+      int max = NodeProperties::PastControlIndex(node);
+      for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
+        Queue(node->InputAt(i));
+      }
+    }
+    DCHECK_NE(NULL, component_entry_);
+
+    for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
+      ConnectBlocks(*i);  // Connect block to its predecessor/successors.
+    }
+  }
+
+ private:
+  // TODO(mstarzinger): Only for Scheduler::FuseFloatingControl.
+  friend class Scheduler;
+
   void FixNode(BasicBlock* block, Node* node) {
     schedule_->AddNode(block, node);
-    scheduler_->GetData(node)->is_connected_control_ = true;
-    scheduler_->GetData(node)->placement_ = Scheduler::kFixed;
+    scheduler_->UpdatePlacement(node, Scheduler::kFixed);
   }
 
   void Queue(Node* node) {
-    // Mark the connected control nodes as they queued.
-    Scheduler::SchedulerData* data = scheduler_->GetData(node);
-    if (!data->is_connected_control_) {
+    // Mark the connected control nodes as they are queued.
+    if (!queued_.Get(node)) {
       BuildBlocks(node);
       queue_.push(node);
+      queued_.Set(node, true);
       control_.push_back(node);
-      data->is_connected_control_ = true;
     }
   }
 
   void BuildBlocks(Node* node) {
     switch (node->opcode()) {
+      case IrOpcode::kEnd:
+        FixNode(schedule_->end(), node);
+        break;
+      case IrOpcode::kStart:
+        FixNode(schedule_->start(), node);
+        break;
       case IrOpcode::kLoop:
       case IrOpcode::kMerge:
         BuildBlockForNode(node);
         break;
+      case IrOpcode::kTerminate: {
+        // Put Terminate in the loop to which it refers.
+        Node* loop = NodeProperties::GetControlInput(node);
+        BasicBlock* block = BuildBlockForNode(loop);
+        FixNode(block, node);
+        break;
+      }
       case IrOpcode::kBranch:
         BuildBlocksForSuccessors(node, IrOpcode::kIfTrue, IrOpcode::kIfFalse);
         break;
@@ -107,11 +352,11 @@
         ConnectMerge(node);
         break;
       case IrOpcode::kBranch:
-        scheduler_->schedule_root_nodes_.push_back(node);
+        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
         ConnectBranch(node);
         break;
       case IrOpcode::kReturn:
-        scheduler_->schedule_root_nodes_.push_back(node);
+        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
         ConnectReturn(node);
         break;
       default:
@@ -119,13 +364,15 @@
     }
   }
 
-  void BuildBlockForNode(Node* node) {
-    if (schedule_->block(node) == NULL) {
-      BasicBlock* block = schedule_->NewBasicBlock();
-      Trace("Create block B%d for #%d:%s\n", block->id(), node->id(),
+  BasicBlock* BuildBlockForNode(Node* node) {
+    BasicBlock* block = schedule_->block(node);
+    if (block == NULL) {
+      block = schedule_->NewBasicBlock();
+      Trace("Create block B%d for #%d:%s\n", block->id().ToInt(), node->id(),
             node->op()->mnemonic());
       FixNode(block, node);
     }
+    return block;
   }
 
   void BuildBlocksForSuccessors(Node* node, IrOpcode::Value a,
@@ -144,14 +391,14 @@
                                    IrOpcode::Value false_opcode) {
     buffer[0] = NULL;
     buffer[1] = NULL;
-    for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
-      if ((*i)->opcode() == true_opcode) {
+    for (Node* use : node->uses()) {
+      if (use->opcode() == true_opcode) {
         DCHECK_EQ(NULL, buffer[0]);
-        buffer[0] = *i;
+        buffer[0] = use;
       }
-      if ((*i)->opcode() == false_opcode) {
+      if (use->opcode() == false_opcode) {
         DCHECK_EQ(NULL, buffer[1]);
-        buffer[1] = *i;
+        buffer[1] = use;
       }
     }
     DCHECK_NE(NULL, buffer[0]);
@@ -168,33 +415,51 @@
   }
 
   void ConnectBranch(Node* branch) {
-    Node* branch_block_node = NodeProperties::GetControlInput(branch);
-    BasicBlock* branch_block = schedule_->block(branch_block_node);
-    DCHECK(branch_block != NULL);
-
     BasicBlock* successor_blocks[2];
     CollectSuccessorBlocks(branch, successor_blocks, IrOpcode::kIfTrue,
                            IrOpcode::kIfFalse);
 
-    TraceConnect(branch, branch_block, successor_blocks[0]);
-    TraceConnect(branch, branch_block, successor_blocks[1]);
+    // Consider branch hints.
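+    // A kTrue hint marks the false successor as deferred (and vice versa),
+    // hinting later phases to lay the unlikely path out of line.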
+    switch (BranchHintOf(branch->op())) {
+      case BranchHint::kNone:
+        break;
+      case BranchHint::kTrue:
+        successor_blocks[1]->set_deferred(true);
+        break;
+      case BranchHint::kFalse:
+        successor_blocks[0]->set_deferred(true);
+        break;
+    }
 
-    schedule_->AddBranch(branch_block, branch, successor_blocks[0],
-                         successor_blocks[1]);
+    if (branch == component_entry_) {
+      TraceConnect(branch, component_start_, successor_blocks[0]);
+      TraceConnect(branch, component_start_, successor_blocks[1]);
+      schedule_->InsertBranch(component_start_, component_end_, branch,
+                              successor_blocks[0], successor_blocks[1]);
+    } else {
+      Node* branch_block_node = NodeProperties::GetControlInput(branch);
+      BasicBlock* branch_block = schedule_->block(branch_block_node);
+      DCHECK(branch_block != NULL);
+
+      TraceConnect(branch, branch_block, successor_blocks[0]);
+      TraceConnect(branch, branch_block, successor_blocks[1]);
+      schedule_->AddBranch(branch_block, branch, successor_blocks[0],
+                           successor_blocks[1]);
+    }
   }
 
   void ConnectMerge(Node* merge) {
+    // Don't connect the special merge at the end to its predecessors.
+    if (IsFinalMerge(merge)) return;
+
     BasicBlock* block = schedule_->block(merge);
     DCHECK(block != NULL);
     // For all of the merge's control inputs, add a goto at the end to the
     // merge's basic block.
-    for (InputIter j = merge->inputs().begin(); j != merge->inputs().end();
-         ++j) {
-      BasicBlock* predecessor_block = schedule_->block(*j);
-      if ((*j)->opcode() != IrOpcode::kReturn) {
-        TraceConnect(merge, predecessor_block, block);
-        schedule_->AddGoto(predecessor_block, block);
-      }
+    for (Node* const input : merge->inputs()) {
+      BasicBlock* predecessor_block = schedule_->block(input);
+      TraceConnect(merge, predecessor_block, block);
+      schedule_->AddGoto(predecessor_block, block);
     }
   }
 
@@ -209,713 +474,59 @@
     DCHECK_NE(NULL, block);
     if (succ == NULL) {
       Trace("Connect #%d:%s, B%d -> end\n", node->id(), node->op()->mnemonic(),
-            block->id());
+            block->id().ToInt());
     } else {
       Trace("Connect #%d:%s, B%d -> B%d\n", node->id(), node->op()->mnemonic(),
-            block->id(), succ->id());
+            block->id().ToInt(), succ->id().ToInt());
     }
   }
+
+  bool IsFinalMerge(Node* node) {
+    return (node->opcode() == IrOpcode::kMerge &&
+            node == scheduler_->graph_->end()->InputAt(0));
+  }
+
+  bool IsSingleEntrySingleExitRegion(Node* entry, Node* exit) const {
+    size_t entry_class = scheduler_->equivalence_->ClassOf(entry);
+    size_t exit_class = scheduler_->equivalence_->ClassOf(exit);
+    return entry != exit && entry_class == exit_class;
+  }
+
+  void ResetDataStructures() {
+    control_.clear();
+    DCHECK(queue_.empty());
+    DCHECK(control_.empty());
+  }
+
+  Scheduler* scheduler_;
+  Schedule* schedule_;
+  NodeMarker<bool> queued_;      // Mark indicating whether node is queued.
+  ZoneQueue<Node*> queue_;       // Queue used for breadth-first traversal.
+  NodeVector control_;           // List of encountered control nodes.
+  Node* component_entry_;        // Component single-entry node.
+  BasicBlock* component_start_;  // Component single-entry block.
+  BasicBlock* component_end_;    // Component single-exit block.
 };
 
 
-Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
-  SchedulerData def = {0, 0, false, false, kUnknown};
-  return def;
-}
-
-
-Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
-    : zone_(zone),
-      graph_(graph),
-      schedule_(schedule),
-      scheduled_nodes_(zone),
-      schedule_root_nodes_(zone),
-      node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone),
-      has_floating_control_(false) {}
-
-
-Schedule* Scheduler::ComputeSchedule(Graph* graph) {
-  Schedule* schedule;
-  bool had_floating_control = false;
-  do {
-    Zone tmp_zone(graph->zone()->isolate());
-    schedule = new (graph->zone()) Schedule(graph->zone());
-    Scheduler scheduler(&tmp_zone, graph, schedule);
-
-    scheduler.BuildCFG();
-
-    Scheduler::ComputeSpecialRPO(schedule);
-    scheduler.GenerateImmediateDominatorTree();
-
-    scheduler.PrepareUses();
-    scheduler.ScheduleEarly();
-    scheduler.ScheduleLate();
-
-    had_floating_control = scheduler.ConnectFloatingControl();
-  } while (had_floating_control);
-
-  return schedule;
-}
-
-
-Scheduler::Placement Scheduler::GetPlacement(Node* node) {
-  SchedulerData* data = GetData(node);
-  if (data->placement_ == kUnknown) {  // Compute placement, once, on demand.
-    switch (node->opcode()) {
-      case IrOpcode::kParameter:
-        // Parameters are always fixed to the start node.
-        data->placement_ = kFixed;
-        break;
-      case IrOpcode::kPhi:
-      case IrOpcode::kEffectPhi: {
-        // Phis and effect phis are fixed if their control inputs are.
-        data->placement_ = GetPlacement(NodeProperties::GetControlInput(node));
-        break;
-      }
-#define DEFINE_FLOATING_CONTROL_CASE(V) case IrOpcode::k##V:
-        CONTROL_OP_LIST(DEFINE_FLOATING_CONTROL_CASE)
-#undef DEFINE_FLOATING_CONTROL_CASE
-        {
-          // Control nodes that were not control-reachable from end may float.
-          data->placement_ = kSchedulable;
-          if (!data->is_connected_control_) {
-            data->is_floating_control_ = true;
-            has_floating_control_ = true;
-            Trace("Floating control found: #%d:%s\n", node->id(),
-                  node->op()->mnemonic());
-          }
-          break;
-        }
-      default:
-        data->placement_ = kSchedulable;
-        break;
-    }
-  }
-  return data->placement_;
-}
-
-
 void Scheduler::BuildCFG() {
-  Trace("---------------- CREATING CFG ------------------\n");
-  CFGBuilder cfg_builder(zone_, this);
-  cfg_builder.Run();
+  Trace("--- CREATING CFG -------------------------------------------\n");
+
+  // Instantiate a new control equivalence algorithm for the graph.
+  equivalence_ = new (zone_) ControlEquivalence(zone_, graph_);
+
+  // Build a control-flow graph for the main control-connected component that
+  // is being spanned by the graph's start and end nodes.
+  control_flow_builder_ = new (zone_) CFGBuilder(zone_, this);
+  control_flow_builder_->Run();
+
   // Initialize per-block data.
   scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
 }
 
 
-BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
-  while (b1 != b2) {
-    int b1_rpo = GetRPONumber(b1);
-    int b2_rpo = GetRPONumber(b2);
-    DCHECK(b1_rpo != b2_rpo);
-    if (b1_rpo < b2_rpo) {
-      b2 = b2->dominator_;
-    } else {
-      b1 = b1->dominator_;
-    }
-  }
-  return b1;
-}
-
-
-void Scheduler::GenerateImmediateDominatorTree() {
-  // Build the dominator graph.  TODO(danno): consider using Lengauer & Tarjan's
-  // if this becomes really slow.
-  Trace("------------ IMMEDIATE BLOCK DOMINATORS -----------\n");
-  for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) {
-    BasicBlock* current_rpo = schedule_->rpo_order_[i];
-    if (current_rpo != schedule_->start()) {
-      BasicBlock::Predecessors::iterator current_pred =
-          current_rpo->predecessors().begin();
-      BasicBlock::Predecessors::iterator end =
-          current_rpo->predecessors().end();
-      DCHECK(current_pred != end);
-      BasicBlock* dominator = *current_pred;
-      ++current_pred;
-      // For multiple predecessors, walk up the rpo ordering until a common
-      // dominator is found.
-      int current_rpo_pos = GetRPONumber(current_rpo);
-      while (current_pred != end) {
-        // Don't examine backwards edges
-        BasicBlock* pred = *current_pred;
-        if (GetRPONumber(pred) < current_rpo_pos) {
-          dominator = GetCommonDominator(dominator, *current_pred);
-        }
-        ++current_pred;
-      }
-      current_rpo->dominator_ = dominator;
-      Trace("Block %d's idom is %d\n", current_rpo->id(), dominator->id());
-    }
-  }
-}
-
-
-class ScheduleEarlyNodeVisitor : public NullNodeVisitor {
- public:
-  explicit ScheduleEarlyNodeVisitor(Scheduler* scheduler)
-      : has_changed_rpo_constraints_(true),
-        scheduler_(scheduler),
-        schedule_(scheduler->schedule_) {}
-
-  GenericGraphVisit::Control Pre(Node* node) {
-    int max_rpo = 0;
-    // Fixed nodes already know their schedule early position.
-    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
-      BasicBlock* block = schedule_->block(node);
-      DCHECK(block != NULL);
-      max_rpo = block->rpo_number_;
-      if (scheduler_->GetData(node)->minimum_rpo_ != max_rpo) {
-        has_changed_rpo_constraints_ = true;
-      }
-      scheduler_->GetData(node)->minimum_rpo_ = max_rpo;
-      Trace("Preschedule #%d:%s minimum_rpo = %d\n", node->id(),
-            node->op()->mnemonic(), max_rpo);
-    }
-    return GenericGraphVisit::CONTINUE;
-  }
-
-  GenericGraphVisit::Control Post(Node* node) {
-    int max_rpo = 0;
-    // Otherwise, the minimum rpo for the node is the max of all of the inputs.
-    if (scheduler_->GetPlacement(node) != Scheduler::kFixed) {
-      for (InputIter i = node->inputs().begin(); i != node->inputs().end();
-           ++i) {
-        int control_rpo = scheduler_->GetData(*i)->minimum_rpo_;
-        if (control_rpo > max_rpo) {
-          max_rpo = control_rpo;
-        }
-      }
-      if (scheduler_->GetData(node)->minimum_rpo_ != max_rpo) {
-        has_changed_rpo_constraints_ = true;
-      }
-      scheduler_->GetData(node)->minimum_rpo_ = max_rpo;
-      Trace("Postschedule #%d:%s minimum_rpo = %d\n", node->id(),
-            node->op()->mnemonic(), max_rpo);
-    }
-    return GenericGraphVisit::CONTINUE;
-  }
-
-  // TODO(mstarzinger): Dirty hack to unblock others, schedule early should be
-  // rewritten to use a pre-order traversal from the start instead.
-  bool has_changed_rpo_constraints_;
-
- private:
-  Scheduler* scheduler_;
-  Schedule* schedule_;
-};
-
-
-void Scheduler::ScheduleEarly() {
-  Trace("------------------- SCHEDULE EARLY ----------------\n");
-
-  int fixpoint_count = 0;
-  ScheduleEarlyNodeVisitor visitor(this);
-  while (visitor.has_changed_rpo_constraints_) {
-    visitor.has_changed_rpo_constraints_ = false;
-    graph_->VisitNodeInputsFromEnd(&visitor);
-    fixpoint_count++;
-  }
-
-  Trace("It took %d iterations to determine fixpoint\n", fixpoint_count);
-}
-
-
-class PrepareUsesVisitor : public NullNodeVisitor {
- public:
-  explicit PrepareUsesVisitor(Scheduler* scheduler)
-      : scheduler_(scheduler), schedule_(scheduler->schedule_) {}
-
-  GenericGraphVisit::Control Pre(Node* node) {
-    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
-      // Fixed nodes are always roots for schedule late.
-      scheduler_->schedule_root_nodes_.push_back(node);
-      if (!schedule_->IsScheduled(node)) {
-        // Make sure root nodes are scheduled in their respective blocks.
-        Trace("  Scheduling fixed position node #%d:%s\n", node->id(),
-              node->op()->mnemonic());
-        IrOpcode::Value opcode = node->opcode();
-        BasicBlock* block =
-            opcode == IrOpcode::kParameter
-                ? schedule_->start()
-                : schedule_->block(NodeProperties::GetControlInput(node));
-        DCHECK(block != NULL);
-        schedule_->AddNode(block, node);
-      }
-    }
-
-    return GenericGraphVisit::CONTINUE;
-  }
-
-  void PostEdge(Node* from, int index, Node* to) {
-    // If the edge is from an unscheduled node, then tally it in the use count
-    // for all of its inputs. The same criterion will be used in ScheduleLate
-    // for decrementing use counts.
-    if (!schedule_->IsScheduled(from)) {
-      DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
-      ++(scheduler_->GetData(to)->unscheduled_count_);
-      Trace("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", to->id(),
-            to->op()->mnemonic(), from->id(), from->op()->mnemonic(),
-            scheduler_->GetData(to)->unscheduled_count_);
-    }
-  }
-
- private:
-  Scheduler* scheduler_;
-  Schedule* schedule_;
-};
-
-
-void Scheduler::PrepareUses() {
-  Trace("------------------- PREPARE USES ------------------\n");
-  // Count the uses of every node, it will be used to ensure that all of a
-  // node's uses are scheduled before the node itself.
-  PrepareUsesVisitor prepare_uses(this);
-  graph_->VisitNodeInputsFromEnd(&prepare_uses);
-}
-
-
-class ScheduleLateNodeVisitor : public NullNodeVisitor {
- public:
-  explicit ScheduleLateNodeVisitor(Scheduler* scheduler)
-      : scheduler_(scheduler), schedule_(scheduler_->schedule_) {}
-
-  GenericGraphVisit::Control Pre(Node* node) {
-    // Don't schedule nodes that are already scheduled.
-    if (schedule_->IsScheduled(node)) {
-      return GenericGraphVisit::CONTINUE;
-    }
-    Scheduler::SchedulerData* data = scheduler_->GetData(node);
-    DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
-
-    // If all the uses of a node have been scheduled, then the node itself can
-    // be scheduled.
-    bool eligible = data->unscheduled_count_ == 0;
-    Trace("Testing for schedule eligibility for #%d:%s = %s\n", node->id(),
-          node->op()->mnemonic(), eligible ? "true" : "false");
-    if (!eligible) return GenericGraphVisit::DEFER;
-
-    // Determine the dominating block for all of the uses of this node. It is
-    // the latest block that this node can be scheduled in.
-    BasicBlock* block = NULL;
-    for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end();
-         ++i) {
-      BasicBlock* use_block = GetBlockForUse(i.edge());
-      block = block == NULL ? use_block : use_block == NULL
-                                              ? block
-                                              : scheduler_->GetCommonDominator(
-                                                    block, use_block);
-    }
-    DCHECK(block != NULL);
-
-    int min_rpo = data->minimum_rpo_;
-    Trace(
-        "Schedule late conservative for #%d:%s is B%d at loop depth %d, "
-        "minimum_rpo = %d\n",
-        node->id(), node->op()->mnemonic(), block->id(), block->loop_depth_,
-        min_rpo);
-    // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
-    // into enclosing loop pre-headers until they would preceed their
-    // ScheduleEarly position.
-    BasicBlock* hoist_block = block;
-    while (hoist_block != NULL && hoist_block->rpo_number_ >= min_rpo) {
-      if (hoist_block->loop_depth_ < block->loop_depth_) {
-        block = hoist_block;
-        Trace("  hoisting #%d:%s to block %d\n", node->id(),
-              node->op()->mnemonic(), block->id());
-      }
-      // Try to hoist to the pre-header of the loop header.
-      hoist_block = hoist_block->loop_header();
-      if (hoist_block != NULL) {
-        BasicBlock* pre_header = hoist_block->dominator_;
-        DCHECK(pre_header == NULL ||
-               *hoist_block->predecessors().begin() == pre_header);
-        Trace(
-            "  hoist to pre-header B%d of loop header B%d, depth would be %d\n",
-            pre_header->id(), hoist_block->id(), pre_header->loop_depth_);
-        hoist_block = pre_header;
-      }
-    }
-
-    ScheduleNode(block, node);
-
-    return GenericGraphVisit::CONTINUE;
-  }
-
- private:
-  BasicBlock* GetBlockForUse(Node::Edge edge) {
-    Node* use = edge.from();
-    IrOpcode::Value opcode = use->opcode();
-    if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
-      // If the use is from a fixed (i.e. non-floating) phi, use the block
-      // of the corresponding control input to the merge.
-      int index = edge.index();
-      if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
-        Trace("  input@%d into a fixed phi #%d:%s\n", index, use->id(),
-              use->op()->mnemonic());
-        Node* merge = NodeProperties::GetControlInput(use, 0);
-        opcode = merge->opcode();
-        DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
-        use = NodeProperties::GetControlInput(merge, index);
-      }
-    }
-    BasicBlock* result = schedule_->block(use);
-    if (result == NULL) return NULL;
-    Trace("  must dominate use #%d:%s in B%d\n", use->id(),
-          use->op()->mnemonic(), result->id());
-    return result;
-  }
-
-  void ScheduleNode(BasicBlock* block, Node* node) {
-    schedule_->PlanNode(block, node);
-    scheduler_->scheduled_nodes_[block->id()].push_back(node);
-
-    // Reduce the use count of the node's inputs to potentially make them
-    // schedulable.
-    for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
-      Scheduler::SchedulerData* data = scheduler_->GetData(*i);
-      DCHECK(data->unscheduled_count_ > 0);
-      --data->unscheduled_count_;
-      if (FLAG_trace_turbo_scheduler) {
-        Trace("  Use count for #%d:%s (used by #%d:%s)-- = %d\n", (*i)->id(),
-              (*i)->op()->mnemonic(), i.edge().from()->id(),
-              i.edge().from()->op()->mnemonic(), data->unscheduled_count_);
-        if (data->unscheduled_count_ == 0) {
-          Trace("  newly eligible #%d:%s\n", (*i)->id(),
-                (*i)->op()->mnemonic());
-        }
-      }
-    }
-  }
-
-  Scheduler* scheduler_;
-  Schedule* schedule_;
-};
-
-
-void Scheduler::ScheduleLate() {
-  Trace("------------------- SCHEDULE LATE -----------------\n");
-  if (FLAG_trace_turbo_scheduler) {
-    Trace("roots: ");
-    for (NodeVectorIter i = schedule_root_nodes_.begin();
-         i != schedule_root_nodes_.end(); ++i) {
-      Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic());
-    }
-    Trace("\n");
-  }
-
-  // Schedule: Places nodes in dominator block of all their uses.
-  ScheduleLateNodeVisitor schedule_late_visitor(this);
-
-  {
-    Zone zone(zone_->isolate());
-    GenericGraphVisit::Visit<ScheduleLateNodeVisitor,
-                             NodeInputIterationTraits<Node> >(
-        graph_, &zone, schedule_root_nodes_.begin(), schedule_root_nodes_.end(),
-        &schedule_late_visitor);
-  }
-
-  // Add collected nodes for basic blocks to their blocks in the right order.
-  int block_num = 0;
-  for (NodeVectorVectorIter i = scheduled_nodes_.begin();
-       i != scheduled_nodes_.end(); ++i) {
-    for (NodeVectorRIter j = i->rbegin(); j != i->rend(); ++j) {
-      schedule_->AddNode(schedule_->all_blocks_.at(block_num), *j);
-    }
-    block_num++;
-  }
-}
-
-
-bool Scheduler::ConnectFloatingControl() {
-  if (!has_floating_control_) return false;
-
-  Trace("Connecting floating control...\n");
-
-  // Process blocks and instructions backwards to find and connect floating
-  // control nodes into the control graph according to the block they were
-  // scheduled into.
-  int max = static_cast<int>(schedule_->rpo_order()->size());
-  for (int i = max - 1; i >= 0; i--) {
-    BasicBlock* block = schedule_->rpo_order()->at(i);
-    // TODO(titzer): we place at most one floating control structure per
-    // basic block because scheduling currently can interleave phis from
-    // one subgraph with the merges from another subgraph.
-    bool one_placed = false;
-    for (int j = static_cast<int>(block->nodes_.size()) - 1; j >= 0; j--) {
-      Node* node = block->nodes_[j];
-      SchedulerData* data = GetData(node);
-      if (data->is_floating_control_ && !data->is_connected_control_ &&
-          !one_placed) {
-        Trace("  Floating control #%d:%s was scheduled in B%d\n", node->id(),
-              node->op()->mnemonic(), block->id());
-        ConnectFloatingControlSubgraph(block, node);
-        one_placed = true;
-      }
-    }
-  }
-
-  return true;
-}
-
-
-void Scheduler::ConnectFloatingControlSubgraph(BasicBlock* block, Node* end) {
-  Node* block_start = block->nodes_[0];
-  DCHECK(IrOpcode::IsControlOpcode(block_start->opcode()));
-  // Find the current "control successor" of the node that starts the block
-  // by searching the control uses for a control input edge from a connected
-  // control node.
-  Node* control_succ = NULL;
-  for (UseIter i = block_start->uses().begin(); i != block_start->uses().end();
-       ++i) {
-    Node::Edge edge = i.edge();
-    if (NodeProperties::IsControlEdge(edge) &&
-        GetData(edge.from())->is_connected_control_) {
-      DCHECK_EQ(NULL, control_succ);
-      control_succ = edge.from();
-      control_succ->ReplaceInput(edge.index(), end);
-    }
-  }
-  DCHECK_NE(NULL, control_succ);
-  Trace("  Inserting floating control end %d:%s between %d:%s -> %d:%s\n",
-        end->id(), end->op()->mnemonic(), control_succ->id(),
-        control_succ->op()->mnemonic(), block_start->id(),
-        block_start->op()->mnemonic());
-
-  // Find the "start" node of the control subgraph, which should be the
-  // unique node that is itself floating control but has a control input that
-  // is not floating.
-  Node* start = NULL;
-  ZoneQueue<Node*> queue(zone_);
-  queue.push(end);
-  GetData(end)->is_connected_control_ = true;
-  while (!queue.empty()) {
-    Node* node = queue.front();
-    queue.pop();
-    Trace("  Search #%d:%s for control subgraph start\n", node->id(),
-          node->op()->mnemonic());
-    int max = NodeProperties::PastControlIndex(node);
-    for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
-      Node* input = node->InputAt(i);
-      SchedulerData* data = GetData(input);
-      if (data->is_floating_control_) {
-        // {input} is floating control.
-        if (!data->is_connected_control_) {
-          // First time seeing {input} during this traversal, queue it.
-          queue.push(input);
-          data->is_connected_control_ = true;
-        }
-      } else {
-        // Otherwise, {node} is the start node, because it is floating control
-        // but is connected to {input} that is not floating control.
-        DCHECK_EQ(NULL, start);  // There can be only one.
-        start = node;
-      }
-    }
-  }
-
-  DCHECK_NE(NULL, start);
-  start->ReplaceInput(NodeProperties::FirstControlIndex(start), block_start);
-
-  Trace("  Connecting floating control start %d:%s to %d:%s\n", start->id(),
-        start->op()->mnemonic(), block_start->id(),
-        block_start->op()->mnemonic());
-}
-
-
-// Numbering for BasicBlockData.rpo_number_ for this block traversal:
-static const int kBlockOnStack = -2;
-static const int kBlockVisited1 = -3;
-static const int kBlockVisited2 = -4;
-static const int kBlockUnvisited1 = -1;
-static const int kBlockUnvisited2 = kBlockVisited1;
-
-struct SpecialRPOStackFrame {
-  BasicBlock* block;
-  int index;
-};
-
-struct BlockList {
-  BasicBlock* block;
-  BlockList* next;
-
-  BlockList* Add(Zone* zone, BasicBlock* b) {
-    BlockList* list = static_cast<BlockList*>(zone->New(sizeof(BlockList)));
-    list->block = b;
-    list->next = this;
-    return list;
-  }
-
-  void Serialize(BasicBlockVector* final_order) {
-    for (BlockList* l = this; l != NULL; l = l->next) {
-      l->block->rpo_number_ = static_cast<int>(final_order->size());
-      final_order->push_back(l->block);
-    }
-  }
-};
-
-struct LoopInfo {
-  BasicBlock* header;
-  ZoneList<BasicBlock*>* outgoing;
-  BitVector* members;
-  LoopInfo* prev;
-  BlockList* end;
-  BlockList* start;
-
-  void AddOutgoing(Zone* zone, BasicBlock* block) {
-    if (outgoing == NULL) outgoing = new (zone) ZoneList<BasicBlock*>(2, zone);
-    outgoing->Add(block, zone);
-  }
-};
-
-
-static int Push(SpecialRPOStackFrame* stack, int depth, BasicBlock* child,
-                int unvisited) {
-  if (child->rpo_number_ == unvisited) {
-    stack[depth].block = child;
-    stack[depth].index = 0;
-    child->rpo_number_ = kBlockOnStack;
-    return depth + 1;
-  }
-  return depth;
-}
-
-
-// Computes loop membership from the backedges of the control flow graph.
-static LoopInfo* ComputeLoopInfo(
-    Zone* zone, SpecialRPOStackFrame* queue, int num_loops, int num_blocks,
-    ZoneList<std::pair<BasicBlock*, int> >* backedges) {
-  LoopInfo* loops = zone->NewArray<LoopInfo>(num_loops);
-  memset(loops, 0, num_loops * sizeof(LoopInfo));
-
-  // Compute loop membership starting from backedges.
-  // O(max(loop_depth) * max(|loop|)
-  for (int i = 0; i < backedges->length(); i++) {
-    BasicBlock* member = backedges->at(i).first;
-    BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
-    int loop_num = header->loop_end_;
-    if (loops[loop_num].header == NULL) {
-      loops[loop_num].header = header;
-      loops[loop_num].members = new (zone) BitVector(num_blocks, zone);
-    }
-
-    int queue_length = 0;
-    if (member != header) {
-      // As long as the header doesn't have a backedge to itself,
-      // Push the member onto the queue and process its predecessors.
-      if (!loops[loop_num].members->Contains(member->id())) {
-        loops[loop_num].members->Add(member->id());
-      }
-      queue[queue_length++].block = member;
-    }
-
-    // Propagate loop membership backwards. All predecessors of M up to the
-    // loop header H are members of the loop too. O(|blocks between M and H|).
-    while (queue_length > 0) {
-      BasicBlock* block = queue[--queue_length].block;
-      for (int i = 0; i < block->PredecessorCount(); i++) {
-        BasicBlock* pred = block->PredecessorAt(i);
-        if (pred != header) {
-          if (!loops[loop_num].members->Contains(pred->id())) {
-            loops[loop_num].members->Add(pred->id());
-            queue[queue_length++].block = pred;
-          }
-        }
-      }
-    }
-  }
-  return loops;
-}
-
-
-#if DEBUG
-static void PrintRPO(int num_loops, LoopInfo* loops, BasicBlockVector* order) {
-  PrintF("-- RPO with %d loops ", num_loops);
-  if (num_loops > 0) {
-    PrintF("(");
-    for (int i = 0; i < num_loops; i++) {
-      if (i > 0) PrintF(" ");
-      PrintF("B%d", loops[i].header->id());
-    }
-    PrintF(") ");
-  }
-  PrintF("-- \n");
-
-  for (int i = 0; i < static_cast<int>(order->size()); i++) {
-    BasicBlock* block = (*order)[i];
-    int bid = block->id();
-    PrintF("%5d:", i);
-    for (int i = 0; i < num_loops; i++) {
-      bool membership = loops[i].members->Contains(bid);
-      bool range = loops[i].header->LoopContains(block);
-      PrintF(membership ? " |" : "  ");
-      PrintF(range ? "x" : " ");
-    }
-    PrintF("  B%d: ", bid);
-    if (block->loop_end_ >= 0) {
-      PrintF(" range: [%d, %d)", block->rpo_number_, block->loop_end_);
-    }
-    PrintF("\n");
-  }
-}
-
-
-static void VerifySpecialRPO(int num_loops, LoopInfo* loops,
-                             BasicBlockVector* order) {
-  DCHECK(order->size() > 0);
-  DCHECK((*order)[0]->id() == 0);  // entry should be first.
-
-  for (int i = 0; i < num_loops; i++) {
-    LoopInfo* loop = &loops[i];
-    BasicBlock* header = loop->header;
-
-    DCHECK(header != NULL);
-    DCHECK(header->rpo_number_ >= 0);
-    DCHECK(header->rpo_number_ < static_cast<int>(order->size()));
-    DCHECK(header->loop_end_ >= 0);
-    DCHECK(header->loop_end_ <= static_cast<int>(order->size()));
-    DCHECK(header->loop_end_ > header->rpo_number_);
-
-    // Verify the start ... end list relationship.
-    int links = 0;
-    BlockList* l = loop->start;
-    DCHECK(l != NULL && l->block == header);
-    bool end_found;
-    while (true) {
-      if (l == NULL || l == loop->end) {
-        end_found = (loop->end == l);
-        break;
-      }
-      // The list should be in same order as the final result.
-      DCHECK(l->block->rpo_number_ == links + loop->header->rpo_number_);
-      links++;
-      l = l->next;
-      DCHECK(links < static_cast<int>(2 * order->size()));  // cycle?
-    }
-    DCHECK(links > 0);
-    DCHECK(links == (header->loop_end_ - header->rpo_number_));
-    DCHECK(end_found);
-
-    // Check the contiguousness of loops.
-    int count = 0;
-    for (int j = 0; j < static_cast<int>(order->size()); j++) {
-      BasicBlock* block = order->at(j);
-      DCHECK(block->rpo_number_ == j);
-      if (j < header->rpo_number_ || j >= header->loop_end_) {
-        DCHECK(!loop->members->Contains(block->id()));
-      } else {
-        if (block == header) {
-          DCHECK(!loop->members->Contains(block->id()));
-        } else {
-          DCHECK(loop->members->Contains(block->id()));
-        }
-        count++;
-      }
-    }
-    DCHECK(links == count);
-  }
-}
-#endif  // DEBUG
+// -----------------------------------------------------------------------------
+// Phase 2: Compute special RPO and dominator tree.
 
 
 // Compute the special reverse-post-order block ordering, which is essentially
@@ -928,198 +539,945 @@
 //    headed at A.
 // 2. All loops are contiguous in the order (i.e. no intervening blocks that
 //    do not belong to the loop.)
-// Note a simple RPO traversal satisfies (1) but not (3).
-BasicBlockVector* Scheduler::ComputeSpecialRPO(Schedule* schedule) {
-  Zone tmp_zone(schedule->zone()->isolate());
-  Zone* zone = &tmp_zone;
-  Trace("------------- COMPUTING SPECIAL RPO ---------------\n");
-  // RPO should not have been computed for this schedule yet.
-  CHECK_EQ(kBlockUnvisited1, schedule->start()->rpo_number_);
-  CHECK_EQ(0, static_cast<int>(schedule->rpo_order_.size()));
+// Note a simple RPO traversal satisfies (1) but not (2).
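+// (Example: for a loop header L with body {A, B} followed by block X, a valid
+// special RPO is [..., L, A, B, X, ...]; interleaving X between L and B would
+// violate (2).)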
+class SpecialRPONumberer : public ZoneObject {
+ public:
+  SpecialRPONumberer(Zone* zone, Schedule* schedule)
+      : zone_(zone),
+        schedule_(schedule),
+        order_(NULL),
+        beyond_end_(NULL),
+        loops_(zone),
+        backedges_(zone),
+        stack_(zone),
+        previous_block_count_(0) {}
 
-  // Perform an iterative RPO traversal using an explicit stack,
-  // recording backedges that form cycles. O(|B|).
-  ZoneList<std::pair<BasicBlock*, int> > backedges(1, zone);
-  SpecialRPOStackFrame* stack =
-      zone->NewArray<SpecialRPOStackFrame>(schedule->BasicBlockCount());
-  BasicBlock* entry = schedule->start();
-  BlockList* order = NULL;
-  int stack_depth = Push(stack, 0, entry, kBlockUnvisited1);
-  int num_loops = 0;
-
-  while (stack_depth > 0) {
-    int current = stack_depth - 1;
-    SpecialRPOStackFrame* frame = stack + current;
-
-    if (frame->index < frame->block->SuccessorCount()) {
-      // Process the next successor.
-      BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
-      if (succ->rpo_number_ == kBlockVisited1) continue;
-      if (succ->rpo_number_ == kBlockOnStack) {
-        // The successor is on the stack, so this is a backedge (cycle).
-        backedges.Add(
-            std::pair<BasicBlock*, int>(frame->block, frame->index - 1), zone);
-        if (succ->loop_end_ < 0) {
-          // Assign a new loop number to the header if it doesn't have one.
-          succ->loop_end_ = num_loops++;
-        }
-      } else {
-        // Push the successor onto the stack.
-        DCHECK(succ->rpo_number_ == kBlockUnvisited1);
-        stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited1);
-      }
-    } else {
-      // Finished with all successors; pop the stack and add the block.
-      order = order->Add(zone, frame->block);
-      frame->block->rpo_number_ = kBlockVisited1;
-      stack_depth--;
-    }
+  // Computes the special reverse-post-order for the main control flow
+  // graph, that is, for the graph spanned between the schedule's start
+  // and end blocks.
+  void ComputeSpecialRPO() {
+    DCHECK(schedule_->end()->SuccessorCount() == 0);
+    DCHECK_EQ(NULL, order_);  // Main order does not exist yet.
+    ComputeAndInsertSpecialRPO(schedule_->start(), schedule_->end());
   }
 
-  // If no loops were encountered, then the order we computed was correct.
-  LoopInfo* loops = NULL;
-  if (num_loops != 0) {
-    // Otherwise, compute the loop information from the backedges in order
-    // to perform a traversal that groups loop bodies together.
-    loops = ComputeLoopInfo(zone, stack, num_loops, schedule->BasicBlockCount(),
-                            &backedges);
+  // Computes the special reverse-post-order for a partial control flow
+  // graph, that is, for the graph spanned between the given {entry} and
+  // {end} blocks, then updates the existing ordering with this new
+  // information.
+  void UpdateSpecialRPO(BasicBlock* entry, BasicBlock* end) {
+    DCHECK_NE(NULL, order_);  // Main order to be updated is present.
+    ComputeAndInsertSpecialRPO(entry, end);
+  }
 
-    // Initialize the "loop stack". Note the entry could be a loop header.
-    LoopInfo* loop = entry->IsLoopHeader() ? &loops[entry->loop_end_] : NULL;
-    order = NULL;
+  // Serialize the previously computed order as a special reverse-post-order
+  // numbering for basic blocks into the final schedule.
+  void SerializeRPOIntoSchedule() {
+    int32_t number = 0;
+    for (BasicBlock* b = order_; b != NULL; b = b->rpo_next()) {
+      b->set_rpo_number(number++);
+      schedule_->rpo_order()->push_back(b);
+    }
+    BeyondEndSentinel()->set_rpo_number(number);
+  }
 
-    // Perform an iterative post-order traversal, visiting loop bodies before
-    // edges that lead out of loops. Visits each block once, but linking loop
-    // sections together is linear in the loop size, so overall is
-    // O(|B| + max(loop_depth) * max(|loop|))
-    stack_depth = Push(stack, 0, entry, kBlockUnvisited2);
-    while (stack_depth > 0) {
-      SpecialRPOStackFrame* frame = stack + (stack_depth - 1);
-      BasicBlock* block = frame->block;
-      BasicBlock* succ = NULL;
+  // Print and verify the special reverse-post-order.
+  void PrintAndVerifySpecialRPO() {
+#if DEBUG
+    if (FLAG_trace_turbo_scheduler) PrintRPO();
+    VerifySpecialRPO();
+#endif
+  }
 
-      if (frame->index < block->SuccessorCount()) {
-        // Process the next normal successor.
-        succ = block->SuccessorAt(frame->index++);
-      } else if (block->IsLoopHeader()) {
-        // Process additional outgoing edges from the loop header.
-        if (block->rpo_number_ == kBlockOnStack) {
-          // Finish the loop body the first time the header is left on the
-          // stack.
-          DCHECK(loop != NULL && loop->header == block);
-          loop->start = order->Add(zone, block);
-          order = loop->end;
-          block->rpo_number_ = kBlockVisited2;
-          // Pop the loop stack and continue visiting outgoing edges within the
-          // the context of the outer loop, if any.
-          loop = loop->prev;
-          // We leave the loop header on the stack; the rest of this iteration
-          // and later iterations will go through its outgoing edges list.
-        }
+ private:
+  typedef std::pair<BasicBlock*, size_t> Backedge;
 
-        // Use the next outgoing edge if there are any.
-        int outgoing_index = frame->index - block->SuccessorCount();
-        LoopInfo* info = &loops[block->loop_end_];
-        DCHECK(loop != info);
-        if (info->outgoing != NULL &&
-            outgoing_index < info->outgoing->length()) {
-          succ = info->outgoing->at(outgoing_index);
-          frame->index++;
-        }
+  // Numbering for BasicBlock::rpo_number for this block traversal:
+  static const int kBlockOnStack = -2;
+  static const int kBlockVisited1 = -3;
+  static const int kBlockVisited2 = -4;
+  static const int kBlockUnvisited1 = -1;
+  static const int kBlockUnvisited2 = kBlockVisited1;
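+  // Note: the second traversal reuses kBlockVisited1 as its "unvisited"
+  // marker, so exactly the blocks finished by the first pass are eligible
+  // for the second.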
+
+  struct SpecialRPOStackFrame {
+    BasicBlock* block;
+    size_t index;
+  };
+
+  struct LoopInfo {
+    BasicBlock* header;
+    ZoneList<BasicBlock*>* outgoing;
+    BitVector* members;
+    LoopInfo* prev;
+    BasicBlock* end;
+    BasicBlock* start;
+
+    void AddOutgoing(Zone* zone, BasicBlock* block) {
+      if (outgoing == NULL) {
+        outgoing = new (zone) ZoneList<BasicBlock*>(2, zone);
       }
+      outgoing->Add(block, zone);
+    }
+  };
 
-      if (succ != NULL) {
+  int Push(ZoneVector<SpecialRPOStackFrame>& stack, int depth,
+           BasicBlock* child, int unvisited) {
+    if (child->rpo_number() == unvisited) {
+      stack[depth].block = child;
+      stack[depth].index = 0;
+      child->set_rpo_number(kBlockOnStack);
+      return depth + 1;
+    }
+    return depth;
+  }
+
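+  // Prepend {block} to the intrusively linked RPO list {head}; the final
+  // order is later read off by following rpo_next() pointers (see
+  // SerializeRPOIntoSchedule).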
+  BasicBlock* PushFront(BasicBlock* head, BasicBlock* block) {
+    block->set_rpo_next(head);
+    return block;
+  }
+
+  static int GetLoopNumber(BasicBlock* block) { return block->loop_number(); }
+  static void SetLoopNumber(BasicBlock* block, int loop_number) {
+    return block->set_loop_number(loop_number);
+  }
+  static bool HasLoopNumber(BasicBlock* block) {
+    return block->loop_number() >= 0;
+  }
+
+  // TODO(mstarzinger): We only need this special sentinel because some tests
+  // use the schedule's end block in actual control flow (e.g. with end having
+  // successors). Once this has been cleaned up we can use the end block here.
+  BasicBlock* BeyondEndSentinel() {
+    if (beyond_end_ == NULL) {
+      BasicBlock::Id id = BasicBlock::Id::FromInt(-1);
+      beyond_end_ = new (schedule_->zone()) BasicBlock(schedule_->zone(), id);
+    }
+    return beyond_end_;
+  }
+
+  // Compute special RPO for the control flow graph between {entry} and {end},
+  // mutating any existing order so that the result is still valid.
+  void ComputeAndInsertSpecialRPO(BasicBlock* entry, BasicBlock* end) {
+    // RPO should not have been serialized for this schedule yet.
+    CHECK_EQ(kBlockUnvisited1, schedule_->start()->loop_number());
+    CHECK_EQ(kBlockUnvisited1, schedule_->start()->rpo_number());
+    CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size()));
+
+    // Find correct insertion point within existing order.
+    BasicBlock* insertion_point = entry->rpo_next();
+    BasicBlock* order = insertion_point;
+
+    // Perform an iterative RPO traversal using an explicit stack,
+    // recording backedges that form cycles. O(|B|).
+    DCHECK_LT(previous_block_count_, schedule_->BasicBlockCount());
+    stack_.resize(schedule_->BasicBlockCount() - previous_block_count_);
+    previous_block_count_ = schedule_->BasicBlockCount();
+    int stack_depth = Push(stack_, 0, entry, kBlockUnvisited1);
+    int num_loops = static_cast<int>(loops_.size());
+
+    while (stack_depth > 0) {
+      int current = stack_depth - 1;
+      SpecialRPOStackFrame* frame = &stack_[current];
+
+      if (frame->block != end &&
+          frame->index < frame->block->SuccessorCount()) {
         // Process the next successor.
-        if (succ->rpo_number_ == kBlockOnStack) continue;
-        if (succ->rpo_number_ == kBlockVisited2) continue;
-        DCHECK(succ->rpo_number_ == kBlockUnvisited2);
-        if (loop != NULL && !loop->members->Contains(succ->id())) {
-          // The successor is not in the current loop or any nested loop.
-          // Add it to the outgoing edges of this loop and visit it later.
-          loop->AddOutgoing(zone, succ);
+        BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
+        if (succ->rpo_number() == kBlockVisited1) continue;
+        if (succ->rpo_number() == kBlockOnStack) {
+          // The successor is on the stack, so this is a backedge (cycle).
+          backedges_.push_back(Backedge(frame->block, frame->index - 1));
+          if (!HasLoopNumber(succ)) {
+            // Assign a new loop number to the header if it doesn't have one.
+            SetLoopNumber(succ, num_loops++);
+          }
         } else {
           // Push the successor onto the stack.
-          stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited2);
-          if (succ->IsLoopHeader()) {
-            // Push the inner loop onto the loop stack.
-            DCHECK(succ->loop_end_ >= 0 && succ->loop_end_ < num_loops);
-            LoopInfo* next = &loops[succ->loop_end_];
-            next->end = order;
-            next->prev = loop;
-            loop = next;
-          }
+          DCHECK(succ->rpo_number() == kBlockUnvisited1);
+          stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited1);
         }
       } else {
-        // Finished with all successors of the current block.
-        if (block->IsLoopHeader()) {
-          // If we are going to pop a loop header, then add its entire body.
-          LoopInfo* info = &loops[block->loop_end_];
-          for (BlockList* l = info->start; true; l = l->next) {
-            if (l->next == info->end) {
-              l->next = order;
-              info->end = order;
-              break;
-            }
-          }
-          order = info->start;
-        } else {
-          // Pop a single node off the stack and add it to the order.
-          order = order->Add(zone, block);
-          block->rpo_number_ = kBlockVisited2;
-        }
+        // Finished with all successors; pop the stack and add the block.
+        order = PushFront(order, frame->block);
+        frame->block->set_rpo_number(kBlockVisited1);
         stack_depth--;
       }
     }
-  }
 
-  // Construct the final order from the list.
-  BasicBlockVector* final_order = &schedule->rpo_order_;
-  order->Serialize(final_order);
+    // If no loops were encountered, then the order we computed was correct.
+    if (num_loops > static_cast<int>(loops_.size())) {
+      // Otherwise, compute the loop information from the backedges in order
+      // to perform a traversal that groups loop bodies together.
+      ComputeLoopInfo(stack_, num_loops, &backedges_);
 
-  // Compute the correct loop header for every block and set the correct loop
-  // ends.
-  LoopInfo* current_loop = NULL;
-  BasicBlock* current_header = NULL;
-  int loop_depth = 0;
-  for (BasicBlockVectorIter i = final_order->begin(); i != final_order->end();
-       ++i) {
-    BasicBlock* current = *i;
-    current->loop_header_ = current_header;
-    if (current->IsLoopHeader()) {
-      loop_depth++;
-      current_loop = &loops[current->loop_end_];
-      BlockList* end = current_loop->end;
-      current->loop_end_ = end == NULL ? static_cast<int>(final_order->size())
-                                       : end->block->rpo_number_;
-      current_header = current_loop->header;
-      Trace("B%d is a loop header, increment loop depth to %d\n", current->id(),
-            loop_depth);
-    } else {
-      while (current_header != NULL &&
-             current->rpo_number_ >= current_header->loop_end_) {
+      // Initialize the "loop stack". Note the entry could be a loop header.
+      LoopInfo* loop =
+          HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : NULL;
+      order = insertion_point;
+
+      // Perform an iterative post-order traversal, visiting loop bodies before
+      // edges that lead out of loops. Visits each block once, but linking loop
+      // sections together is linear in the loop size, so the overall
+      // complexity is O(|B| + max(loop_depth) * max(|loop|)).
+      stack_depth = Push(stack_, 0, entry, kBlockUnvisited2);
+      while (stack_depth > 0) {
+        SpecialRPOStackFrame* frame = &stack_[stack_depth - 1];
+        BasicBlock* block = frame->block;
+        BasicBlock* succ = NULL;
+
+        if (block != end && frame->index < block->SuccessorCount()) {
+          // Process the next normal successor.
+          succ = block->SuccessorAt(frame->index++);
+        } else if (HasLoopNumber(block)) {
+          // Process additional outgoing edges from the loop header.
+          if (block->rpo_number() == kBlockOnStack) {
+            // Finish the loop body the first time the header is left on the
+            // stack.
+            DCHECK(loop != NULL && loop->header == block);
+            loop->start = PushFront(order, block);
+            order = loop->end;
+            block->set_rpo_number(kBlockVisited2);
+            // Pop the loop stack and continue visiting outgoing edges within
+            // the context of the outer loop, if any.
+            loop = loop->prev;
+            // We leave the loop header on the stack; the rest of this iteration
+            // and later iterations will go through its outgoing edges list.
+          }
+
+          // Use the next outgoing edge if there are any.
+          int outgoing_index =
+              static_cast<int>(frame->index - block->SuccessorCount());
+          LoopInfo* info = &loops_[GetLoopNumber(block)];
+          DCHECK(loop != info);
+          if (block != entry && info->outgoing != NULL &&
+              outgoing_index < info->outgoing->length()) {
+            succ = info->outgoing->at(outgoing_index);
+            frame->index++;
+          }
+        }
+
+        if (succ != NULL) {
+          // Process the next successor.
+          if (succ->rpo_number() == kBlockOnStack) continue;
+          if (succ->rpo_number() == kBlockVisited2) continue;
+          DCHECK(succ->rpo_number() == kBlockUnvisited2);
+          if (loop != NULL && !loop->members->Contains(succ->id().ToInt())) {
+            // The successor is not in the current loop or any nested loop.
+            // Add it to the outgoing edges of this loop and visit it later.
+            loop->AddOutgoing(zone_, succ);
+          } else {
+            // Push the successor onto the stack.
+            stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited2);
+            if (HasLoopNumber(succ)) {
+              // Push the inner loop onto the loop stack.
+              DCHECK(GetLoopNumber(succ) < num_loops);
+              LoopInfo* next = &loops_[GetLoopNumber(succ)];
+              next->end = order;
+              next->prev = loop;
+              loop = next;
+            }
+          }
+        } else {
+          // Finished with all successors of the current block.
+          if (HasLoopNumber(block)) {
+            // If we are going to pop a loop header, then add its entire body.
+            LoopInfo* info = &loops_[GetLoopNumber(block)];
+            for (BasicBlock* b = info->start; true; b = b->rpo_next()) {
+              if (b->rpo_next() == info->end) {
+                b->set_rpo_next(order);
+                info->end = order;
+                break;
+              }
+            }
+            order = info->start;
+          } else {
+            // Pop a single node off the stack and add it to the order.
+            order = PushFront(order, block);
+            block->set_rpo_number(kBlockVisited2);
+          }
+          stack_depth--;
+        }
+      }
+    }
+
+    // Publish the new order the first time.
+    if (order_ == NULL) order_ = order;
+
+    // Compute the correct loop headers and set the correct loop ends.
+    LoopInfo* current_loop = NULL;
+    BasicBlock* current_header = entry->loop_header();
+    int32_t loop_depth = entry->loop_depth();
+    if (entry->IsLoopHeader()) --loop_depth;  // Entry might be a loop header.
+    for (BasicBlock* b = order; b != insertion_point; b = b->rpo_next()) {
+      BasicBlock* current = b;
+
+      // Reset BasicBlock::rpo_number again.
+      current->set_rpo_number(kBlockUnvisited1);
+
+      // Finish the previous loop(s) if we just exited them.
+      while (current_header != NULL && current == current_header->loop_end()) {
         DCHECK(current_header->IsLoopHeader());
         DCHECK(current_loop != NULL);
         current_loop = current_loop->prev;
         current_header = current_loop == NULL ? NULL : current_loop->header;
         --loop_depth;
       }
+      current->set_loop_header(current_header);
+
+      // Push a new loop onto the stack if this loop is a loop header.
+      if (HasLoopNumber(current)) {
+        ++loop_depth;
+        current_loop = &loops_[GetLoopNumber(current)];
+        BasicBlock* end = current_loop->end;
+        current->set_loop_end(end == NULL ? BeyondEndSentinel() : end);
+        current_header = current_loop->header;
+        Trace("B%d is a loop header, increment loop depth to %d\n",
+              current->id().ToInt(), loop_depth);
+      }
+
+      current->set_loop_depth(loop_depth);
+
+      if (current->loop_header() == NULL) {
+        Trace("B%d is not in a loop (depth == %d)\n", current->id().ToInt(),
+              current->loop_depth());
+      } else {
+        Trace("B%d has loop header B%d, (depth == %d)\n", current->id().ToInt(),
+              current->loop_header()->id().ToInt(), current->loop_depth());
+      }
     }
-    current->loop_depth_ = loop_depth;
-    if (current->loop_header_ == NULL) {
-      Trace("B%d is not in a loop (depth == %d)\n", current->id(),
-            current->loop_depth_);
-    } else {
-      Trace("B%d has loop header B%d, (depth == %d)\n", current->id(),
-            current->loop_header_->id(), current->loop_depth_);
+  }
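+
+  // In short: the first pass above is a plain DFS that produces a tentative
+  // order and records backedges; only when backedges were found does the
+  // second pass rerun the traversal with loop bodies kept contiguous.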
+
+  // Computes loop membership from the backedges of the control flow graph.
+  void ComputeLoopInfo(ZoneVector<SpecialRPOStackFrame>& queue,
+                       size_t num_loops, ZoneVector<Backedge>* backedges) {
+    // Extend existing loop membership vectors.
+    for (LoopInfo& loop : loops_) {
+      BitVector* new_members = new (zone_)
+          BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
+      new_members->CopyFrom(*loop.members);
+      loop.members = new_members;
+    }
+
+    // Extend loop information vector.
+    loops_.resize(num_loops, LoopInfo());
+
+    // Compute loop membership starting from backedges.
+    // O(max(loop_depth) * max(|loop|)).
+    for (size_t i = 0; i < backedges->size(); i++) {
+      BasicBlock* member = backedges->at(i).first;
+      BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
+      size_t loop_num = GetLoopNumber(header);
+      if (loops_[loop_num].header == NULL) {
+        loops_[loop_num].header = header;
+        loops_[loop_num].members = new (zone_)
+            BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
+      }
+
+      int queue_length = 0;
+      if (member != header) {
+        // As long as the header doesn't have a backedge to itself, push the
+        // member onto the queue and process its predecessors.
+        if (!loops_[loop_num].members->Contains(member->id().ToInt())) {
+          loops_[loop_num].members->Add(member->id().ToInt());
+        }
+        queue[queue_length++].block = member;
+      }
+
+      // Propagate loop membership backwards. All predecessors of M up to the
+      // loop header H are members of the loop too. O(|blocks between M and H|).
+      while (queue_length > 0) {
+        BasicBlock* block = queue[--queue_length].block;
+        for (size_t i = 0; i < block->PredecessorCount(); i++) {
+          BasicBlock* pred = block->PredecessorAt(i);
+          if (pred != header) {
+            if (!loops_[loop_num].members->Contains(pred->id().ToInt())) {
+              loops_[loop_num].members->Add(pred->id().ToInt());
+              queue[queue_length++].block = pred;
+            }
+          }
+        }
+      }
     }
   }
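+
+  // Worked example (illustrative sketch, hypothetical block ids): given the
+  // backedge (B2, B1) in the CFG B0 -> B1 -> B2 -> B1, the propagation above
+  // marks B2 as a loop member and then walks B2's predecessors; the walk
+  // stops at the header B1, so the loop's membership bit vector ends up
+  // containing exactly {B2}.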
 
 #if DEBUG
-  if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order);
-  VerifySpecialRPO(num_loops, loops, final_order);
+  void PrintRPO() {
+    OFStream os(stdout);
+    os << "RPO with " << loops_.size() << " loops";
+    if (loops_.size() > 0) {
+      os << " (";
+      for (size_t i = 0; i < loops_.size(); i++) {
+        if (i > 0) os << " ";
+        os << "B" << loops_[i].header->id();
+      }
+      os << ")";
+    }
+    os << ":\n";
+
+    for (BasicBlock* block = order_; block != NULL; block = block->rpo_next()) {
+      BasicBlock::Id bid = block->id();
+      // TODO(jarin,svenpanne): Add formatting here once we have support for
+      // that in streams (we want an equivalent of PrintF("%5d:", x) here).
+      os << "  " << block->rpo_number() << ":";
+      for (size_t i = 0; i < loops_.size(); i++) {
+        bool range = loops_[i].header->LoopContains(block);
+        bool membership = loops_[i].header != block && range;
+        os << (membership ? " |" : "  ");
+        os << (range ? "x" : " ");
+      }
+      os << "  B" << bid << ": ";
+      if (block->loop_end() != NULL) {
+        os << " range: [" << block->rpo_number() << ", "
+           << block->loop_end()->rpo_number() << ")";
+      }
+      if (block->loop_header() != NULL) {
+        os << " header: B" << block->loop_header()->id();
+      }
+      if (block->loop_depth() > 0) {
+        os << " depth: " << block->loop_depth();
+      }
+      os << "\n";
+    }
+  }
+
+  void VerifySpecialRPO() {
+    BasicBlockVector* order = schedule_->rpo_order();
+    DCHECK(order->size() > 0);
+    DCHECK((*order)[0]->id().ToInt() == 0);  // entry should be first.
+
+    for (size_t i = 0; i < loops_.size(); i++) {
+      LoopInfo* loop = &loops_[i];
+      BasicBlock* header = loop->header;
+      BasicBlock* end = header->loop_end();
+
+      DCHECK(header != NULL);
+      DCHECK(header->rpo_number() >= 0);
+      DCHECK(header->rpo_number() < static_cast<int>(order->size()));
+      DCHECK(end != NULL);
+      DCHECK(end->rpo_number() <= static_cast<int>(order->size()));
+      DCHECK(end->rpo_number() > header->rpo_number());
+      DCHECK(header->loop_header() != header);
+
+      // Verify the start ... end list relationship.
+      int links = 0;
+      BasicBlock* block = loop->start;
+      DCHECK_EQ(header, block);
+      bool end_found;
+      while (true) {
+        if (block == NULL || block == loop->end) {
+          end_found = (loop->end == block);
+          break;
+        }
+        // The list should be in the same order as the final result.
+        DCHECK(block->rpo_number() == links + header->rpo_number());
+        links++;
+        block = block->rpo_next();
+        DCHECK(links < static_cast<int>(2 * order->size()));  // cycle?
+      }
+      DCHECK(links > 0);
+      DCHECK(links == end->rpo_number() - header->rpo_number());
+      DCHECK(end_found);
+
+      // Check loop depth of the header.
+      int loop_depth = 0;
+      for (LoopInfo* outer = loop; outer != NULL; outer = outer->prev) {
+        loop_depth++;
+      }
+      DCHECK_EQ(loop_depth, header->loop_depth());
+
+      // Check the contiguity of loops.
+      int count = 0;
+      for (int j = 0; j < static_cast<int>(order->size()); j++) {
+        BasicBlock* block = order->at(j);
+        DCHECK(block->rpo_number() == j);
+        if (j < header->rpo_number() || j >= end->rpo_number()) {
+          DCHECK(!header->LoopContains(block));
+        } else {
+          DCHECK(header->LoopContains(block));
+          DCHECK_GE(block->loop_depth(), loop_depth);
+          count++;
+        }
+      }
+      DCHECK(links == count);
+    }
+  }
+#endif  // DEBUG
+
+  Zone* zone_;
+  Schedule* schedule_;
+  BasicBlock* order_;
+  BasicBlock* beyond_end_;
+  ZoneVector<LoopInfo> loops_;
+  ZoneVector<Backedge> backedges_;
+  ZoneVector<SpecialRPOStackFrame> stack_;
+  size_t previous_block_count_;
+};
+
+
+BasicBlockVector* Scheduler::ComputeSpecialRPO(Zone* zone, Schedule* schedule) {
+  SpecialRPONumberer numberer(zone, schedule);
+  numberer.ComputeSpecialRPO();
+  numberer.SerializeRPOIntoSchedule();
+  numberer.PrintAndVerifySpecialRPO();
+  return schedule->rpo_order();
+}
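+
+
+// A property worth noting (sketch; it is what VerifySpecialRPO checks): in
+// the special RPO every loop body occupies a contiguous range
+// [header->rpo_number(), header->loop_end()->rpo_number()) of the final
+// order, with the header first, so "is block B inside loop L" reduces to two
+// integer comparisons (see BasicBlock::LoopContains).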
+
+
+void Scheduler::ComputeSpecialRPONumbering() {
+  Trace("--- COMPUTING SPECIAL RPO ----------------------------------\n");
+
+  // Compute the special reverse-post-order for basic blocks.
+  special_rpo_ = new (zone_) SpecialRPONumberer(zone_, schedule_);
+  special_rpo_->ComputeSpecialRPO();
+}
+
+
+void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
+  for (/*nop*/; block != NULL; block = block->rpo_next()) {
+    BasicBlock::Predecessors::iterator pred = block->predecessors_begin();
+    BasicBlock::Predecessors::iterator end = block->predecessors_end();
+    DCHECK(pred != end);  // All blocks except start have predecessors.
+    BasicBlock* dominator = *pred;
+    // For multiple predecessors, walk up the dominator tree until a common
+    // dominator is found. Visitation order guarantees that all predecessors
+    // except for backwards edges have been visited.
+    for (++pred; pred != end; ++pred) {
+      // Don't examine backwards edges.
+      if ((*pred)->dominator_depth() < 0) continue;
+      dominator = GetCommonDominator(dominator, *pred);
+    }
+    block->set_dominator(dominator);
+    block->set_dominator_depth(dominator->dominator_depth() + 1);
+    // Propagate "deferredness" of the dominator.
+    if (dominator->deferred()) block->set_deferred(true);
+    Trace("Block B%d's idom is B%d, depth = %d\n", block->id().ToInt(),
+          dominator->id().ToInt(), block->dominator_depth());
+  }
+}
+
+
+void Scheduler::GenerateImmediateDominatorTree() {
+  Trace("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");
+
+  // Seed start block to be the first dominator.
+  schedule_->start()->set_dominator_depth(0);
+
+  // Build the block dominator tree resulting from the above seed.
+  PropagateImmediateDominators(schedule_->start()->rpo_next());
+}
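+
+
+// For orientation, a minimal sketch of the depth-based common-dominator walk
+// that PropagateImmediateDominators relies on (the real helper is
+// Scheduler::GetCommonDominator; this is only the standard formulation and
+// not necessarily its exact code):
+//
+//   BasicBlock* CommonDominator(BasicBlock* b1, BasicBlock* b2) {
+//     while (b1 != b2) {
+//       if (b1->dominator_depth() < b2->dominator_depth()) {
+//         b2 = b2->dominator();
+//       } else {
+//         b1 = b1->dominator();
+//       }
+//     }
+//     return b1;
+//   }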
+
+
+// -----------------------------------------------------------------------------
+// Phase 3: Prepare use counts for nodes.
+
+
+class PrepareUsesVisitor : public NullNodeVisitor {
+ public:
+  explicit PrepareUsesVisitor(Scheduler* scheduler)
+      : scheduler_(scheduler), schedule_(scheduler->schedule_) {}
+
+  void Pre(Node* node) {
+    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
+      // Fixed nodes are always roots for schedule late.
+      scheduler_->schedule_root_nodes_.push_back(node);
+      if (!schedule_->IsScheduled(node)) {
+        // Make sure root nodes are scheduled in their respective blocks.
+        Trace("Scheduling fixed position node #%d:%s\n", node->id(),
+              node->op()->mnemonic());
+        IrOpcode::Value opcode = node->opcode();
+        BasicBlock* block =
+            opcode == IrOpcode::kParameter
+                ? schedule_->start()
+                : schedule_->block(NodeProperties::GetControlInput(node));
+        DCHECK(block != NULL);
+        schedule_->AddNode(block, node);
+      }
+    }
+  }
+
+  void PostEdge(Node* from, int index, Node* to) {
+    // If the edge is from an unscheduled node, then tally it in the use count
+    // for all of its inputs. The same criterion will be used in ScheduleLate
+    // for decrementing use counts.
+    if (!schedule_->IsScheduled(from)) {
+      DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
+      scheduler_->IncrementUnscheduledUseCount(to, index, from);
+    }
+  }
+
+ private:
+  Scheduler* scheduler_;
+  Schedule* schedule_;
+};
+
+
+void Scheduler::PrepareUses() {
+  Trace("--- PREPARE USES -------------------------------------------\n");
+
+  // Count the uses of every node; these counts are used to ensure that all of
+  // a node's uses are scheduled before the node itself.
+  PrepareUsesVisitor prepare_uses(this);
+  graph_->VisitNodeInputsFromEnd(&prepare_uses);
+}
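+
+
+// Example (sketch with hypothetical node ids): if node #10 is used by two
+// nodes that are not yet scheduled, PrepareUses leaves #10 with an
+// unscheduled use count of 2; ScheduleLate only treats #10 as schedulable
+// once both uses have been placed and the count has dropped back to zero.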
+
+
+// -----------------------------------------------------------------------------
+// Phase 4: Schedule nodes early.
+
+
+class ScheduleEarlyNodeVisitor {
+ public:
+  ScheduleEarlyNodeVisitor(Zone* zone, Scheduler* scheduler)
+      : scheduler_(scheduler), schedule_(scheduler->schedule_), queue_(zone) {}
+
+  // Run the schedule early algorithm on a set of fixed root nodes.
+  void Run(NodeVector* roots) {
+    for (NodeVectorIter i = roots->begin(); i != roots->end(); ++i) {
+      queue_.push(*i);
+      while (!queue_.empty()) {
+        VisitNode(queue_.front());
+        queue_.pop();
+      }
+    }
+  }
+
+ private:
+  // Visits one node from the queue and propagates its current schedule early
+  // position to all uses. This in turn might push more nodes onto the queue.
+  void VisitNode(Node* node) {
+    Scheduler::SchedulerData* data = scheduler_->GetData(node);
+
+    // Fixed nodes already know their schedule early position.
+    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
+      data->minimum_block_ = schedule_->block(node);
+      Trace("Fixing #%d:%s minimum_block = B%d, dominator_depth = %d\n",
+            node->id(), node->op()->mnemonic(),
+            data->minimum_block_->id().ToInt(),
+            data->minimum_block_->dominator_depth());
+    }
+
+    // No need to propagate unconstrained schedule early positions.
+    if (data->minimum_block_ == schedule_->start()) return;
+
+    // Propagate schedule early position.
+    DCHECK(data->minimum_block_ != NULL);
+    Node::Uses uses = node->uses();
+    for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) {
+      PropagateMinimumPositionToNode(data->minimum_block_, *i);
+    }
+  }
+
+  // Propagates {block} as another minimum position into the given {node}. This
+  // has the net effect of computing the minimum dominator block of {node} that
+  // still post-dominates all inputs to {node} when the queue is processed.
+  void PropagateMinimumPositionToNode(BasicBlock* block, Node* node) {
+    Scheduler::SchedulerData* data = scheduler_->GetData(node);
+
+    // No need to propagate to fixed node, it's guaranteed to be a root.
+    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) return;
+
+    // Coupled nodes influence schedule early position of their control.
+    if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
+      Node* control = NodeProperties::GetControlInput(node);
+      PropagateMinimumPositionToNode(block, control);
+    }
+
+    // Propagate the new position if it is deeper down the dominator tree than
+    // the current one. Note that all inputs must have a minimum block position
+    // inside the dominator chain of {node}'s minimum block position.
+    DCHECK(InsideSameDominatorChain(block, data->minimum_block_));
+    if (block->dominator_depth() > data->minimum_block_->dominator_depth()) {
+      data->minimum_block_ = block;
+      queue_.push(node);
+      Trace("Propagating #%d:%s minimum_block = B%d, dominator_depth = %d\n",
+            node->id(), node->op()->mnemonic(),
+            data->minimum_block_->id().ToInt(),
+            data->minimum_block_->dominator_depth());
+    }
+  }
+
+#if DEBUG
+  bool InsideSameDominatorChain(BasicBlock* b1, BasicBlock* b2) {
+    BasicBlock* dominator = scheduler_->GetCommonDominator(b1, b2);
+    return dominator == b1 || dominator == b2;
+  }
 #endif
-  return final_order;
+
+  Scheduler* scheduler_;
+  Schedule* schedule_;
+  ZoneQueue<Node*> queue_;
+};
+
+
+void Scheduler::ScheduleEarly() {
+  Trace("--- SCHEDULE EARLY -----------------------------------------\n");
+  if (FLAG_trace_turbo_scheduler) {
+    Trace("roots: ");
+    for (Node* node : schedule_root_nodes_) {
+      Trace("#%d:%s ", node->id(), node->op()->mnemonic());
+    }
+    Trace("\n");
+  }
+
+  // Compute the minimum block for each node thereby determining the earliest
+  // position each node could be placed within a valid schedule.
+  ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
+  schedule_early_visitor.Run(&schedule_root_nodes_);
 }
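+
+
+// Example (sketch, hypothetical blocks): if a node has one input fixed in B1
+// (dominator depth 1) and another fixed in B3 (dominator depth 3), the
+// propagation above settles its minimum_block_ at B3, the deeper of the two,
+// since any valid placement must be dominated by the block of every input.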
+
+
+// -----------------------------------------------------------------------------
+// Phase 5: Schedule nodes late.
+
+
+class ScheduleLateNodeVisitor {
+ public:
+  ScheduleLateNodeVisitor(Zone* zone, Scheduler* scheduler)
+      : scheduler_(scheduler), schedule_(scheduler_->schedule_) {}
+
+  // Run the schedule late algorithm on a set of fixed root nodes.
+  void Run(NodeVector* roots) {
+    for (NodeVectorIter i = roots->begin(); i != roots->end(); ++i) {
+      ProcessQueue(*i);
+    }
+  }
+
+ private:
+  void ProcessQueue(Node* root) {
+    ZoneQueue<Node*>* queue = &(scheduler_->schedule_queue_);
+    for (Node* node : root->inputs()) {
+      // Don't schedule coupled nodes on their own.
+      if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
+        node = NodeProperties::GetControlInput(node);
+      }
+
+      // Test the schedulability condition by looking at the unscheduled use
+      // count.
+      if (scheduler_->GetData(node)->unscheduled_count_ != 0) continue;
+
+      queue->push(node);
+      while (!queue->empty()) {
+        VisitNode(queue->front());
+        queue->pop();
+      }
+    }
+  }
+
+  // Visits one node from the queue of schedulable nodes and determines its
+  // schedule late position. Also hoists nodes out of loops to find a better
+  // scheduling position.
+  void VisitNode(Node* node) {
+    DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
+
+    // Don't schedule nodes that are already scheduled.
+    if (schedule_->IsScheduled(node)) return;
+    DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node));
+
+    // Determine the dominating block for all of the uses of this node. It is
+    // the latest block that this node can be scheduled in.
+    Trace("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic());
+    BasicBlock* block = GetCommonDominatorOfUses(node);
+    DCHECK_NOT_NULL(block);
+
+    // The schedule early block dominates the schedule late block.
+    BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_;
+    DCHECK_EQ(min_block, scheduler_->GetCommonDominator(block, min_block));
+    Trace("Schedule late of #%d:%s is B%d at loop depth %d, minimum = B%d\n",
+          node->id(), node->op()->mnemonic(), block->id().ToInt(),
+          block->loop_depth(), min_block->id().ToInt());
+
+    // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
+    // into enclosing loop pre-headers until they would precede their schedule
+    // early position.
+    BasicBlock* hoist_block = GetPreHeader(block);
+    while (hoist_block != NULL &&
+           hoist_block->dominator_depth() >= min_block->dominator_depth()) {
+      Trace("  hoisting #%d:%s to block B%d\n", node->id(),
+            node->op()->mnemonic(), hoist_block->id().ToInt());
+      DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
+      block = hoist_block;
+      hoist_block = GetPreHeader(hoist_block);
+    }
+
+    // Schedule the node or a floating control structure.
+    if (NodeProperties::IsControl(node)) {
+      ScheduleFloatingControl(block, node);
+    } else {
+      ScheduleNode(block, node);
+    }
+  }
+
+  BasicBlock* GetPreHeader(BasicBlock* block) {
+    if (block->IsLoopHeader()) {
+      return block->dominator();
+    } else if (block->loop_header() != NULL) {
+      return block->loop_header()->dominator();
+    } else {
+      return NULL;
+    }
+  }
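+
+  // Example (sketch, assuming each loop header's immediate dominator acts as
+  // its pre-header): for a block at loop depth 2, GetPreHeader yields a block
+  // at depth 1, and applying it again yields one at depth 0, so the hoisting
+  // loop in VisitNode climbs out exactly one nesting level per step.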
+
+  BasicBlock* GetCommonDominatorOfUses(Node* node) {
+    BasicBlock* block = NULL;
+    for (Edge edge : node->use_edges()) {
+      BasicBlock* use_block = GetBlockForUse(edge);
+      if (use_block == NULL) continue;
+      block = block == NULL ? use_block
+                            : scheduler_->GetCommonDominator(block, use_block);
+    }
+    return block;
+  }
+
+  BasicBlock* GetBlockForUse(Edge edge) {
+    Node* use = edge.from();
+    IrOpcode::Value opcode = use->opcode();
+    if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
+      // If the use is from a coupled (i.e. floating) phi, compute the common
+      // dominator of its uses. This will not recurse more than one level.
+      if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
+        Trace("  inspecting uses of coupled #%d:%s\n", use->id(),
+              use->op()->mnemonic());
+        DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
+        return GetCommonDominatorOfUses(use);
+      }
+      // If the use is from a fixed (i.e. non-floating) phi, use the block
+      // of the corresponding control input to the merge.
+      if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
+        Trace("  input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
+              use->op()->mnemonic());
+        Node* merge = NodeProperties::GetControlInput(use, 0);
+        opcode = merge->opcode();
+        DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
+        use = NodeProperties::GetControlInput(merge, edge.index());
+      }
+    }
+    BasicBlock* result = schedule_->block(use);
+    if (result == NULL) return NULL;
+    Trace("  must dominate use #%d:%s in B%d\n", use->id(),
+          use->op()->mnemonic(), result->id().ToInt());
+    return result;
+  }
+
+  void ScheduleFloatingControl(BasicBlock* block, Node* node) {
+    scheduler_->FuseFloatingControl(block, node);
+  }
+
+  void ScheduleNode(BasicBlock* block, Node* node) {
+    schedule_->PlanNode(block, node);
+    scheduler_->scheduled_nodes_[block->id().ToSize()].push_back(node);
+    scheduler_->UpdatePlacement(node, Scheduler::kScheduled);
+  }
+
+  Scheduler* scheduler_;
+  Schedule* schedule_;
+};
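+
+
+// A note on phi uses (sketch): when a use is input i of a fixed phi,
+// GetBlockForUse does not demand the value in the merge block itself; it
+// maps the use to the block of control input i of the corresponding merge,
+// so the value only needs to dominate the matching incoming edge.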
+
+
+void Scheduler::ScheduleLate() {
+  Trace("--- SCHEDULE LATE ------------------------------------------\n");
+  if (FLAG_trace_turbo_scheduler) {
+    Trace("roots: ");
+    for (Node* node : schedule_root_nodes_) {
+      Trace("#%d:%s ", node->id(), node->op()->mnemonic());
+    }
+    Trace("\n");
+  }
+
+  // Schedule: place nodes in the dominator block of all their uses.
+  ScheduleLateNodeVisitor schedule_late_visitor(zone_, this);
+  schedule_late_visitor.Run(&schedule_root_nodes_);
 }
+
+
+// -----------------------------------------------------------------------------
+// Phase 6: Seal the final schedule.
+
+
+void Scheduler::SealFinalSchedule() {
+  Trace("--- SEAL FINAL SCHEDULE ------------------------------------\n");
+
+  // Serialize the assembly order and reverse-post-order numbering.
+  special_rpo_->SerializeRPOIntoSchedule();
+  special_rpo_->PrintAndVerifySpecialRPO();
+
+  // Add the collected nodes to their basic blocks in the right order.
+  int block_num = 0;
+  for (NodeVector& nodes : scheduled_nodes_) {
+    BasicBlock::Id id = BasicBlock::Id::FromInt(block_num++);
+    BasicBlock* block = schedule_->GetBlockById(id);
+    for (NodeVectorRIter i = nodes.rbegin(); i != nodes.rend(); ++i) {
+      schedule_->AddNode(block, *i);
+    }
+  }
 }
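+
+
+// A note on ordering (sketch): ScheduleNode appends to scheduled_nodes_ as
+// ScheduleLate works from uses back towards definitions, so each per-block
+// vector holds nodes in reverse order; iterating with rbegin()/rend() in
+// SealFinalSchedule above restores def-before-use order within a block.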
-}  // namespace v8::internal::compiler
+
+
+// -----------------------------------------------------------------------------
+
+
+void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
+  Trace("--- FUSE FLOATING CONTROL ----------------------------------\n");
+  if (FLAG_trace_turbo_scheduler) {
+    OFStream os(stdout);
+    os << "Schedule before control flow fusion:\n" << *schedule_;
+  }
+
+  // Iterate on phase 1: Build control-flow graph.
+  control_flow_builder_->Run(block, node);
+
+  // Iterate on phase 2: Compute special RPO and dominator tree.
+  special_rpo_->UpdateSpecialRPO(block, schedule_->block(node));
+  // TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that.
+  for (BasicBlock* b = block->rpo_next(); b != NULL; b = b->rpo_next()) {
+    b->set_dominator_depth(-1);
+    b->set_dominator(NULL);
+  }
+  PropagateImmediateDominators(block->rpo_next());
+
+  // Iterate on phase 4: Schedule nodes early.
+  // TODO(mstarzinger): The following loop gathering the propagation roots is a
+  // temporary solution and should be merged into the rest of the scheduler as
+  // soon as the approach has settled for all floating loops.
+  NodeVector propagation_roots(control_flow_builder_->control_);
+  for (Node* node : control_flow_builder_->control_) {
+    for (Node* use : node->uses()) {
+      if (use->opcode() == IrOpcode::kPhi ||
+          use->opcode() == IrOpcode::kEffectPhi) {
+        propagation_roots.push_back(use);
+      }
+    }
+  }
+  if (FLAG_trace_turbo_scheduler) {
+    Trace("propagation roots: ");
+    for (Node* node : propagation_roots) {
+      Trace("#%d:%s ", node->id(), node->op()->mnemonic());
+    }
+    Trace("\n");
+  }
+  ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
+  schedule_early_visitor.Run(&propagation_roots);
+
+  // Move previously planned nodes.
+  // TODO(mstarzinger): Improve that by supporting bulk moves.
+  scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
+  MovePlannedNodes(block, schedule_->block(node));
+
+  if (FLAG_trace_turbo_scheduler) {
+    OFStream os(stdout);
+    os << "Schedule after control flow fusion:\n" << *schedule_;
+  }
+}
+
+
+void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) {
+  Trace("Move planned nodes from B%d to B%d\n", from->id().ToInt(),
+        to->id().ToInt());
+  NodeVector* nodes = &(scheduled_nodes_[from->id().ToSize()]);
+  for (NodeVectorIter i = nodes->begin(); i != nodes->end(); ++i) {
+    schedule_->SetBlockForNode(to, *i);
+    scheduled_nodes_[to->id().ToSize()].push_back(*i);
+  }
+  nodes->clear();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8