Add a linear scan register allocator to the optimizing compiler.

This is a "by-the-book" implementation. It currently only deals
with allocating registers, with no hint optimizations.

The changes remaining to make it functional are:
- Allocate spill slots.
- Resolution and placement of Move instructions.
- Connect it to the code generator.
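
The allocator is invoked from the optimizing compiler right after the
liveness analysis; the hook added in optimizing_compiler.cc boils down to:

  SsaLivenessAnalysis liveness(*graph);
  liveness.Analyze();
  visualizer.DumpGraph("liveness");

  RegisterAllocator(graph->GetArena(), *codegen).AllocateRegisters(liveness);
  visualizer.DumpGraph("register");

For now the computed intervals are only exercised by Validate() (in debug
builds) and dumped through the graph visualizer; the code generator does
not consume them yet.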

Change-Id: Ie0b2f6ba1b98da85425be721ce4afecd6b4012a4
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 20e6aad..9f1d0f1 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -81,9 +81,11 @@
 	compiler/optimizing/find_loops_test.cc \
 	compiler/optimizing/linearize_test.cc \
 	compiler/optimizing/liveness_test.cc \
+	compiler/optimizing/live_interval_test.cc \
 	compiler/optimizing/live_ranges_test.cc \
 	compiler/optimizing/parallel_move_test.cc \
 	compiler/optimizing/pretty_printer_test.cc \
+	compiler/optimizing/register_allocator_test.cc \
 	compiler/optimizing/ssa_test.cc \
 	compiler/output_stream_test.cc \
 	compiler/utils/arena_allocator_test.cc \
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 021392c..f297213 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -86,6 +86,7 @@
 	optimizing/nodes.cc \
 	optimizing/optimizing_compiler.cc \
 	optimizing/parallel_move_resolver.cc \
+	optimizing/register_allocator.cc \
 	optimizing/ssa_builder.cc \
 	optimizing/ssa_liveness_analysis.cc \
 	trampolines/trampoline_compiler.cc \
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index e18902f..e197ccd 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -74,6 +74,13 @@
   void SetFrameSize(uint32_t size) { frame_size_ = size; }
   uint32_t GetCoreSpillMask() const { return core_spill_mask_; }
 
+  virtual size_t GetNumberOfCoreRegisters() const = 0;
+  virtual size_t GetNumberOfFloatingPointRegisters() const = 0;
+  virtual size_t GetNumberOfRegisters() const = 0;
+  virtual void SetupBlockedRegisters(bool* blocked_registers) const = 0;
+  virtual void DumpCoreRegister(std::ostream& stream, int reg) const = 0;
+  virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const = 0;
+
   void RecordPcInfo(uint32_t dex_pc) {
     struct PcInfo pc_info;
     pc_info.dex_pc = dex_pc;
@@ -92,8 +99,7 @@
         graph_(graph),
         block_labels_(graph->GetArena(), 0),
         pc_infos_(graph->GetArena(), 32),
-        blocked_registers_(static_cast<bool*>(
-            graph->GetArena()->Alloc(number_of_registers * sizeof(bool), kArenaAllocData))) {
+        blocked_registers_(graph->GetArena()->AllocArray<bool>(number_of_registers)) {
     block_labels_.SetSize(graph->GetBlocks().Size());
   }
   ~CodeGenerator() { }
@@ -109,9 +115,6 @@
   // the first available register.
   size_t AllocateFreeRegisterInternal(bool* blocked_registers, size_t number_of_registers) const;
 
-  virtual void SetupBlockedRegisters(bool* blocked_registers) const = 0;
-  virtual size_t GetNumberOfRegisters() const = 0;
-
   virtual Location GetStackLocation(HLoadLocal* load) const = 0;
 
   // Frame size required for this method.
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index f1b16a1..ed3f43c 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -37,6 +37,14 @@
 static constexpr int kNumberOfPushedRegistersAtEntry = 1;
 static constexpr int kCurrentMethodStackOffset = 0;
 
+void CodeGeneratorARM::DumpCoreRegister(std::ostream& stream, int reg) const {
+  stream << ArmManagedRegister::FromCoreRegister(Register(reg));
+}
+
+void CodeGeneratorARM::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
+  stream << ArmManagedRegister::FromDRegister(DRegister(reg));
+}
+
 CodeGeneratorARM::CodeGeneratorARM(HGraph* graph)
     : CodeGenerator(graph, kNumberOfRegIds),
       location_builder_(graph, this),
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 2405d4b..423b13e 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -133,6 +133,17 @@
   int32_t GetStackSlot(HLocal* local) const;
   virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
 
+  virtual size_t GetNumberOfCoreRegisters() const OVERRIDE {
+    return kNumberOfCoreRegisters;
+  }
+
+  virtual size_t GetNumberOfFloatingPointRegisters() const OVERRIDE {
+    return kNumberOfDRegisters;
+  }
+
+  virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+  virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+
  private:
   // Helper method to move a 32bits value between two locations.
   void Move32(Location destination, Location source);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index b8b25f9..8bfd8d6 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -37,6 +37,14 @@
 static constexpr int kNumberOfPushedRegistersAtEntry = 1;
 static constexpr int kCurrentMethodStackOffset = 0;
 
+void CodeGeneratorX86::DumpCoreRegister(std::ostream& stream, int reg) const {
+  stream << X86ManagedRegister::FromCpuRegister(Register(reg));
+}
+
+void CodeGeneratorX86::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
+  stream << X86ManagedRegister::FromXmmRegister(XmmRegister(reg));
+}
+
 CodeGeneratorX86::CodeGeneratorX86(HGraph* graph)
     : CodeGenerator(graph, kNumberOfRegIds),
       location_builder_(graph, this),
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 1ee11bf..4a70636 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -134,6 +134,17 @@
   int32_t GetStackSlot(HLocal* local) const;
   virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
 
+  virtual size_t GetNumberOfCoreRegisters() const OVERRIDE {
+    return kNumberOfCpuRegisters;
+  }
+
+  virtual size_t GetNumberOfFloatingPointRegisters() const OVERRIDE {
+    return kNumberOfXmmRegisters;
+  }
+
+  virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+  virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+
  private:
   // Helper method to move a 32bits value between two locations.
   void Move32(Location destination, Location source);
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 52e3e37..5c5042e 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -16,6 +16,7 @@
 
 #include "graph_visualizer.h"
 
+#include "code_generator.h"
 #include "driver/dex_compilation_unit.h"
 #include "nodes.h"
 #include "ssa_liveness_analysis.h"
@@ -27,8 +28,8 @@
  */
 class HGraphVisualizerPrinter : public HGraphVisitor {
  public:
-  HGraphVisualizerPrinter(HGraph* graph, std::ostream& output)
-      : HGraphVisitor(graph), output_(output), indent_(0) {}
+  HGraphVisualizerPrinter(HGraph* graph, std::ostream& output, const CodeGenerator& codegen)
+      : HGraphVisitor(graph), output_(output), codegen_(codegen), indent_(0) {}
 
   void StartTag(const char* name) {
     AddIndent();
@@ -107,17 +108,18 @@
       output_ << " (liveness: " << instruction->GetLifetimePosition();
       if (instruction->HasLiveInterval()) {
         output_ << " ";
-        const GrowableArray<LiveRange>& ranges = instruction->GetLiveInterval()->GetRanges();
-        size_t i = ranges.Size() - 1;
-        do {
-          output_ << "[" << ranges.Get(i).GetStart() << "," << ranges.Get(i).GetEnd() << "[";
-          if (i == 0) {
-            break;
+        const LiveInterval& interval = *instruction->GetLiveInterval();
+        interval.Dump(output_);
+        if (interval.HasRegister()) {
+          int reg = interval.GetRegister();
+          output_ << " ";
+          if (instruction->GetType() == Primitive::kPrimFloat
+              || instruction->GetType() == Primitive::kPrimDouble) {
+            codegen_.DumpFloatingPointRegister(output_, reg);
           } else {
-            --i;
-            output_ << ",";
+            codegen_.DumpCoreRegister(output_, reg);
           }
-        } while (true);
+        }
       }
       output_ << ")";
     }
@@ -186,6 +188,7 @@
 
  private:
   std::ostream& output_;
+  const CodeGenerator& codegen_;
   size_t indent_;
 
   DISALLOW_COPY_AND_ASSIGN(HGraphVisualizerPrinter);
@@ -194,8 +197,9 @@
 HGraphVisualizer::HGraphVisualizer(std::ostream* output,
                                    HGraph* graph,
                                    const char* string_filter,
+                                   const CodeGenerator& codegen,
                                    const DexCompilationUnit& cu)
-    : output_(output), graph_(graph), is_enabled_(false) {
+    : output_(output), graph_(graph), codegen_(codegen), is_enabled_(false) {
   if (output == nullptr) {
     return;
   }
@@ -205,7 +209,7 @@
   }
 
   is_enabled_ = true;
-  HGraphVisualizerPrinter printer(graph, *output_);
+  HGraphVisualizerPrinter printer(graph, *output_, codegen_);
   printer.StartTag("compilation");
   printer.PrintProperty("name", pretty_name.c_str());
   printer.PrintProperty("method", pretty_name.c_str());
@@ -215,14 +219,15 @@
 
 HGraphVisualizer::HGraphVisualizer(std::ostream* output,
                                    HGraph* graph,
+                                   const CodeGenerator& codegen,
                                    const char* name)
-    : output_(output), graph_(graph), is_enabled_(false) {
+    : output_(output), graph_(graph), codegen_(codegen), is_enabled_(false) {
   if (output == nullptr) {
     return;
   }
 
   is_enabled_ = true;
-  HGraphVisualizerPrinter printer(graph, *output_);
+  HGraphVisualizerPrinter printer(graph, *output_, codegen_);
   printer.StartTag("compilation");
   printer.PrintProperty("name", name);
   printer.PrintProperty("method", name);
@@ -234,7 +239,7 @@
   if (!is_enabled_) {
     return;
   }
-  HGraphVisualizerPrinter printer(graph_, *output_);
+  HGraphVisualizerPrinter printer(graph_, *output_, codegen_);
   printer.Run(pass_name);
 }
 
diff --git a/compiler/optimizing/graph_visualizer.h b/compiler/optimizing/graph_visualizer.h
index 2b88e65..2638cf5 100644
--- a/compiler/optimizing/graph_visualizer.h
+++ b/compiler/optimizing/graph_visualizer.h
@@ -21,6 +21,7 @@
 
 namespace art {
 
+class CodeGenerator;
 class DexCompilationUnit;
 class HGraph;
 
@@ -39,13 +40,17 @@
   HGraphVisualizer(std::ostream* output,
                    HGraph* graph,
                    const char* string_filter,
+                   const CodeGenerator& codegen,
                    const DexCompilationUnit& cu);
 
   /**
    * Version of `HGraphVisualizer` for unit testing, that is when a
    * `DexCompilationUnit` is not available.
    */
-  HGraphVisualizer(std::ostream* output, HGraph* graph, const char* name);
+  HGraphVisualizer(std::ostream* output,
+                   HGraph* graph,
+                   const CodeGenerator& codegen,
+                   const char* name);
 
   /**
    * If this visualizer is enabled, emit the compilation information
@@ -56,6 +61,7 @@
  private:
   std::ostream* const output_;
   HGraph* const graph_;
+  const CodeGenerator& codegen_;
 
   // Is true when `output_` is not null, and the compiled method's name
   // contains the string_filter given in the constructor.
diff --git a/compiler/optimizing/live_interval_test.cc b/compiler/optimizing/live_interval_test.cc
new file mode 100644
index 0000000..3e4b83b
--- /dev/null
+++ b/compiler/optimizing/live_interval_test.cc
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "optimizing_unit_test.h"
+#include "ssa_liveness_analysis.h"
+#include "utils/arena_allocator.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(LiveInterval, GetStart) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+
+  {
+    static constexpr size_t ranges[][2] = {{0, 42}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    ASSERT_EQ(0u, interval->GetStart());
+  }
+
+  {
+    static constexpr size_t ranges[][2] = {{4, 12}, {14, 16}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    ASSERT_EQ(4u, interval->GetStart());
+  }
+}
+
+TEST(LiveInterval, IsDeadAt) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+
+  {
+    static constexpr size_t ranges[][2] = {{0, 42}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    ASSERT_TRUE(interval->IsDeadAt(42));
+    ASSERT_TRUE(interval->IsDeadAt(43));
+    ASSERT_FALSE(interval->IsDeadAt(41));
+    ASSERT_FALSE(interval->IsDeadAt(0));
+    ASSERT_FALSE(interval->IsDeadAt(22));
+  }
+
+  {
+    static constexpr size_t ranges[][2] = {{4, 12}, {14, 16}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    ASSERT_TRUE(interval->IsDeadAt(16));
+    ASSERT_TRUE(interval->IsDeadAt(32));
+    ASSERT_FALSE(interval->IsDeadAt(0));
+    ASSERT_FALSE(interval->IsDeadAt(4));
+    ASSERT_FALSE(interval->IsDeadAt(12));
+    ASSERT_FALSE(interval->IsDeadAt(13));
+    ASSERT_FALSE(interval->IsDeadAt(14));
+    ASSERT_FALSE(interval->IsDeadAt(15));
+  }
+}
+
+TEST(LiveInterval, Covers) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+
+  {
+    static constexpr size_t ranges[][2] = {{0, 42}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    ASSERT_TRUE(interval->Covers(0));
+    ASSERT_TRUE(interval->Covers(4));
+    ASSERT_TRUE(interval->Covers(41));
+    ASSERT_FALSE(interval->Covers(42));
+    ASSERT_FALSE(interval->Covers(54));
+  }
+
+  {
+    static constexpr size_t ranges[][2] = {{4, 12}, {14, 16}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    ASSERT_TRUE(interval->Covers(4));
+    ASSERT_TRUE(interval->Covers(11));
+    ASSERT_TRUE(interval->Covers(14));
+    ASSERT_TRUE(interval->Covers(15));
+    ASSERT_FALSE(interval->Covers(0));
+    ASSERT_FALSE(interval->Covers(12));
+    ASSERT_FALSE(interval->Covers(13));
+    ASSERT_FALSE(interval->Covers(16));
+  }
+}
+
+TEST(LiveInterval, FirstIntersectionWith) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+
+  {
+    static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}};
+    LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+    static constexpr size_t ranges2[][2] = {{5, 6}};
+    LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+
+    ASSERT_EQ(kNoLifetime, interval1->FirstIntersectionWith(interval2));
+  }
+
+  {
+    static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}};
+    LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+    static constexpr size_t ranges2[][2] = {{5, 42}};
+    LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+
+    ASSERT_EQ(8u, interval1->FirstIntersectionWith(interval2));
+  }
+
+  {
+    static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}};
+    LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+    static constexpr size_t ranges2[][2] = {{5, 6}, {7, 8}, {11, 12}};
+    LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+
+    ASSERT_EQ(kNoLifetime, interval1->FirstIntersectionWith(interval2));
+  }
+
+  {
+    static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}};
+    LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+    static constexpr size_t ranges2[][2] = {{5, 6}, {7, 8}, {9, 10}};
+    LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+
+    ASSERT_EQ(9u, interval1->FirstIntersectionWith(interval2));
+  }
+
+  {
+    static constexpr size_t ranges1[][2] = {{0, 1}, {2, 7}, {8, 10}};
+    LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+    static constexpr size_t ranges2[][2] = {{1, 2}, {6, 7}, {9, 10}};
+    LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+
+    ASSERT_EQ(6u, interval1->FirstIntersectionWith(interval2));
+  }
+
+  {
+    static constexpr size_t ranges1[][2] = {{0, 1}, {2, 8}, {55, 58}};
+    LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+    static constexpr size_t ranges2[][2] = {{1, 2}, {11, 42}, {43, 48}, {54, 56}};
+    LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+
+    ASSERT_EQ(55u, interval1->FirstIntersectionWith(interval2));
+  }
+
+  {
+    static constexpr size_t ranges1[][2] = {{0, 1}, {2, 8}, {15, 18}, {27, 32}, {41, 53}, {54, 60}};
+    LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+    static constexpr size_t ranges2[][2] = {{1, 2}, {11, 12}, {19, 25}, {34, 42}, {52, 60}};
+    LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+
+    ASSERT_EQ(41u, interval1->FirstIntersectionWith(interval2));
+  }
+}
+
+static bool RangesEquals(LiveInterval* interval,
+                         const size_t expected[][2],
+                         size_t number_of_expected_ranges) {
+  LiveRange* current = interval->GetFirstRange();
+
+  size_t i = 0;
+  for (;
+       i < number_of_expected_ranges && current != nullptr;
+       ++i, current = current->GetNext()) {
+    if (expected[i][0] != current->GetStart()) {
+      return false;
+    }
+    if (expected[i][1] != current->GetEnd()) {
+      return false;
+    }
+  }
+
+  if (current != nullptr || i != number_of_expected_ranges) {
+    return false;
+  }
+
+  return true;
+}
+
+TEST(LiveInterval, SplitAt) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+
+  {
+    // Test within one range.
+    static constexpr size_t ranges[][2] = {{0, 4}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    LiveInterval* split = interval->SplitAt(1);
+    static constexpr size_t expected[][2] = {{0, 1}};
+    ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
+    static constexpr size_t expected_split[][2] = {{1, 4}};
+    ASSERT_TRUE(RangesEquals(split, expected_split, arraysize(expected_split)));
+  }
+
+  {
+    // Test just before the end of one range.
+    static constexpr size_t ranges[][2] = {{0, 4}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    LiveInterval* split = interval->SplitAt(3);
+    static constexpr size_t expected[][2] = {{0, 3}};
+    ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
+    static constexpr size_t expected_split[][2] = {{3, 4}};
+    ASSERT_TRUE(RangesEquals(split, expected_split, arraysize(expected_split)));
+  }
+
+  {
+    // Test within the first range.
+    static constexpr size_t ranges[][2] = {{0, 4}, {8, 12}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    LiveInterval* split = interval->SplitAt(1);
+    static constexpr size_t expected[][2] = {{0, 1}};
+    ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
+    static constexpr size_t expected_split[][2] = {{1, 4}, {8, 12}};
+    ASSERT_TRUE(RangesEquals(split, expected_split, arraysize(expected_split)));
+  }
+
+  {
+    // Test in a hole.
+    static constexpr size_t ranges[][2] = {{0, 4}, {8, 12}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    LiveInterval* split = interval->SplitAt(5);
+    static constexpr size_t expected[][2] = {{0, 4}};
+    ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
+    static constexpr size_t expected_split[][2] = {{8, 12}};
+    ASSERT_TRUE(RangesEquals(split, expected_split, arraysize(expected_split)));
+  }
+
+  {
+    // Test within the second range.
+    static constexpr size_t ranges[][2] = {{0, 4}, {8, 12}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    LiveInterval* split = interval->SplitAt(9);
+    static constexpr size_t expected[][2] = {{0, 4}, {8, 9}};
+    ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
+    static constexpr size_t expected_split[][2] = {{9, 12}};
+    ASSERT_TRUE(RangesEquals(split, expected_split, arraysize(expected_split)));
+  }
+
+  {
+    // Test at the beginning of the second range.
+    static constexpr size_t ranges[][2] = {{0, 4}, {6, 10}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    LiveInterval* split = interval->SplitAt(6);
+    static constexpr size_t expected[][2] = {{0, 4}};
+    ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
+    static constexpr size_t expected_split[][2] = {{6, 10}};
+    ASSERT_TRUE(RangesEquals(split, expected_split, arraysize(expected_split)));
+  }
+
+  {
+    // Test at the end of the first range.
+    static constexpr size_t ranges[][2] = {{0, 4}, {6, 10}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    LiveInterval* split = interval->SplitAt(4);
+    static constexpr size_t expected[][2] = {{0, 4}};
+    ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
+    static constexpr size_t expected_split[][2] = {{6, 10}};
+    ASSERT_TRUE(RangesEquals(split, expected_split, arraysize(expected_split)));
+  }
+
+  {
+    // Test that we get null if we split at a position where the interval is dead.
+    static constexpr size_t ranges[][2] = {{0, 4}};
+    LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+    LiveInterval* split = interval->SplitAt(5);
+    ASSERT_TRUE(split == nullptr);
+    ASSERT_TRUE(RangesEquals(interval, ranges, arraysize(ranges)));
+  }
+}
+
+}  // namespace art
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index 9849388..c797497 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -43,11 +43,11 @@
    *
    * Which becomes the following graph (numbered by lifetime position):
    *       2: constant0
-   *       3: goto
+   *       4: goto
    *           |
-   *       6: return
+   *       8: return
    *           |
-   *       9: exit
+   *       12: exit
    */
   const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
     Instruction::CONST_4 | 0 | 0,
@@ -60,14 +60,14 @@
   liveness.Analyze();
 
   LiveInterval* interval = liveness.GetInstructionFromSsaIndex(0)->GetLiveInterval();
-  ASSERT_EQ(1u, interval->GetRanges().Size());
-  LiveRange range = interval->GetRanges().Get(0);
-  ASSERT_EQ(2u, range.GetStart());
+  LiveRange* range = interval->GetFirstRange();
+  ASSERT_EQ(2u, range->GetStart());
   // Last use is the return instruction.
-  ASSERT_EQ(6u, range.GetEnd());
+  ASSERT_EQ(8u, range->GetEnd());
   HBasicBlock* block = graph->GetBlocks().Get(1);
   ASSERT_TRUE(block->GetLastInstruction()->AsReturn() != nullptr);
-  ASSERT_EQ(6u, block->GetLastInstruction()->GetLifetimePosition());
+  ASSERT_EQ(8u, block->GetLastInstruction()->GetLifetimePosition());
+  ASSERT_TRUE(range->GetNext() == nullptr);
 }
 
 TEST(LiveRangesTest, CFG2) {
@@ -81,16 +81,16 @@
    *
    * Which becomes the following graph (numbered by lifetime position):
    *       2: constant0
-   *       3: goto
+   *       4: goto
    *           |
-   *       6: equal
-   *       7: if
+   *       8: equal
+   *       10: if
    *       /       \
-   *   10: goto   13: goto
+   *   14: goto   18: goto
    *       \       /
-   *       16: return
+   *       22: return
    *         |
-   *       19: exit
+   *       26: exit
    */
   const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
     Instruction::CONST_4 | 0 | 0,
@@ -105,14 +105,14 @@
   liveness.Analyze();
 
   LiveInterval* interval = liveness.GetInstructionFromSsaIndex(0)->GetLiveInterval();
-  ASSERT_EQ(1u, interval->GetRanges().Size());
-  LiveRange range = interval->GetRanges().Get(0);
-  ASSERT_EQ(2u, range.GetStart());
+  LiveRange* range = interval->GetFirstRange();
+  ASSERT_EQ(2u, range->GetStart());
   // Last use is the return instruction.
-  ASSERT_EQ(16u, range.GetEnd());
+  ASSERT_EQ(22u, range->GetEnd());
   HBasicBlock* block = graph->GetBlocks().Get(3);
   ASSERT_TRUE(block->GetLastInstruction()->AsReturn() != nullptr);
-  ASSERT_EQ(16u, block->GetLastInstruction()->GetLifetimePosition());
+  ASSERT_EQ(22u, block->GetLastInstruction()->GetLifetimePosition());
+  ASSERT_TRUE(range->GetNext() == nullptr);
 }
 
 TEST(LiveRangesTest, CFG3) {
@@ -127,18 +127,18 @@
    *
    * Which becomes the following graph (numbered by lifetime position):
    *       2: constant0
-   *       3: constant4
-   *       4: goto
+   *       4: constant4
+   *       6: goto
    *           |
-   *       7: equal
-   *       8: if
+   *       10: equal
+   *       12: if
    *       /       \
-   *   11: goto   14: goto
+   *   16: goto   20: goto
    *       \       /
-   *       16: phi
-   *       17: return
+   *       22: phi
+   *       24: return
    *         |
-   *       20: exit
+   *       28: exit
    */
   const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
     Instruction::CONST_4 | 0 | 0,
@@ -154,34 +154,34 @@
 
   // Test for the 0 constant.
   LiveInterval* interval = liveness.GetInstructionFromSsaIndex(0)->GetLiveInterval();
-  ASSERT_EQ(1u, interval->GetRanges().Size());
-  LiveRange range = interval->GetRanges().Get(0);
-  ASSERT_EQ(2u, range.GetStart());
+  LiveRange* range = interval->GetFirstRange();
+  ASSERT_EQ(2u, range->GetStart());
   // Last use is the phi at the return block so instruction is live until
   // the end of the then block.
-  ASSERT_EQ(12u, range.GetEnd());
+  ASSERT_EQ(18u, range->GetEnd());
+  ASSERT_TRUE(range->GetNext() == nullptr);
 
   // Test for the 4 constant.
   interval = liveness.GetInstructionFromSsaIndex(1)->GetLiveInterval();
   // The then branch is a hole for this constant, therefore its interval has 2 ranges.
-  ASSERT_EQ(2u, interval->GetRanges().Size());
-  // First range is the else block.
-  range = interval->GetRanges().Get(0);
-  ASSERT_EQ(13u, range.GetStart());
-  // Last use is the phi at the return block.
-  ASSERT_EQ(15u, range.GetEnd());
-  // Second range starts from the definition and ends at the if block.
-  range = interval->GetRanges().Get(1);
-  ASSERT_EQ(3u, range.GetStart());
+  // First range starts from the definition and ends at the if block.
+  range = interval->GetFirstRange();
+  ASSERT_EQ(4u, range->GetStart());
-  // 9 is the end of the if block.
+  // 14 is the end of the if block.
-  ASSERT_EQ(9u, range.GetEnd());
+  ASSERT_EQ(14u, range->GetEnd());
+  // Second range is the else block.
+  range = range->GetNext();
+  ASSERT_EQ(18u, range->GetStart());
+  // Last use is the phi at the return block.
+  ASSERT_EQ(22u, range->GetEnd());
+  ASSERT_TRUE(range->GetNext() == nullptr);
 
   // Test for the phi.
   interval = liveness.GetInstructionFromSsaIndex(3)->GetLiveInterval();
-  ASSERT_EQ(1u, interval->GetRanges().Size());
-  range = interval->GetRanges().Get(0);
-  ASSERT_EQ(16u, range.GetStart());
-  ASSERT_EQ(17u, range.GetEnd());
+  range = interval->GetFirstRange();
+  ASSERT_EQ(22u, range->GetStart());
+  ASSERT_EQ(24u, range->GetEnd());
+  ASSERT_TRUE(range->GetNext() == nullptr);
 }
 
 TEST(LiveRangesTest, Loop) {
@@ -195,21 +195,21 @@
    *
    * Which becomes the following graph (numbered by lifetime position):
    *       2: constant0
-   *       3: constant4
-   *       4: constant5
-   *       5: goto
-   *           |
+   *       4: constant4
+   *       6: constant5
    *       8: goto
    *           |
-   *       10: phi
-   *       11: equal
-   *       12: if +++++
+   *       12: goto
+   *           |
+   *       14: phi
+   *       16: equal
+   *       18: if +++++
    *        |       \ +
-   *        |     15: goto
+   *        |     22: goto
    *        |
-   *       18: return
+   *       26: return
    *         |
-   *       21: exit
+   *       30: exit
    */
 
   const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
@@ -228,36 +228,36 @@
 
   // Test for the 0 constant.
   LiveInterval* interval = liveness.GetInstructionFromSsaIndex(0)->GetLiveInterval();
-  ASSERT_EQ(1u, interval->GetRanges().Size());
-  LiveRange range = interval->GetRanges().Get(0);
-  ASSERT_EQ(2u, range.GetStart());
+  LiveRange* range = interval->GetFirstRange();
+  ASSERT_EQ(2u, range->GetStart());
   // Last use is the loop phi so instruction is live until
   // the end of the pre loop header.
-  ASSERT_EQ(9u, range.GetEnd());
+  ASSERT_EQ(14u, range->GetEnd());
+  ASSERT_TRUE(range->GetNext() == nullptr);
 
   // Test for the 4 constant.
   interval = liveness.GetInstructionFromSsaIndex(1)->GetLiveInterval();
+  range = interval->GetFirstRange();
   // The instruction is live until the end of the loop.
-  ASSERT_EQ(1u, interval->GetRanges().Size());
-  range = interval->GetRanges().Get(0);
-  ASSERT_EQ(3u, range.GetStart());
-  ASSERT_EQ(16u, range.GetEnd());
+  ASSERT_EQ(4u, range->GetStart());
+  ASSERT_EQ(24u, range->GetEnd());
+  ASSERT_TRUE(range->GetNext() == nullptr);
 
   // Test for the 5 constant.
   interval = liveness.GetInstructionFromSsaIndex(2)->GetLiveInterval();
-  // The instruction is live until the return instruction of the loop.
-  ASSERT_EQ(1u, interval->GetRanges().Size());
-  range = interval->GetRanges().Get(0);
-  ASSERT_EQ(4u, range.GetStart());
-  ASSERT_EQ(18u, range.GetEnd());
+  range = interval->GetFirstRange();
+  // The instruction is live until the return instruction after the loop.
+  ASSERT_EQ(6u, range->GetStart());
+  ASSERT_EQ(26u, range->GetEnd());
+  ASSERT_TRUE(range->GetNext() == nullptr);
 
   // Test for the phi.
   interval = liveness.GetInstructionFromSsaIndex(3)->GetLiveInterval();
-  ASSERT_EQ(1u, interval->GetRanges().Size());
-  range = interval->GetRanges().Get(0);
+  range = interval->GetFirstRange();
   // Instruction is consumed by the if.
-  ASSERT_EQ(10u, range.GetStart());
-  ASSERT_EQ(11u, range.GetEnd());
+  ASSERT_EQ(14u, range->GetStart());
+  ASSERT_EQ(16u, range->GetEnd());
+  ASSERT_TRUE(range->GetNext() == nullptr);
 }
 
 }  // namespace art
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 74ba520..752466b 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -388,6 +388,7 @@
 }
 
 void HInstruction::ReplaceWith(HInstruction* other) {
+  DCHECK(other != nullptr);
   for (HUseIterator<HInstruction> it(GetUses()); !it.Done(); it.Advance()) {
     HUseListNode<HInstruction>* current = it.Current();
     HInstruction* user = current->GetUser();
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 476f24e..b1c8016 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -276,6 +276,7 @@
   HInstruction* GetLastInstruction() const { return instructions_.last_instruction_; }
   const HInstructionList& GetInstructions() const { return instructions_; }
   const HInstructionList& GetPhis() const { return phis_; }
+  HInstruction* GetFirstPhi() const { return phis_.first_instruction_; }
 
   void AddSuccessor(HBasicBlock* block) {
     successors_.Add(block);
@@ -397,9 +398,9 @@
 #undef FORWARD_DECLARATION
 
 #define DECLARE_INSTRUCTION(type)                          \
-  virtual void Accept(HGraphVisitor* visitor);             \
   virtual const char* DebugName() const { return #type; }  \
   virtual H##type* As##type() { return this; }             \
+  virtual void Accept(HGraphVisitor* visitor)              \
 
 template <typename T>
 class HUseListNode : public ArenaObject {
@@ -734,7 +735,7 @@
  public:
   HReturnVoid() { }
 
-  DECLARE_INSTRUCTION(ReturnVoid)
+  DECLARE_INSTRUCTION(ReturnVoid);
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HReturnVoid);
@@ -748,7 +749,7 @@
     SetRawInputAt(0, value);
   }
 
-  DECLARE_INSTRUCTION(Return)
+  DECLARE_INSTRUCTION(Return);
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HReturn);
@@ -761,7 +762,7 @@
  public:
   HExit() { }
 
-  DECLARE_INSTRUCTION(Exit)
+  DECLARE_INSTRUCTION(Exit);
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HExit);
@@ -776,7 +777,7 @@
     return GetBlock()->GetSuccessors().Get(0);
   }
 
-  DECLARE_INSTRUCTION(Goto)
+  DECLARE_INSTRUCTION(Goto);
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HGoto);
@@ -798,7 +799,7 @@
     return GetBlock()->GetSuccessors().Get(1);
   }
 
-  DECLARE_INSTRUCTION(If)
+  DECLARE_INSTRUCTION(If);
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HIf);
@@ -837,7 +838,7 @@
 
   virtual Primitive::Type GetType() const { return Primitive::kPrimBoolean; }
 
-  DECLARE_INSTRUCTION(Equal)
+  DECLARE_INSTRUCTION(Equal);
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HEqual);
@@ -848,7 +849,7 @@
  public:
   explicit HLocal(uint16_t reg_number) : reg_number_(reg_number) { }
 
-  DECLARE_INSTRUCTION(Local)
+  DECLARE_INSTRUCTION(Local);
 
   uint16_t GetRegNumber() const { return reg_number_; }
 
@@ -870,7 +871,7 @@
 
   HLocal* GetLocal() const { return reinterpret_cast<HLocal*>(InputAt(0)); }
 
-  DECLARE_INSTRUCTION(LoadLocal)
+  DECLARE_INSTRUCTION(LoadLocal);
 
  private:
   const Primitive::Type type_;
@@ -889,7 +890,7 @@
 
   HLocal* GetLocal() const { return reinterpret_cast<HLocal*>(InputAt(0)); }
 
-  DECLARE_INSTRUCTION(StoreLocal)
+  DECLARE_INSTRUCTION(StoreLocal);
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HStoreLocal);
@@ -904,7 +905,7 @@
   int32_t GetValue() const { return value_; }
   virtual Primitive::Type GetType() const { return Primitive::kPrimInt; }
 
-  DECLARE_INSTRUCTION(IntConstant)
+  DECLARE_INSTRUCTION(IntConstant);
 
  private:
   const int32_t value_;
@@ -920,7 +921,7 @@
 
   virtual Primitive::Type GetType() const { return Primitive::kPrimLong; }
 
-  DECLARE_INSTRUCTION(LongConstant)
+  DECLARE_INSTRUCTION(LongConstant);
 
  private:
   const int64_t value_;
@@ -980,7 +981,7 @@
 
   uint32_t GetIndexInDexCache() const { return index_in_dex_cache_; }
 
-  DECLARE_INSTRUCTION(InvokeStatic)
+  DECLARE_INSTRUCTION(InvokeStatic);
 
  private:
   const uint32_t index_in_dex_cache_;
@@ -1000,7 +1001,7 @@
   // Calls runtime so needs an environment.
   virtual bool NeedsEnvironment() const { return true; }
 
-  DECLARE_INSTRUCTION(NewInstance)
+  DECLARE_INSTRUCTION(NewInstance);
 
  private:
   const uint32_t dex_pc_;
@@ -1091,15 +1092,16 @@
   void AddInput(HInstruction* input);
 
   virtual Primitive::Type GetType() const { return type_; }
+  void SetType(Primitive::Type type) { type_ = type; }
 
   uint32_t GetRegNumber() const { return reg_number_; }
 
-  DECLARE_INSTRUCTION(Phi)
+  DECLARE_INSTRUCTION(Phi);
 
  protected:
   GrowableArray<HInstruction*> inputs_;
   const uint32_t reg_number_;
-  const Primitive::Type type_;
+  Primitive::Type type_;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HPhi);
@@ -1179,7 +1181,7 @@
 
   size_t NumMoves() const { return moves_.Size(); }
 
-  DECLARE_INSTRUCTION(ParallelMove)
+  DECLARE_INSTRUCTION(ParallelMove);
 
  private:
   GrowableArray<MoveOperands*> moves_;
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 286f48a..dfbb488 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -24,6 +24,7 @@
 #include "driver/dex_compilation_unit.h"
 #include "graph_visualizer.h"
 #include "nodes.h"
+#include "register_allocator.h"
 #include "ssa_liveness_analysis.h"
 #include "utils/arena_allocator.h"
 
@@ -96,8 +97,6 @@
     }
     return nullptr;
   }
-  HGraphVisualizer visualizer(visualizer_output_.get(), graph, kStringFilter, dex_compilation_unit);
-  visualizer.DumpGraph("builder");
 
   InstructionSet instruction_set = GetCompilerDriver()->GetInstructionSet();
   // The optimizing compiler currently does not have a Thumb2 assembler.
@@ -112,6 +111,10 @@
     return nullptr;
   }
 
+  HGraphVisualizer visualizer(
+      visualizer_output_.get(), graph, kStringFilter, *codegen, dex_compilation_unit);
+  visualizer.DumpGraph("builder");
+
   CodeVectorAllocator allocator;
   codegen->Compile(&allocator);
 
@@ -128,9 +131,13 @@
   visualizer.DumpGraph("ssa");
 
   graph->FindNaturalLoops();
-  SsaLivenessAnalysis(*graph).Analyze();
+  SsaLivenessAnalysis liveness(*graph);
+  liveness.Analyze();
   visualizer.DumpGraph("liveness");
 
+  RegisterAllocator(graph->GetArena(), *codegen).AllocateRegisters(liveness);
+  visualizer.DumpGraph("register");
+
   return new CompiledMethod(GetCompilerDriver(),
                             instruction_set,
                             allocator.GetMemory(),
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 67c4850..36a6a21 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -17,6 +17,10 @@
 #ifndef ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
 #define ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
 
+#include "ssa_liveness_analysis.h"
+
+namespace art {
+
 #define NUM_INSTRUCTIONS(...)  \
   (sizeof((uint16_t[]) {__VA_ARGS__}) /sizeof(uint16_t))
 
@@ -29,4 +33,21 @@
 #define TWO_REGISTERS_CODE_ITEM(...)                                       \
     { 2, 0, 0, 0, 0, 0, NUM_INSTRUCTIONS(__VA_ARGS__), 0, __VA_ARGS__ }
 
+#define THREE_REGISTERS_CODE_ITEM(...)                                     \
+    { 3, 0, 0, 0, 0, 0, NUM_INSTRUCTIONS(__VA_ARGS__), 0, __VA_ARGS__ }
+
+LiveInterval* BuildInterval(const size_t ranges[][2],
+                            size_t number_of_ranges,
+                            ArenaAllocator* allocator,
+                            int reg = -1) {
+  LiveInterval* interval = new (allocator) LiveInterval(allocator, Primitive::kPrimInt);
+  for (size_t i = number_of_ranges; i > 0; --i) {
+    interval->AddRange(ranges[i - 1][0], ranges[i - 1][1]);
+  }
+  interval->SetRegister(reg);
+  return interval;
+}
+
+}  // namespace art
+
 #endif  // ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
new file mode 100644
index 0000000..dd175d2
--- /dev/null
+++ b/compiler/optimizing/register_allocator.cc
@@ -0,0 +1,378 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "register_allocator.h"
+
+#include "code_generator.h"
+#include "ssa_liveness_analysis.h"
+
+namespace art {
+
+static constexpr size_t kMaxLifetimePosition = -1;
+
+RegisterAllocator::RegisterAllocator(ArenaAllocator* allocator, const CodeGenerator& codegen)
+      : allocator_(allocator),
+        codegen_(codegen),
+        unhandled_(allocator, 0),
+        handled_(allocator, 0),
+        active_(allocator, 0),
+        inactive_(allocator, 0),
+        processing_core_registers_(false),
+        number_of_registers_(-1),
+        registers_array_(nullptr),
+        blocked_registers_(allocator->AllocArray<bool>(codegen.GetNumberOfRegisters())) {
+  codegen.SetupBlockedRegisters(blocked_registers_);
+}
+
+static bool ShouldProcess(bool processing_core_registers, HInstruction* instruction) {
+  bool is_core_register = (instruction->GetType() != Primitive::kPrimDouble)
+      && (instruction->GetType() != Primitive::kPrimFloat);
+  return processing_core_registers == is_core_register;
+}
+
+void RegisterAllocator::AllocateRegistersInternal(const SsaLivenessAnalysis& liveness) {
+  number_of_registers_ = processing_core_registers_
+      ? codegen_.GetNumberOfCoreRegisters()
+      : codegen_.GetNumberOfFloatingPointRegisters();
+
+  registers_array_ = allocator_->AllocArray<size_t>(number_of_registers_);
+
+  // Iterate post-order, to ensure the list is sorted, and the last added interval
+  // is the one with the lowest start position.
+  for (size_t i = liveness.GetNumberOfSsaValues(); i > 0; --i) {
+    HInstruction* instruction = liveness.GetInstructionFromSsaIndex(i - 1);
+    if (ShouldProcess(processing_core_registers_, instruction)) {
+      LiveInterval* current = instruction->GetLiveInterval();
+      DCHECK(unhandled_.IsEmpty() || current->StartsBefore(unhandled_.Peek()));
+      unhandled_.Add(current);
+    }
+  }
+
+  LinearScan();
+  if (kIsDebugBuild) {
+    ValidateInternal(liveness, true);
+  }
+}
+
+bool RegisterAllocator::ValidateInternal(const SsaLivenessAnalysis& liveness,
+                                         bool log_fatal_on_failure) const {
+  // To simplify unit testing, we eagerly create the array of intervals, and
+  // call the helper method.
+  GrowableArray<LiveInterval*> intervals(allocator_, 0);
+  for (size_t i = 0; i < liveness.GetNumberOfSsaValues(); ++i) {
+    HInstruction* instruction = liveness.GetInstructionFromSsaIndex(i);
+    if (ShouldProcess(processing_core_registers_, instruction)) {
+      intervals.Add(instruction->GetLiveInterval());
+    }
+  }
+  return ValidateIntervals(intervals, codegen_, allocator_, processing_core_registers_,
+                           log_fatal_on_failure);
+}
+
+bool RegisterAllocator::ValidateIntervals(const GrowableArray<LiveInterval*>& ranges,
+                                          const CodeGenerator& codegen,
+                                          ArenaAllocator* allocator,
+                                          bool processing_core_registers,
+                                          bool log_fatal_on_failure) {
+  size_t number_of_registers = processing_core_registers
+      ? codegen.GetNumberOfCoreRegisters()
+      : codegen.GetNumberOfFloatingPointRegisters();
+  GrowableArray<ArenaBitVector*> bit_vectors(allocator, number_of_registers);
+
+  // Allocate a bit vector per register. A live interval that has a register
+  // allocated will populate the associated bit vector based on its live ranges.
+  for (size_t i = 0; i < number_of_registers; i++) {
+    bit_vectors.Add(new (allocator) ArenaBitVector(allocator, 0, true));
+  }
+
+  for (size_t i = 0, e = ranges.Size(); i < e; ++i) {
+    LiveInterval* current = ranges.Get(i);
+    do {
+      if (!current->HasRegister()) {
+        continue;
+      }
+      BitVector* vector = bit_vectors.Get(current->GetRegister());
+      LiveRange* range = current->GetFirstRange();
+      do {
+        for (size_t j = range->GetStart(); j < range->GetEnd(); ++j) {
+          if (vector->IsBitSet(j)) {
+            if (log_fatal_on_failure) {
+              std::ostringstream message;
+              message << "Register conflict at " << j << " for ";
+              if (processing_core_registers) {
+                codegen.DumpCoreRegister(message, current->GetRegister());
+              } else {
+                codegen.DumpFloatingPointRegister(message, current->GetRegister());
+              }
+              LOG(FATAL) << message.str();
+            } else {
+              return false;
+            }
+          } else {
+            vector->SetBit(j);
+          }
+        }
+      } while ((range = range->GetNext()) != nullptr);
+    } while ((current = current->GetNextSibling()) != nullptr);
+  }
+  return true;
+}
+
+void RegisterAllocator::DumpInterval(std::ostream& stream, LiveInterval* interval) {
+  interval->Dump(stream);
+  stream << ": ";
+  if (interval->HasRegister()) {
+    if (processing_core_registers_) {
+      codegen_.DumpCoreRegister(stream, interval->GetRegister());
+    } else {
+      codegen_.DumpFloatingPointRegister(stream, interval->GetRegister());
+    }
+  } else {
+    stream << "spilled";
+  }
+  stream << std::endl;
+}
+
+// By the book implementation of a linear scan register allocator.
+void RegisterAllocator::LinearScan() {
+  while (!unhandled_.IsEmpty()) {
+    // (1) Remove interval with the lowest start position from unhandled.
+    LiveInterval* current = unhandled_.Pop();
+    size_t position = current->GetStart();
+
+    // (2) Remove currently active intervals that are dead at this position.
+    //     Move active intervals that have a lifetime hole at this position
+    //     to inactive.
+    for (size_t i = 0; i < active_.Size(); ++i) {
+      LiveInterval* interval = active_.Get(i);
+      if (interval->IsDeadAt(position)) {
+        active_.Delete(interval);
+        --i;
+        handled_.Add(interval);
+      } else if (!interval->Covers(position)) {
+        active_.Delete(interval);
+        --i;
+        inactive_.Add(interval);
+      }
+    }
+
+    // (3) Remove currently inactive intervals that are dead at this position.
+    //     Move inactive intervals that cover this position to active.
+    for (size_t i = 0; i < inactive_.Size(); ++i) {
+      LiveInterval* interval = inactive_.Get(i);
+      if (interval->IsDeadAt(position)) {
+        inactive_.Delete(interval);
+        --i;
+        handled_.Add(interval);
+      } else if (interval->Covers(position)) {
+        inactive_.Delete(interval);
+        --i;
+        active_.Add(interval);
+      }
+    }
+
+    // (4) Try to find an available register.
+    bool success = TryAllocateFreeReg(current);
+
+    // (5) If no register could be found, we need to spill.
+    if (!success) {
+      success = AllocateBlockedReg(current);
+    }
+
+    // (6) If the interval had a register allocated, add it to the list of active
+    //     intervals.
+    if (success) {
+      active_.Add(current);
+    }
+  }
+}
+
+// Find a free register. If multiple are found, pick the register that
+// is free the longest.
+bool RegisterAllocator::TryAllocateFreeReg(LiveInterval* current) {
+  size_t* free_until = registers_array_;
+
+  // First set all registers to be free.
+  for (size_t i = 0; i < number_of_registers_; ++i) {
+    free_until[i] = kMaxLifetimePosition;
+  }
+
+  // For each active interval, set its register to not free.
+  for (size_t i = 0, e = active_.Size(); i < e; ++i) {
+    LiveInterval* interval = active_.Get(i);
+    DCHECK(interval->HasRegister());
+    free_until[interval->GetRegister()] = 0;
+  }
+
+  // For each inactive interval, set its register to be free until
+  // the next intersection with `current`.
+  // Thanks to SSA, this should only be needed for intervals
+  // that are the result of a split.
+  for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
+    LiveInterval* inactive = inactive_.Get(i);
+    DCHECK(inactive->HasRegister());
+    size_t next_intersection = inactive->FirstIntersectionWith(current);
+    if (next_intersection != kNoLifetime) {
+      free_until[inactive->GetRegister()] = next_intersection;
+    }
+  }
+
+  // Pick the register that is free the longest.
+  int reg = -1;
+  for (size_t i = 0; i < number_of_registers_; ++i) {
+    if (IsBlocked(i)) continue;
+    if (reg == -1 || free_until[i] > free_until[reg]) {
+      reg = i;
+      if (free_until[i] == kMaxLifetimePosition) break;
+    }
+  }
+
+  // If we could not find a register, we need to spill.
+  if (reg == -1 || free_until[reg] == 0) {
+    return false;
+  }
+
+  current->SetRegister(reg);
+  if (!current->IsDeadAt(free_until[reg])) {
+    // If the register is only available for a subset of live ranges
+    // covered by `current`, split `current` at the position where
+    // the register is not available anymore.
+    LiveInterval* split = Split(current, free_until[reg]);
+    DCHECK(split != nullptr);
+    AddToUnhandled(split);
+  }
+  return true;
+}
+
+bool RegisterAllocator::IsBlocked(int reg) const {
+  // TODO: This only works for core registers and needs to be adjusted for
+  // floating point registers.
+  DCHECK(processing_core_registers_);
+  return blocked_registers_[reg];
+}
+
+// Find the register that is used the last, and spill the interval
+// that holds it. If the first register use of `current` comes after
+// the next use of that register, we spill `current` instead.
+bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
+  size_t first_register_use = current->FirstRegisterUse();
+  if (current->FirstRegisterUse() == kNoLifetime) {
+    // TODO: Allocate spill slot for `current`.
+    return false;
+  }
+
+  // First set all registers as not being used.
+  size_t* next_use = registers_array_;
+  for (size_t i = 0; i < number_of_registers_; ++i) {
+    next_use[i] = kMaxLifetimePosition;
+  }
+
+  // For each active interval, find the next use of its register after the
+  // start of current.
+  for (size_t i = 0, e = active_.Size(); i < e; ++i) {
+    LiveInterval* active = active_.Get(i);
+    DCHECK(active->HasRegister());
+    size_t use = active->FirstRegisterUseAfter(current->GetStart());
+    if (use != kNoLifetime) {
+      next_use[active->GetRegister()] = use;
+    }
+  }
+
+  // For each inactive interval, find the next use of its register after the
+  // start of current.
+  // Thanks to SSA, this should only be needed for intervals
+  // that are the result of a split.
+  for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
+    LiveInterval* inactive = inactive_.Get(i);
+    DCHECK(inactive->HasRegister());
+    size_t use = inactive->FirstRegisterUseAfter(current->GetStart());
+    if (use != kNoLifetime) {
+      next_use[inactive->GetRegister()] = use;
+    }
+  }
+
+  // Pick the register that is used the last.
+  int reg = -1;
+  for (size_t i = 0; i < number_of_registers_; ++i) {
+    if (IsBlocked(i)) continue;
+    if (reg == -1 || next_use[i] > next_use[reg]) {
+      reg = i;
+      if (next_use[i] == kMaxLifetimePosition) break;
+    }
+  }
+
+  if (first_register_use >= next_use[reg]) {
+    // If the first register use of that instruction is after the next use
+    // of the found register, we split this interval just before its first
+    // register use.
+    LiveInterval* split = Split(current, first_register_use - 1);
+    AddToUnhandled(split);
+    return false;
+  } else {
+    // Use this register and spill the active and inactive intervals that
+    // have that register.
+    current->SetRegister(reg);
+
+    for (size_t i = 0, e = active_.Size(); i < e; ++i) {
+      LiveInterval* active = active_.Get(i);
+      if (active->GetRegister() == reg) {
+        LiveInterval* split = Split(active, current->GetStart());
+        active_.DeleteAt(i);
+        handled_.Add(active);
+        AddToUnhandled(split);
+        break;
+      }
+    }
+
+    for (size_t i = 0; i < inactive_.Size(); ++i) {
+      LiveInterval* inactive = inactive_.Get(i);
+      if (inactive->GetRegister() == reg) {
+        LiveInterval* split = Split(inactive, current->GetStart());
+        inactive_.DeleteAt(i);
+        handled_.Add(inactive);
+        AddToUnhandled(split);
+        --i;
+      }
+    }
+
+    return true;
+  }
+}
+
+void RegisterAllocator::AddToUnhandled(LiveInterval* interval) {
+  for (size_t i = unhandled_.Size(); i > 0; --i) {
+    LiveInterval* current = unhandled_.Get(i - 1);
+    if (current->StartsAfter(interval)) {
+      unhandled_.InsertAt(i, interval);
+      break;
+    }
+  }
+}
+
+LiveInterval* RegisterAllocator::Split(LiveInterval* interval, size_t position) {
+  DCHECK(position >= interval->GetStart());
+  DCHECK(!interval->IsDeadAt(position));
+  if (position == interval->GetStart()) {
+    // Spill slot will be allocated when handling `interval` again.
+    interval->ClearRegister();
+    return interval;
+  } else {
+    LiveInterval* new_interval = interval->SplitAt(position);
+    // TODO: Allocate spill slot for `interval`.
+    return new_interval;
+  }
+}
+
+}  // namespace art
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
new file mode 100644
index 0000000..e575b96
--- /dev/null
+++ b/compiler/optimizing/register_allocator.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_H_
+#define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_H_
+
+#include "base/macros.h"
+#include "utils/growable_array.h"
+
+namespace art {
+
+class CodeGenerator;
+class LiveInterval;
+class SsaLivenessAnalysis;
+
+/**
+ * An implementation of a linear scan register allocator on an `HGraph` with SSA form.
+ */
+class RegisterAllocator {
+ public:
+  RegisterAllocator(ArenaAllocator* allocator, const CodeGenerator& codegen);
+
+  // Main entry point for the register allocator. Given the liveness analysis,
+  // allocates registers to live intervals.
+  void AllocateRegisters(const SsaLivenessAnalysis& liveness) {
+    processing_core_registers_ = true;
+    AllocateRegistersInternal(liveness);
+    processing_core_registers_ = false;
+    AllocateRegistersInternal(liveness);
+  }
+
+  // Validate that the register allocator did not allocate the same register to
+  // intervals that intersect each other. Returns false if it did not.
+  bool Validate(const SsaLivenessAnalysis& liveness, bool log_fatal_on_failure) {
+    processing_core_registers_ = true;
+    if (!ValidateInternal(liveness, log_fatal_on_failure)) {
+      return false;
+    }
+    processing_core_registers_ = false;
+    return ValidateInternal(liveness, log_fatal_on_failure);
+  }
+
+  // Helper method for validation. Used by unit testing.
+  static bool ValidateIntervals(const GrowableArray<LiveInterval*>& intervals,
+                                const CodeGenerator& codegen,
+                                ArenaAllocator* allocator,
+                                bool processing_core_registers,
+                                bool log_fatal_on_failure);
+
+ private:
+  // Main methods of the allocator.
+  void LinearScan();
+  bool TryAllocateFreeReg(LiveInterval* interval);
+  bool AllocateBlockedReg(LiveInterval* interval);
+
+  // Add `interval` in the sorted list of unhandled intervals.
+  void AddToUnhandled(LiveInterval* interval);
+
+  // Split `interval` at the position `at`. The new interval starts at `at`.
+  LiveInterval* Split(LiveInterval* interval, size_t at);
+
+  // Returns whether `reg` is blocked by the code generator.
+  bool IsBlocked(int reg) const;
+
+  // Helper methods.
+  void AllocateRegistersInternal(const SsaLivenessAnalysis& liveness);
+  bool ValidateInternal(const SsaLivenessAnalysis& liveness, bool log_fatal_on_failure) const;
+  void DumpInterval(std::ostream& stream, LiveInterval* interval);
+
+  ArenaAllocator* const allocator_;
+  const CodeGenerator& codegen_;
+
+  // List of intervals that must be processed, ordered by start position. Last entry
+  // is the interval that has the lowest start position.
+  GrowableArray<LiveInterval*> unhandled_;
+
+  // List of intervals that have been processed.
+  GrowableArray<LiveInterval*> handled_;
+
+  // List of intervals that are currently active when processing a new live interval.
+  // That is, they have a live range that spans the start of the new interval.
+  GrowableArray<LiveInterval*> active_;
+
+  // List of intervals that are currently inactive when processing a new live interval.
+  // That is, they have a lifetime hole that spans the start of the new interval.
+  GrowableArray<LiveInterval*> inactive_;
+
+  // True if processing core registers. False if processing floating
+  // point registers.
+  bool processing_core_registers_;
+
+  // Number of registers for the current register kind (core or floating point).
+  size_t number_of_registers_;
+
+  // Temporary array, allocated ahead of time for simplicity.
+  size_t* registers_array_;
+
+  // Blocked registers, as decided by the code generator.
+  bool* const blocked_registers_;
+
+  DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_H_
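
The private helpers above mirror the phases of a textbook linear scan. As a rough, hedged sketch of how they are intended to fit together (the actual body lives in register_allocator.cc and may differ; the bookkeeping of the active and inactive lists is elided):

    // Illustrative pseudo-body only, not the implementation added by this change.
    while (!unhandled_.IsEmpty()) {
      LiveInterval* current = unhandled_.Pop();  // Entry with the lowest start position.

      // (Elided:) based on current->GetStart(), retire intervals from active_ and
      // inactive_ into handled_, and move intervals between active_ and inactive_
      // as their lifetime holes begin or end.

      if (!TryAllocateFreeReg(current)) {
        AllocateBlockedReg(current);
      }
      if (current->HasRegister()) {
        active_.Add(current);
      }
    }
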
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
new file mode 100644
index 0000000..019d0f8
--- /dev/null
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "builder.h"
+#include "code_generator.h"
+#include "dex_file.h"
+#include "dex_instruction.h"
+#include "nodes.h"
+#include "optimizing_unit_test.h"
+#include "register_allocator.h"
+#include "ssa_liveness_analysis.h"
+#include "utils/arena_allocator.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+// Note: the register allocator tests rely on the fact that constants have live
+// intervals and registers get allocated to them.
+
+static bool Check(const uint16_t* data) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+  HGraphBuilder builder(&allocator);
+  const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
+  HGraph* graph = builder.BuildGraph(*item);
+  graph->BuildDominatorTree();
+  graph->TransformToSSA();
+  graph->FindNaturalLoops();
+  SsaLivenessAnalysis liveness(*graph);
+  liveness.Analyze();
+  CodeGenerator* codegen = CodeGenerator::Create(&allocator, graph, kX86);
+  RegisterAllocator register_allocator(&allocator, *codegen);
+  register_allocator.AllocateRegisters(liveness);
+  return register_allocator.Validate(liveness, false);
+}
+
+/**
+ * Unit testing of RegisterAllocator::ValidateIntervals. Register allocator
+ * tests are based on this validation method.
+ */
+TEST(RegisterAllocatorTest, ValidateIntervals) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+  HGraph* graph = new (&allocator) HGraph(&allocator);
+  CodeGenerator* codegen = CodeGenerator::Create(&allocator, graph, kX86);
+  GrowableArray<LiveInterval*> intervals(&allocator, 0);
+
+  // Test with two intervals of the same range.
+  {
+    static constexpr size_t ranges[][2] = {{0, 42}};
+    intervals.Add(BuildInterval(ranges, arraysize(ranges), &allocator, 0));
+    intervals.Add(BuildInterval(ranges, arraysize(ranges), &allocator, 1));
+    ASSERT_TRUE(RegisterAllocator::ValidateIntervals(intervals, *codegen, &allocator, true, false));
+
+    intervals.Get(1)->SetRegister(0);
+    ASSERT_FALSE(RegisterAllocator::ValidateIntervals(intervals, *codegen, &allocator, true, false));
+    intervals.Reset();
+  }
+
+  // Test with two non-intersecting intervals.
+  {
+    static constexpr size_t ranges1[][2] = {{0, 42}};
+    intervals.Add(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+    static constexpr size_t ranges2[][2] = {{42, 43}};
+    intervals.Add(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
+    ASSERT_TRUE(RegisterAllocator::ValidateIntervals(intervals, *codegen, &allocator, true, false));
+
+    intervals.Get(1)->SetRegister(0);
+    ASSERT_TRUE(RegisterAllocator::ValidateIntervals(intervals, *codegen, &allocator, true, false));
+    intervals.Reset();
+  }
+
+  // Test with two non-intersecting intervals, with one with a lifetime hole.
+  {
+    static constexpr size_t ranges1[][2] = {{0, 42}, {45, 48}};
+    intervals.Add(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+    static constexpr size_t ranges2[][2] = {{42, 43}};
+    intervals.Add(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
+    ASSERT_TRUE(RegisterAllocator::ValidateIntervals(intervals, *codegen, &allocator, true, false));
+
+    intervals.Get(1)->SetRegister(0);
+    ASSERT_TRUE(RegisterAllocator::ValidateIntervals(intervals, *codegen, &allocator, true, false));
+    intervals.Reset();
+  }
+
+  // Test with intersecting intervals.
+  {
+    static constexpr size_t ranges1[][2] = {{0, 42}, {44, 48}};
+    intervals.Add(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+    static constexpr size_t ranges2[][2] = {{42, 47}};
+    intervals.Add(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
+    ASSERT_TRUE(RegisterAllocator::ValidateIntervals(intervals, *codegen, &allocator, true, false));
+
+    intervals.Get(1)->SetRegister(0);
+    ASSERT_FALSE(RegisterAllocator::ValidateIntervals(intervals, *codegen, &allocator, true, false));
+    intervals.Reset();
+  }
+
+  // Test with siblings.
+  {
+    static constexpr size_t ranges1[][2] = {{0, 42}, {44, 48}};
+    intervals.Add(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+    intervals.Get(0)->SplitAt(43);
+    static constexpr size_t ranges2[][2] = {{42, 47}};
+    intervals.Add(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
+    ASSERT_TRUE(RegisterAllocator::ValidateIntervals(intervals, *codegen, &allocator, true, false));
+
+    intervals.Get(1)->SetRegister(0);
+    // Sibling of the first interval has no register allocated to it.
+    ASSERT_TRUE(RegisterAllocator::ValidateIntervals(intervals, *codegen, &allocator, true, false));
+
+    intervals.Get(0)->GetNextSibling()->SetRegister(0);
+    ASSERT_FALSE(RegisterAllocator::ValidateIntervals(intervals, *codegen, &allocator, true, false));
+  }
+}
+
+TEST(RegisterAllocatorTest, CFG1) {
+  /*
+   * Test the following snippet:
+   *  return 0;
+   *
+   * Which becomes the following graph:
+   *       constant0
+   *       goto
+   *        |
+   *       return
+   *        |
+   *       exit
+   */
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::RETURN);
+
+  ASSERT_TRUE(Check(data));
+}
+
+TEST(RegisterAllocatorTest, Loop1) {
+  /*
+   * Test the following snippet:
+   *  int a = 0;
+   *  while (a == a) {
+   *    a = 4;
+   *  }
+   *  return 5;
+   *
+   * Which becomes the following graph:
+   *       constant0
+   *       constant4
+   *       constant5
+   *       goto
+   *        |
+   *       goto
+   *        |
+   *       phi
+   *       equal
+   *       if +++++
+   *        |       \ +
+   *        |     goto
+   *        |
+   *       return
+   *        |
+   *       exit
+   */
+
+  const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::IF_EQ, 4,
+    Instruction::CONST_4 | 4 << 12 | 0,
+    Instruction::GOTO | 0xFD00,
+    Instruction::CONST_4 | 5 << 12 | 1 << 8,
+    Instruction::RETURN | 1 << 8);
+
+  ASSERT_TRUE(Check(data));
+}
+
+TEST(RegisterAllocatorTest, Loop2) {
+  /*
+   * Test the following snippet:
+   *  int a = 0;
+   *  while (a == 8) {
+   *    a = 4 + 5;
+   *  }
+   *  return 6 + 7;
+   *
+   * Which becomes the following graph:
+   *       constant0
+   *       constant4
+   *       constant5
+   *       constant6
+   *       constant7
+   *       constant8
+   *       goto
+   *        |
+   *       goto
+   *        |
+   *       phi
+   *       equal
+   *       if +++++
+   *        |       \ +
+   *        |      4 + 5
+   *        |      goto
+   *        |
+   *       6 + 7
+   *       return
+   *        |
+   *       exit
+   */
+
+  const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::CONST_4 | 8 << 12 | 1 << 8,
+    Instruction::IF_EQ | 1 << 8, 7,
+    Instruction::CONST_4 | 4 << 12 | 0 << 8,
+    Instruction::CONST_4 | 5 << 12 | 1 << 8,
+    Instruction::ADD_INT, 1 << 8 | 0,
+    Instruction::GOTO | 0xFA00,
+    Instruction::CONST_4 | 6 << 12 | 1 << 8,
+    Instruction::CONST_4 | 7 << 12 | 1 << 8,
+    Instruction::ADD_INT, 1 << 8 | 0,
+    Instruction::RETURN | 1 << 8);
+
+  ASSERT_TRUE(Check(data));
+}
+
+static HGraph* BuildSSAGraph(const uint16_t* data, ArenaAllocator* allocator) {
+  HGraphBuilder builder(allocator);
+  const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
+  HGraph* graph = builder.BuildGraph(*item);
+  graph->BuildDominatorTree();
+  graph->TransformToSSA();
+  graph->FindNaturalLoops();
+  return graph;
+}
+
+TEST(RegisterAllocatorTest, Loop3) {
+  /*
+   * Test the following snippet:
+   *  int a = 0;
+   *  do {
+   *    b = a;
+   *    a++;
+   *  } while (a != 5);
+   *  return b;
+   *
+   * Which becomes the following graph:
+   *       constant0
+   *       constant1
+   *       constant5
+   *       goto
+   *        |
+   *       goto
+   *        |++++++++++++
+   *       phi          +
+   *       a++          +
+   *       equals       +
+   *       if           +
+   *        |++++++++++++
+   *       return
+   *        |
+   *       exit
+   */
+
+  const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::ADD_INT_LIT8 | 1 << 8, 1 << 8,
+    Instruction::CONST_4 | 5 << 12 | 2 << 8,
+    Instruction::IF_NE | 1 << 8 | 2 << 12, 3,
+    Instruction::RETURN | 0 << 8,
+    Instruction::MOVE | 1 << 12 | 0 << 8,
+    Instruction::GOTO | 0xF900);
+
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+  HGraph* graph = BuildSSAGraph(data, &allocator);
+  SsaLivenessAnalysis liveness(*graph);
+  liveness.Analyze();
+  CodeGenerator* codegen = CodeGenerator::Create(&allocator, graph, kX86);
+  RegisterAllocator register_allocator(&allocator, *codegen);
+  register_allocator.AllocateRegisters(liveness);
+  ASSERT_TRUE(register_allocator.Validate(liveness, false));
+
+  HBasicBlock* loop_header = graph->GetBlocks().Get(2);
+  HPhi* phi = loop_header->GetFirstPhi()->AsPhi();
+
+  LiveInterval* phi_interval = phi->GetLiveInterval();
+  LiveInterval* loop_update = phi->InputAt(1)->GetLiveInterval();
+  ASSERT_TRUE(phi_interval->HasRegister());
+  ASSERT_TRUE(loop_update->HasRegister());
+  ASSERT_NE(phi_interval->GetRegister(), loop_update->GetRegister());
+
+  HBasicBlock* return_block = graph->GetBlocks().Get(3);
+  HReturn* ret = return_block->GetFirstInstruction()->AsReturn();
+  ASSERT_EQ(phi_interval->GetRegister(), ret->InputAt(0)->GetLiveInterval()->GetRegister());
+}
+
+}  // namespace art
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 50e3254..33084df 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -19,6 +19,18 @@
 
 namespace art {
 
+static Primitive::Type MergeTypes(Primitive::Type existing, Primitive::Type new_type) {
+  // We trust the verifier has already done the necessary checking.
+  switch (existing) {
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+    case Primitive::kPrimNot:
+      return existing;
+    default:
+      return new_type;
+  }
+}
+
 void SsaBuilder::BuildSsa() {
   // 1) Visit in reverse post order. We need to have all predecessors of a block visited
   // (with the exception of loops) in order to create the right environment for that
@@ -32,11 +44,16 @@
     HBasicBlock* block = loop_headers_.Get(i);
     for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
       HPhi* phi = it.Current()->AsPhi();
+      Primitive::Type type = Primitive::kPrimVoid;
       for (size_t pred = 0; pred < block->GetPredecessors().Size(); pred++) {
-        phi->AddInput(ValueOfLocal(block->GetPredecessors().Get(pred), phi->GetRegNumber()));
+        HInstruction* input = ValueOfLocal(block->GetPredecessors().Get(pred), phi->GetRegNumber());
+        phi->AddInput(input);
+        type = MergeTypes(type, input->GetType());
       }
+      phi->SetType(type);
     }
   }
+  // TODO: Now that the type of loop phis is set, we need a type propagation phase.
 
   // 3) Clear locals.
   // TODO: Move this to a dead code eliminator phase.
@@ -65,7 +82,6 @@
     for (size_t local = 0; local < current_locals_->Size(); local++) {
       HInstruction* incoming = ValueOfLocal(block->GetLoopInformation()->GetPreHeader(), local);
       if (incoming != nullptr) {
-        // TODO: Compute union type.
         HPhi* phi = new (GetGraph()->GetArena()) HPhi(
             GetGraph()->GetArena(), local, 0, Primitive::kPrimVoid);
         block->AddPhi(phi);
@@ -88,12 +104,18 @@
         }
       }
       if (is_different) {
-        // TODO: Compute union type.
         HPhi* phi = new (GetGraph()->GetArena()) HPhi(
             GetGraph()->GetArena(), local, block->GetPredecessors().Size(), Primitive::kPrimVoid);
+        Primitive::Type type = Primitive::kPrimVoid;
         for (size_t i = 0; i < block->GetPredecessors().Size(); i++) {
-          phi->SetRawInputAt(i, ValueOfLocal(block->GetPredecessors().Get(i), local));
+          HInstruction* value = ValueOfLocal(block->GetPredecessors().Get(i), local);
+          // We need to merge the incoming types, as the Dex format does not
+          // guarantee the inputs have the same type. In particular the 0 constant is
+          // used for all types, but the graph builder treats it as an int.
+          type = MergeTypes(type, value->GetType());
+          phi->SetRawInputAt(i, value);
         }
+        phi->SetType(type);
         block->AddPhi(phi);
         value = phi;
       }
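
For illustration, the merge favours the more precise type: a reference or floating-point input wins over the int-typed 0 constant. A few hypothetical checks (not part of this change) spell out the rules of the MergeTypes helper above:

    DCHECK_EQ(Primitive::kPrimInt, MergeTypes(Primitive::kPrimVoid, Primitive::kPrimInt));    // No type recorded yet.
    DCHECK_EQ(Primitive::kPrimNot, MergeTypes(Primitive::kPrimNot, Primitive::kPrimInt));     // 0 constant used as null.
    DCHECK_EQ(Primitive::kPrimFloat, MergeTypes(Primitive::kPrimFloat, Primitive::kPrimInt)); // 0 constant used as 0.0f.
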
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 938c5ec..dc4b2e5 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -122,20 +122,27 @@
 void SsaLivenessAnalysis::NumberInstructions() {
   int ssa_index = 0;
   size_t lifetime_position = 0;
-  // Each instruction gets an individual lifetime position, and a block gets a lifetime
+  // Each instruction gets a lifetime position, and a block gets a lifetime
   // start and end position. Non-phi instructions have a distinct lifetime position than
   // the block they are in. Phi instructions have the lifetime start of their block as
-  // lifetime position
+  // lifetime position.
+  //
+  // Because the register allocator will insert moves in the graph, we need
+  // to differentiate between the start and end of an instruction. Adding 2 to
+  // the lifetime position for each instruction ensures the start of an
+  // instruction is different from the end of the previous instruction.
   for (HLinearOrderIterator it(linear_post_order_); !it.Done(); it.Advance()) {
     HBasicBlock* block = it.Current();
-    block->SetLifetimeStart(++lifetime_position);
+    block->SetLifetimeStart(lifetime_position);
+    lifetime_position += 2;
 
     for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
       HInstruction* current = it.Current();
       if (current->HasUses()) {
         instructions_from_ssa_index_.Add(current);
         current->SetSsaIndex(ssa_index++);
-        current->SetLiveInterval(new (graph_.GetArena()) LiveInterval(graph_.GetArena()));
+        current->SetLiveInterval(
+            new (graph_.GetArena()) LiveInterval(graph_.GetArena(), current->GetType()));
       }
       current->SetLifetimePosition(lifetime_position);
     }
@@ -145,12 +152,14 @@
       if (current->HasUses()) {
         instructions_from_ssa_index_.Add(current);
         current->SetSsaIndex(ssa_index++);
-        current->SetLiveInterval(new (graph_.GetArena()) LiveInterval(graph_.GetArena()));
+        current->SetLiveInterval(
+            new (graph_.GetArena()) LiveInterval(graph_.GetArena(), current->GetType()));
       }
-      current->SetLifetimePosition(++lifetime_position);
+      current->SetLifetimePosition(lifetime_position);
+      lifetime_position += 2;
     }
 
-    block->SetLifetimeEnd(++lifetime_position);
+    block->SetLifetimeEnd(lifetime_position);
   }
   number_of_ssa_values_ = ssa_index;
 }
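
To make the new numbering concrete: for a block containing two non-phi instructions, the loop above assigns the following positions (illustrative values, starting the walk at 0):

    block lifetime start:  0
    first instruction:     2
    second instruction:    4
    block lifetime end:    6

The odd positions in between are never assigned to an instruction, which is what leaves room to tell the end of one instruction apart from the start of the next once the register allocator inserts moves.
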
@@ -190,7 +199,11 @@
       live_in->Union(GetLiveInSet(*successor));
       size_t phi_input_index = successor->GetPredecessorIndexOf(block);
       for (HInstructionIterator it(successor->GetPhis()); !it.Done(); it.Advance()) {
-        HInstruction* input = it.Current()->InputAt(phi_input_index);
+        HInstruction* phi = it.Current();
+        HInstruction* input = phi->InputAt(phi_input_index);
+        input->GetLiveInterval()->AddPhiUse(phi, block);
+        // A phi input whose last user is the phi dies at the end of the predecessor block,
+        // and not at the phi's lifetime position.
         live_in->SetBit(input->GetSsaIndex());
       }
     }
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 2d91436..4d56e1f 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -48,21 +48,68 @@
  * A live range contains the start and end of a range where an instruction
  * is live.
  */
-class LiveRange : public ValueObject {
+class LiveRange : public ArenaObject {
  public:
-  LiveRange(size_t start, size_t end) : start_(start), end_(end) {
+  LiveRange(size_t start, size_t end, LiveRange* next) : start_(start), end_(end), next_(next) {
     DCHECK_LT(start, end);
+    DCHECK(next_ == nullptr || next_->GetStart() > GetEnd());
   }
 
   size_t GetStart() const { return start_; }
   size_t GetEnd() const { return end_; }
+  LiveRange* GetNext() const { return next_; }
+
+  bool IntersectsWith(const LiveRange& other) {
+    return (start_ >= other.start_ && start_ < other.end_)
+        || (other.start_ >= start_ && other.start_ < end_);
+  }
+
+  bool IsBefore(const LiveRange& other) {
+    return end_ <= other.start_;
+  }
+
+  void Dump(std::ostream& stream) {
+    stream << "[" << start_ << ", " << end_ << ")";
+  }
 
  private:
   size_t start_;
   size_t end_;
+  LiveRange* next_;
+
+  friend class LiveInterval;
+
+  DISALLOW_COPY_AND_ASSIGN(LiveRange);
 };
 
-static constexpr int kDefaultNumberOfRanges = 3;
+/**
+ * A use position represents a live interval use at a given position.
+ */
+class UsePosition : public ArenaObject {
+ public:
+  UsePosition(HInstruction* user, size_t position, UsePosition* next)
+      : user_(user), position_(position), next_(next) {
+    DCHECK(user->AsPhi() != nullptr || GetPosition() == user->GetLifetimePosition());
+    DCHECK(next_ == nullptr || next->GetPosition() >= GetPosition());
+  }
+
+  size_t GetPosition() const { return position_; }
+
+  UsePosition* GetNext() const { return next_; }
+
+  HInstruction* GetUser() const { return user_; }
+
+  void Dump(std::ostream& stream) {
+    stream << position_;
+  }
+
+ private:
+  HInstruction* const user_;
+  const size_t position_;
+  UsePosition* const next_;
+
+  DISALLOW_COPY_AND_ASSIGN(UsePosition);
+};
 
 /**
  * An interval is a list of disjoint live ranges where an instruction is live.
@@ -70,67 +117,276 @@
  */
 class LiveInterval : public ArenaObject {
  public:
-  explicit LiveInterval(ArenaAllocator* allocator) : ranges_(allocator, kDefaultNumberOfRanges) {}
+  LiveInterval(ArenaAllocator* allocator, Primitive::Type type)
+      : allocator_(allocator),
+        first_range_(nullptr),
+        last_range_(nullptr),
+        first_use_(nullptr),
+        type_(type),
+        next_sibling_(nullptr),
+        register_(kNoRegister) {}
 
   void AddUse(HInstruction* instruction) {
     size_t position = instruction->GetLifetimePosition();
     size_t start_block_position = instruction->GetBlock()->GetLifetimeStart();
     size_t end_block_position = instruction->GetBlock()->GetLifetimeEnd();
-    if (ranges_.IsEmpty()) {
+    if (first_range_ == nullptr) {
       // First time we see a use of that interval.
-      ranges_.Add(LiveRange(start_block_position, position));
-    } else if (ranges_.Peek().GetStart() == start_block_position) {
+      first_range_ = last_range_ = new (allocator_) LiveRange(start_block_position, position, nullptr);
+    } else if (first_range_->GetStart() == start_block_position) {
       // There is a use later in the same block.
-      DCHECK_LE(position, ranges_.Peek().GetEnd());
-    } else if (ranges_.Peek().GetStart() == end_block_position + 1) {
-      // Last use is in a following block.
-      LiveRange existing = ranges_.Pop();
-      ranges_.Add(LiveRange(start_block_position, existing.GetEnd()));
+      DCHECK_LE(position, first_range_->GetEnd());
+    } else if (first_range_->GetStart() == end_block_position) {
+      // Last use is in the following block.
+      first_range_->start_ = start_block_position;
     } else {
       // There is a hole in the interval. Create a new range.
-      ranges_.Add(LiveRange(start_block_position, position));
+      first_range_ = new (allocator_) LiveRange(start_block_position, position, first_range_);
     }
+    first_use_ = new (allocator_) UsePosition(instruction, position, first_use_);
+  }
+
+  void AddPhiUse(HInstruction* instruction, HBasicBlock* block) {
+    DCHECK(instruction->AsPhi() != nullptr);
+    first_use_ = new (allocator_) UsePosition(instruction, block->GetLifetimeEnd(), first_use_);
   }
 
   void AddRange(size_t start, size_t end) {
-    if (ranges_.IsEmpty()) {
-      ranges_.Add(LiveRange(start, end));
-    } else if (ranges_.Peek().GetStart() == end + 1) {
+    if (first_range_ == nullptr) {
+      first_range_ = last_range_ = new (allocator_) LiveRange(start, end, first_range_);
+    } else if (first_range_->GetStart() == end) {
       // There is a use in the following block.
-      LiveRange existing = ranges_.Pop();
-      ranges_.Add(LiveRange(start, existing.GetEnd()));
+      first_range_->start_ = start;
     } else {
       // There is a hole in the interval. Create a new range.
-      ranges_.Add(LiveRange(start, end));
+      first_range_ = new (allocator_) LiveRange(start, end, first_range_);
     }
   }
 
   void AddLoopRange(size_t start, size_t end) {
-    DCHECK(!ranges_.IsEmpty());
-    while (!ranges_.IsEmpty() && ranges_.Peek().GetEnd() < end) {
-      DCHECK_LE(start, ranges_.Peek().GetStart());
-      ranges_.Pop();
+    DCHECK(first_range_ != nullptr);
+    while (first_range_ != nullptr && first_range_->GetEnd() < end) {
+      DCHECK_LE(start, first_range_->GetStart());
+      first_range_ = first_range_->GetNext();
     }
-    if (ranges_.IsEmpty()) {
+    if (first_range_ == nullptr) {
       // Uses are only in the loop.
-      ranges_.Add(LiveRange(start, end));
+      first_range_ = last_range_ = new (allocator_) LiveRange(start, end, nullptr);
     } else {
       // There are uses after the loop.
-      LiveRange range = ranges_.Pop();
-      ranges_.Add(LiveRange(start, range.GetEnd()));
+      first_range_->start_ = start;
     }
   }
 
   void SetFrom(size_t from) {
-    DCHECK(!ranges_.IsEmpty());
-    LiveRange existing = ranges_.Pop();
-    ranges_.Add(LiveRange(from, existing.GetEnd()));
+    DCHECK(first_range_ != nullptr);
+    first_range_->start_ = from;
   }
 
-  const GrowableArray<LiveRange>& GetRanges() const { return ranges_; }
+  LiveRange* GetFirstRange() const { return first_range_; }
+
+  int GetRegister() const { return register_; }
+  void SetRegister(int reg) { register_ = reg; }
+  void ClearRegister() { register_ = kNoRegister; }
+  bool HasRegister() const { return register_ != kNoRegister; }
+
+  bool IsDeadAt(size_t position) {
+    return last_range_->GetEnd() <= position;
+  }
+
+  bool Covers(size_t position) {
+    LiveRange* current = first_range_;
+    while (current != nullptr) {
+      if (position >= current->GetStart() && position < current->GetEnd()) {
+        return true;
+      }
+      current = current->GetNext();
+    }
+    return false;
+  }
+
+  /**
+   * Returns the first intersection of this interval with `other`.
+   */
+  size_t FirstIntersectionWith(LiveInterval* other) {
+    // We only call this method if there is a lifetime hole in this interval
+    // at the start of `other`.
+    DCHECK(!Covers(other->GetStart()));
+    DCHECK_LE(GetStart(), other->GetStart());
+    // Move to the first range in this interval that starts at or after the start of `other`.
+    size_t other_start = other->GetStart();
+    LiveRange* my_range = first_range_;
+    while (my_range != nullptr) {
+      if (my_range->GetStart() >= other_start) {
+        break;
+      } else {
+        my_range = my_range->GetNext();
+      }
+    }
+    if (my_range == nullptr) {
+      return kNoLifetime;
+    }
+
+    // Walk the ranges of both intervals in lockstep and return the first
+    // position at which they intersect.
+    LiveRange* other_range = other->first_range_;
+    do {
+      if (my_range->IntersectsWith(*other_range)) {
+        return std::max(my_range->GetStart(), other_range->GetStart());
+      } else if (my_range->IsBefore(*other_range)) {
+        my_range = my_range->GetNext();
+        if (my_range == nullptr) {
+          return kNoLifetime;
+        }
+      } else {
+        DCHECK(other_range->IsBefore(*my_range));
+        other_range = other_range->GetNext();
+        if (other_range == nullptr) {
+          return kNoLifetime;
+        }
+      }
+    } while (true);
+  }
+
+  size_t GetStart() const {
+    return first_range_->GetStart();
+  }
+
+  size_t FirstRegisterUseAfter(size_t position) const {
+    UsePosition* use = first_use_;
+    while (use != nullptr) {
+      size_t use_position = use->GetPosition();
+      // TODO: Once we plug the code generator's Locations builder into the
+      // register allocator, this method must be adjusted. We test whether the
+      // user needs an environment, because such instructions are currently the
+      // only ones that could have more uses than the number of registers.
+      if (use_position >= position && !use->GetUser()->NeedsEnvironment()) {
+        return use_position;
+      }
+      use = use->GetNext();
+    }
+    return kNoLifetime;
+  }
+
+  size_t FirstRegisterUse() const {
+    return FirstRegisterUseAfter(GetStart());
+  }
+
+  Primitive::Type GetType() const {
+    return type_;
+  }
+
+  /**
+   * Split this interval at `position`. This interval is changed to:
+   * [start ... position).
+   *
+   * The new interval covers:
+   * [position ... end)
+   */
+  LiveInterval* SplitAt(size_t position) {
+    DCHECK(next_sibling_ == nullptr);
+    DCHECK_GT(position, GetStart());
+
+    if (last_range_->GetEnd() <= position) {
+      // This range dies before `position`, no need to split.
+      return nullptr;
+    }
+
+    LiveInterval* new_interval = new (allocator_) LiveInterval(allocator_, type_);
+    next_sibling_ = new_interval;
+
+    new_interval->first_use_ = first_use_;
+    LiveRange* current = first_range_;
+    LiveRange* previous = nullptr;
+    // Iterate over the ranges, and either find a range that covers this position, or
+    // two consecutive ranges with this position in between them (that is, the position
+    // is in a lifetime hole).
+    do {
+      if (position >= current->GetEnd()) {
+        // Move to next range.
+        previous = current;
+        current = current->next_;
+      } else if (position <= current->GetStart()) {
+        // If the previous range did not cover this position, we know position is in
+        // a lifetime hole. We can just break the first_range_ and last_range_ links
+        // and return the new interval.
+        DCHECK(previous != nullptr);
+        DCHECK(current != first_range_);
+        new_interval->last_range_ = last_range_;
+        last_range_ = previous;
+        previous->next_ = nullptr;
+        new_interval->first_range_ = current;
+        return new_interval;
+      } else {
+        // This range covers position. We create a new last_range_ for this interval,
+        // covering [current->GetStart(), position). We also shorten the current range
+        // to start at `position` and make it the first range of the new interval.
+        DCHECK(position < current->GetEnd() && position > current->GetStart());
+        new_interval->last_range_ = last_range_;
+        last_range_ = new (allocator_) LiveRange(current->start_, position, nullptr);
+        if (previous != nullptr) {
+          previous->next_ = last_range_;
+        } else {
+          first_range_ = last_range_;
+        }
+        new_interval->first_range_ = current;
+        current->start_ = position;
+        return new_interval;
+      }
+    } while (current != nullptr);
+
+    LOG(FATAL) << "Unreachable";
+    return nullptr;
+  }
+
+  bool StartsBefore(LiveInterval* other) const {
+    return GetStart() <= other->GetStart();
+  }
+
+  bool StartsAfter(LiveInterval* other) const {
+    return GetStart() >= other->GetStart();
+  }
+
+  void Dump(std::ostream& stream) const {
+    stream << "ranges: { ";
+    LiveRange* current = first_range_;
+    do {
+      current->Dump(stream);
+      stream << " ";
+    } while ((current = current->GetNext()) != nullptr);
+    stream << "}, uses: { ";
+    UsePosition* use = first_use_;
+    if (use != nullptr) {
+      do {
+        use->Dump(stream);
+        stream << " ";
+      } while ((use = use->GetNext()) != nullptr);
+    }
+    stream << "}";
+  }
+
+  LiveInterval* GetNextSibling() const { return next_sibling_; }
 
  private:
-  GrowableArray<LiveRange> ranges_;
+  ArenaAllocator* const allocator_;
+
+  // Ranges of this interval. We need quick access to the last range to test
+  // for liveness (see `IsDeadAt`).
+  LiveRange* first_range_;
+  LiveRange* last_range_;
+
+  // Uses of this interval. Note that this linked list is shared amongst siblings.
+  UsePosition* first_use_;
+
+  // The instruction type this interval corresponds to.
+  const Primitive::Type type_;
+
+  // Live interval that is the result of a split.
+  LiveInterval* next_sibling_;
+
+  // The register allocated to this interval.
+  int register_;
+
+  static constexpr int kNoRegister = -1;
 
   DISALLOW_COPY_AND_ASSIGN(LiveInterval);
 };
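
As a hedged usage sketch (not code from this change; the helper name TryReuseRegisterOf is hypothetical), this is how an allocation pass might combine Covers, FirstIntersectionWith, IsDeadAt and SetRegister when deciding whether the interval being allocated can share a register with one that already owns it:

    // Illustrative only. `other` already holds a register and starts no later
    // than `current`, the interval currently being allocated.
    static bool TryReuseRegisterOf(LiveInterval* other, LiveInterval* current) {
      size_t first_conflict = other->Covers(current->GetStart())
          ? 0  // `other` is live right at the start of `current`: immediate conflict.
          : other->FirstIntersectionWith(current);  // `other` has a hole there.
      if (first_conflict != kNoLifetime && !current->IsDeadAt(first_conflict)) {
        return false;  // The intervals overlap while `current` still needs the register.
      }
      current->SetRegister(other->GetRegister());
      return true;
    }
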
@@ -164,10 +420,14 @@
     return linear_post_order_;
   }
 
-  HInstruction* GetInstructionFromSsaIndex(size_t index) {
+  HInstruction* GetInstructionFromSsaIndex(size_t index) const {
     return instructions_from_ssa_index_.Get(index);
   }
 
+  size_t GetNumberOfSsaValues() const {
+    return number_of_ssa_values_;
+  }
+
  private:
   // Linearize the graph so that:
   // (1): a block is always after its dominator,
diff --git a/compiler/utils/arena_allocator.h b/compiler/utils/arena_allocator.h
index 032eabc..dbe482d 100644
--- a/compiler/utils/arena_allocator.h
+++ b/compiler/utils/arena_allocator.h
@@ -170,6 +170,10 @@
     return ret;
   }
 
+  template <typename T> T* AllocArray(size_t length) {
+    return static_cast<T*>(Alloc(length * sizeof(T), kArenaAllocMisc));
+  }
+
   void* AllocValgrind(size_t bytes, ArenaAllocKind kind);
   void ObtainNewArenaForAllocation(size_t allocation_size);
   size_t BytesAllocated() const;
diff --git a/compiler/utils/growable_array.h b/compiler/utils/growable_array.h
index e703d8e..a1a3312 100644
--- a/compiler/utils/growable_array.h
+++ b/compiler/utils/growable_array.h
@@ -169,6 +169,13 @@
       num_used_--;
     };
 
+    void DeleteAt(size_t index) {
+      for (size_t i = index; i < num_used_ - 1; i++) {
+        elem_list_[i] = elem_list_[i + 1];
+      }
+      num_used_--;
+    };
+
     size_t GetNumAllocated() const { return num_allocated_; }
 
     size_t Size() const { return num_used_; }
diff --git a/compiler/utils/x86/managed_register_x86.cc b/compiler/utils/x86/managed_register_x86.cc
index 034a795..021fe88 100644
--- a/compiler/utils/x86/managed_register_x86.cc
+++ b/compiler/utils/x86/managed_register_x86.cc
@@ -95,11 +95,11 @@
   if (!IsValidManagedRegister()) {
     os << "No Register";
   } else if (IsXmmRegister()) {
-    os << "XMM: " << static_cast<int>(AsXmmRegister());
+    os << "XMM: " << AsXmmRegister();
   } else if (IsX87Register()) {
-    os << "X87: " << static_cast<int>(AsX87Register());
+    os << "X87: " << AsX87Register();
   } else if (IsCpuRegister()) {
-    os << "CPU: " << static_cast<int>(AsCpuRegister());
+    os << "CPU: " << AsCpuRegister();
   } else if (IsRegisterPair()) {
     os << "Pair: " << AsRegisterPairLow() << ", " << AsRegisterPairHigh();
   } else {