Update V8 to version 4.1.0.21

This is a cherry-pick of all commits up to and including the
4.1.0.21 cherry-pick in Chromium.

Original commit message:

Version 4.1.0.21 (cherry-pick)

Merged 206e9136bde0f2b5ae8cb77afbb1e7833e5bd412

Unlink pages from the space page list after evacuation.

BUG=430201
LOG=N
R=jkummerow@chromium.org

Review URL: https://codereview.chromium.org/953813002

Cr-Commit-Position: refs/branch-heads/4.1@{#22}
Cr-Branched-From: 2e08d2a7aa9d65d269d8c57aba82eb38a8cb0a18-refs/heads/candidates@{#25353}

---

FPIIM-449

Change-Id: I8c23c7bbb70772b4858fe8a47b64fa97ee0d1f8c
diff --git a/src/compiler/access-builder.cc b/src/compiler/access-builder.cc
index ac9cfa8..8c8e530 100644
--- a/src/compiler/access-builder.cc
+++ b/src/compiler/access-builder.cc
@@ -11,43 +11,73 @@
 
 // static
 FieldAccess AccessBuilder::ForMap() {
-  return {kTaggedBase, HeapObject::kMapOffset, Handle<Name>(), Type::Any(),
+  return {kTaggedBase, HeapObject::kMapOffset, MaybeHandle<Name>(), Type::Any(),
           kMachAnyTagged};
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSObjectProperties() {
-  return {kTaggedBase, JSObject::kPropertiesOffset, Handle<Name>(), Type::Any(),
-          kMachAnyTagged};
+  return {kTaggedBase, JSObject::kPropertiesOffset, MaybeHandle<Name>(),
+          Type::Any(), kMachAnyTagged};
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSObjectElements() {
-  return {kTaggedBase, JSObject::kElementsOffset, Handle<Name>(),
+  return {kTaggedBase, JSObject::kElementsOffset, MaybeHandle<Name>(),
           Type::Internal(), kMachAnyTagged};
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSFunctionContext() {
-  return {kTaggedBase, JSFunction::kContextOffset, Handle<Name>(),
+  return {kTaggedBase, JSFunction::kContextOffset, MaybeHandle<Name>(),
           Type::Internal(), kMachAnyTagged};
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
-  return {kTaggedBase, JSArrayBuffer::kBackingStoreOffset, Handle<Name>(),
-          Type::UntaggedPtr(), kMachPtr};
+  return {kTaggedBase, JSArrayBuffer::kBackingStoreOffset, MaybeHandle<Name>(),
+          Type::UntaggedPointer(), kMachPtr};
 }
 
 
 // static
 FieldAccess AccessBuilder::ForExternalArrayPointer() {
-  return {kTaggedBase, ExternalArray::kExternalPointerOffset, Handle<Name>(),
-          Type::UntaggedPtr(), kMachPtr};
+  return {kTaggedBase, ExternalArray::kExternalPointerOffset,
+          MaybeHandle<Name>(), Type::UntaggedPointer(), kMachPtr};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForMapInstanceType() {
+  return {kTaggedBase, Map::kInstanceTypeOffset, Handle<Name>(),
+          Type::UntaggedUnsigned8(), kMachUint8};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForStringLength() {
+  return {kTaggedBase, String::kLengthOffset, Handle<Name>(),
+          Type::SignedSmall(), kMachAnyTagged};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForValue() {
+  return {kTaggedBase, JSValue::kValueOffset, Handle<Name>(), Type::Any(),
+          kMachAnyTagged};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForContextSlot(size_t index) {
+  int offset = Context::kHeaderSize + static_cast<int>(index) * kPointerSize;
+  DCHECK_EQ(offset,
+            Context::SlotOffset(static_cast<int>(index)) + kHeapObjectTag);
+  return {kTaggedBase, offset, Handle<Name>(), Type::Any(), kMachAnyTagged};
 }
 
 
@@ -58,13 +88,6 @@
 
 
 // static
-ElementAccess AccessBuilder::ForBackingStoreElement(MachineType rep) {
-  return {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
-          rep};
-}
-
-
-// static
 ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
                                                   bool is_external) {
   BaseTaggedness taggedness = is_external ? kUntaggedBase : kTaggedBase;
@@ -84,9 +107,9 @@
     case kExternalUint32Array:
       return {taggedness, header_size, Type::Unsigned32(), kMachUint32};
     case kExternalFloat32Array:
-      return {taggedness, header_size, Type::Number(), kRepFloat32};
+      return {taggedness, header_size, Type::Number(), kMachFloat32};
     case kExternalFloat64Array:
-      return {taggedness, header_size, Type::Number(), kRepFloat64};
+      return {taggedness, header_size, Type::Number(), kMachFloat64};
   }
   UNREACHABLE();
   return {kUntaggedBase, 0, Type::None(), kMachNone};
diff --git a/src/compiler/access-builder.h b/src/compiler/access-builder.h
index 7d0bda1..d6385e4 100644
--- a/src/compiler/access-builder.h
+++ b/src/compiler/access-builder.h
@@ -34,12 +34,21 @@
   // Provides access to ExternalArray::external_pointer() field.
   static FieldAccess ForExternalArrayPointer();
 
+  // Provides access to Map::instance_type() field.
+  static FieldAccess ForMapInstanceType();
+
+  // Provides access to String::length() field.
+  static FieldAccess ForStringLength();
+
+  // Provides access to JSValue::value() field.
+  static FieldAccess ForValue();
+
+  // Provides access to Context slots.
+  static FieldAccess ForContextSlot(size_t index);
+
   // Provides access to FixedArray elements.
   static ElementAccess ForFixedArrayElement();
 
-  // TODO(mstarzinger): Raw access only for testing, drop me.
-  static ElementAccess ForBackingStoreElement(MachineType rep);
-
   // Provides access to Fixed{type}TypedArray and External{type}Array elements.
   static ElementAccess ForTypedArrayElement(ExternalArrayType type,
                                             bool is_external);
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
index 1ec174d..cfa4de9 100644
--- a/src/compiler/arm/code-generator-arm.cc
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -22,11 +22,35 @@
 
 
 // Adds Arm-specific methods to convert InstructionOperands.
-class ArmOperandConverter : public InstructionOperandConverter {
+class ArmOperandConverter FINAL : public InstructionOperandConverter {
  public:
   ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
       : InstructionOperandConverter(gen, instr) {}
 
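+  // Float32 operands live in SwVfpRegisters, which alias the low half of the
+  // corresponding LowDwVfpRegister; the converters below rely on that
+  // aliasing rather than tracking a separate float32 register set.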
+  SwVfpRegister OutputFloat32Register(int index = 0) {
+    return ToFloat32Register(instr_->OutputAt(index));
+  }
+
+  SwVfpRegister InputFloat32Register(int index) {
+    return ToFloat32Register(instr_->InputAt(index));
+  }
+
+  SwVfpRegister ToFloat32Register(InstructionOperand* op) {
+    return ToFloat64Register(op).low();
+  }
+
+  LowDwVfpRegister OutputFloat64Register(int index = 0) {
+    return ToFloat64Register(instr_->OutputAt(index));
+  }
+
+  LowDwVfpRegister InputFloat64Register(int index) {
+    return ToFloat64Register(instr_->InputAt(index));
+  }
+
+  LowDwVfpRegister ToFloat64Register(InstructionOperand* op) {
+    return LowDwVfpRegister::from_code(ToDoubleRegister(op).code());
+  }
+
   SBit OutputSBit() const {
     switch (instr_->flags_mode()) {
       case kFlags_branch:
@@ -44,12 +68,16 @@
     switch (constant.type()) {
       case Constant::kInt32:
         return Operand(constant.ToInt32());
+      case Constant::kFloat32:
+        return Operand(
+            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
       case Constant::kFloat64:
         return Operand(
             isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
       case Constant::kInt64:
       case Constant::kExternalReference:
       case Constant::kHeapObject:
+      case Constant::kRpoNumber:
         break;
     }
     UNREACHABLE();
@@ -114,9 +142,8 @@
     return MemOperand(r0);
   }
 
-  MemOperand InputOffset() {
-    int index = 0;
-    return InputOffset(&index);
+  MemOperand InputOffset(int first_index = 0) {
+    return InputOffset(&first_index);
   }
 
   MemOperand ToMemOperand(InstructionOperand* op) const {
@@ -131,6 +158,112 @@
 };
 
 
+namespace {
+
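+// Out-of-line stubs reached when a checked load's offset is out of bounds;
+// each one materializes the default result (a quiet NaN for floats, zero for
+// integers) before control returns via the exit label.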
+class OutOfLineLoadFloat32 FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadFloat32(CodeGenerator* gen, SwVfpRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL {
+    __ vmov(result_, std::numeric_limits<float>::quiet_NaN());
+  }
+
+ private:
+  SwVfpRegister const result_;
+};
+
+
+class OutOfLineLoadFloat64 FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadFloat64(CodeGenerator* gen, DwVfpRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL {
+    __ vmov(result_, std::numeric_limits<double>::quiet_NaN(), kScratchReg);
+  }
+
+ private:
+  DwVfpRegister const result_;
+};
+
+
+class OutOfLineLoadInteger FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL { __ mov(result_, Operand::Zero()); }
+
+ private:
+  Register const result_;
+};
+
+}  // namespace
+
+
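+// Checked loads compare the offset (input 0) against the length (input 1) and
+// branch to the out-of-line stub on "hs" (unsigned >=, i.e. out of bounds);
+// otherwise the value is loaded from the memory operand starting at input 2.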
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(width)                           \
+  do {                                                               \
+    auto result = i.OutputFloat##width##Register();                  \
+    auto offset = i.InputRegister(0);                                \
+    if (instr->InputAt(1)->IsRegister()) {                           \
+      __ cmp(offset, i.InputRegister(1));                            \
+    } else {                                                         \
+      __ cmp(offset, i.InputImmediate(1));                           \
+    }                                                                \
+    auto ool = new (zone()) OutOfLineLoadFloat##width(this, result); \
+    __ b(hs, ool->entry());                                          \
+    __ vldr(result, i.InputOffset(2));                               \
+    __ bind(ool->exit());                                            \
+    DCHECK_EQ(LeaveCC, i.OutputSBit());                              \
+  } while (0)
+
+
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                \
+  do {                                                          \
+    auto result = i.OutputRegister();                           \
+    auto offset = i.InputRegister(0);                           \
+    if (instr->InputAt(1)->IsRegister()) {                      \
+      __ cmp(offset, i.InputRegister(1));                       \
+    } else {                                                    \
+      __ cmp(offset, i.InputImmediate(1));                      \
+    }                                                           \
+    auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
+    __ b(hs, ool->entry());                                     \
+    __ asm_instr(result, i.InputOffset(2));                     \
+    __ bind(ool->exit());                                       \
+    DCHECK_EQ(LeaveCC, i.OutputSBit());                         \
+  } while (0)
+
+
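+// Checked stores perform the same bounds check but need no out-of-line code:
+// the store itself is predicated on "lo" (unsigned <), so an out-of-bounds
+// store is simply skipped.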
+#define ASSEMBLE_CHECKED_STORE_FLOAT(width)        \
+  do {                                             \
+    auto offset = i.InputRegister(0);              \
+    if (instr->InputAt(1)->IsRegister()) {         \
+      __ cmp(offset, i.InputRegister(1));          \
+    } else {                                       \
+      __ cmp(offset, i.InputImmediate(1));         \
+    }                                              \
+    auto value = i.InputFloat##width##Register(2); \
+    __ vstr(value, i.InputOffset(3), lo);          \
+    DCHECK_EQ(LeaveCC, i.OutputSBit());            \
+  } while (0)
+
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
+  do {                                            \
+    auto offset = i.InputRegister(0);             \
+    if (instr->InputAt(1)->IsRegister()) {        \
+      __ cmp(offset, i.InputRegister(1));         \
+    } else {                                      \
+      __ cmp(offset, i.InputImmediate(1));        \
+    }                                             \
+    auto value = i.InputRegister(2);              \
+    __ asm_instr(value, i.InputOffset(3), lo);    \
+    DCHECK_EQ(LeaveCC, i.OutputSBit());           \
+  } while (0)
+
+
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   ArmOperandConverter i(this, instr);
@@ -166,7 +299,7 @@
       break;
     }
     case kArchJmp:
-      __ b(code_->GetLabel(i.InputBlock(0)));
+      AssembleArchJump(i.InputRpo(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArchNop:
@@ -177,8 +310,12 @@
       AssembleReturn();
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
+    case kArchStackPointer:
+      __ mov(i.OutputRegister(), sp);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
     case kArchTruncateDoubleToI:
-      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+      __ TruncateDoubleToI(i.OutputRegister(), i.InputFloat64Register(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmAdd:
@@ -208,6 +345,19 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
+    case kArmSmmul:
+      __ smmul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmSmmla:
+      __ smmla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+               i.InputRegister(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmUmull:
+      __ umull(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+               i.InputRegister(1), i.OutputSBit());
+      break;
     case kArmSdiv: {
       CpuFeatureScope scope(masm(), SUDIV);
       __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
@@ -255,6 +405,42 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
+    case kArmSxtb:
+      __ sxtb(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmSxth:
+      __ sxth(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmSxtab:
+      __ sxtab(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+               i.InputInt32(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmSxtah:
+      __ sxtah(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+               i.InputInt32(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmUxtb:
+      __ uxtb(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmUxth:
+      __ uxth(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmUxtab:
+      __ uxtab(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+               i.InputInt32(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmUxtah:
+      __ uxtah(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+               i.InputInt32(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
     case kArmCmp:
       __ cmp(i.InputRegister(0), i.InputOperand2(1));
       DCHECK_EQ(SetCC, i.OutputSBit());
@@ -272,38 +458,38 @@
       DCHECK_EQ(SetCC, i.OutputSBit());
       break;
     case kArmVcmpF64:
-      __ VFPCompareAndSetFlags(i.InputDoubleRegister(0),
-                               i.InputDoubleRegister(1));
+      __ VFPCompareAndSetFlags(i.InputFloat64Register(0),
+                               i.InputFloat64Register(1));
       DCHECK_EQ(SetCC, i.OutputSBit());
       break;
     case kArmVaddF64:
-      __ vadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-              i.InputDoubleRegister(1));
+      __ vadd(i.OutputFloat64Register(), i.InputFloat64Register(0),
+              i.InputFloat64Register(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVsubF64:
-      __ vsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-              i.InputDoubleRegister(1));
+      __ vsub(i.OutputFloat64Register(), i.InputFloat64Register(0),
+              i.InputFloat64Register(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmulF64:
-      __ vmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-              i.InputDoubleRegister(1));
+      __ vmul(i.OutputFloat64Register(), i.InputFloat64Register(0),
+              i.InputFloat64Register(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmlaF64:
-      __ vmla(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-              i.InputDoubleRegister(2));
+      __ vmla(i.OutputFloat64Register(), i.InputFloat64Register(1),
+              i.InputFloat64Register(2));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmlsF64:
-      __ vmls(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-              i.InputDoubleRegister(2));
+      __ vmls(i.OutputFloat64Register(), i.InputFloat64Register(1),
+              i.InputFloat64Register(2));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVdivF64:
-      __ vdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-              i.InputDoubleRegister(1));
+      __ vdiv(i.OutputFloat64Register(), i.InputFloat64Register(0),
+              i.InputFloat64Register(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmodF64: {
@@ -311,45 +497,67 @@
       // and generate a CallAddress instruction instead.
       FrameScope scope(masm(), StackFrame::MANUAL);
       __ PrepareCallCFunction(0, 2, kScratchReg);
-      __ MovToFloatParameters(i.InputDoubleRegister(0),
-                              i.InputDoubleRegister(1));
+      __ MovToFloatParameters(i.InputFloat64Register(0),
+                              i.InputFloat64Register(1));
       __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                        0, 2);
       // Move the result in the double result register.
-      __ MovFromFloatResult(i.OutputDoubleRegister());
+      __ MovFromFloatResult(i.OutputFloat64Register());
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
-    case kArmVnegF64:
-      __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
-      break;
     case kArmVsqrtF64:
-      __ vsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      __ vsqrt(i.OutputFloat64Register(), i.InputFloat64Register(0));
       break;
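+    // vrintm, vrintp, vrintz and vrinta round toward minus infinity, plus
+    // infinity, zero and to-nearest-ties-away respectively (ARMv8 rounding
+    // instructions).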
+    case kArmVfloorF64:
+      __ vrintm(i.OutputFloat64Register(), i.InputFloat64Register(0));
+      break;
+    case kArmVceilF64:
+      __ vrintp(i.OutputFloat64Register(), i.InputFloat64Register(0));
+      break;
+    case kArmVroundTruncateF64:
+      __ vrintz(i.OutputFloat64Register(), i.InputFloat64Register(0));
+      break;
+    case kArmVroundTiesAwayF64:
+      __ vrinta(i.OutputFloat64Register(), i.InputFloat64Register(0));
+      break;
+    case kArmVnegF64:
+      __ vneg(i.OutputFloat64Register(), i.InputFloat64Register(0));
+      break;
+    case kArmVcvtF32F64: {
+      __ vcvt_f32_f64(i.OutputFloat32Register(), i.InputFloat64Register(0));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVcvtF64F32: {
+      __ vcvt_f64_f32(i.OutputFloat64Register(), i.InputFloat32Register(0));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
     case kArmVcvtF64S32: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
       __ vmov(scratch, i.InputRegister(0));
-      __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
+      __ vcvt_f64_s32(i.OutputFloat64Register(), scratch);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmVcvtF64U32: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
       __ vmov(scratch, i.InputRegister(0));
-      __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
+      __ vcvt_f64_u32(i.OutputFloat64Register(), scratch);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmVcvtS32F64: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
-      __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
+      __ vcvt_s32_f64(scratch, i.InputFloat64Register(0));
       __ vmov(i.OutputRegister(), scratch);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmVcvtU32F64: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
-      __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
+      __ vcvt_u32_f64(scratch, i.InputFloat64Register(0));
       __ vmov(i.OutputRegister(), scratch);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
@@ -392,30 +600,26 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
-    case kArmVldr32: {
-      SwVfpRegister scratch = kScratchDoubleReg.low();
-      __ vldr(scratch, i.InputOffset());
-      __ vcvt_f64_f32(i.OutputDoubleRegister(), scratch);
+    case kArmVldrF32: {
+      __ vldr(i.OutputFloat32Register(), i.InputOffset());
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
-    case kArmVstr32: {
+    case kArmVstrF32: {
       int index = 0;
-      SwVfpRegister scratch = kScratchDoubleReg.low();
       MemOperand operand = i.InputOffset(&index);
-      __ vcvt_f32_f64(scratch, i.InputDoubleRegister(index));
-      __ vstr(scratch, operand);
+      __ vstr(i.InputFloat32Register(index), operand);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
-    case kArmVldr64:
-      __ vldr(i.OutputDoubleRegister(), i.InputOffset());
+    case kArmVldrF64:
+      __ vldr(i.OutputFloat64Register(), i.InputOffset());
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
-    case kArmVstr64: {
+    case kArmVstrF64: {
       int index = 0;
       MemOperand operand = i.InputOffset(&index);
-      __ vstr(i.InputDoubleRegister(index), operand);
+      __ vstr(i.InputFloat64Register(index), operand);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
@@ -436,33 +640,62 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
+    case kCheckedLoadInt8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsb);
+      break;
+    case kCheckedLoadUint8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrb);
+      break;
+    case kCheckedLoadInt16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsh);
+      break;
+    case kCheckedLoadUint16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrh);
+      break;
+    case kCheckedLoadWord32:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(ldr);
+      break;
+    case kCheckedLoadFloat32:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(32);
+      break;
+    case kCheckedLoadFloat64:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(64);
+      break;
+    case kCheckedStoreWord8:
+      ASSEMBLE_CHECKED_STORE_INTEGER(strb);
+      break;
+    case kCheckedStoreWord16:
+      ASSEMBLE_CHECKED_STORE_INTEGER(strh);
+      break;
+    case kCheckedStoreWord32:
+      ASSEMBLE_CHECKED_STORE_INTEGER(str);
+      break;
+    case kCheckedStoreFloat32:
+      ASSEMBLE_CHECKED_STORE_FLOAT(32);
+      break;
+    case kCheckedStoreFloat64:
+      ASSEMBLE_CHECKED_STORE_FLOAT(64);
+      break;
   }
 }
 
 
 // Assembles branches after an instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr,
-                                       FlagsCondition condition) {
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
   ArmOperandConverter i(this, instr);
-  Label done;
-
-  // Emit a branch. The true and false targets are always the last two inputs
-  // to the instruction.
-  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
-  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
-  bool fallthru = IsNextInAssemblyOrder(fblock);
-  Label* tlabel = code()->GetLabel(tblock);
-  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
-  switch (condition) {
+  Label* tlabel = branch->true_label;
+  Label* flabel = branch->false_label;
+  switch (branch->condition) {
     case kUnorderedEqual:
-      __ b(vs, flabel);
-    // Fall through.
+      // The "eq" condition will not catch the unordered case.
+      // The jump/fall through to the false label will be used if the
+      // comparison was unordered.
     case kEqual:
       __ b(eq, tlabel);
       break;
     case kUnorderedNotEqual:
-      __ b(vs, tlabel);
-    // Fall through.
+      // Unordered or not equal can be tested with the "ne" condition.
+      // See ARMv7 manual A8.3 - Conditional execution.
     case kNotEqual:
       __ b(ne, tlabel);
       break;
@@ -479,26 +712,28 @@
       __ b(gt, tlabel);
       break;
     case kUnorderedLessThan:
-      __ b(vs, flabel);
-    // Fall through.
+      // The "lo" condition will not catch the unordered case.
+      // The jump/fall through to the false label will be used if the
+      // comparison was unordered.
     case kUnsignedLessThan:
       __ b(lo, tlabel);
       break;
     case kUnorderedGreaterThanOrEqual:
-      __ b(vs, tlabel);
-    // Fall through.
+      // Unordered or greater than or equal can be tested with the "hs"
+      // condition.
+      // See ARMv7 manual A8.3 - Conditional execution.
     case kUnsignedGreaterThanOrEqual:
       __ b(hs, tlabel);
       break;
     case kUnorderedLessThanOrEqual:
-      __ b(vs, flabel);
-    // Fall through.
+      // The "ls" condition will not catch the unordered case.
+      // The jump/fall through to the false label will be used if the
+      // comparison was unordered.
     case kUnsignedLessThanOrEqual:
       __ b(ls, tlabel);
       break;
     case kUnorderedGreaterThan:
-      __ b(vs, tlabel);
-    // Fall through.
+      // Unordered or greater than can be tested with the "hi" condition.
+      // See ARMv7 manual A8.3 - Conditional execution.
     case kUnsignedGreaterThan:
       __ b(hi, tlabel);
       break;
@@ -509,8 +744,12 @@
       __ b(vc, tlabel);
       break;
   }
-  if (!fallthru) __ b(flabel);  // no fallthru to flabel.
-  __ bind(&done);
+  if (!branch->fallthru) __ b(flabel);  // no fallthru to flabel.
+}
+
+
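+// Unconditional jumps to the next block in assembly order fall through, so no
+// branch is emitted for them.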
+void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+  if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
 }
 
 
@@ -634,28 +873,10 @@
       __ stm(db_w, sp, saves);
     }
   } else if (descriptor->IsJSFunctionCall()) {
-    CompilationInfo* info = linkage()->info();
+    CompilationInfo* info = this->info();
     __ Prologue(info->IsCodePreAgingActive());
     frame()->SetRegisterSaveAreaSize(
         StandardFrameConstants::kFixedFrameSizeFromFp);
-
-    // Sloppy mode functions and builtins need to replace the receiver with the
-    // global proxy when called as functions (without an explicit receiver
-    // object).
-    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
-    if (info->strict_mode() == SLOPPY && !info->is_native()) {
-      Label ok;
-      // +2 for return address and saved frame pointer.
-      int receiver_slot = info->scope()->num_parameters() + 2;
-      __ ldr(r2, MemOperand(fp, receiver_slot * kPointerSize));
-      __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
-      __ b(ne, &ok);
-      __ ldr(r2, GlobalObjectOperand());
-      __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
-      __ str(r2, MemOperand(fp, receiver_slot * kPointerSize));
-      __ bind(&ok);
-    }
-
   } else {
     __ StubPrologue();
     frame()->SetRegisterSaveAreaSize(
@@ -720,10 +941,10 @@
       __ str(temp, g.ToMemOperand(destination));
     }
   } else if (source->IsConstant()) {
+    Constant src = g.ToConstant(source);
     if (destination->IsRegister() || destination->IsStackSlot()) {
       Register dst =
           destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
-      Constant src = g.ToConstant(source);
       switch (src.type()) {
         case Constant::kInt32:
           __ mov(dst, Operand(src.ToInt32()));
@@ -731,6 +952,10 @@
         case Constant::kInt64:
           UNREACHABLE();
           break;
+        case Constant::kFloat32:
+          __ Move(dst,
+                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+          break;
         case Constant::kFloat64:
           __ Move(dst,
                   isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
@@ -741,16 +966,29 @@
         case Constant::kHeapObject:
           __ Move(dst, src.ToHeapObject());
           break;
+        case Constant::kRpoNumber:
+          UNREACHABLE();  // TODO(dcarney): loading RPO constants on arm.
+          break;
       }
       if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
-    } else if (destination->IsDoubleRegister()) {
-      DwVfpRegister result = g.ToDoubleRegister(destination);
-      __ vmov(result, g.ToDouble(source));
+    } else if (src.type() == Constant::kFloat32) {
+      if (destination->IsDoubleStackSlot()) {
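+        // A float32 constant is spilled as its raw bit pattern, staged
+        // through the ip scratch register.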
+        MemOperand dst = g.ToMemOperand(destination);
+        __ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
+        __ str(ip, dst);
+      } else {
+        SwVfpRegister dst = g.ToFloat32Register(destination);
+        __ vmov(dst, src.ToFloat32());
+      }
     } else {
-      DCHECK(destination->IsDoubleStackSlot());
-      DwVfpRegister temp = kScratchDoubleReg;
-      __ vmov(temp, g.ToDouble(source));
-      __ vstr(temp, g.ToMemOperand(destination));
+      DCHECK_EQ(Constant::kFloat64, src.type());
+      DwVfpRegister dst = destination->IsDoubleRegister()
+                              ? g.ToFloat64Register(destination)
+                              : kScratchDoubleReg;
+      __ vmov(dst, src.ToFloat64(), kScratchReg);
+      if (destination->IsDoubleStackSlot()) {
+        __ vstr(dst, g.ToMemOperand(destination));
+      }
     }
   } else if (source->IsDoubleRegister()) {
     DwVfpRegister src = g.ToDoubleRegister(source);
@@ -851,7 +1089,7 @@
 
 void CodeGenerator::EnsureSpaceForLazyDeopt() {
   int space_needed = Deoptimizer::patch_size();
-  if (!linkage()->info()->IsStub()) {
+  if (!info()->IsStub()) {
     // Ensure that we have enough space after the previous lazy-bailout
     // instruction for patching the code here.
     int current_pc = masm()->pc_offset();
diff --git a/src/compiler/arm/instruction-codes-arm.h b/src/compiler/arm/instruction-codes-arm.h
index 7849ca9..ecd0b2d 100644
--- a/src/compiler/arm/instruction-codes-arm.h
+++ b/src/compiler/arm/instruction-codes-arm.h
@@ -26,12 +26,23 @@
   V(ArmMul)                        \
   V(ArmMla)                        \
   V(ArmMls)                        \
+  V(ArmSmmul)                      \
+  V(ArmSmmla)                      \
+  V(ArmUmull)                      \
   V(ArmSdiv)                       \
   V(ArmUdiv)                       \
   V(ArmMov)                        \
   V(ArmMvn)                        \
   V(ArmBfc)                        \
   V(ArmUbfx)                       \
+  V(ArmSxtb)                       \
+  V(ArmSxth)                       \
+  V(ArmSxtab)                      \
+  V(ArmSxtah)                      \
+  V(ArmUxtb)                       \
+  V(ArmUxth)                       \
+  V(ArmUxtab)                      \
+  V(ArmUxtah)                      \
   V(ArmVcmpF64)                    \
   V(ArmVaddF64)                    \
   V(ArmVsubF64)                    \
@@ -42,14 +53,20 @@
   V(ArmVmodF64)                    \
   V(ArmVnegF64)                    \
   V(ArmVsqrtF64)                   \
+  V(ArmVfloorF64)                  \
+  V(ArmVceilF64)                   \
+  V(ArmVroundTruncateF64)          \
+  V(ArmVroundTiesAwayF64)          \
+  V(ArmVcvtF32F64)                 \
+  V(ArmVcvtF64F32)                 \
   V(ArmVcvtF64S32)                 \
   V(ArmVcvtF64U32)                 \
   V(ArmVcvtS32F64)                 \
   V(ArmVcvtU32F64)                 \
-  V(ArmVldr32)                     \
-  V(ArmVstr32)                     \
-  V(ArmVldr64)                     \
-  V(ArmVstr64)                     \
+  V(ArmVldrF32)                    \
+  V(ArmVstrF32)                    \
+  V(ArmVldrF64)                    \
+  V(ArmVstrF64)                    \
   V(ArmLdrb)                       \
   V(ArmLdrsb)                      \
   V(ArmStrb)                       \
diff --git a/src/compiler/arm/instruction-selector-arm-unittest.cc b/src/compiler/arm/instruction-selector-arm-unittest.cc
deleted file mode 100644
index 208d2e9..0000000
--- a/src/compiler/arm/instruction-selector-arm-unittest.cc
+++ /dev/null
@@ -1,1900 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/instruction-selector-unittest.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-namespace {
-
-typedef RawMachineAssembler::Label MLabel;
-typedef Node* (RawMachineAssembler::*Constructor)(Node*, Node*);
-
-
-// Data processing instructions.
-struct DPI {
-  Constructor constructor;
-  const char* constructor_name;
-  ArchOpcode arch_opcode;
-  ArchOpcode reverse_arch_opcode;
-  ArchOpcode test_arch_opcode;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const DPI& dpi) {
-  return os << dpi.constructor_name;
-}
-
-
-static const DPI kDPIs[] = {
-    {&RawMachineAssembler::Word32And, "Word32And", kArmAnd, kArmAnd, kArmTst},
-    {&RawMachineAssembler::Word32Or, "Word32Or", kArmOrr, kArmOrr, kArmOrr},
-    {&RawMachineAssembler::Word32Xor, "Word32Xor", kArmEor, kArmEor, kArmTeq},
-    {&RawMachineAssembler::Int32Add, "Int32Add", kArmAdd, kArmAdd, kArmCmn},
-    {&RawMachineAssembler::Int32Sub, "Int32Sub", kArmSub, kArmRsb, kArmCmp}};
-
-
-// Data processing instructions with overflow.
-struct ODPI {
-  Constructor constructor;
-  const char* constructor_name;
-  ArchOpcode arch_opcode;
-  ArchOpcode reverse_arch_opcode;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const ODPI& odpi) {
-  return os << odpi.constructor_name;
-}
-
-
-static const ODPI kODPIs[] = {{&RawMachineAssembler::Int32AddWithOverflow,
-                               "Int32AddWithOverflow", kArmAdd, kArmAdd},
-                              {&RawMachineAssembler::Int32SubWithOverflow,
-                               "Int32SubWithOverflow", kArmSub, kArmRsb}};
-
-
-// Shifts.
-struct Shift {
-  Constructor constructor;
-  const char* constructor_name;
-  int32_t i_low;          // lowest possible immediate
-  int32_t i_high;         // highest possible immediate
-  AddressingMode i_mode;  // Operand2_R_<shift>_I
-  AddressingMode r_mode;  // Operand2_R_<shift>_R
-};
-
-
-std::ostream& operator<<(std::ostream& os, const Shift& shift) {
-  return os << shift.constructor_name;
-}
-
-
-static const Shift kShifts[] = {
-    {&RawMachineAssembler::Word32Sar, "Word32Sar", 1, 32,
-     kMode_Operand2_R_ASR_I, kMode_Operand2_R_ASR_R},
-    {&RawMachineAssembler::Word32Shl, "Word32Shl", 0, 31,
-     kMode_Operand2_R_LSL_I, kMode_Operand2_R_LSL_R},
-    {&RawMachineAssembler::Word32Shr, "Word32Shr", 1, 32,
-     kMode_Operand2_R_LSR_I, kMode_Operand2_R_LSR_R},
-    {&RawMachineAssembler::Word32Ror, "Word32Ror", 1, 31,
-     kMode_Operand2_R_ROR_I, kMode_Operand2_R_ROR_R}};
-
-
-// Immediates (random subset).
-static const int32_t kImmediates[] = {
-    -2147483617, -2147483606, -2113929216, -2080374784, -1996488704,
-    -1879048192, -1459617792, -1358954496, -1342177265, -1275068414,
-    -1073741818, -1073741777, -855638016,  -805306368,  -402653184,
-    -268435444,  -16777216,   0,           35,          61,
-    105,         116,         171,         245,         255,
-    692,         1216,        1248,        1520,        1600,
-    1888,        3744,        4080,        5888,        8384,
-    9344,        9472,        9792,        13312,       15040,
-    15360,       20736,       22272,       23296,       32000,
-    33536,       37120,       45824,       47872,       56320,
-    59392,       65280,       72704,       101376,      147456,
-    161792,      164864,      167936,      173056,      195584,
-    209920,      212992,      356352,      655360,      704512,
-    716800,      851968,      901120,      1044480,     1523712,
-    2572288,     3211264,     3588096,     3833856,     3866624,
-    4325376,     5177344,     6488064,     7012352,     7471104,
-    14090240,    16711680,    19398656,    22282240,    28573696,
-    30408704,    30670848,    43253760,    54525952,    55312384,
-    56623104,    68157440,    115343360,   131072000,   187695104,
-    188743680,   195035136,   197132288,   203423744,   218103808,
-    267386880,   268435470,   285212672,   402653185,   415236096,
-    595591168,   603979776,   603979778,   629145600,   1073741835,
-    1073741855,  1073741861,  1073741884,  1157627904,  1476395008,
-    1476395010,  1610612741,  2030043136,  2080374785,  2097152000};
-
-}  // namespace
-
-
-// -----------------------------------------------------------------------------
-// Data processing instructions.
-
-
-typedef InstructionSelectorTestWithParam<DPI> InstructionSelectorDPITest;
-
-
-TEST_P(InstructionSelectorDPITest, Parameters) {
-  const DPI dpi = GetParam();
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-}
-
-
-TEST_P(InstructionSelectorDPITest, Immediate) {
-  const DPI dpi = GetParam();
-  TRACED_FOREACH(int32_t, imm, kImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-  }
-  TRACED_FOREACH(int32_t, imm, kImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    m.Return((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(dpi.reverse_arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-  }
-}
-
-
-TEST_P(InstructionSelectorDPITest, ShiftByParameter) {
-  const DPI dpi = GetParam();
-  TRACED_FOREACH(Shift, shift, kShifts) {
-    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
-    m.Return((m.*dpi.constructor)(
-        m.Parameter(0),
-        (m.*shift.constructor)(m.Parameter(1), m.Parameter(2))));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
-    EXPECT_EQ(3U, s[0]->InputCount());
-    EXPECT_EQ(1U, s[0]->OutputCount());
-  }
-  TRACED_FOREACH(Shift, shift, kShifts) {
-    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
-    m.Return((m.*dpi.constructor)(
-        (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
-        m.Parameter(2)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(dpi.reverse_arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
-    EXPECT_EQ(3U, s[0]->InputCount());
-    EXPECT_EQ(1U, s[0]->OutputCount());
-  }
-}
-
-
-TEST_P(InstructionSelectorDPITest, ShiftByImmediate) {
-  const DPI dpi = GetParam();
-  TRACED_FOREACH(Shift, shift, kShifts) {
-    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
-      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-      m.Return((m.*dpi.constructor)(
-          m.Parameter(0),
-          (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm))));
-      Stream s = m.Build();
-      ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
-      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
-      ASSERT_EQ(3U, s[0]->InputCount());
-      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
-      EXPECT_EQ(1U, s[0]->OutputCount());
-    }
-  }
-  TRACED_FOREACH(Shift, shift, kShifts) {
-    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
-      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-      m.Return((m.*dpi.constructor)(
-          (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
-          m.Parameter(1)));
-      Stream s = m.Build();
-      ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(dpi.reverse_arch_opcode, s[0]->arch_opcode());
-      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
-      ASSERT_EQ(3U, s[0]->InputCount());
-      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
-      EXPECT_EQ(1U, s[0]->OutputCount());
-    }
-  }
-}
-
-
-TEST_P(InstructionSelectorDPITest, BranchWithParameters) {
-  const DPI dpi = GetParam();
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  MLabel a, b;
-  m.Branch((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)), &a, &b);
-  m.Bind(&a);
-  m.Return(m.Int32Constant(1));
-  m.Bind(&b);
-  m.Return(m.Int32Constant(0));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
-  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-  EXPECT_EQ(kNotEqual, s[0]->flags_condition());
-}
-
-
-TEST_P(InstructionSelectorDPITest, BranchWithImmediate) {
-  const DPI dpi = GetParam();
-  TRACED_FOREACH(int32_t, imm, kImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    MLabel a, b;
-    m.Branch((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)), &a,
-             &b);
-    m.Bind(&a);
-    m.Return(m.Int32Constant(1));
-    m.Bind(&b);
-    m.Return(m.Int32Constant(0));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
-    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
-  }
-  TRACED_FOREACH(int32_t, imm, kImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    MLabel a, b;
-    m.Branch((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)), &a,
-             &b);
-    m.Bind(&a);
-    m.Return(m.Int32Constant(1));
-    m.Bind(&b);
-    m.Return(m.Int32Constant(0));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
-    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
-  }
-}
-
-
-TEST_P(InstructionSelectorDPITest, BranchWithShiftByParameter) {
-  const DPI dpi = GetParam();
-  TRACED_FOREACH(Shift, shift, kShifts) {
-    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
-    MLabel a, b;
-    m.Branch((m.*dpi.constructor)(
-                 m.Parameter(0),
-                 (m.*shift.constructor)(m.Parameter(1), m.Parameter(2))),
-             &a, &b);
-    m.Bind(&a);
-    m.Return(m.Int32Constant(1));
-    m.Bind(&b);
-    m.Return(m.Int32Constant(0));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
-    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
-  }
-  TRACED_FOREACH(Shift, shift, kShifts) {
-    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
-    MLabel a, b;
-    m.Branch((m.*dpi.constructor)(
-                 (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
-                 m.Parameter(2)),
-             &a, &b);
-    m.Bind(&a);
-    m.Return(m.Int32Constant(1));
-    m.Bind(&b);
-    m.Return(m.Int32Constant(0));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
-    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
-  }
-}
-
-
-TEST_P(InstructionSelectorDPITest, BranchWithShiftByImmediate) {
-  const DPI dpi = GetParam();
-  TRACED_FOREACH(Shift, shift, kShifts) {
-    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
-      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-      MLabel a, b;
-      m.Branch((m.*dpi.constructor)(m.Parameter(0),
-                                    (m.*shift.constructor)(
-                                        m.Parameter(1), m.Int32Constant(imm))),
-               &a, &b);
-      m.Bind(&a);
-      m.Return(m.Int32Constant(1));
-      m.Bind(&b);
-      m.Return(m.Int32Constant(0));
-      Stream s = m.Build();
-      ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
-      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
-      ASSERT_EQ(5U, s[0]->InputCount());
-      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
-      EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-      EXPECT_EQ(kNotEqual, s[0]->flags_condition());
-    }
-  }
-  TRACED_FOREACH(Shift, shift, kShifts) {
-    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
-      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-      MLabel a, b;
-      m.Branch((m.*dpi.constructor)(
-                   (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
-                   m.Parameter(1)),
-               &a, &b);
-      m.Bind(&a);
-      m.Return(m.Int32Constant(1));
-      m.Bind(&b);
-      m.Return(m.Int32Constant(0));
-      Stream s = m.Build();
-      ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
-      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
-      ASSERT_EQ(5U, s[0]->InputCount());
-      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
-      EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-      EXPECT_EQ(kNotEqual, s[0]->flags_condition());
-    }
-  }
-}
-
-
-TEST_P(InstructionSelectorDPITest, BranchIfZeroWithParameters) {
-  const DPI dpi = GetParam();
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  MLabel a, b;
-  m.Branch(m.Word32Equal((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)),
-                         m.Int32Constant(0)),
-           &a, &b);
-  m.Bind(&a);
-  m.Return(m.Int32Constant(1));
-  m.Bind(&b);
-  m.Return(m.Int32Constant(0));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
-  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-  EXPECT_EQ(kEqual, s[0]->flags_condition());
-}
-
-
-TEST_P(InstructionSelectorDPITest, BranchIfNotZeroWithParameters) {
-  const DPI dpi = GetParam();
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  MLabel a, b;
-  m.Branch(
-      m.Word32NotEqual((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)),
-                       m.Int32Constant(0)),
-      &a, &b);
-  m.Bind(&a);
-  m.Return(m.Int32Constant(1));
-  m.Bind(&b);
-  m.Return(m.Int32Constant(0));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
-  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-  EXPECT_EQ(kNotEqual, s[0]->flags_condition());
-}
-
-
-TEST_P(InstructionSelectorDPITest, BranchIfZeroWithImmediate) {
-  const DPI dpi = GetParam();
-  TRACED_FOREACH(int32_t, imm, kImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    MLabel a, b;
-    m.Branch(m.Word32Equal(
-                 (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)),
-                 m.Int32Constant(0)),
-             &a, &b);
-    m.Bind(&a);
-    m.Return(m.Int32Constant(1));
-    m.Bind(&b);
-    m.Return(m.Int32Constant(0));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
-    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-    EXPECT_EQ(kEqual, s[0]->flags_condition());
-  }
-  TRACED_FOREACH(int32_t, imm, kImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    MLabel a, b;
-    m.Branch(m.Word32Equal(
-                 (m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)),
-                 m.Int32Constant(0)),
-             &a, &b);
-    m.Bind(&a);
-    m.Return(m.Int32Constant(1));
-    m.Bind(&b);
-    m.Return(m.Int32Constant(0));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
-    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-    EXPECT_EQ(kEqual, s[0]->flags_condition());
-  }
-}
-
-
-TEST_P(InstructionSelectorDPITest, BranchIfNotZeroWithImmediate) {
-  const DPI dpi = GetParam();
-  TRACED_FOREACH(int32_t, imm, kImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    MLabel a, b;
-    m.Branch(m.Word32NotEqual(
-                 (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)),
-                 m.Int32Constant(0)),
-             &a, &b);
-    m.Bind(&a);
-    m.Return(m.Int32Constant(1));
-    m.Bind(&b);
-    m.Return(m.Int32Constant(0));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
-    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
-  }
-  TRACED_FOREACH(int32_t, imm, kImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    MLabel a, b;
-    m.Branch(m.Word32NotEqual(
-                 (m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)),
-                 m.Int32Constant(0)),
-             &a, &b);
-    m.Bind(&a);
-    m.Return(m.Int32Constant(1));
-    m.Bind(&b);
-    m.Return(m.Int32Constant(0));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
-    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
-  }
-}
-
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorDPITest,
-                        ::testing::ValuesIn(kDPIs));
-
-
-// -----------------------------------------------------------------------------
-// Data processing instructions with overflow.
-
-
-typedef InstructionSelectorTestWithParam<ODPI> InstructionSelectorODPITest;
-
-
-TEST_P(InstructionSelectorODPITest, OvfWithParameters) {
-  const ODPI odpi = GetParam();
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  m.Return(
-      m.Projection(1, (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1))));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_LE(1U, s[0]->OutputCount());
-  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-  EXPECT_EQ(kOverflow, s[0]->flags_condition());
-}
-
-
-TEST_P(InstructionSelectorODPITest, OvfWithImmediate) {
-  const ODPI odpi = GetParam();
-  TRACED_FOREACH(int32_t, imm, kImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    m.Return(m.Projection(
-        1, (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_LE(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kOverflow, s[0]->flags_condition());
-  }
-  TRACED_FOREACH(int32_t, imm, kImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    m.Return(m.Projection(
-        1, (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0))));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_LE(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kOverflow, s[0]->flags_condition());
-  }
-}
-
-
-TEST_P(InstructionSelectorODPITest, OvfWithShiftByParameter) {
-  const ODPI odpi = GetParam();
-  TRACED_FOREACH(Shift, shift, kShifts) {
-    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
-    m.Return(m.Projection(
-        1, (m.*odpi.constructor)(
-               m.Parameter(0),
-               (m.*shift.constructor)(m.Parameter(1), m.Parameter(2)))));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
-    EXPECT_EQ(3U, s[0]->InputCount());
-    EXPECT_LE(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kOverflow, s[0]->flags_condition());
-  }
-  TRACED_FOREACH(Shift, shift, kShifts) {
-    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
-    m.Return(m.Projection(
-        1, (m.*odpi.constructor)(
-               (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
-               m.Parameter(0))));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
-    EXPECT_EQ(3U, s[0]->InputCount());
-    EXPECT_LE(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kOverflow, s[0]->flags_condition());
-  }
-}
-
-
-TEST_P(InstructionSelectorODPITest, OvfWithShiftByImmediate) {
-  const ODPI odpi = GetParam();
-  TRACED_FOREACH(Shift, shift, kShifts) {
-    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
-      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-      m.Return(m.Projection(
-          1, (m.*odpi.constructor)(m.Parameter(0),
-                                   (m.*shift.constructor)(
-                                       m.Parameter(1), m.Int32Constant(imm)))));
-      Stream s = m.Build();
-      ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
-      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
-      ASSERT_EQ(3U, s[0]->InputCount());
-      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
-      EXPECT_LE(1U, s[0]->OutputCount());
-      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-      EXPECT_EQ(kOverflow, s[0]->flags_condition());
-    }
-  }
-  TRACED_FOREACH(Shift, shift, kShifts) {
-    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
-      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-      m.Return(m.Projection(
-          1, (m.*odpi.constructor)(
-                 (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
-                 m.Parameter(0))));
-      Stream s = m.Build();
-      ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
-      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
-      ASSERT_EQ(3U, s[0]->InputCount());
-      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
-      EXPECT_LE(1U, s[0]->OutputCount());
-      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-      EXPECT_EQ(kOverflow, s[0]->flags_condition());
-    }
-  }
-}
-
-
-TEST_P(InstructionSelectorODPITest, ValWithParameters) {
-  const ODPI odpi = GetParam();
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  m.Return(
-      m.Projection(0, (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1))));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_LE(1U, s[0]->OutputCount());
-  EXPECT_EQ(kFlags_none, s[0]->flags_mode());
-}
-
-
-TEST_P(InstructionSelectorODPITest, ValWithImmediate) {
-  const ODPI odpi = GetParam();
-  TRACED_FOREACH(int32_t, imm, kImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    m.Return(m.Projection(
-        0, (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_LE(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
-  }
-  TRACED_FOREACH(int32_t, imm, kImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    m.Return(m.Projection(
-        0, (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0))));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_LE(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
-  }
-}
-
-
-TEST_P(InstructionSelectorODPITest, ValWithShiftByParameter) {
-  const ODPI odpi = GetParam();
-  TRACED_FOREACH(Shift, shift, kShifts) {
-    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
-    m.Return(m.Projection(
-        0, (m.*odpi.constructor)(
-               m.Parameter(0),
-               (m.*shift.constructor)(m.Parameter(1), m.Parameter(2)))));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
-    EXPECT_EQ(3U, s[0]->InputCount());
-    EXPECT_LE(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
-  }
-  TRACED_FOREACH(Shift, shift, kShifts) {
-    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
-    m.Return(m.Projection(
-        0, (m.*odpi.constructor)(
-               (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
-               m.Parameter(0))));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
-    EXPECT_EQ(3U, s[0]->InputCount());
-    EXPECT_LE(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
-  }
-}
-
-
-TEST_P(InstructionSelectorODPITest, ValWithShiftByImmediate) {
-  const ODPI odpi = GetParam();
-  TRACED_FOREACH(Shift, shift, kShifts) {
-    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
-      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-      m.Return(m.Projection(
-          0, (m.*odpi.constructor)(m.Parameter(0),
-                                   (m.*shift.constructor)(
-                                       m.Parameter(1), m.Int32Constant(imm)))));
-      Stream s = m.Build();
-      ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
-      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
-      ASSERT_EQ(3U, s[0]->InputCount());
-      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
-      EXPECT_LE(1U, s[0]->OutputCount());
-      EXPECT_EQ(kFlags_none, s[0]->flags_mode());
-    }
-  }
-  TRACED_FOREACH(Shift, shift, kShifts) {
-    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
-      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-      m.Return(m.Projection(
-          0, (m.*odpi.constructor)(
-                 (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
-                 m.Parameter(0))));
-      Stream s = m.Build();
-      ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
-      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
-      ASSERT_EQ(3U, s[0]->InputCount());
-      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
-      EXPECT_LE(1U, s[0]->OutputCount());
-      EXPECT_EQ(kFlags_none, s[0]->flags_mode());
-    }
-  }
-}
-
-
-TEST_P(InstructionSelectorODPITest, BothWithParameters) {
-  const ODPI odpi = GetParam();
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
-  m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
-  Stream s = m.Build();
-  ASSERT_LE(1U, s.size());
-  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(2U, s[0]->OutputCount());
-  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-  EXPECT_EQ(kOverflow, s[0]->flags_condition());
-}
-
-
-TEST_P(InstructionSelectorODPITest, BothWithImmediate) {
-  const ODPI odpi = GetParam();
-  TRACED_FOREACH(int32_t, imm, kImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
-    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
-    Stream s = m.Build();
-    ASSERT_LE(1U, s.size());
-    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(2U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kOverflow, s[0]->flags_condition());
-  }
-  TRACED_FOREACH(int32_t, imm, kImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    Node* n = (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0));
-    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
-    Stream s = m.Build();
-    ASSERT_LE(1U, s.size());
-    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(2U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kOverflow, s[0]->flags_condition());
-  }
-}
-
-
-TEST_P(InstructionSelectorODPITest, BothWithShiftByParameter) {
-  const ODPI odpi = GetParam();
-  TRACED_FOREACH(Shift, shift, kShifts) {
-    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
-    Node* n = (m.*odpi.constructor)(
-        m.Parameter(0), (m.*shift.constructor)(m.Parameter(1), m.Parameter(2)));
-    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
-    Stream s = m.Build();
-    ASSERT_LE(1U, s.size());
-    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
-    EXPECT_EQ(3U, s[0]->InputCount());
-    EXPECT_EQ(2U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kOverflow, s[0]->flags_condition());
-  }
-  TRACED_FOREACH(Shift, shift, kShifts) {
-    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
-    Node* n = (m.*odpi.constructor)(
-        (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)), m.Parameter(2));
-    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
-    Stream s = m.Build();
-    ASSERT_LE(1U, s.size());
-    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
-    EXPECT_EQ(3U, s[0]->InputCount());
-    EXPECT_EQ(2U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kOverflow, s[0]->flags_condition());
-  }
-}
-
-
-TEST_P(InstructionSelectorODPITest, BothWithShiftByImmediate) {
-  const ODPI odpi = GetParam();
-  TRACED_FOREACH(Shift, shift, kShifts) {
-    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
-      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-      Node* n = (m.*odpi.constructor)(
-          m.Parameter(0),
-          (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)));
-      m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
-      Stream s = m.Build();
-      ASSERT_LE(1U, s.size());
-      EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
-      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
-      ASSERT_EQ(3U, s[0]->InputCount());
-      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
-      EXPECT_EQ(2U, s[0]->OutputCount());
-      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-      EXPECT_EQ(kOverflow, s[0]->flags_condition());
-    }
-  }
-  TRACED_FOREACH(Shift, shift, kShifts) {
-    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
-      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-      Node* n = (m.*odpi.constructor)(
-          (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
-          m.Parameter(1));
-      m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
-      Stream s = m.Build();
-      ASSERT_LE(1U, s.size());
-      EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
-      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
-      ASSERT_EQ(3U, s[0]->InputCount());
-      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
-      EXPECT_EQ(2U, s[0]->OutputCount());
-      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-      EXPECT_EQ(kOverflow, s[0]->flags_condition());
-    }
-  }
-}
-
-
-TEST_P(InstructionSelectorODPITest, BranchWithParameters) {
-  const ODPI odpi = GetParam();
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  MLabel a, b;
-  Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
-  m.Branch(m.Projection(1, n), &a, &b);
-  m.Bind(&a);
-  m.Return(m.Int32Constant(0));
-  m.Bind(&b);
-  m.Return(m.Projection(0, n));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
-  EXPECT_EQ(4U, s[0]->InputCount());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-  EXPECT_EQ(kOverflow, s[0]->flags_condition());
-}
-
-
-TEST_P(InstructionSelectorODPITest, BranchWithImmediate) {
-  const ODPI odpi = GetParam();
-  TRACED_FOREACH(int32_t, imm, kImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    MLabel a, b;
-    Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
-    m.Branch(m.Projection(1, n), &a, &b);
-    m.Bind(&a);
-    m.Return(m.Int32Constant(0));
-    m.Bind(&b);
-    m.Return(m.Projection(0, n));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
-    ASSERT_EQ(4U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-    EXPECT_EQ(kOverflow, s[0]->flags_condition());
-  }
-  TRACED_FOREACH(int32_t, imm, kImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    MLabel a, b;
-    Node* n = (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0));
-    m.Branch(m.Projection(1, n), &a, &b);
-    m.Bind(&a);
-    m.Return(m.Int32Constant(0));
-    m.Bind(&b);
-    m.Return(m.Projection(0, n));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
-    ASSERT_EQ(4U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-    EXPECT_EQ(kOverflow, s[0]->flags_condition());
-  }
-}
-
-
-TEST_P(InstructionSelectorODPITest, BranchIfZeroWithParameters) {
-  const ODPI odpi = GetParam();
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  MLabel a, b;
-  Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
-  m.Branch(m.Word32Equal(m.Projection(1, n), m.Int32Constant(0)), &a, &b);
-  m.Bind(&a);
-  m.Return(m.Projection(0, n));
-  m.Bind(&b);
-  m.Return(m.Int32Constant(0));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
-  EXPECT_EQ(4U, s[0]->InputCount());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-  EXPECT_EQ(kNotOverflow, s[0]->flags_condition());
-}
-
-
-TEST_P(InstructionSelectorODPITest, BranchIfNotZeroWithParameters) {
-  const ODPI odpi = GetParam();
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  MLabel a, b;
-  Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
-  m.Branch(m.Word32NotEqual(m.Projection(1, n), m.Int32Constant(0)), &a, &b);
-  m.Bind(&a);
-  m.Return(m.Projection(0, n));
-  m.Bind(&b);
-  m.Return(m.Int32Constant(0));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
-  EXPECT_EQ(4U, s[0]->InputCount());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-  EXPECT_EQ(kOverflow, s[0]->flags_condition());
-}
-
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorODPITest,
-                        ::testing::ValuesIn(kODPIs));
-
-
-// -----------------------------------------------------------------------------
-// Shifts.
-
-
-typedef InstructionSelectorTestWithParam<Shift> InstructionSelectorShiftTest;
-
-
-TEST_P(InstructionSelectorShiftTest, Parameters) {
-  const Shift shift = GetParam();
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  m.Return((m.*shift.constructor)(m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(kArmMov, s[0]->arch_opcode());
-  EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-}
-
-
-TEST_P(InstructionSelectorShiftTest, Immediate) {
-  const Shift shift = GetParam();
-  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    m.Return((m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArmMov, s[0]->arch_opcode());
-    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-  }
-}
-
-
-TEST_P(InstructionSelectorShiftTest, Word32EqualWithParameter) {
-  const Shift shift = GetParam();
-  {
-    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
-    m.Return(
-        m.Word32Equal(m.Parameter(0),
-                      (m.*shift.constructor)(m.Parameter(1), m.Parameter(2))));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
-    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
-    EXPECT_EQ(3U, s[0]->InputCount());
-    EXPECT_EQ(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kEqual, s[0]->flags_condition());
-  }
-  {
-    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
-    m.Return(
-        m.Word32Equal((m.*shift.constructor)(m.Parameter(1), m.Parameter(2)),
-                      m.Parameter(0)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
-    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
-    EXPECT_EQ(3U, s[0]->InputCount());
-    EXPECT_EQ(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kEqual, s[0]->flags_condition());
-  }
-}
-
-
-TEST_P(InstructionSelectorShiftTest, Word32EqualWithParameterAndImmediate) {
-  const Shift shift = GetParam();
-  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
-    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-    m.Return(m.Word32Equal(
-        (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
-        m.Parameter(0)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
-    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
-    ASSERT_EQ(3U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kEqual, s[0]->flags_condition());
-  }
-  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
-    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-    m.Return(m.Word32Equal(
-        m.Parameter(0),
-        (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm))));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
-    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
-    ASSERT_EQ(3U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kEqual, s[0]->flags_condition());
-  }
-}
-
-
-TEST_P(InstructionSelectorShiftTest, Word32EqualToZeroWithParameters) {
-  const Shift shift = GetParam();
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  m.Return(
-      m.Word32Equal(m.Int32Constant(0),
-                    (m.*shift.constructor)(m.Parameter(0), m.Parameter(1))));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(kArmMov, s[0]->arch_opcode());
-  EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(2U, s[0]->OutputCount());
-  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-  EXPECT_EQ(kEqual, s[0]->flags_condition());
-}
-
-
-TEST_P(InstructionSelectorShiftTest, Word32EqualToZeroWithImmediate) {
-  const Shift shift = GetParam();
-  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
-    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-    m.Return(m.Word32Equal(
-        m.Int32Constant(0),
-        (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm))));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArmMov, s[0]->arch_opcode());
-    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(2U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kEqual, s[0]->flags_condition());
-  }
-}
-
-
-TEST_P(InstructionSelectorShiftTest, Word32NotWithParameters) {
-  const Shift shift = GetParam();
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  m.Return(m.Word32Not((m.*shift.constructor)(m.Parameter(0), m.Parameter(1))));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(kArmMvn, s[0]->arch_opcode());
-  EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-}
-
-
-TEST_P(InstructionSelectorShiftTest, Word32NotWithImmediate) {
-  const Shift shift = GetParam();
-  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    m.Return(m.Word32Not(
-        (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm))));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArmMvn, s[0]->arch_opcode());
-    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-  }
-}
-
-
-TEST_P(InstructionSelectorShiftTest, Word32AndWithWord32NotWithParameters) {
-  const Shift shift = GetParam();
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
-  m.Return(m.Word32And(m.Parameter(0), m.Word32Not((m.*shift.constructor)(
-                                           m.Parameter(1), m.Parameter(2)))));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(kArmBic, s[0]->arch_opcode());
-  EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
-  EXPECT_EQ(3U, s[0]->InputCount());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-}
-
-
-TEST_P(InstructionSelectorShiftTest, Word32AndWithWord32NotWithImmediate) {
-  const Shift shift = GetParam();
-  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
-    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-    m.Return(m.Word32And(m.Parameter(0),
-                         m.Word32Not((m.*shift.constructor)(
-                             m.Parameter(1), m.Int32Constant(imm)))));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArmBic, s[0]->arch_opcode());
-    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
-    ASSERT_EQ(3U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-  }
-}
-
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
-                        ::testing::ValuesIn(kShifts));
-
-
-// -----------------------------------------------------------------------------
-// Memory access instructions.
-
-
-namespace {
-
-struct MemoryAccess {
-  MachineType type;
-  ArchOpcode ldr_opcode;
-  ArchOpcode str_opcode;
-  bool (InstructionSelectorTest::Stream::*val_predicate)(
-      const InstructionOperand*) const;
-  const int32_t immediates[40];
-};
-
-
-std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
-  OStringStream ost;
-  ost << memacc.type;
-  return os << ost.c_str();
-}
-
-
-static const MemoryAccess kMemoryAccesses[] = {
-    {kMachInt8,
-     kArmLdrsb,
-     kArmStrb,
-     &InstructionSelectorTest::Stream::IsInteger,
-     {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-      -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
-      115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
-    {kMachUint8,
-     kArmLdrb,
-     kArmStrb,
-     &InstructionSelectorTest::Stream::IsInteger,
-     {-4095, -3914, -3536, -3234, -3185, -3169, -1073, -990, -859, -720, -434,
-      -127, -124, -122, -105, -91, -86, -64, -55, -53, -30, -10, -3, 0, 20, 28,
-      39, 58, 64, 73, 75, 100, 108, 121, 686, 963, 1363, 2759, 3449, 4095}},
-    {kMachInt16,
-     kArmLdrsh,
-     kArmStrh,
-     &InstructionSelectorTest::Stream::IsInteger,
-     {-255, -251, -232, -220, -144, -138, -130, -126, -116, -115, -102, -101,
-      -98, -69, -59, -56, -39, -35, -23, -19, -7, 0, 22, 26, 37, 68, 83, 87, 98,
-      102, 108, 111, 117, 171, 195, 203, 204, 245, 246, 255}},
-    {kMachUint16,
-     kArmLdrh,
-     kArmStrh,
-     &InstructionSelectorTest::Stream::IsInteger,
-     {-255, -230, -201, -172, -125, -119, -118, -105, -98, -79, -54, -42, -41,
-      -32, -12, -11, -5, -4, 0, 5, 9, 25, 28, 51, 58, 60, 89, 104, 108, 109,
-      114, 116, 120, 138, 150, 161, 166, 172, 228, 255}},
-    {kMachInt32,
-     kArmLdr,
-     kArmStr,
-     &InstructionSelectorTest::Stream::IsInteger,
-     {-4095, -1898, -1685, -1562, -1408, -1313, -344, -128, -116, -100, -92,
-      -80, -72, -71, -56, -25, -21, -11, -9, 0, 3, 5, 27, 28, 42, 52, 63, 88,
-      93, 97, 125, 846, 1037, 2102, 2403, 2597, 2632, 2997, 3935, 4095}},
-    {kMachFloat32,
-     kArmVldr32,
-     kArmVstr32,
-     &InstructionSelectorTest::Stream::IsDouble,
-     {-1020, -928, -896, -772, -728, -680, -660, -488, -372, -112, -100, -92,
-      -84, -80, -72, -64, -60, -56, -52, -48, -36, -32, -20, -8, -4, 0, 8, 20,
-      24, 40, 64, 112, 204, 388, 516, 852, 856, 976, 988, 1020}},
-    {kMachFloat64,
-     kArmVldr64,
-     kArmVstr64,
-     &InstructionSelectorTest::Stream::IsDouble,
-     {-1020, -948, -796, -696, -612, -364, -320, -308, -128, -112, -108, -104,
-      -96, -84, -80, -56, -48, -40, -20, 0, 24, 28, 36, 48, 64, 84, 96, 100,
-      108, 116, 120, 140, 156, 408, 432, 444, 772, 832, 940, 1020}}};
-
-}  // namespace
-
-
-typedef InstructionSelectorTestWithParam<MemoryAccess>
-    InstructionSelectorMemoryAccessTest;
-
-
-TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
-  const MemoryAccess memacc = GetParam();
-  StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
-  m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(kMode_Offset_RR, s[0]->addressing_mode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  ASSERT_EQ(1U, s[0]->OutputCount());
-  EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output()));
-}
-
-
-TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
-  const MemoryAccess memacc = GetParam();
-  TRACED_FOREACH(int32_t, index, memacc.immediates) {
-    StreamBuilder m(this, memacc.type, kMachPtr);
-    m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Offset_RI, s[0]->addressing_mode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
-    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
-    ASSERT_EQ(1U, s[0]->OutputCount());
-    EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output()));
-  }
-}
-
-
-TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
-  const MemoryAccess memacc = GetParam();
-  StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
-  m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
-  m.Return(m.Int32Constant(0));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(kMode_Offset_RR, s[0]->addressing_mode());
-  EXPECT_EQ(3U, s[0]->InputCount());
-  EXPECT_EQ(0U, s[0]->OutputCount());
-}
-
-
-TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
-  const MemoryAccess memacc = GetParam();
-  TRACED_FOREACH(int32_t, index, memacc.immediates) {
-    StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
-    m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
-            m.Parameter(1));
-    m.Return(m.Int32Constant(0));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Offset_RI, s[0]->addressing_mode());
-    ASSERT_EQ(3U, s[0]->InputCount());
-    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
-    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(0U, s[0]->OutputCount());
-  }
-}
-
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
-                        InstructionSelectorMemoryAccessTest,
-                        ::testing::ValuesIn(kMemoryAccesses));
-
-
-// -----------------------------------------------------------------------------
-// Miscellaneous.
-
-
-TEST_F(InstructionSelectorTest, Int32AddWithInt32Mul) {
-  {
-    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
-    m.Return(
-        m.Int32Add(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArmMla, s[0]->arch_opcode());
-    EXPECT_EQ(3U, s[0]->InputCount());
-    EXPECT_EQ(1U, s[0]->OutputCount());
-  }
-  {
-    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
-    m.Return(
-        m.Int32Add(m.Int32Mul(m.Parameter(1), m.Parameter(2)), m.Parameter(0)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArmMla, s[0]->arch_opcode());
-    EXPECT_EQ(3U, s[0]->InputCount());
-    EXPECT_EQ(1U, s[0]->OutputCount());
-  }
-}
-
-
-TEST_F(InstructionSelectorTest, Int32DivWithParameters) {
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  m.Return(m.Int32Div(m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build();
-  ASSERT_EQ(4U, s.size());
-  EXPECT_EQ(kArmVcvtF64S32, s[0]->arch_opcode());
-  ASSERT_EQ(1U, s[0]->OutputCount());
-  EXPECT_EQ(kArmVcvtF64S32, s[1]->arch_opcode());
-  ASSERT_EQ(1U, s[1]->OutputCount());
-  EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
-  ASSERT_EQ(2U, s[2]->InputCount());
-  ASSERT_EQ(1U, s[2]->OutputCount());
-  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
-  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
-  EXPECT_EQ(kArmVcvtS32F64, s[3]->arch_opcode());
-  ASSERT_EQ(1U, s[3]->InputCount());
-  EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
-}
-
-
-TEST_F(InstructionSelectorTest, Int32DivWithParametersForSUDIV) {
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  m.Return(m.Int32Div(m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build(SUDIV);
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(kArmSdiv, s[0]->arch_opcode());
-}
-
-
-TEST_F(InstructionSelectorTest, Int32ModWithParameters) {
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build();
-  ASSERT_EQ(6U, s.size());
-  EXPECT_EQ(kArmVcvtF64S32, s[0]->arch_opcode());
-  ASSERT_EQ(1U, s[0]->OutputCount());
-  EXPECT_EQ(kArmVcvtF64S32, s[1]->arch_opcode());
-  ASSERT_EQ(1U, s[1]->OutputCount());
-  EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
-  ASSERT_EQ(2U, s[2]->InputCount());
-  ASSERT_EQ(1U, s[2]->OutputCount());
-  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
-  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
-  EXPECT_EQ(kArmVcvtS32F64, s[3]->arch_opcode());
-  ASSERT_EQ(1U, s[3]->InputCount());
-  EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
-  EXPECT_EQ(kArmMul, s[4]->arch_opcode());
-  ASSERT_EQ(1U, s[4]->OutputCount());
-  ASSERT_EQ(2U, s[4]->InputCount());
-  EXPECT_EQ(s.ToVreg(s[3]->Output()), s.ToVreg(s[4]->InputAt(0)));
-  EXPECT_EQ(s.ToVreg(s[1]->InputAt(0)), s.ToVreg(s[4]->InputAt(1)));
-  EXPECT_EQ(kArmSub, s[5]->arch_opcode());
-  ASSERT_EQ(1U, s[5]->OutputCount());
-  ASSERT_EQ(2U, s[5]->InputCount());
-  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[5]->InputAt(0)));
-  EXPECT_EQ(s.ToVreg(s[4]->Output()), s.ToVreg(s[5]->InputAt(1)));
-}
-
-
-TEST_F(InstructionSelectorTest, Int32ModWithParametersForSUDIV) {
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build(SUDIV);
-  ASSERT_EQ(3U, s.size());
-  EXPECT_EQ(kArmSdiv, s[0]->arch_opcode());
-  ASSERT_EQ(1U, s[0]->OutputCount());
-  ASSERT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(kArmMul, s[1]->arch_opcode());
-  ASSERT_EQ(1U, s[1]->OutputCount());
-  ASSERT_EQ(2U, s[1]->InputCount());
-  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
-  EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
-  EXPECT_EQ(kArmSub, s[2]->arch_opcode());
-  ASSERT_EQ(1U, s[2]->OutputCount());
-  ASSERT_EQ(2U, s[2]->InputCount());
-  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[2]->InputAt(0)));
-  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
-}
-
-
-TEST_F(InstructionSelectorTest, Int32ModWithParametersForSUDIVAndMLS) {
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build(MLS, SUDIV);
-  ASSERT_EQ(2U, s.size());
-  EXPECT_EQ(kArmSdiv, s[0]->arch_opcode());
-  ASSERT_EQ(1U, s[0]->OutputCount());
-  ASSERT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(kArmMls, s[1]->arch_opcode());
-  ASSERT_EQ(1U, s[1]->OutputCount());
-  ASSERT_EQ(3U, s[1]->InputCount());
-  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
-  EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
-  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[1]->InputAt(2)));
-}
-
-
-TEST_F(InstructionSelectorTest, Int32MulWithParameters) {
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  m.Return(m.Int32Mul(m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(kArmMul, s[0]->arch_opcode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-}
-
-
-TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
-  // x * (2^k + 1) -> x + (x << k)
-  TRACED_FORRANGE(int32_t, k, 1, 30) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) + 1)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArmAdd, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
-    ASSERT_EQ(3U, s[0]->InputCount());
-    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
-    EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-  }
-  // x * (2^k - 1) -> -x + (x << k)
-  TRACED_FORRANGE(int32_t, k, 3, 30) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) - 1)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArmRsb, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
-    ASSERT_EQ(3U, s[0]->InputCount());
-    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
-    EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-  }
-  // (2^k + 1) * x -> x + (x << k)
-  TRACED_FORRANGE(int32_t, k, 1, 30) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    m.Return(m.Int32Mul(m.Int32Constant((1 << k) + 1), m.Parameter(0)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArmAdd, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
-    ASSERT_EQ(3U, s[0]->InputCount());
-    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
-    EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-  }
-  // (2^k - 1) * x -> -x + (x << k)
-  TRACED_FORRANGE(int32_t, k, 3, 30) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    m.Return(m.Int32Mul(m.Int32Constant((1 << k) - 1), m.Parameter(0)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArmRsb, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
-    ASSERT_EQ(3U, s[0]->InputCount());
-    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
-    EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-  }
-}
-
-
-TEST_F(InstructionSelectorTest, Int32SubWithInt32Mul) {
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
-  m.Return(
-      m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
-  Stream s = m.Build();
-  ASSERT_EQ(2U, s.size());
-  EXPECT_EQ(kArmMul, s[0]->arch_opcode());
-  ASSERT_EQ(1U, s[0]->OutputCount());
-  EXPECT_EQ(kArmSub, s[1]->arch_opcode());
-  ASSERT_EQ(2U, s[1]->InputCount());
-  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(1)));
-}
-
-
-TEST_F(InstructionSelectorTest, Int32SubWithInt32MulForMLS) {
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
-  m.Return(
-      m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
-  Stream s = m.Build(MLS);
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(kArmMls, s[0]->arch_opcode());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-  EXPECT_EQ(3U, s[0]->InputCount());
-}
-
-
-TEST_F(InstructionSelectorTest, Int32UDivWithParameters) {
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  m.Return(m.Int32UDiv(m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build();
-  ASSERT_EQ(4U, s.size());
-  EXPECT_EQ(kArmVcvtF64U32, s[0]->arch_opcode());
-  ASSERT_EQ(1U, s[0]->OutputCount());
-  EXPECT_EQ(kArmVcvtF64U32, s[1]->arch_opcode());
-  ASSERT_EQ(1U, s[1]->OutputCount());
-  EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
-  ASSERT_EQ(2U, s[2]->InputCount());
-  ASSERT_EQ(1U, s[2]->OutputCount());
-  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
-  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
-  EXPECT_EQ(kArmVcvtU32F64, s[3]->arch_opcode());
-  ASSERT_EQ(1U, s[3]->InputCount());
-  EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
-}
-
-
-TEST_F(InstructionSelectorTest, Int32UDivWithParametersForSUDIV) {
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  m.Return(m.Int32UDiv(m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build(SUDIV);
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(kArmUdiv, s[0]->arch_opcode());
-}
-
-
-TEST_F(InstructionSelectorTest, Int32UModWithParameters) {
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build();
-  ASSERT_EQ(6U, s.size());
-  EXPECT_EQ(kArmVcvtF64U32, s[0]->arch_opcode());
-  ASSERT_EQ(1U, s[0]->OutputCount());
-  EXPECT_EQ(kArmVcvtF64U32, s[1]->arch_opcode());
-  ASSERT_EQ(1U, s[1]->OutputCount());
-  EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
-  ASSERT_EQ(2U, s[2]->InputCount());
-  ASSERT_EQ(1U, s[2]->OutputCount());
-  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
-  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
-  EXPECT_EQ(kArmVcvtU32F64, s[3]->arch_opcode());
-  ASSERT_EQ(1U, s[3]->InputCount());
-  EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
-  EXPECT_EQ(kArmMul, s[4]->arch_opcode());
-  ASSERT_EQ(1U, s[4]->OutputCount());
-  ASSERT_EQ(2U, s[4]->InputCount());
-  EXPECT_EQ(s.ToVreg(s[3]->Output()), s.ToVreg(s[4]->InputAt(0)));
-  EXPECT_EQ(s.ToVreg(s[1]->InputAt(0)), s.ToVreg(s[4]->InputAt(1)));
-  EXPECT_EQ(kArmSub, s[5]->arch_opcode());
-  ASSERT_EQ(1U, s[5]->OutputCount());
-  ASSERT_EQ(2U, s[5]->InputCount());
-  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[5]->InputAt(0)));
-  EXPECT_EQ(s.ToVreg(s[4]->Output()), s.ToVreg(s[5]->InputAt(1)));
-}
-
-
-TEST_F(InstructionSelectorTest, Int32UModWithParametersForSUDIV) {
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build(SUDIV);
-  ASSERT_EQ(3U, s.size());
-  EXPECT_EQ(kArmUdiv, s[0]->arch_opcode());
-  ASSERT_EQ(1U, s[0]->OutputCount());
-  ASSERT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(kArmMul, s[1]->arch_opcode());
-  ASSERT_EQ(1U, s[1]->OutputCount());
-  ASSERT_EQ(2U, s[1]->InputCount());
-  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
-  EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
-  EXPECT_EQ(kArmSub, s[2]->arch_opcode());
-  ASSERT_EQ(1U, s[2]->OutputCount());
-  ASSERT_EQ(2U, s[2]->InputCount());
-  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[2]->InputAt(0)));
-  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
-}
-
-
-TEST_F(InstructionSelectorTest, Int32UModWithParametersForSUDIVAndMLS) {
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build(MLS, SUDIV);
-  ASSERT_EQ(2U, s.size());
-  EXPECT_EQ(kArmUdiv, s[0]->arch_opcode());
-  ASSERT_EQ(1U, s[0]->OutputCount());
-  ASSERT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(kArmMls, s[1]->arch_opcode());
-  ASSERT_EQ(1U, s[1]->OutputCount());
-  ASSERT_EQ(3U, s[1]->InputCount());
-  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
-  EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
-  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[1]->InputAt(2)));
-}
-
-
-TEST_F(InstructionSelectorTest, Word32AndWithUbfxImmediateForARMv7) {
-  TRACED_FORRANGE(int32_t, width, 1, 32) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    m.Return(m.Word32And(m.Parameter(0),
-                         m.Int32Constant(0xffffffffu >> (32 - width))));
-    Stream s = m.Build(ARMv7);
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
-    ASSERT_EQ(3U, s[0]->InputCount());
-    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
-  }
-  TRACED_FORRANGE(int32_t, width, 1, 32) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    m.Return(m.Word32And(m.Int32Constant(0xffffffffu >> (32 - width)),
-                         m.Parameter(0)));
-    Stream s = m.Build(ARMv7);
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
-    ASSERT_EQ(3U, s[0]->InputCount());
-    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
-  }
-}
-
-
-TEST_F(InstructionSelectorTest, Word32AndWithBfcImmediateForARMv7) {
-  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
-    TRACED_FORRANGE(int32_t, width, 1, (32 - lsb) - 1) {
-      StreamBuilder m(this, kMachInt32, kMachInt32);
-      m.Return(m.Word32And(
-          m.Parameter(0),
-          m.Int32Constant(~((0xffffffffu >> (32 - width)) << lsb))));
-      Stream s = m.Build(ARMv7);
-      ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(kArmBfc, s[0]->arch_opcode());
-      ASSERT_EQ(1U, s[0]->OutputCount());
-      EXPECT_TRUE(
-          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
-      ASSERT_EQ(3U, s[0]->InputCount());
-      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
-      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
-    }
-  }
-  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
-    TRACED_FORRANGE(int32_t, width, 1, (32 - lsb) - 1) {
-      StreamBuilder m(this, kMachInt32, kMachInt32);
-      m.Return(
-          m.Word32And(m.Int32Constant(~((0xffffffffu >> (32 - width)) << lsb)),
-                      m.Parameter(0)));
-      Stream s = m.Build(ARMv7);
-      ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(kArmBfc, s[0]->arch_opcode());
-      ASSERT_EQ(1U, s[0]->OutputCount());
-      EXPECT_TRUE(
-          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
-      ASSERT_EQ(3U, s[0]->InputCount());
-      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
-      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
-    }
-  }
-}
-
-
-TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediateForARMv7) {
-  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
-    TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
-      uint32_t max = 1u << lsb;
-      if (max > static_cast<uint32_t>(kMaxInt)) max -= 1;
-      uint32_t jnk = rng()->NextInt(max);
-      uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
-      StreamBuilder m(this, kMachInt32, kMachInt32);
-      m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
-                           m.Int32Constant(lsb)));
-      Stream s = m.Build(ARMv7);
-      ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
-      ASSERT_EQ(3U, s[0]->InputCount());
-      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
-      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
-    }
-  }
-  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
-    TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
-      uint32_t max = 1u << lsb;
-      if (max > static_cast<uint32_t>(kMaxInt)) max -= 1;
-      uint32_t jnk = rng()->NextInt(max);
-      uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
-      StreamBuilder m(this, kMachInt32, kMachInt32);
-      m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
-                           m.Int32Constant(lsb)));
-      Stream s = m.Build(ARMv7);
-      ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
-      ASSERT_EQ(3U, s[0]->InputCount());
-      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
-      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
-    }
-  }
-}
-
-
-TEST_F(InstructionSelectorTest, Word32AndWithWord32Not) {
-  {
-    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-    m.Return(m.Word32And(m.Parameter(0), m.Word32Not(m.Parameter(1))));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArmBic, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
-    EXPECT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(1U, s[0]->OutputCount());
-  }
-  {
-    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-    m.Return(m.Word32And(m.Word32Not(m.Parameter(0)), m.Parameter(1)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArmBic, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
-    EXPECT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(1U, s[0]->OutputCount());
-  }
-}
-
-
-TEST_F(InstructionSelectorTest, Word32EqualWithParameters) {
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  m.Return(m.Word32Equal(m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
-  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-  EXPECT_EQ(kEqual, s[0]->flags_condition());
-}
-
-
-TEST_F(InstructionSelectorTest, Word32EqualWithImmediate) {
-  TRACED_FOREACH(int32_t, imm, kImmediates) {
-    if (imm == 0) continue;
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(imm)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kEqual, s[0]->flags_condition());
-  }
-  TRACED_FOREACH(int32_t, imm, kImmediates) {
-    if (imm == 0) continue;
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    m.Return(m.Word32Equal(m.Int32Constant(imm), m.Parameter(0)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kEqual, s[0]->flags_condition());
-  }
-}
-
-
-TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
-  {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArmTst, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kEqual, s[0]->flags_condition());
-  }
-  {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArmTst, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kEqual, s[0]->flags_condition());
-  }
-}
-
-
-TEST_F(InstructionSelectorTest, Word32NotWithParameter) {
-  StreamBuilder m(this, kMachInt32, kMachInt32);
-  m.Return(m.Word32Not(m.Parameter(0)));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(kArmMvn, s[0]->arch_opcode());
-  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
-  EXPECT_EQ(1U, s[0]->InputCount());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-}
-
-
-TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrWithImmediateForARMv7) {
-  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
-    TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
-      StreamBuilder m(this, kMachInt32, kMachInt32);
-      m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb)),
-                           m.Int32Constant(0xffffffffu >> (32 - width))));
-      Stream s = m.Build(ARMv7);
-      ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
-      ASSERT_EQ(3U, s[0]->InputCount());
-      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
-      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
-    }
-  }
-  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
-    TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
-      StreamBuilder m(this, kMachInt32, kMachInt32);
-      m.Return(m.Word32And(m.Int32Constant(0xffffffffu >> (32 - width)),
-                           m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb))));
-      Stream s = m.Build(ARMv7);
-      ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
-      ASSERT_EQ(3U, s[0]->InputCount());
-      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
-      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
-    }
-  }
-}
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
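
Note on the multiply-by-immediate lowering exercised by the Int32MulWithImmediate
test above: the selector strength-reduces a multiplication by 2^k + 1 or 2^k - 1
into a single ADD or RSB with an LSL operand. A minimal standalone check of the
underlying identities follows; it is an illustrative harness, not part of this
patch, and uses unsigned arithmetic so the mod-2^32 wraparound matches ARM's
32-bit semantics.

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t x = 0xdeadbeefu;
  for (int k = 1; k <= 30; ++k) {
    // x * (2^k + 1) == x + (x << k): emitted as ADD with an LSL operand.
    assert(x * ((1u << k) + 1u) == x + (x << k));
    // x * (2^k - 1) == (x << k) - x: emitted as RSB with an LSL operand.
    assert(x * ((1u << k) - 1u) == (x << k) - x);
  }
  return 0;
}
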
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index ae93b27..ef9e89e 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -11,16 +11,17 @@
 namespace compiler {
 
 // Adds Arm-specific methods for generating InstructionOperands.
-class ArmOperandGenerator FINAL : public OperandGenerator {
+class ArmOperandGenerator : public OperandGenerator {
  public:
   explicit ArmOperandGenerator(InstructionSelector* selector)
       : OperandGenerator(selector) {}
 
-  InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
-    if (CanBeImmediate(node, opcode)) {
-      return UseImmediate(node);
-    }
-    return UseRegister(node);
+  bool CanBeImmediate(int32_t value) const {
+    return Assembler::ImmediateFitsAddrMode1Instruction(value);
+  }
+
+  bool CanBeImmediate(uint32_t value) const {
+    return CanBeImmediate(bit_cast<int32_t>(value));
   }
 
   bool CanBeImmediate(Node* node, InstructionCode opcode) {
@@ -32,27 +33,25 @@
       case kArmMov:
       case kArmMvn:
       case kArmBic:
-        return ImmediateFitsAddrMode1Instruction(value) ||
-               ImmediateFitsAddrMode1Instruction(~value);
+        return CanBeImmediate(value) || CanBeImmediate(~value);
 
       case kArmAdd:
       case kArmSub:
       case kArmCmp:
       case kArmCmn:
-        return ImmediateFitsAddrMode1Instruction(value) ||
-               ImmediateFitsAddrMode1Instruction(-value);
+        return CanBeImmediate(value) || CanBeImmediate(-value);
 
       case kArmTst:
       case kArmTeq:
       case kArmOrr:
       case kArmEor:
       case kArmRsb:
-        return ImmediateFitsAddrMode1Instruction(value);
+        return CanBeImmediate(value);
 
-      case kArmVldr32:
-      case kArmVstr32:
-      case kArmVldr64:
-      case kArmVstr64:
+      case kArmVldrF32:
+      case kArmVstrF32:
+      case kArmVldrF64:
+      case kArmVstrF64:
         return value >= -1020 && value <= 1020 && (value % 4) == 0;
 
       case kArmLdrb:
@@ -68,49 +67,26 @@
       case kArmStrh:
         return value >= -255 && value <= 255;
 
-      case kArchCallCodeObject:
-      case kArchCallJSFunction:
-      case kArchJmp:
-      case kArchNop:
-      case kArchRet:
-      case kArchTruncateDoubleToI:
-      case kArmMul:
-      case kArmMla:
-      case kArmMls:
-      case kArmSdiv:
-      case kArmUdiv:
-      case kArmBfc:
-      case kArmUbfx:
-      case kArmVcmpF64:
-      case kArmVaddF64:
-      case kArmVsubF64:
-      case kArmVmulF64:
-      case kArmVmlaF64:
-      case kArmVmlsF64:
-      case kArmVdivF64:
-      case kArmVmodF64:
-      case kArmVnegF64:
-      case kArmVsqrtF64:
-      case kArmVcvtF64S32:
-      case kArmVcvtF64U32:
-      case kArmVcvtS32F64:
-      case kArmVcvtU32F64:
-      case kArmPush:
-        return false;
+      default:
+        break;
     }
-    UNREACHABLE();
     return false;
   }
-
- private:
-  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
-    return Assembler::ImmediateFitsAddrMode1Instruction(imm);
-  }
 };
 
 
-static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
-                            Node* node) {
+namespace {
+
+void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+                    Node* node) {
+  ArmOperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)));
+}
+
+
+void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+                     Node* node) {
   ArmOperandGenerator g(selector);
   selector->Emit(opcode, g.DefineAsRegister(node),
                  g.UseRegister(node->InputAt(0)),
@@ -118,86 +94,69 @@
 }
 
 
-static bool TryMatchROR(InstructionSelector* selector,
-                        InstructionCode* opcode_return, Node* node,
-                        InstructionOperand** value_return,
-                        InstructionOperand** shift_return) {
+template <IrOpcode::Value kOpcode, int kImmMin, int kImmMax,
+          AddressingMode kImmMode, AddressingMode kRegMode>
+bool TryMatchShift(InstructionSelector* selector,
+                   InstructionCode* opcode_return, Node* node,
+                   InstructionOperand** value_return,
+                   InstructionOperand** shift_return) {
   ArmOperandGenerator g(selector);
-  if (node->opcode() != IrOpcode::kWord32Ror) return false;
-  Int32BinopMatcher m(node);
-  *value_return = g.UseRegister(m.left().node());
-  if (m.right().IsInRange(1, 31)) {
-    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ROR_I);
-    *shift_return = g.UseImmediate(m.right().node());
-  } else {
-    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ROR_R);
-    *shift_return = g.UseRegister(m.right().node());
+  if (node->opcode() == kOpcode) {
+    Int32BinopMatcher m(node);
+    *value_return = g.UseRegister(m.left().node());
+    if (m.right().IsInRange(kImmMin, kImmMax)) {
+      *opcode_return |= AddressingModeField::encode(kImmMode);
+      *shift_return = g.UseImmediate(m.right().node());
+    } else {
+      *opcode_return |= AddressingModeField::encode(kRegMode);
+      *shift_return = g.UseRegister(m.right().node());
+    }
+    return true;
   }
-  return true;
+  return false;
 }
 
 
-static inline bool TryMatchASR(InstructionSelector* selector,
-                               InstructionCode* opcode_return, Node* node,
-                               InstructionOperand** value_return,
-                               InstructionOperand** shift_return) {
-  ArmOperandGenerator g(selector);
-  if (node->opcode() != IrOpcode::kWord32Sar) return false;
-  Int32BinopMatcher m(node);
-  *value_return = g.UseRegister(m.left().node());
-  if (m.right().IsInRange(1, 32)) {
-    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
-    *shift_return = g.UseImmediate(m.right().node());
-  } else {
-    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ASR_R);
-    *shift_return = g.UseRegister(m.right().node());
-  }
-  return true;
+bool TryMatchROR(InstructionSelector* selector, InstructionCode* opcode_return,
+                 Node* node, InstructionOperand** value_return,
+                 InstructionOperand** shift_return) {
+  return TryMatchShift<IrOpcode::kWord32Ror, 1, 31, kMode_Operand2_R_ROR_I,
+                       kMode_Operand2_R_ROR_R>(selector, opcode_return, node,
+                                               value_return, shift_return);
 }
 
 
-static inline bool TryMatchLSL(InstructionSelector* selector,
-                               InstructionCode* opcode_return, Node* node,
-                               InstructionOperand** value_return,
-                               InstructionOperand** shift_return) {
-  ArmOperandGenerator g(selector);
-  if (node->opcode() != IrOpcode::kWord32Shl) return false;
-  Int32BinopMatcher m(node);
-  *value_return = g.UseRegister(m.left().node());
-  if (m.right().IsInRange(0, 31)) {
-    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
-    *shift_return = g.UseImmediate(m.right().node());
-  } else {
-    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSL_R);
-    *shift_return = g.UseRegister(m.right().node());
-  }
-  return true;
+bool TryMatchASR(InstructionSelector* selector, InstructionCode* opcode_return,
+                 Node* node, InstructionOperand** value_return,
+                 InstructionOperand** shift_return) {
+  return TryMatchShift<IrOpcode::kWord32Sar, 1, 32, kMode_Operand2_R_ASR_I,
+                       kMode_Operand2_R_ASR_R>(selector, opcode_return, node,
+                                               value_return, shift_return);
 }
 
 
-static inline bool TryMatchLSR(InstructionSelector* selector,
-                               InstructionCode* opcode_return, Node* node,
-                               InstructionOperand** value_return,
-                               InstructionOperand** shift_return) {
-  ArmOperandGenerator g(selector);
-  if (node->opcode() != IrOpcode::kWord32Shr) return false;
-  Int32BinopMatcher m(node);
-  *value_return = g.UseRegister(m.left().node());
-  if (m.right().IsInRange(1, 32)) {
-    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
-    *shift_return = g.UseImmediate(m.right().node());
-  } else {
-    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSR_R);
-    *shift_return = g.UseRegister(m.right().node());
-  }
-  return true;
+bool TryMatchLSL(InstructionSelector* selector, InstructionCode* opcode_return,
+                 Node* node, InstructionOperand** value_return,
+                 InstructionOperand** shift_return) {
+  return TryMatchShift<IrOpcode::kWord32Shl, 0, 31, kMode_Operand2_R_LSL_I,
+                       kMode_Operand2_R_LSL_R>(selector, opcode_return, node,
+                                               value_return, shift_return);
 }
 
 
-static inline bool TryMatchShift(InstructionSelector* selector,
-                                 InstructionCode* opcode_return, Node* node,
-                                 InstructionOperand** value_return,
-                                 InstructionOperand** shift_return) {
+bool TryMatchLSR(InstructionSelector* selector, InstructionCode* opcode_return,
+                 Node* node, InstructionOperand** value_return,
+                 InstructionOperand** shift_return) {
+  return TryMatchShift<IrOpcode::kWord32Shr, 1, 32, kMode_Operand2_R_LSR_I,
+                       kMode_Operand2_R_LSR_R>(selector, opcode_return, node,
+                                               value_return, shift_return);
+}
+
+
+bool TryMatchShift(InstructionSelector* selector,
+                   InstructionCode* opcode_return, Node* node,
+                   InstructionOperand** value_return,
+                   InstructionOperand** shift_return) {
   return (
       TryMatchASR(selector, opcode_return, node, value_return, shift_return) ||
       TryMatchLSL(selector, opcode_return, node, value_return, shift_return) ||
@@ -206,11 +165,10 @@
 }
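
Note: TryMatchROR/ASR/LSL/LSR above are now thin wrappers around a single
TryMatchShift template. A minimal standalone sketch of the same folding idea,
using toy stand-in types rather than V8's selector classes:

    #include <cstdint>
    #include <cstdio>

    // Toy stand-ins; names are illustrative only.
    enum Opcode { kShl, kShr, kSar, kRor };
    struct Shift { Opcode op; int32_t amount; };

    // One template instantiated per shift kind, mirroring TryMatchShift's
    // shape: only the opcode and the legal immediate range differ between
    // LSL/LSR/ASR/ROR, so four near-identical functions collapse into one.
    template <Opcode kOpcode, int kImmMin, int kImmMax>
    bool TryMatchShift(const Shift& s, bool* use_immediate) {
      if (s.op != kOpcode) return false;
      *use_immediate = (s.amount >= kImmMin && s.amount <= kImmMax);
      return true;
    }

    int main() {
      bool imm = false;
      // LSL accepts immediates 0..31; LSR/ASR accept 1..32 (a shift of 0
      // would encode as LSL #0 on ARM).
      if (TryMatchShift<kShl, 0, 31>({kShl, 4}, &imm))
        std::printf("matched LSL, immediate=%d\n", imm);
      return 0;
    }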
 
 
-static inline bool TryMatchImmediateOrShift(InstructionSelector* selector,
-                                            InstructionCode* opcode_return,
-                                            Node* node,
-                                            size_t* input_count_return,
-                                            InstructionOperand** inputs) {
+bool TryMatchImmediateOrShift(InstructionSelector* selector,
+                              InstructionCode* opcode_return, Node* node,
+                              size_t* input_count_return,
+                              InstructionOperand** inputs) {
   ArmOperandGenerator g(selector);
   if (g.CanBeImmediate(node, *opcode_return)) {
     *opcode_return |= AddressingModeField::encode(kMode_Operand2_I);
@@ -226,9 +184,9 @@
 }
 
 
-static void VisitBinop(InstructionSelector* selector, Node* node,
-                       InstructionCode opcode, InstructionCode reverse_opcode,
-                       FlagsContinuation* cont) {
+void VisitBinop(InstructionSelector* selector, Node* node,
+                InstructionCode opcode, InstructionCode reverse_opcode,
+                FlagsContinuation* cont) {
   ArmOperandGenerator g(selector);
   Int32BinopMatcher m(node);
   InstructionOperand* inputs[5];
@@ -236,8 +194,20 @@
   InstructionOperand* outputs[2];
   size_t output_count = 0;
 
-  if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
-                               &input_count, &inputs[1])) {
+  if (m.left().node() == m.right().node()) {
+    // If both inputs refer to the same operand, enforce allocating a register
+    // for both of them to ensure that we don't end up generating code like
+    // this:
+    //
+    //   mov r0, r1, asr #16
+    //   adds r0, r0, r1, asr #16
+    //   bvs label
+    InstructionOperand* const input = g.UseRegister(m.left().node());
+    opcode |= AddressingModeField::encode(kMode_Operand2_R);
+    inputs[input_count++] = input;
+    inputs[input_count++] = input;
+  } else if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
+                                      &input_count, &inputs[1])) {
     inputs[0] = g.UseRegister(m.left().node());
     input_count++;
   } else if (TryMatchImmediateOrShift(selector, &reverse_opcode,
@@ -274,13 +244,16 @@
 }
 
 
-static void VisitBinop(InstructionSelector* selector, Node* node,
-                       InstructionCode opcode, InstructionCode reverse_opcode) {
+void VisitBinop(InstructionSelector* selector, Node* node,
+                InstructionCode opcode, InstructionCode reverse_opcode) {
   FlagsContinuation cont;
   VisitBinop(selector, node, opcode, reverse_opcode, &cont);
 }
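
The aliased-input branch in VisitBinop can be pictured with a small standalone
sketch (illustrative types, not V8's): when both inputs are the same node, one
shared register operand fills both input slots instead of letting the
right-hand side match as a shifted operand.

    #include <cstdio>

    // Splitting an aliased pair would make the register allocator
    // materialize the shifted value separately, producing the redundant
    // "mov r0, r1, asr #16" shown in the comment above.
    struct Inputs {
      int left_vreg;
      int right_vreg;
      bool right_is_shift;
    };

    Inputs SelectInputs(int left_node, int right_node) {
      if (left_node == right_node) {
        int shared = left_node;  // one UseRegister() result, used twice
        return {shared, shared, false};  // kMode_Operand2_R
      }
      return {left_node, right_node, true};  // right may match as a shift
    }

    int main() {
      Inputs in = SelectInputs(7, 7);
      std::printf("%d %d %d\n", in.left_vreg, in.right_vreg,
                  in.right_is_shift);
      return 0;
    }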
 
 
+}  // namespace
+
+
 void InstructionSelector::VisitLoad(Node* node) {
   MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
   MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
@@ -291,10 +264,10 @@
   ArchOpcode opcode;
   switch (rep) {
     case kRepFloat32:
-      opcode = kArmVldr32;
+      opcode = kArmVldrF32;
       break;
     case kRepFloat64:
-      opcode = kArmVldr64;
+      opcode = kArmVldrF64;
       break;
     case kRepBit:  // Fall through.
     case kRepWord8:
@@ -346,10 +319,10 @@
   ArchOpcode opcode;
   switch (rep) {
     case kRepFloat32:
-      opcode = kArmVstr32;
+      opcode = kArmVstrF32;
       break;
     case kRepFloat64:
-      opcode = kArmVstr64;
+      opcode = kArmVstrF64;
       break;
     case kRepBit:  // Fall through.
     case kRepWord8:
@@ -377,8 +350,86 @@
 }
 
 
-static inline void EmitBic(InstructionSelector* selector, Node* node,
-                           Node* left, Node* right) {
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  MachineType typ = TypeOf(OpParameter<MachineType>(node));
+  ArmOperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedLoadWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedLoadFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedLoadFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  InstructionOperand* offset_operand = g.UseRegister(offset);
+  InstructionOperand* length_operand = g.CanBeImmediate(length, kArmCmp)
+                                           ? g.UseImmediate(length)
+                                           : g.UseRegister(length);
+  Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
+       g.DefineAsRegister(node), offset_operand, length_operand,
+       g.UseRegister(buffer), offset_operand);
+}
+
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  ArmOperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  Node* const value = node->InputAt(3);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = kCheckedStoreWord8;
+      break;
+    case kRepWord16:
+      opcode = kCheckedStoreWord16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedStoreWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedStoreFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedStoreFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  InstructionOperand* offset_operand = g.UseRegister(offset);
+  InstructionOperand* length_operand = g.CanBeImmediate(length, kArmCmp)
+                                           ? g.UseImmediate(length)
+                                           : g.UseRegister(length);
+  Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), nullptr,
+       offset_operand, length_operand, g.UseRegister(value),
+       g.UseRegister(buffer), offset_operand);
+}
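
A checked access compiles down to one unsigned bounds compare plus the memory
operation, with the out-of-bounds path producing NaN (for floats) or zero (for
integers). A semantic sketch in plain C++, not V8 code, using an element index
for simplicity:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Out-of-bounds reads yield quiet NaN instead of trapping; the single
    // comparison corresponds to the "hs" branch in the generated code.
    double CheckedLoadFloat64(const double* buffer, uint32_t index,
                              uint32_t length) {
      if (index >= length) return std::nan("");
      return buffer[index];
    }

    int main() {
      double buf[2] = {1.5, 2.5};
      std::printf("%f %f\n", CheckedLoadFloat64(buf, 1, 2),
                  CheckedLoadFloat64(buf, 5, 2));  // 2.500000 nan
      return 0;
    }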
+
+
+namespace {
+
+void EmitBic(InstructionSelector* selector, Node* node, Node* left,
+             Node* right) {
   ArmOperandGenerator g(selector);
   InstructionCode opcode = kArmBic;
   InstructionOperand* value_operand;
@@ -394,6 +445,18 @@
 }
 
 
+void EmitUbfx(InstructionSelector* selector, Node* node, Node* left,
+              uint32_t lsb, uint32_t width) {
+  DCHECK_LE(1, width);
+  DCHECK_LE(width, 32 - lsb);
+  ArmOperandGenerator g(selector);
+  selector->Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(left),
+                 g.TempImmediate(lsb), g.TempImmediate(width));
+}
+
+}  // namespace
+
+
 void InstructionSelector::VisitWord32And(Node* node) {
   ArmOperandGenerator g(this);
   Int32BinopMatcher m(node);
@@ -411,33 +474,50 @@
       return;
     }
   }
-  if (IsSupported(ARMv7) && m.right().HasValue()) {
-    uint32_t value = m.right().Value();
+  if (m.right().HasValue()) {
+    uint32_t const value = m.right().Value();
     uint32_t width = base::bits::CountPopulation32(value);
     uint32_t msb = base::bits::CountLeadingZeros32(value);
-    if (width != 0 && msb + width == 32) {
+    // Try to interpret this AND as UBFX.
+    if (IsSupported(ARMv7) && width != 0 && msb + width == 32) {
       DCHECK_EQ(0, base::bits::CountTrailingZeros32(value));
       if (m.left().IsWord32Shr()) {
         Int32BinopMatcher mleft(m.left().node());
         if (mleft.right().IsInRange(0, 31)) {
-          Emit(kArmUbfx, g.DefineAsRegister(node),
-               g.UseRegister(mleft.left().node()),
-               g.UseImmediate(mleft.right().node()), g.TempImmediate(width));
-          return;
+          // UBFX cannot extract bits past the register size. However, since
+          // shifting the original value would have introduced some zeros, we
+          // can still use UBFX with a smaller mask; the remaining bits will
+          // be zeros.
+          uint32_t const lsb = mleft.right().Value();
+          return EmitUbfx(this, node, mleft.left().node(), lsb,
+                          std::min(width, 32 - lsb));
         }
       }
-      Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
-           g.TempImmediate(0), g.TempImmediate(width));
+      return EmitUbfx(this, node, m.left().node(), 0, width);
+    }
+    // Try to interpret this AND as BIC.
+    if (g.CanBeImmediate(~value)) {
+      Emit(kArmBic | AddressingModeField::encode(kMode_Operand2_I),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.TempImmediate(~value));
+      return;
+    }
+    // Try to interpret this AND as UXTH.
+    if (value == 0xffff) {
+      Emit(kArmUxth, g.DefineAsRegister(m.node()),
+           g.UseRegister(m.left().node()), g.TempImmediate(0));
       return;
     }
     // Try to interpret this AND as BFC.
-    width = 32 - width;
-    msb = base::bits::CountLeadingZeros32(~value);
-    uint32_t lsb = base::bits::CountTrailingZeros32(~value);
-    if (msb + width + lsb == 32) {
-      Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
-           g.TempImmediate(lsb), g.TempImmediate(width));
-      return;
+    if (IsSupported(ARMv7)) {
+      width = 32 - width;
+      msb = base::bits::CountLeadingZeros32(~value);
+      uint32_t lsb = base::bits::CountTrailingZeros32(~value);
+      if (msb + width + lsb == 32) {
+        Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+             g.TempImmediate(lsb), g.TempImmediate(width));
+        return;
+      }
     }
   }
   VisitBinop(this, node, kArmAnd, kArmAnd);
@@ -530,10 +610,7 @@
       uint32_t msb = base::bits::CountLeadingZeros32(value);
       if (msb + width + lsb == 32) {
         DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(value));
-        Emit(kArmUbfx, g.DefineAsRegister(node),
-             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
-             g.TempImmediate(width));
-        return;
+        return EmitUbfx(this, node, mleft.left().node(), lsb, width);
       }
     }
   }
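
The UBFX eligibility test above is pure bit arithmetic. A standalone check
with portable stand-ins for the base::bits helpers:

    #include <cstdint>
    #include <cstdio>

    // Portable stand-ins for base::bits (illustrative, not V8's helpers).
    int Popcount32(uint32_t v) {
      int n = 0;
      for (; v; v &= v - 1) ++n;
      return n;
    }
    int CountLeadingZeros32(uint32_t v) {
      int n = 32;
      for (; v; v >>= 1) --n;
      return n;
    }

    // An AND mask can become UBFX(lsb=0, width) exactly when its set bits
    // form one contiguous run starting at bit 0, i.e. width != 0 and
    // msb + width == 32.
    bool QualifiesForUbfx(uint32_t mask, uint32_t* width) {
      *width = Popcount32(mask);
      return *width != 0 && CountLeadingZeros32(mask) + *width == 32;
    }

    int main() {
      uint32_t w;
      std::printf("%d\n", QualifiesForUbfx(0x00ffffff, &w));  // 1, w == 24
      std::printf("%d\n", QualifiesForUbfx(0x00ffff00, &w));  // 0: run not
                                                              // at bit 0
      return 0;
    }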
@@ -542,6 +619,20 @@
 
 
 void InstructionSelector::VisitWord32Sar(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().Is(16) && m.right().Is(16)) {
+      Emit(kArmSxth, g.DefineAsRegister(node),
+           g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+      return;
+    } else if (mleft.right().Is(24) && m.right().Is(24)) {
+      Emit(kArmSxtb, g.DefineAsRegister(node),
+           g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+      return;
+    }
+  }
   VisitShift(this, node, TryMatchASR);
 }
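
The new Word32Sar patterns rely on (x << 16) >> 16 being a 16-bit sign
extension, which is what kArmSxth computes (and likewise 24/24 for kArmSxtb).
A standalone sketch:

    #include <cstdint>
    #include <cstdio>

    // Shift through uint32_t to keep the left shift well-defined for
    // negative inputs; the arithmetic right shift then replicates bit 15.
    int32_t SignExtend16(int32_t x) {
      return static_cast<int32_t>(static_cast<uint32_t>(x) << 16) >> 16;
    }

    int main() {
      std::printf("%d\n", SignExtend16(0x8000));      // -32768
      std::printf("%d\n", SignExtend16(0x12345678));  // 0x5678 == 22136
      return 0;
    }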
 
@@ -554,17 +645,113 @@
 void InstructionSelector::VisitInt32Add(Node* node) {
   ArmOperandGenerator g(this);
   Int32BinopMatcher m(node);
-  if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
-    Int32BinopMatcher mleft(m.left().node());
-    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mleft.left().node()),
-         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
-    return;
+  if (CanCover(node, m.left().node())) {
+    switch (m.left().opcode()) {
+      case IrOpcode::kInt32Mul: {
+        Int32BinopMatcher mleft(m.left().node());
+        Emit(kArmMla, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()),
+             g.UseRegister(mleft.right().node()),
+             g.UseRegister(m.right().node()));
+        return;
+      }
+      case IrOpcode::kInt32MulHigh: {
+        Int32BinopMatcher mleft(m.left().node());
+        Emit(kArmSmmla, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()),
+             g.UseRegister(mleft.right().node()),
+             g.UseRegister(m.right().node()));
+        return;
+      }
+      case IrOpcode::kWord32And: {
+        Int32BinopMatcher mleft(m.left().node());
+        if (mleft.right().Is(0xff)) {
+          Emit(kArmUxtab, g.DefineAsRegister(node),
+               g.UseRegister(m.right().node()),
+               g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+          return;
+        } else if (mleft.right().Is(0xffff)) {
+          Emit(kArmUxtah, g.DefineAsRegister(node),
+               g.UseRegister(m.right().node()),
+               g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+          return;
+        }
+        break;
+      }
+      case IrOpcode::kWord32Sar: {
+        Int32BinopMatcher mleft(m.left().node());
+        if (CanCover(mleft.node(), mleft.left().node()) &&
+            mleft.left().IsWord32Shl()) {
+          Int32BinopMatcher mleftleft(mleft.left().node());
+          if (mleft.right().Is(24) && mleftleft.right().Is(24)) {
+            Emit(kArmSxtab, g.DefineAsRegister(node),
+                 g.UseRegister(m.right().node()),
+                 g.UseRegister(mleftleft.left().node()), g.TempImmediate(0));
+            return;
+          } else if (mleft.right().Is(16) && mleftleft.right().Is(16)) {
+            Emit(kArmSxtah, g.DefineAsRegister(node),
+                 g.UseRegister(m.right().node()),
+                 g.UseRegister(mleftleft.left().node()), g.TempImmediate(0));
+            return;
+          }
+        }
+        break;
+      }
+      default:
+        break;
+    }
   }
-  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
-    Int32BinopMatcher mright(m.right().node());
-    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
-         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
-    return;
+  if (CanCover(node, m.right().node())) {
+    switch (m.right().opcode()) {
+      case IrOpcode::kInt32Mul: {
+        Int32BinopMatcher mright(m.right().node());
+        Emit(kArmMla, g.DefineAsRegister(node),
+             g.UseRegister(mright.left().node()),
+             g.UseRegister(mright.right().node()),
+             g.UseRegister(m.left().node()));
+        return;
+      }
+      case IrOpcode::kInt32MulHigh: {
+        Int32BinopMatcher mright(m.right().node());
+        Emit(kArmSmmla, g.DefineAsRegister(node),
+             g.UseRegister(mright.left().node()),
+             g.UseRegister(mright.right().node()),
+             g.UseRegister(m.left().node()));
+        return;
+      }
+      case IrOpcode::kWord32And: {
+        Int32BinopMatcher mright(m.right().node());
+        if (mright.right().Is(0xff)) {
+          Emit(kArmUxtab, g.DefineAsRegister(node),
+               g.UseRegister(m.left().node()),
+               g.UseRegister(mright.left().node()), g.TempImmediate(0));
+          return;
+        } else if (mright.right().Is(0xffff)) {
+          Emit(kArmUxtah, g.DefineAsRegister(node),
+               g.UseRegister(m.left().node()),
+               g.UseRegister(mright.left().node()), g.TempImmediate(0));
+          return;
+        }
+        break;
+      }
+      case IrOpcode::kWord32Sar: {
+        Int32BinopMatcher mright(m.right().node());
+        if (CanCover(mright.node(), mright.left().node()) &&
+            mright.left().IsWord32Shl()) {
+          Int32BinopMatcher mrightleft(mright.left().node());
+          if (mright.right().Is(24) && mrightleft.right().Is(24)) {
+            Emit(kArmSxtab, g.DefineAsRegister(node),
+                 g.UseRegister(m.left().node()),
+                 g.UseRegister(mrightleft.left().node()), g.TempImmediate(0));
+            return;
+          } else if (mright.right().Is(16) && mrightleft.right().Is(16)) {
+            Emit(kArmSxtah, g.DefineAsRegister(node),
+                 g.UseRegister(m.left().node()),
+                 g.UseRegister(mrightleft.left().node()), g.TempImmediate(0));
+            return;
+          }
+        }
+        break;
+      }
+      default:
+        break;
+    }
   }
   VisitBinop(this, node, kArmAdd, kArmAdd);
 }
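
The uxtab/sxtah combines fold a masked or sign-extended addend into the add
itself. Illustrative helpers (not V8 code) showing the computed semantics:

    #include <cstdint>
    #include <cstdio>

    // uxtab: a + zero-extended low byte of b, i.e. a + (b & 0xff).
    uint32_t Uxtab(uint32_t a, uint32_t b) { return a + (b & 0xff); }

    // sxtah: a + sign-extended low halfword of b, i.e. a + ((b << 16) >> 16).
    int32_t Sxtah(int32_t a, int32_t b) {
      return a + (static_cast<int32_t>(static_cast<uint32_t>(b) << 16) >> 16);
    }

    int main() {
      std::printf("%u\n", Uxtab(100, 0x1ff));   // 100 + 0xff == 355
      std::printf("%d\n", Sxtah(100, 0x8000));  // 100 - 32768 == -32668
      return 0;
    }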
@@ -609,6 +796,22 @@
 }
 
 
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmSmmul, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+       g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+  ArmOperandGenerator g(this);
+  InstructionOperand* outputs[] = {g.TempRegister(), g.DefineAsRegister(node)};
+  InstructionOperand* inputs[] = {g.UseRegister(node->InputAt(0)),
+                                  g.UseRegister(node->InputAt(1))};
+  Emit(kArmUmull, arraysize(outputs), outputs, arraysize(inputs), inputs);
+}
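
kArmUmull produces the full 64-bit product across two registers; Uint32MulHigh
only wants the top half, which is why the selector allocates a scratch output
for the low word it discards. The semantics, as a standalone function:

    #include <cstdint>
    #include <cstdio>

    // The upper 32 bits of the widening 32x32 -> 64 multiply.
    uint32_t Uint32MulHigh(uint32_t a, uint32_t b) {
      return static_cast<uint32_t>((static_cast<uint64_t>(a) * b) >> 32);
    }

    int main() {
      std::printf("%u\n", Uint32MulHigh(0x80000000u, 4));  // 2
      return 0;
    }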
+
+
 static void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
                     ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode,
                     InstructionOperand* result_operand,
@@ -646,7 +849,7 @@
 }
 
 
-void InstructionSelector::VisitInt32UDiv(Node* node) {
+void InstructionSelector::VisitUint32Div(Node* node) {
   VisitDiv(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
 }
 
@@ -678,11 +881,18 @@
 }
 
 
-void InstructionSelector::VisitInt32UMod(Node* node) {
+void InstructionSelector::VisitUint32Mod(Node* node) {
   VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
 }
 
 
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVcvtF64F32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
   ArmOperandGenerator g(this);
   Emit(kArmVcvtF64S32, g.DefineAsRegister(node),
@@ -711,18 +921,25 @@
 }
 
 
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVcvtF32F64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitFloat64Add(Node* node) {
   ArmOperandGenerator g(this);
-  Int32BinopMatcher m(node);
+  Float64BinopMatcher m(node);
   if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
-    Int32BinopMatcher mleft(m.left().node());
+    Float64BinopMatcher mleft(m.left().node());
     Emit(kArmVmlaF64, g.DefineSameAsFirst(node),
          g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
          g.UseRegister(mleft.right().node()));
     return;
   }
   if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
-    Int32BinopMatcher mright(m.right().node());
+    Float64BinopMatcher mright(m.right().node());
     Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
          g.UseRegister(mright.left().node()),
          g.UseRegister(mright.right().node()));
@@ -734,9 +951,14 @@
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   ArmOperandGenerator g(this);
-  Int32BinopMatcher m(node);
+  Float64BinopMatcher m(node);
+  if (m.left().IsMinusZero()) {
+    Emit(kArmVnegF64, g.DefineAsRegister(node),
+         g.UseRegister(m.right().node()));
+    return;
+  }
   if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
-    Int32BinopMatcher mright(m.right().node());
+    Float64BinopMatcher mright(m.right().node());
     Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
          g.UseRegister(mright.left().node()),
          g.UseRegister(mright.right().node()));
@@ -747,13 +969,7 @@
 
 
 void InstructionSelector::VisitFloat64Mul(Node* node) {
-  ArmOperandGenerator g(this);
-  Float64BinopMatcher m(node);
-  if (m.right().Is(-1.0)) {
-    Emit(kArmVnegF64, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
-  } else {
-    VisitRRRFloat64(this, kArmVmulF64, node);
-  }
+  VisitRRRFloat64(this, kArmVmulF64, node);
 }
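
Why -0.0 - x (but not 0.0 - x) can lower to a single vneg: subtracting from
negative zero flips the sign of every input, including both zeros. A quick
demonstration:

    #include <cstdio>

    // -0.0 - 0.0 == -0.0 and -0.0 - (-0.0) == +0.0, matching vneg exactly.
    // 0.0 - x would be wrong at zero: 0.0 - 0.0 is +0.0, but vneg(0.0) is
    // -0.0.
    int main() {
      double pz = 0.0, nz = -0.0;
      std::printf("%g %g\n", nz - pz, nz - nz);  // -0 0
      std::printf("%g %g\n", pz - pz, -pz);      // 0 -0
      return 0;
    }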
 
 
@@ -775,15 +991,38 @@
 }
 
 
-void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
-                                    BasicBlock* deoptimization) {
+void InstructionSelector::VisitFloat64Floor(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  VisitRRFloat64(this, kArmVfloorF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Ceil(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  VisitRRFloat64(this, kArmVceilF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  VisitRRFloat64(this, kArmVroundTruncateF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  VisitRRFloat64(this, kArmVroundTiesAwayF64, node);
+}
+
+
+void InstructionSelector::VisitCall(Node* node) {
   ArmOperandGenerator g(this);
-  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+  const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
 
   FrameStateDescriptor* frame_state_descriptor = NULL;
   if (descriptor->NeedsFrameState()) {
     frame_state_descriptor =
-        GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
+        GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
   }
 
   CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
@@ -792,7 +1031,7 @@
   // TODO(turbofan): on ARM64 it's probably better to use the code object in a
   // register if there are multiple uses of it. Improve constant pool and the
   // heuristics in the register allocator for where to emit constants.
-  InitializeCallBuffer(call, &buffer, true, false);
+  InitializeCallBuffer(node, &buffer, true, false);
 
   // TODO(dcarney): might be possible to use claim/poke instead
   // Push any stack arguments.
@@ -818,34 +1057,39 @@
   opcode |= MiscField::encode(descriptor->flags());
 
   // Emit the call instruction.
+  InstructionOperand** first_output =
+      buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
   Instruction* call_instr =
-      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+      Emit(opcode, buffer.outputs.size(), first_output,
            buffer.instruction_args.size(), &buffer.instruction_args.front());
-
   call_instr->MarkAsCall();
-  if (deoptimization != NULL) {
-    DCHECK(continuation != NULL);
-    call_instr->MarkAsControl();
+}
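
The new first_output guard matters because taking &v.front() on an empty
std::vector is undefined behavior, so a call with no outputs must pass a null
pointer instead. A minimal illustration:

    #include <cstdio>
    #include <vector>

    // Guarded access: never call front() on an empty vector.
    int* FirstOrNull(std::vector<int>& v) {
      return v.empty() ? nullptr : &v.front();
    }

    int main() {
      std::vector<int> none, some = {42};
      std::printf("%p %d\n", static_cast<void*>(FirstOrNull(none)),
                  *FirstOrNull(some));  // (nil) 42
      return 0;
    }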
+
+
+namespace {
+
+// Shared routine for multiple float compare operations.
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+                         FlagsContinuation* cont) {
+  ArmOperandGenerator g(selector);
+  Float64BinopMatcher m(node);
+  if (cont->IsBranch()) {
+    selector->Emit(cont->Encode(kArmVcmpF64), nullptr,
+                   g.UseRegister(m.left().node()),
+                   g.UseRegister(m.right().node()), g.Label(cont->true_block()),
+                   g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    DCHECK(cont->IsSet());
+    selector->Emit(
+        cont->Encode(kArmVcmpF64), g.DefineAsRegister(cont->result()),
+        g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
   }
 }
 
 
-void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
-                                                    FlagsContinuation* cont) {
-  VisitBinop(this, node, kArmAdd, kArmAdd, cont);
-}
-
-
-void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
-                                                    FlagsContinuation* cont) {
-  VisitBinop(this, node, kArmSub, kArmRsb, cont);
-}
-
-
-// Shared routine for multiple compare operations.
-static void VisitWordCompare(InstructionSelector* selector, Node* node,
-                             InstructionCode opcode, FlagsContinuation* cont,
-                             bool commutative) {
+// Shared routine for multiple word compare operations.
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+                      InstructionCode opcode, FlagsContinuation* cont) {
   ArmOperandGenerator g(selector);
   Int32BinopMatcher m(node);
   InstructionOperand* inputs[5];
@@ -859,7 +1103,7 @@
     input_count++;
   } else if (TryMatchImmediateOrShift(selector, &opcode, m.left().node(),
                                       &input_count, &inputs[1])) {
-    if (!commutative) cont->Commute();
+    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
     inputs[0] = g.UseRegister(m.right().node());
     input_count++;
   } else {
@@ -886,63 +1130,211 @@
 }
 
 
-void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
-  switch (node->opcode()) {
-    case IrOpcode::kInt32Add:
-      return VisitWordCompare(this, node, kArmCmn, cont, true);
-    case IrOpcode::kInt32Sub:
-      return VisitWordCompare(this, node, kArmCmp, cont, false);
-    case IrOpcode::kWord32And:
-      return VisitWordCompare(this, node, kArmTst, cont, true);
-    case IrOpcode::kWord32Or:
-      return VisitBinop(this, node, kArmOrr, kArmOrr, cont);
-    case IrOpcode::kWord32Xor:
-      return VisitWordCompare(this, node, kArmTeq, cont, true);
-    case IrOpcode::kWord32Sar:
-      return VisitShift(this, node, TryMatchASR, cont);
-    case IrOpcode::kWord32Shl:
-      return VisitShift(this, node, TryMatchLSL, cont);
-    case IrOpcode::kWord32Shr:
-      return VisitShift(this, node, TryMatchLSR, cont);
-    case IrOpcode::kWord32Ror:
-      return VisitShift(this, node, TryMatchROR, cont);
-    default:
-      break;
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+                      FlagsContinuation* cont) {
+  VisitWordCompare(selector, node, kArmCmp, cont);
+}
+
+
+// Shared routine for word comparisons against zero.
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+                          Node* value, FlagsContinuation* cont) {
+  while (selector->CanCover(user, value)) {
+    switch (value->opcode()) {
+      case IrOpcode::kWord32Equal: {
+        // Combine with comparisons against 0 by simply inverting the
+        // continuation.
+        Int32BinopMatcher m(value);
+        if (m.right().Is(0)) {
+          user = value;
+          value = m.left().node();
+          cont->Negate();
+          continue;
+        }
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitWordCompare(selector, value, cont);
+      }
+      case IrOpcode::kInt32LessThan:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWordCompare(selector, value, cont);
+      case IrOpcode::kInt32LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWordCompare(selector, value, cont);
+      case IrOpcode::kUint32LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWordCompare(selector, value, cont);
+      case IrOpcode::kUint32LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWordCompare(selector, value, cont);
+      case IrOpcode::kFloat64Equal:
+        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kFloat64LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnorderedLessThan);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kFloat64LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kProjection:
+        // Check if this is the overflow output projection of an
+        // <Operation>WithOverflow node.
+        if (OpParameter<size_t>(value) == 1u) {
+          // We cannot combine the <Operation>WithOverflow with this branch
+          // unless the 0th projection (the use of the actual value of the
+          // <Operation>) is either NULL, which means there's no use of the
+          // actual value, or was already defined, which means it is scheduled
+          // *AFTER* this branch.
+          Node* const node = value->InputAt(0);
+          Node* const result = node->FindProjection(0);
+          if (!result || selector->IsDefined(result)) {
+            switch (node->opcode()) {
+              case IrOpcode::kInt32AddWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kArmAdd, kArmAdd, cont);
+              case IrOpcode::kInt32SubWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kArmSub, kArmRsb, cont);
+              default:
+                break;
+            }
+          }
+        }
+        break;
+      case IrOpcode::kInt32Add:
+        return VisitWordCompare(selector, value, kArmCmn, cont);
+      case IrOpcode::kInt32Sub:
+        return VisitWordCompare(selector, value, kArmCmp, cont);
+      case IrOpcode::kWord32And:
+        return VisitWordCompare(selector, value, kArmTst, cont);
+      case IrOpcode::kWord32Or:
+        return VisitBinop(selector, value, kArmOrr, kArmOrr, cont);
+      case IrOpcode::kWord32Xor:
+        return VisitWordCompare(selector, value, kArmTeq, cont);
+      case IrOpcode::kWord32Sar:
+        return VisitShift(selector, value, TryMatchASR, cont);
+      case IrOpcode::kWord32Shl:
+        return VisitShift(selector, value, TryMatchLSL, cont);
+      case IrOpcode::kWord32Shr:
+        return VisitShift(selector, value, TryMatchLSR, cont);
+      case IrOpcode::kWord32Ror:
+        return VisitShift(selector, value, TryMatchROR, cont);
+      default:
+        break;
+    }
+    break;
   }
 
-  ArmOperandGenerator g(this);
-  InstructionCode opcode =
+  // The continuation could not be combined with a compare; emit a compare
+  // against zero instead.
+  ArmOperandGenerator g(selector);
+  InstructionCode const opcode =
       cont->Encode(kArmTst) | AddressingModeField::encode(kMode_Operand2_R);
+  InstructionOperand* const value_operand = g.UseRegister(value);
   if (cont->IsBranch()) {
-    Emit(opcode, NULL, g.UseRegister(node), g.UseRegister(node),
-         g.Label(cont->true_block()),
-         g.Label(cont->false_block()))->MarkAsControl();
+    selector->Emit(opcode, nullptr, value_operand, value_operand,
+                   g.Label(cont->true_block()),
+                   g.Label(cont->false_block()))->MarkAsControl();
   } else {
-    Emit(opcode, g.DefineAsRegister(cont->result()), g.UseRegister(node),
-         g.UseRegister(node));
+    selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
+                   value_operand);
   }
 }
 
+}  // namespace
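
The combining loop in VisitWordCompareZero peels "== 0" wrappers by inverting
the continuation and retrying on the input. A toy sketch of that inversion
(illustrative types, not V8's):

    #include <cstdio>

    // Branching on Word32Equal(x, 0) is branching on !x, so negate the
    // condition and recurse on x; any number of wrappers can be peeled.
    enum Cond { kEq, kNe };
    struct Node { bool is_equal_zero; Node* input; };

    Cond CombineCompareZero(Node* value, Cond cond) {
      while (value->is_equal_zero) {
        cond = (cond == kEq) ? kNe : kEq;  // cont->Negate()
        value = value->input;
      }
      return cond;
    }

    int main() {
      Node x = {false, nullptr};
      Node eq1 = {true, &x};    // x == 0
      Node eq2 = {true, &eq1};  // (x == 0) == 0, i.e. x != 0
      std::printf("%d\n", CombineCompareZero(&eq2, kNe));  // kNe again (1)
      return 0;
    }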
 
-void InstructionSelector::VisitWord32Compare(Node* node,
-                                             FlagsContinuation* cont) {
-  VisitWordCompare(this, node, kArmCmp, cont, false);
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+                                      BasicBlock* fbranch) {
+  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
 }
 
 
-void InstructionSelector::VisitFloat64Compare(Node* node,
-                                              FlagsContinuation* cont) {
-  ArmOperandGenerator g(this);
-  Float64BinopMatcher m(node);
-  if (cont->IsBranch()) {
-    Emit(cont->Encode(kArmVcmpF64), NULL, g.UseRegister(m.left().node()),
-         g.UseRegister(m.right().node()), g.Label(cont->true_block()),
-         g.Label(cont->false_block()))->MarkAsControl();
-  } else {
-    DCHECK(cont->IsSet());
-    Emit(cont->Encode(kArmVcmpF64), g.DefineAsRegister(cont->result()),
-         g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+  FlagsContinuation cont(kEqual, node);
+  Int32BinopMatcher m(node);
+  if (m.right().Is(0)) {
+    return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
   }
+  VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+  FlagsContinuation cont(kSignedLessThan, node);
+  VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThan, node);
+  VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+  if (Node* ovf = node->FindProjection(1)) {
+    FlagsContinuation cont(kOverflow, ovf);
+    return VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+  if (Node* ovf = node->FindProjection(1)) {
+    FlagsContinuation cont(kOverflow, ovf);
+    return VisitBinop(this, node, kArmSub, kArmRsb, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop(this, node, kArmSub, kArmRsb, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+  FlagsContinuation cont(kUnorderedEqual, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+  FlagsContinuation cont(kUnorderedLessThan, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+  MachineOperatorBuilder::Flags flags =
+      MachineOperatorBuilder::kInt32DivIsSafe |
+      MachineOperatorBuilder::kUint32DivIsSafe;
+
+  if (CpuFeatures::IsSupported(ARMv8)) {
+    flags |= MachineOperatorBuilder::kFloat64Floor |
+             MachineOperatorBuilder::kFloat64Ceil |
+             MachineOperatorBuilder::kFloat64RoundTruncate |
+             MachineOperatorBuilder::kFloat64RoundTiesAway;
+  }
+  return flags;
 }
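
The supported-operator flags form a bitmask assembled once from CPU features;
the ARMv8 check gates the vrint-based rounding operators. A standalone sketch
with made-up flag values:

    #include <cstdio>

    // Illustrative flag values only; V8's real enum lives in
    // MachineOperatorBuilder.
    enum Flags : unsigned {
      kNoFlags = 0,
      kFloat64Floor = 1u << 0,
      kFloat64Ceil = 1u << 1,
      kFloat64RoundTruncate = 1u << 2,
      kFloat64RoundTiesAway = 1u << 3,
    };

    Flags SupportedFlags(bool has_armv8) {
      unsigned flags = kNoFlags;
      if (has_armv8) {
        // ARMv8 adds the vrint family, making the rounding operators legal.
        flags |= kFloat64Floor | kFloat64Ceil | kFloat64RoundTruncate |
                 kFloat64RoundTiesAway;
      }
      return static_cast<Flags>(flags);
    }

    int main() {
      std::printf("%u %u\n", SupportedFlags(true), SupportedFlags(false));
      return 0;  // prints: 15 0
    }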
 
 }  // namespace compiler
diff --git a/src/compiler/arm/linkage-arm.cc b/src/compiler/arm/linkage-arm.cc
index 6673a47..3fca76f 100644
--- a/src/compiler/arm/linkage-arm.cc
+++ b/src/compiler/arm/linkage-arm.cc
@@ -35,8 +35,9 @@
 
 typedef LinkageHelper<ArmLinkageHelperTraits> LH;
 
-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
-  return LH::GetJSCallDescriptor(zone, parameter_count);
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
+                                             CallDescriptor::Flags flags) {
+  return LH::GetJSCallDescriptor(zone, parameter_count, flags);
 }
 
 
@@ -49,10 +50,10 @@
 
 
 CallDescriptor* Linkage::GetStubCallDescriptor(
-    CallInterfaceDescriptor descriptor, int stack_parameter_count,
-    CallDescriptor::Flags flags, Zone* zone) {
+    const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
+    CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
   return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
-                                   flags);
+                                   flags, properties);
 }
 
 
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index 31c53d3..e025236 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -24,6 +24,18 @@
   Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
       : InstructionOperandConverter(gen, instr) {}
 
+  DoubleRegister InputFloat32Register(int index) {
+    return InputDoubleRegister(index).S();
+  }
+
+  DoubleRegister InputFloat64Register(int index) {
+    return InputDoubleRegister(index);
+  }
+
+  DoubleRegister OutputFloat32Register() { return OutputDoubleRegister().S(); }
+
+  DoubleRegister OutputFloat64Register() { return OutputDoubleRegister(); }
+
   Register InputRegister32(int index) {
     return ToRegister(instr_->InputAt(index)).W();
   }
@@ -46,26 +58,68 @@
 
   Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }
 
+  Operand InputOperand2_32(int index) {
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_None:
+        return InputOperand32(index);
+      case kMode_Operand2_R_LSL_I:
+        return Operand(InputRegister32(index), LSL, InputInt5(index + 1));
+      case kMode_Operand2_R_LSR_I:
+        return Operand(InputRegister32(index), LSR, InputInt5(index + 1));
+      case kMode_Operand2_R_ASR_I:
+        return Operand(InputRegister32(index), ASR, InputInt5(index + 1));
+      case kMode_Operand2_R_ROR_I:
+        return Operand(InputRegister32(index), ROR, InputInt5(index + 1));
+      case kMode_MRI:
+      case kMode_MRR:
+        break;
+    }
+    UNREACHABLE();
+    return Operand(-1);
+  }
+
+  Operand InputOperand2_64(int index) {
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_None:
+        return InputOperand64(index);
+      case kMode_Operand2_R_LSL_I:
+        return Operand(InputRegister64(index), LSL, InputInt6(index + 1));
+      case kMode_Operand2_R_LSR_I:
+        return Operand(InputRegister64(index), LSR, InputInt6(index + 1));
+      case kMode_Operand2_R_ASR_I:
+        return Operand(InputRegister64(index), ASR, InputInt6(index + 1));
+      case kMode_Operand2_R_ROR_I:
+        return Operand(InputRegister64(index), ROR, InputInt6(index + 1));
+      case kMode_MRI:
+      case kMode_MRR:
+        break;
+    }
+    UNREACHABLE();
+    return Operand(-1);
+  }
+
   MemOperand MemoryOperand(int* first_index) {
     const int index = *first_index;
     switch (AddressingModeField::decode(instr_->opcode())) {
       case kMode_None:
+      case kMode_Operand2_R_LSL_I:
+      case kMode_Operand2_R_LSR_I:
+      case kMode_Operand2_R_ASR_I:
+      case kMode_Operand2_R_ROR_I:
         break;
       case kMode_MRI:
         *first_index += 2;
         return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
       case kMode_MRR:
         *first_index += 2;
-        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
-                          SXTW);
+        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
     }
     UNREACHABLE();
     return MemOperand(no_reg);
   }
 
-  MemOperand MemoryOperand() {
-    int index = 0;
-    return MemoryOperand(&index);
+  MemOperand MemoryOperand(int first_index = 0) {
+    return MemoryOperand(&first_index);
   }
 
   Operand ToOperand(InstructionOperand* op) {
@@ -89,6 +143,9 @@
         return Operand(constant.ToInt32());
       case Constant::kInt64:
         return Operand(constant.ToInt64());
+      case Constant::kFloat32:
+        return Operand(
+            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
       case Constant::kFloat64:
         return Operand(
             isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
@@ -96,6 +153,9 @@
         return Operand(constant.ToExternalReference());
       case Constant::kHeapObject:
         return Operand(constant.ToHeapObject());
+      case Constant::kRpoNumber:
+        UNREACHABLE();  // TODO(dcarney): RPO immediates on arm64.
+        break;
     }
     UNREACHABLE();
     return Operand(-1);
@@ -114,6 +174,106 @@
 };
 
 
+namespace {
+
+class OutOfLineLoadNaN32 FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadNaN32(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL {
+    __ Fmov(result_, std::numeric_limits<float>::quiet_NaN());
+  }
+
+ private:
+  DoubleRegister const result_;
+};
+
+
+class OutOfLineLoadNaN64 FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadNaN64(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL {
+    __ Fmov(result_, std::numeric_limits<double>::quiet_NaN());
+  }
+
+ private:
+  DoubleRegister const result_;
+};
+
+
+class OutOfLineLoadZero FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadZero(CodeGenerator* gen, Register result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL { __ Mov(result_, 0); }
+
+ private:
+  Register const result_;
+};
+
+}  // namespace
+
+
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(width)                         \
+  do {                                                             \
+    auto result = i.OutputFloat##width##Register();                \
+    auto buffer = i.InputRegister(0);                              \
+    auto offset = i.InputRegister32(1);                            \
+    auto length = i.InputOperand32(2);                             \
+    __ Cmp(offset, length);                                        \
+    auto ool = new (zone()) OutOfLineLoadNaN##width(this, result); \
+    __ B(hs, ool->entry());                                        \
+    __ Ldr(result, MemOperand(buffer, offset, UXTW));              \
+    __ Bind(ool->exit());                                          \
+  } while (0)
+
+
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)             \
+  do {                                                       \
+    auto result = i.OutputRegister32();                      \
+    auto buffer = i.InputRegister(0);                        \
+    auto offset = i.InputRegister32(1);                      \
+    auto length = i.InputOperand32(2);                       \
+    __ Cmp(offset, length);                                  \
+    auto ool = new (zone()) OutOfLineLoadZero(this, result); \
+    __ B(hs, ool->entry());                                  \
+    __ asm_instr(result, MemOperand(buffer, offset, UXTW));  \
+    __ Bind(ool->exit());                                    \
+  } while (0)
+
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(width)          \
+  do {                                               \
+    auto buffer = i.InputRegister(0);                \
+    auto offset = i.InputRegister32(1);              \
+    auto length = i.InputOperand32(2);               \
+    auto value = i.InputFloat##width##Register(3);   \
+    __ Cmp(offset, length);                          \
+    Label done;                                      \
+    __ B(hs, &done);                                 \
+    __ Str(value, MemOperand(buffer, offset, UXTW)); \
+    __ Bind(&done);                                  \
+  } while (0)
+
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)          \
+  do {                                                     \
+    auto buffer = i.InputRegister(0);                      \
+    auto offset = i.InputRegister32(1);                    \
+    auto length = i.InputOperand32(2);                     \
+    auto value = i.InputRegister32(3);                     \
+    __ Cmp(offset, length);                                \
+    Label done;                                            \
+    __ B(hs, &done);                                       \
+    __ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
+    __ Bind(&done);                                        \
+  } while (0)
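
All four checked-access macros guard the access with a single unsigned
comparison ("hs" is unsigned greater-or-equal): reinterpreting a negative
offset as an unsigned value makes it enormous, so one compare rejects both
negative and too-large offsets. A standalone illustration:

    #include <cstdint>
    #include <cstdio>

    // One unsigned compare covers both failure modes.
    bool InBounds(int32_t offset, uint32_t length) {
      return static_cast<uint32_t>(offset) < length;
    }

    int main() {
      std::printf("%d %d %d\n", InBounds(3, 8), InBounds(-1, 8),
                  InBounds(9, 8));  // 1 0 0
      return 0;
    }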
+
+
 #define ASSEMBLE_SHIFT(asm_instr, width)                                       \
   do {                                                                         \
     if (instr->InputAt(1)->IsRegister()) {                                     \
@@ -123,7 +283,7 @@
       int64_t imm = i.InputOperand##width(1).immediate().value();              \
       __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), imm); \
     }                                                                          \
-  } while (0);
+  } while (0)
 
 
 // Assembles an instruction after register allocation, producing machine code.
@@ -161,7 +321,7 @@
       break;
     }
     case kArchJmp:
-      __ B(code_->GetLabel(i.InputBlock(0)));
+      AssembleArchJump(i.InputRpo(0));
       break;
     case kArchNop:
       // don't emit code for nops.
@@ -169,25 +329,47 @@
     case kArchRet:
       AssembleReturn();
       break;
+    case kArchStackPointer:
+      __ mov(i.OutputRegister(), masm()->StackPointer());
+      break;
     case kArchTruncateDoubleToI:
       __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
       break;
+    case kArm64Float64Ceil:
+      __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kArm64Float64Floor:
+      __ Frintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kArm64Float64RoundTruncate:
+      __ Frintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kArm64Float64RoundTiesAway:
+      __ Frinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
     case kArm64Add:
-      __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
     case kArm64Add32:
       if (FlagsModeField::decode(opcode) != kFlags_none) {
         __ Adds(i.OutputRegister32(), i.InputRegister32(0),
-                i.InputOperand32(1));
+                i.InputOperand2_32(1));
       } else {
-        __ Add(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+        __ Add(i.OutputRegister32(), i.InputRegister32(0),
+               i.InputOperand2_32(1));
       }
       break;
     case kArm64And:
-      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
     case kArm64And32:
-      __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+      break;
+    case kArm64Bic:
+      __ Bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+      break;
+    case kArm64Bic32:
+      __ Bic(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
       break;
     case kArm64Mul:
       __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
@@ -195,6 +377,34 @@
     case kArm64Mul32:
       __ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
       break;
+    case kArm64Smull:
+      __ Smull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
+      break;
+    case kArm64Umull:
+      __ Umull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
+      break;
+    case kArm64Madd:
+      __ Madd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+              i.InputRegister(2));
+      break;
+    case kArm64Madd32:
+      __ Madd(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
+              i.InputRegister32(2));
+      break;
+    case kArm64Msub:
+      __ Msub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+              i.InputRegister(2));
+      break;
+    case kArm64Msub32:
+      __ Msub(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
+              i.InputRegister32(2));
+      break;
+    case kArm64Mneg:
+      __ Mneg(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      break;
+    case kArm64Mneg32:
+      __ Mneg(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
+      break;
     case kArm64Idiv:
       __ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
       break;
@@ -251,44 +461,57 @@
       __ Neg(i.OutputRegister32(), i.InputOperand32(0));
       break;
     case kArm64Or:
-      __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
     case kArm64Or32:
-      __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
       break;
-    case kArm64Xor:
-      __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+    case kArm64Orn:
+      __ Orn(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
-    case kArm64Xor32:
-      __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+    case kArm64Orn32:
+      __ Orn(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+      break;
+    case kArm64Eor:
+      __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+      break;
+    case kArm64Eor32:
+      __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+      break;
+    case kArm64Eon:
+      __ Eon(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+      break;
+    case kArm64Eon32:
+      __ Eon(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
       break;
     case kArm64Sub:
-      __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
     case kArm64Sub32:
       if (FlagsModeField::decode(opcode) != kFlags_none) {
         __ Subs(i.OutputRegister32(), i.InputRegister32(0),
-                i.InputOperand32(1));
+                i.InputOperand2_32(1));
       } else {
-        __ Sub(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+        __ Sub(i.OutputRegister32(), i.InputRegister32(0),
+               i.InputOperand2_32(1));
       }
       break;
-    case kArm64Shl:
+    case kArm64Lsl:
       ASSEMBLE_SHIFT(Lsl, 64);
       break;
-    case kArm64Shl32:
+    case kArm64Lsl32:
       ASSEMBLE_SHIFT(Lsl, 32);
       break;
-    case kArm64Shr:
+    case kArm64Lsr:
       ASSEMBLE_SHIFT(Lsr, 64);
       break;
-    case kArm64Shr32:
+    case kArm64Lsr32:
       ASSEMBLE_SHIFT(Lsr, 32);
       break;
-    case kArm64Sar:
+    case kArm64Asr:
       ASSEMBLE_SHIFT(Asr, 64);
       break;
-    case kArm64Sar32:
+    case kArm64Asr32:
       ASSEMBLE_SHIFT(Asr, 32);
       break;
     case kArm64Ror:
@@ -300,9 +523,30 @@
     case kArm64Mov32:
       __ Mov(i.OutputRegister32(), i.InputRegister32(0));
       break;
+    case kArm64Sxtb32:
+      __ Sxtb(i.OutputRegister32(), i.InputRegister32(0));
+      break;
+    case kArm64Sxth32:
+      __ Sxth(i.OutputRegister32(), i.InputRegister32(0));
+      break;
     case kArm64Sxtw:
       __ Sxtw(i.OutputRegister(), i.InputRegister32(0));
       break;
+    case kArm64Ubfx:
+      __ Ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+              i.InputInt8(2));
+      break;
+    case kArm64Ubfx32:
+      __ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt8(1),
+              i.InputInt8(2));
+      break;
+    case kArm64TestAndBranch32:
+    case kArm64TestAndBranch:
+      // Pseudo instructions turned into tbz/tbnz in AssembleArchBranch.
+      break;
+    case kArm64CompareAndBranch32:
+      // Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
+      break;
     case kArm64Claim: {
       int words = MiscField::decode(instr->opcode());
       __ Claim(words);
@@ -376,6 +620,12 @@
     case kArm64Float64Sqrt:
       __ Fsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
+    case kArm64Float32ToFloat64:
+      __ Fcvt(i.OutputDoubleRegister(), i.InputDoubleRegister(0).S());
+      break;
+    case kArm64Float64ToFloat32:
+      __ Fcvt(i.OutputDoubleRegister().S(), i.InputDoubleRegister(0));
+      break;
     case kArm64Float64ToInt32:
       __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
       break;
@@ -418,20 +668,12 @@
     case kArm64Str:
       __ Str(i.InputRegister(2), i.MemoryOperand());
       break;
-    case kArm64LdrS: {
-      UseScratchRegisterScope scope(masm());
-      FPRegister scratch = scope.AcquireS();
-      __ Ldr(scratch, i.MemoryOperand());
-      __ Fcvt(i.OutputDoubleRegister(), scratch);
+    case kArm64LdrS:
+      __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
       break;
-    }
-    case kArm64StrS: {
-      UseScratchRegisterScope scope(masm());
-      FPRegister scratch = scope.AcquireS();
-      __ Fcvt(scratch, i.InputDoubleRegister(2));
-      __ Str(scratch, i.MemoryOperand());
+    case kArm64StrS:
+      __ Str(i.InputDoubleRegister(2).S(), i.MemoryOperand());
       break;
-    }
     case kArm64LdrD:
       __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
       break;
@@ -444,9 +686,8 @@
       Register value = i.InputRegister(2);
       __ Add(index, object, Operand(index, SXTW));
       __ Str(value, MemOperand(index));
-      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
-                                ? kSaveFPRegs
-                                : kDontSaveFPRegs;
+      SaveFPRegsMode mode =
+          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
       // TODO(dcarney): we shouldn't test write barriers from c calls.
       LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
       UseScratchRegisterScope scope(masm());
@@ -462,81 +703,154 @@
       }
       break;
     }
+    case kCheckedLoadInt8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsb);
+      break;
+    case kCheckedLoadUint8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrb);
+      break;
+    case kCheckedLoadInt16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsh);
+      break;
+    case kCheckedLoadUint16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrh);
+      break;
+    case kCheckedLoadWord32:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldr);
+      break;
+    case kCheckedLoadFloat32:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(32);
+      break;
+    case kCheckedLoadFloat64:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(64);
+      break;
+    case kCheckedStoreWord8:
+      ASSEMBLE_CHECKED_STORE_INTEGER(Strb);
+      break;
+    case kCheckedStoreWord16:
+      ASSEMBLE_CHECKED_STORE_INTEGER(Strh);
+      break;
+    case kCheckedStoreWord32:
+      ASSEMBLE_CHECKED_STORE_INTEGER(Str);
+      break;
+    case kCheckedStoreFloat32:
+      ASSEMBLE_CHECKED_STORE_FLOAT(32);
+      break;
+    case kCheckedStoreFloat64:
+      ASSEMBLE_CHECKED_STORE_FLOAT(64);
+      break;
   }
 }
 
 
 // Assemble branches after this instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr,
-                                       FlagsCondition condition) {
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
   Arm64OperandConverter i(this, instr);
-  Label done;
+  Label* tlabel = branch->true_label;
+  Label* flabel = branch->false_label;
+  FlagsCondition condition = branch->condition;
+  ArchOpcode opcode = instr->arch_opcode();
 
-  // Emit a branch. The true and false targets are always the last two inputs
-  // to the instruction.
-  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
-  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
-  bool fallthru = IsNextInAssemblyOrder(fblock);
-  Label* tlabel = code()->GetLabel(tblock);
-  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
-  switch (condition) {
-    case kUnorderedEqual:
-      __ B(vs, flabel);
-    // Fall through.
-    case kEqual:
-      __ B(eq, tlabel);
-      break;
-    case kUnorderedNotEqual:
-      __ B(vs, tlabel);
-    // Fall through.
-    case kNotEqual:
-      __ B(ne, tlabel);
-      break;
-    case kSignedLessThan:
-      __ B(lt, tlabel);
-      break;
-    case kSignedGreaterThanOrEqual:
-      __ B(ge, tlabel);
-      break;
-    case kSignedLessThanOrEqual:
-      __ B(le, tlabel);
-      break;
-    case kSignedGreaterThan:
-      __ B(gt, tlabel);
-      break;
-    case kUnorderedLessThan:
-      __ B(vs, flabel);
-    // Fall through.
-    case kUnsignedLessThan:
-      __ B(lo, tlabel);
-      break;
-    case kUnorderedGreaterThanOrEqual:
-      __ B(vs, tlabel);
-    // Fall through.
-    case kUnsignedGreaterThanOrEqual:
-      __ B(hs, tlabel);
-      break;
-    case kUnorderedLessThanOrEqual:
-      __ B(vs, flabel);
-    // Fall through.
-    case kUnsignedLessThanOrEqual:
-      __ B(ls, tlabel);
-      break;
-    case kUnorderedGreaterThan:
-      __ B(vs, tlabel);
-    // Fall through.
-    case kUnsignedGreaterThan:
-      __ B(hi, tlabel);
-      break;
-    case kOverflow:
-      __ B(vs, tlabel);
-      break;
-    case kNotOverflow:
-      __ B(vc, tlabel);
-      break;
+  if (opcode == kArm64CompareAndBranch32) {
+    switch (condition) {
+      case kEqual:
+        __ Cbz(i.InputRegister32(0), tlabel);
+        break;
+      case kNotEqual:
+        __ Cbnz(i.InputRegister32(0), tlabel);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else if (opcode == kArm64TestAndBranch32) {
+    switch (condition) {
+      case kEqual:
+        __ Tbz(i.InputRegister32(0), i.InputInt5(1), tlabel);
+        break;
+      case kNotEqual:
+        __ Tbnz(i.InputRegister32(0), i.InputInt5(1), tlabel);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else if (opcode == kArm64TestAndBranch) {
+    switch (condition) {
+      case kEqual:
+        __ Tbz(i.InputRegister64(0), i.InputInt6(1), tlabel);
+        break;
+      case kNotEqual:
+        __ Tbnz(i.InputRegister64(0), i.InputInt6(1), tlabel);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else {
+    switch (condition) {
+      case kUnorderedEqual:
+        // The "eq" condition will not catch the unordered case.
+        // The jump or fall-through to the false label is taken if the
+        // comparison was unordered.
+      case kEqual:
+        __ B(eq, tlabel);
+        break;
+      case kUnorderedNotEqual:
+        // Unordered or not equal can be tested with the "ne" condition.
+        // See ARMv8 manual C1.2.3 - Condition Code.
+      case kNotEqual:
+        __ B(ne, tlabel);
+        break;
+      case kSignedLessThan:
+        __ B(lt, tlabel);
+        break;
+      case kSignedGreaterThanOrEqual:
+        __ B(ge, tlabel);
+        break;
+      case kSignedLessThanOrEqual:
+        __ B(le, tlabel);
+        break;
+      case kSignedGreaterThan:
+        __ B(gt, tlabel);
+        break;
+      case kUnorderedLessThan:
+        // The "lo" condition will not catch the unordered case.
+        // The jump or fall-through to the false label is taken if the
+        // comparison was unordered.
+      case kUnsignedLessThan:
+        __ B(lo, tlabel);
+        break;
+      case kUnorderedGreaterThanOrEqual:
+        // Unordered, greater than, or equal can be tested with the "hs"
+        // condition. See ARMv8 manual C1.2.3 - Condition Code.
+      case kUnsignedGreaterThanOrEqual:
+        __ B(hs, tlabel);
+        break;
+      case kUnorderedLessThanOrEqual:
+        // The "ls" condition will not catch the unordered case.
+        // The jump or fall-through to the false label is taken if the
+        // comparison was unordered.
+      case kUnsignedLessThanOrEqual:
+        __ B(ls, tlabel);
+        break;
+      case kUnorderedGreaterThan:
+        // Unordered or greater than can be tested with the "hi" condition.
+        // See ARMv8 manual C1.2.3 - Condition Code.
+      case kUnsignedGreaterThan:
+        __ B(hi, tlabel);
+        break;
+      case kOverflow:
+        __ B(vs, tlabel);
+        break;
+      case kNotOverflow:
+        __ B(vc, tlabel);
+        break;
+    }
   }
-  if (!fallthru) __ B(flabel);  // no fallthru to flabel.
-  __ Bind(&done);
+  if (!branch->fallthru) __ B(flabel);  // no fallthru to flabel.
+}
+
+
+void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+  if (!IsNextInAssemblyOrder(target)) __ B(GetLabel(target));
 }
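
AssembleArchBranch now handles the three fused opcodes before the generic
flags switch: kArm64CompareAndBranch32 turns an equality-with-zero test into
Cbz/Cbnz, and the TestAndBranch variants turn a single-bit test into
Tbz/Tbnz, so no separate Cmp or Tst is needed to set the flags. A minimal
model of the condition each fused form branches on (illustrative C++, not V8
code):

    #include <cstdint>

    // True when the branch to the true label is taken (the kEqual cases).
    bool CompareAndBranch32(uint32_t reg) { return reg == 0; }  // Cbz
    bool TestAndBranch32(uint32_t reg, unsigned bit) {          // Tbz
      return ((reg >> bit) & 1) == 0;  // bit must be < 32
    }

The kNotEqual cases (Cbnz/Tbnz) branch on the negation of the same tests.
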
 
 
@@ -620,7 +934,7 @@
       cc = vc;
       break;
   }
-  __ bind(&check);
+  __ Bind(&check);
   __ Cset(reg, cc);
   __ Bind(&done);
 }
@@ -650,28 +964,11 @@
     __ PushCalleeSavedRegisters();
     frame()->SetRegisterSaveAreaSize(20 * kPointerSize);
   } else if (descriptor->IsJSFunctionCall()) {
-    CompilationInfo* info = linkage()->info();
+    CompilationInfo* info = this->info();
     __ SetStackPointer(jssp);
     __ Prologue(info->IsCodePreAgingActive());
     frame()->SetRegisterSaveAreaSize(
         StandardFrameConstants::kFixedFrameSizeFromFp);
-
-    // Sloppy mode functions and builtins need to replace the receiver with the
-    // global proxy when called as functions (without an explicit receiver
-    // object).
-    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
-    if (info->strict_mode() == SLOPPY && !info->is_native()) {
-      Label ok;
-      // +2 for return address and saved frame pointer.
-      int receiver_slot = info->scope()->num_parameters() + 2;
-      __ Ldr(x10, MemOperand(fp, receiver_slot * kXRegSize));
-      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
-      __ Ldr(x10, GlobalObjectMemOperand());
-      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
-      __ Str(x10, MemOperand(fp, receiver_slot * kXRegSize));
-      __ Bind(&ok);
-    }
-
   } else {
     __ SetStackPointer(jssp);
     __ StubPrologue();
@@ -742,12 +1039,11 @@
       __ Str(temp, g.ToMemOperand(destination, masm()));
     }
   } else if (source->IsConstant()) {
-    ConstantOperand* constant_source = ConstantOperand::cast(source);
+    Constant src = g.ToConstant(ConstantOperand::cast(source));
     if (destination->IsRegister() || destination->IsStackSlot()) {
       UseScratchRegisterScope scope(masm());
       Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                                : scope.AcquireX();
-      Constant src = g.ToConstant(source);
       if (src.type() == Constant::kHeapObject) {
         __ LoadObject(dst, src.ToHeapObject());
       } else {
@@ -756,15 +1052,29 @@
       if (destination->IsStackSlot()) {
         __ Str(dst, g.ToMemOperand(destination, masm()));
       }
-    } else if (destination->IsDoubleRegister()) {
-      FPRegister result = g.ToDoubleRegister(destination);
-      __ Fmov(result, g.ToDouble(constant_source));
+    } else if (src.type() == Constant::kFloat32) {
+      if (destination->IsDoubleRegister()) {
+        FPRegister dst = g.ToDoubleRegister(destination).S();
+        __ Fmov(dst, src.ToFloat32());
+      } else {
+        DCHECK(destination->IsDoubleStackSlot());
+        UseScratchRegisterScope scope(masm());
+        FPRegister temp = scope.AcquireS();
+        __ Fmov(temp, src.ToFloat32());
+        __ Str(temp, g.ToMemOperand(destination, masm()));
+      }
     } else {
-      DCHECK(destination->IsDoubleStackSlot());
-      UseScratchRegisterScope scope(masm());
-      FPRegister temp = scope.AcquireD();
-      __ Fmov(temp, g.ToDouble(constant_source));
-      __ Str(temp, g.ToMemOperand(destination, masm()));
+      DCHECK_EQ(Constant::kFloat64, src.type());
+      if (destination->IsDoubleRegister()) {
+        FPRegister dst = g.ToDoubleRegister(destination);
+        __ Fmov(dst, src.ToFloat64());
+      } else {
+        DCHECK(destination->IsDoubleStackSlot());
+        UseScratchRegisterScope scope(masm());
+        FPRegister temp = scope.AcquireD();
+        __ Fmov(temp, src.ToFloat64());
+        __ Str(temp, g.ToMemOperand(destination, masm()));
+      }
     }
   } else if (source->IsDoubleRegister()) {
     FPRegister src = g.ToDoubleRegister(source);
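
The rewritten constant move distinguishes Constant::kFloat32 from kFloat64
rather than widening everything to double: a float32 constant is materialized
through the S view of the register with its own 32-bit pattern, which is what
a float32 stack slot must hold. A standalone illustration of why the patterns
differ:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      float f = 0.1f;
      double d = f;  // widening is exact, but the encoding changes
      uint32_t bits32;
      uint64_t bits64;
      std::memcpy(&bits32, &f, sizeof(bits32));
      std::memcpy(&bits64, &d, sizeof(bits64));
      std::printf("%08x vs %016llx\n", bits32,
                  static_cast<unsigned long long>(bits64));
      return 0;
    }
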
@@ -816,8 +1126,8 @@
     }
   } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
     UseScratchRegisterScope scope(masm());
-    CPURegister temp_0 = scope.AcquireX();
-    CPURegister temp_1 = scope.AcquireX();
+    DoubleRegister temp_0 = scope.AcquireD();
+    DoubleRegister temp_1 = scope.AcquireD();
     MemOperand src = g.ToMemOperand(source, masm());
     MemOperand dst = g.ToMemOperand(destination, masm());
     __ Ldr(temp_0, src);
@@ -852,7 +1162,7 @@
 
 void CodeGenerator::EnsureSpaceForLazyDeopt() {
   int space_needed = Deoptimizer::patch_size();
-  if (!linkage()->info()->IsStub()) {
+  if (!info()->IsStub()) {
     // Ensure that we have enough space after the previous lazy-bailout
     // instruction for patching the code here.
     intptr_t current_pc = masm()->pc_offset();
diff --git a/src/compiler/arm64/instruction-codes-arm64.h b/src/compiler/arm64/instruction-codes-arm64.h
index 0a9a2ed..863451f 100644
--- a/src/compiler/arm64/instruction-codes-arm64.h
+++ b/src/compiler/arm64/instruction-codes-arm64.h
@@ -16,6 +16,8 @@
   V(Arm64Add32)                    \
   V(Arm64And)                      \
   V(Arm64And32)                    \
+  V(Arm64Bic)                      \
+  V(Arm64Bic32)                    \
   V(Arm64Cmp)                      \
   V(Arm64Cmp32)                    \
   V(Arm64Cmn)                      \
@@ -24,12 +26,24 @@
   V(Arm64Tst32)                    \
   V(Arm64Or)                       \
   V(Arm64Or32)                     \
-  V(Arm64Xor)                      \
-  V(Arm64Xor32)                    \
+  V(Arm64Orn)                      \
+  V(Arm64Orn32)                    \
+  V(Arm64Eor)                      \
+  V(Arm64Eor32)                    \
+  V(Arm64Eon)                      \
+  V(Arm64Eon32)                    \
   V(Arm64Sub)                      \
   V(Arm64Sub32)                    \
   V(Arm64Mul)                      \
   V(Arm64Mul32)                    \
+  V(Arm64Smull)                    \
+  V(Arm64Umull)                    \
+  V(Arm64Madd)                     \
+  V(Arm64Madd32)                   \
+  V(Arm64Msub)                     \
+  V(Arm64Msub32)                   \
+  V(Arm64Mneg)                     \
+  V(Arm64Mneg32)                   \
   V(Arm64Idiv)                     \
   V(Arm64Idiv32)                   \
   V(Arm64Udiv)                     \
@@ -42,16 +56,23 @@
   V(Arm64Not32)                    \
   V(Arm64Neg)                      \
   V(Arm64Neg32)                    \
-  V(Arm64Shl)                      \
-  V(Arm64Shl32)                    \
-  V(Arm64Shr)                      \
-  V(Arm64Shr32)                    \
-  V(Arm64Sar)                      \
-  V(Arm64Sar32)                    \
+  V(Arm64Lsl)                      \
+  V(Arm64Lsl32)                    \
+  V(Arm64Lsr)                      \
+  V(Arm64Lsr32)                    \
+  V(Arm64Asr)                      \
+  V(Arm64Asr32)                    \
   V(Arm64Ror)                      \
   V(Arm64Ror32)                    \
   V(Arm64Mov32)                    \
+  V(Arm64Sxtb32)                   \
+  V(Arm64Sxth32)                   \
   V(Arm64Sxtw)                     \
+  V(Arm64Ubfx)                     \
+  V(Arm64Ubfx32)                   \
+  V(Arm64TestAndBranch32)          \
+  V(Arm64TestAndBranch)            \
+  V(Arm64CompareAndBranch32)       \
   V(Arm64Claim)                    \
   V(Arm64Poke)                     \
   V(Arm64PokePairZero)             \
@@ -63,6 +84,12 @@
   V(Arm64Float64Div)               \
   V(Arm64Float64Mod)               \
   V(Arm64Float64Sqrt)              \
+  V(Arm64Float64Floor)             \
+  V(Arm64Float64Ceil)              \
+  V(Arm64Float64RoundTruncate)     \
+  V(Arm64Float64RoundTiesAway)     \
+  V(Arm64Float32ToFloat64)         \
+  V(Arm64Float64ToFloat32)         \
   V(Arm64Float64ToInt32)           \
   V(Arm64Float64ToUint32)          \
   V(Arm64Int32ToFloat64)           \
@@ -97,9 +124,13 @@
 // I = immediate (handle, external, int32)
 // MRI = [register + immediate]
 // MRR = [register + register]
-#define TARGET_ADDRESSING_MODE_LIST(V) \
-  V(MRI) /* [%r0 + K] */               \
-  V(MRR) /* [%r0 + %r1] */
+#define TARGET_ADDRESSING_MODE_LIST(V)  \
+  V(MRI)              /* [%r0 + K] */   \
+  V(MRR)              /* [%r0 + %r1] */ \
+  V(Operand2_R_LSL_I) /* %r0 LSL K */   \
+  V(Operand2_R_LSR_I) /* %r0 LSR K */   \
+  V(Operand2_R_ASR_I) /* %r0 ASR K */   \
+  V(Operand2_R_ROR_I) /* %r0 ROR K */
 
 }  // namespace internal
 }  // namespace compiler
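
The four Operand2_R_*_I modes added to TARGET_ADDRESSING_MODE_LIST describe
AArch64's shifted-register form of a data-processing second operand, where an
immediate shift of Rm is applied by the instruction itself. What one such
operand denotes, as an assumed-semantics sketch:

    #include <cstdint>

    // Add(x0, x1, Operand(x2, LSL, 3)) computes x1 + (x2 << 3) in a single
    // instruction; the mode records the shift kind, the immediate the amount.
    uint64_t AddWithLslOperand(uint64_t rn, uint64_t rm, unsigned shift) {
      return rn + (rm << shift);  // shift must be < 64
    }
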
diff --git a/src/compiler/arm64/instruction-selector-arm64-unittest.cc b/src/compiler/arm64/instruction-selector-arm64-unittest.cc
deleted file mode 100644
index b5562c2..0000000
--- a/src/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ /dev/null
@@ -1,1121 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <list>
-
-#include "src/compiler/instruction-selector-unittest.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-namespace {
-
-typedef RawMachineAssembler::Label MLabel;
-
-template <typename T>
-struct MachInst {
-  T constructor;
-  const char* constructor_name;
-  ArchOpcode arch_opcode;
-  MachineType machine_type;
-};
-
-typedef MachInst<Node* (RawMachineAssembler::*)(Node*)> MachInst1;
-typedef MachInst<Node* (RawMachineAssembler::*)(Node*, Node*)> MachInst2;
-
-
-template <typename T>
-std::ostream& operator<<(std::ostream& os, const MachInst<T>& mi) {
-  return os << mi.constructor_name;
-}
-
-
-// Helper to build Int32Constant or Int64Constant depending on the given
-// machine type.
-Node* BuildConstant(InstructionSelectorTest::StreamBuilder& m, MachineType type,
-                    int64_t value) {
-  switch (type) {
-    case kMachInt32:
-      return m.Int32Constant(value);
-      break;
-
-    case kMachInt64:
-      return m.Int64Constant(value);
-      break;
-
-    default:
-      UNIMPLEMENTED();
-  }
-  return NULL;
-}
-
-
-// ARM64 logical instructions.
-static const MachInst2 kLogicalInstructions[] = {
-    {&RawMachineAssembler::Word32And, "Word32And", kArm64And32, kMachInt32},
-    {&RawMachineAssembler::Word64And, "Word64And", kArm64And, kMachInt64},
-    {&RawMachineAssembler::Word32Or, "Word32Or", kArm64Or32, kMachInt32},
-    {&RawMachineAssembler::Word64Or, "Word64Or", kArm64Or, kMachInt64},
-    {&RawMachineAssembler::Word32Xor, "Word32Xor", kArm64Xor32, kMachInt32},
-    {&RawMachineAssembler::Word64Xor, "Word64Xor", kArm64Xor, kMachInt64}};
-
-
-// ARM64 logical immediates: contiguous set bits, rotated about a power of two
-// sized block. The block is then duplicated across the word. Below is a random
-// subset of the 32-bit immediates.
-static const uint32_t kLogicalImmediates[] = {
-    0x00000002, 0x00000003, 0x00000070, 0x00000080, 0x00000100, 0x000001c0,
-    0x00000300, 0x000007e0, 0x00003ffc, 0x00007fc0, 0x0003c000, 0x0003f000,
-    0x0003ffc0, 0x0003fff8, 0x0007ff00, 0x0007ffe0, 0x000e0000, 0x001e0000,
-    0x001ffffc, 0x003f0000, 0x003f8000, 0x00780000, 0x007fc000, 0x00ff0000,
-    0x01800000, 0x01800180, 0x01f801f8, 0x03fe0000, 0x03ffffc0, 0x03fffffc,
-    0x06000000, 0x07fc0000, 0x07ffc000, 0x07ffffc0, 0x07ffffe0, 0x0ffe0ffe,
-    0x0ffff800, 0x0ffffff0, 0x0fffffff, 0x18001800, 0x1f001f00, 0x1f801f80,
-    0x30303030, 0x3ff03ff0, 0x3ff83ff8, 0x3fff0000, 0x3fff8000, 0x3fffffc0,
-    0x70007000, 0x7f7f7f7f, 0x7fc00000, 0x7fffffc0, 0x8000001f, 0x800001ff,
-    0x81818181, 0x9fff9fff, 0xc00007ff, 0xc0ffffff, 0xdddddddd, 0xe00001ff,
-    0xe00003ff, 0xe007ffff, 0xefffefff, 0xf000003f, 0xf001f001, 0xf3fff3ff,
-    0xf800001f, 0xf80fffff, 0xf87ff87f, 0xfbfbfbfb, 0xfc00001f, 0xfc0000ff,
-    0xfc0001ff, 0xfc03fc03, 0xfe0001ff, 0xff000001, 0xff03ff03, 0xff800000,
-    0xff800fff, 0xff801fff, 0xff87ffff, 0xffc0003f, 0xffc007ff, 0xffcfffcf,
-    0xffe00003, 0xffe1ffff, 0xfff0001f, 0xfff07fff, 0xfff80007, 0xfff87fff,
-    0xfffc00ff, 0xfffe07ff, 0xffff00ff, 0xffffc001, 0xfffff007, 0xfffff3ff,
-    0xfffff807, 0xfffff9ff, 0xfffffc0f, 0xfffffeff};
-
-
-// ARM64 arithmetic instructions.
-static const MachInst2 kAddSubInstructions[] = {
-    {&RawMachineAssembler::Int32Add, "Int32Add", kArm64Add32, kMachInt32},
-    {&RawMachineAssembler::Int64Add, "Int64Add", kArm64Add, kMachInt64},
-    {&RawMachineAssembler::Int32Sub, "Int32Sub", kArm64Sub32, kMachInt32},
-    {&RawMachineAssembler::Int64Sub, "Int64Sub", kArm64Sub, kMachInt64}};
-
-
-// ARM64 Add/Sub immediates: 12-bit immediate optionally shifted by 12.
-// Below is a combination of a random subset and some edge values.
-static const int32_t kAddSubImmediates[] = {
-    0,        1,        69,       493,      599,      701,      719,
-    768,      818,      842,      945,      1246,     1286,     1429,
-    1669,     2171,     2179,     2182,     2254,     2334,     2338,
-    2343,     2396,     2449,     2610,     2732,     2855,     2876,
-    2944,     3377,     3458,     3475,     3476,     3540,     3574,
-    3601,     3813,     3871,     3917,     4095,     4096,     16384,
-    364544,   462848,   970752,   1523712,  1863680,  2363392,  3219456,
-    3280896,  4247552,  4526080,  4575232,  4960256,  5505024,  5894144,
-    6004736,  6193152,  6385664,  6795264,  7114752,  7233536,  7348224,
-    7499776,  7573504,  7729152,  8634368,  8937472,  9465856,  10354688,
-    10682368, 11059200, 11460608, 13168640, 13176832, 14336000, 15028224,
-    15597568, 15892480, 16773120};
-
-
-// ARM64 flag setting data processing instructions.
-static const MachInst2 kDPFlagSetInstructions[] = {
-    {&RawMachineAssembler::Word32And, "Word32And", kArm64Tst32, kMachInt32},
-    {&RawMachineAssembler::Int32Add, "Int32Add", kArm64Cmn32, kMachInt32},
-    {&RawMachineAssembler::Int32Sub, "Int32Sub", kArm64Cmp32, kMachInt32}};
-
-
-// ARM64 arithmetic with overflow instructions.
-static const MachInst2 kOvfAddSubInstructions[] = {
-    {&RawMachineAssembler::Int32AddWithOverflow, "Int32AddWithOverflow",
-     kArm64Add32, kMachInt32},
-    {&RawMachineAssembler::Int32SubWithOverflow, "Int32SubWithOverflow",
-     kArm64Sub32, kMachInt32}};
-
-
-// ARM64 shift instructions.
-static const MachInst2 kShiftInstructions[] = {
-    {&RawMachineAssembler::Word32Shl, "Word32Shl", kArm64Shl32, kMachInt32},
-    {&RawMachineAssembler::Word64Shl, "Word64Shl", kArm64Shl, kMachInt64},
-    {&RawMachineAssembler::Word32Shr, "Word32Shr", kArm64Shr32, kMachInt32},
-    {&RawMachineAssembler::Word64Shr, "Word64Shr", kArm64Shr, kMachInt64},
-    {&RawMachineAssembler::Word32Sar, "Word32Sar", kArm64Sar32, kMachInt32},
-    {&RawMachineAssembler::Word64Sar, "Word64Sar", kArm64Sar, kMachInt64},
-    {&RawMachineAssembler::Word32Ror, "Word32Ror", kArm64Ror32, kMachInt32},
-    {&RawMachineAssembler::Word64Ror, "Word64Ror", kArm64Ror, kMachInt64}};
-
-
-// ARM64 Mul/Div instructions.
-static const MachInst2 kMulDivInstructions[] = {
-    {&RawMachineAssembler::Int32Mul, "Int32Mul", kArm64Mul32, kMachInt32},
-    {&RawMachineAssembler::Int64Mul, "Int64Mul", kArm64Mul, kMachInt64},
-    {&RawMachineAssembler::Int32Div, "Int32Div", kArm64Idiv32, kMachInt32},
-    {&RawMachineAssembler::Int64Div, "Int64Div", kArm64Idiv, kMachInt64},
-    {&RawMachineAssembler::Int32UDiv, "Int32UDiv", kArm64Udiv32, kMachInt32},
-    {&RawMachineAssembler::Int64UDiv, "Int64UDiv", kArm64Udiv, kMachInt64}};
-
-
-// ARM64 FP arithmetic instructions.
-static const MachInst2 kFPArithInstructions[] = {
-    {&RawMachineAssembler::Float64Add, "Float64Add", kArm64Float64Add,
-     kMachFloat64},
-    {&RawMachineAssembler::Float64Sub, "Float64Sub", kArm64Float64Sub,
-     kMachFloat64},
-    {&RawMachineAssembler::Float64Mul, "Float64Mul", kArm64Float64Mul,
-     kMachFloat64},
-    {&RawMachineAssembler::Float64Div, "Float64Div", kArm64Float64Div,
-     kMachFloat64}};
-
-
-struct FPCmp {
-  MachInst2 mi;
-  FlagsCondition cond;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const FPCmp& cmp) {
-  return os << cmp.mi;
-}
-
-
-// ARM64 FP comparison instructions.
-static const FPCmp kFPCmpInstructions[] = {
-    {{&RawMachineAssembler::Float64Equal, "Float64Equal", kArm64Float64Cmp,
-      kMachFloat64},
-     kUnorderedEqual},
-    {{&RawMachineAssembler::Float64LessThan, "Float64LessThan",
-      kArm64Float64Cmp, kMachFloat64},
-     kUnorderedLessThan},
-    {{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
-      kArm64Float64Cmp, kMachFloat64},
-     kUnorderedLessThanOrEqual}};
-
-
-struct Conversion {
-  // The machine_type field in MachInst1 represents the destination type.
-  MachInst1 mi;
-  MachineType src_machine_type;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const Conversion& conv) {
-  return os << conv.mi;
-}
-
-
-// ARM64 type conversion instructions.
-static const Conversion kConversionInstructions[] = {
-    {{&RawMachineAssembler::ChangeInt32ToInt64, "ChangeInt32ToInt64",
-      kArm64Sxtw, kMachInt64},
-     kMachInt32},
-    {{&RawMachineAssembler::ChangeUint32ToUint64, "ChangeUint32ToUint64",
-      kArm64Mov32, kMachUint64},
-     kMachUint32},
-    {{&RawMachineAssembler::TruncateInt64ToInt32, "TruncateInt64ToInt32",
-      kArm64Mov32, kMachInt32},
-     kMachInt64},
-    {{&RawMachineAssembler::ChangeInt32ToFloat64, "ChangeInt32ToFloat64",
-      kArm64Int32ToFloat64, kMachFloat64},
-     kMachInt32},
-    {{&RawMachineAssembler::ChangeUint32ToFloat64, "ChangeUint32ToFloat64",
-      kArm64Uint32ToFloat64, kMachFloat64},
-     kMachUint32},
-    {{&RawMachineAssembler::ChangeFloat64ToInt32, "ChangeFloat64ToInt32",
-      kArm64Float64ToInt32, kMachInt32},
-     kMachFloat64},
-    {{&RawMachineAssembler::ChangeFloat64ToUint32, "ChangeFloat64ToUint32",
-      kArm64Float64ToUint32, kMachUint32},
-     kMachFloat64}};
-
-}  // namespace
-
-
-// -----------------------------------------------------------------------------
-// Logical instructions.
-
-
-typedef InstructionSelectorTestWithParam<MachInst2>
-    InstructionSelectorLogicalTest;
-
-
-TEST_P(InstructionSelectorLogicalTest, Parameter) {
-  const MachInst2 dpi = GetParam();
-  const MachineType type = dpi.machine_type;
-  StreamBuilder m(this, type, type, type);
-  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-}
-
-
-TEST_P(InstructionSelectorLogicalTest, Immediate) {
-  const MachInst2 dpi = GetParam();
-  const MachineType type = dpi.machine_type;
-  // TODO(all): Add support for testing 64-bit immediates.
-  if (type == kMachInt32) {
-    // Immediate on the right.
-    TRACED_FOREACH(int32_t, imm, kLogicalImmediates) {
-      StreamBuilder m(this, type, type);
-      m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
-      Stream s = m.Build();
-      ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
-      ASSERT_EQ(2U, s[0]->InputCount());
-      EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
-      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-      EXPECT_EQ(1U, s[0]->OutputCount());
-    }
-
-    // Immediate on the left; all logical ops should commute.
-    TRACED_FOREACH(int32_t, imm, kLogicalImmediates) {
-      StreamBuilder m(this, type, type);
-      m.Return((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)));
-      Stream s = m.Build();
-      ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
-      ASSERT_EQ(2U, s[0]->InputCount());
-      EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
-      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-      EXPECT_EQ(1U, s[0]->OutputCount());
-    }
-  }
-}
-
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorLogicalTest,
-                        ::testing::ValuesIn(kLogicalInstructions));
-
-
-// -----------------------------------------------------------------------------
-// Add and Sub instructions.
-
-typedef InstructionSelectorTestWithParam<MachInst2>
-    InstructionSelectorAddSubTest;
-
-
-TEST_P(InstructionSelectorAddSubTest, Parameter) {
-  const MachInst2 dpi = GetParam();
-  const MachineType type = dpi.machine_type;
-  StreamBuilder m(this, type, type, type);
-  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-}
-
-
-TEST_P(InstructionSelectorAddSubTest, ImmediateOnRight) {
-  const MachInst2 dpi = GetParam();
-  const MachineType type = dpi.machine_type;
-  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
-    StreamBuilder m(this, type, type);
-    m.Return((m.*dpi.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
-    EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-  }
-}
-
-
-TEST_P(InstructionSelectorAddSubTest, ImmediateOnLeft) {
-  const MachInst2 dpi = GetParam();
-  const MachineType type = dpi.machine_type;
-
-  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
-    StreamBuilder m(this, type, type);
-    m.Return((m.*dpi.constructor)(BuildConstant(m, type, imm), m.Parameter(0)));
-    Stream s = m.Build();
-
-    // Add can support an immediate on the left by commuting, but Sub can't
-    // commute. We test zero-on-left Sub later.
-    if (strstr(dpi.constructor_name, "Add") != NULL) {
-      ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
-      ASSERT_EQ(2U, s[0]->InputCount());
-      EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
-      EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
-      EXPECT_EQ(1U, s[0]->OutputCount());
-    }
-  }
-}
-
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorAddSubTest,
-                        ::testing::ValuesIn(kAddSubInstructions));
-
-
-TEST_F(InstructionSelectorTest, SubZeroOnLeft) {
-  // Subtraction with zero on the left maps to Neg.
-  {
-    // 32-bit subtract.
-    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-    m.Return(m.Int32Sub(m.Int32Constant(0), m.Parameter(0)));
-    Stream s = m.Build();
-
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Neg32, s[0]->arch_opcode());
-    EXPECT_EQ(1U, s[0]->InputCount());
-    EXPECT_EQ(1U, s[0]->OutputCount());
-  }
-  {
-    // 64-bit subtract.
-    StreamBuilder m(this, kMachInt64, kMachInt64, kMachInt64);
-    m.Return(m.Int64Sub(m.Int64Constant(0), m.Parameter(0)));
-    Stream s = m.Build();
-
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Neg, s[0]->arch_opcode());
-    EXPECT_EQ(1U, s[0]->InputCount());
-    EXPECT_EQ(1U, s[0]->OutputCount());
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// Data processing controlled branches.
-
-
-typedef InstructionSelectorTestWithParam<MachInst2>
-    InstructionSelectorDPFlagSetTest;
-
-
-TEST_P(InstructionSelectorDPFlagSetTest, BranchWithParameters) {
-  const MachInst2 dpi = GetParam();
-  const MachineType type = dpi.machine_type;
-  StreamBuilder m(this, type, type, type);
-  MLabel a, b;
-  m.Branch((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)), &a, &b);
-  m.Bind(&a);
-  m.Return(m.Int32Constant(1));
-  m.Bind(&b);
-  m.Return(m.Int32Constant(0));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-  EXPECT_EQ(kNotEqual, s[0]->flags_condition());
-}
-
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
-                        InstructionSelectorDPFlagSetTest,
-                        ::testing::ValuesIn(kDPFlagSetInstructions));
-
-
-TEST_F(InstructionSelectorTest, AndBranchWithImmediateOnRight) {
-  TRACED_FOREACH(int32_t, imm, kLogicalImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    MLabel a, b;
-    m.Branch(m.Word32And(m.Parameter(0), m.Int32Constant(imm)), &a, &b);
-    m.Bind(&a);
-    m.Return(m.Int32Constant(1));
-    m.Bind(&b);
-    m.Return(m.Int32Constant(0));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
-    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
-  }
-}
-
-
-TEST_F(InstructionSelectorTest, AddBranchWithImmediateOnRight) {
-  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    MLabel a, b;
-    m.Branch(m.Int32Add(m.Parameter(0), m.Int32Constant(imm)), &a, &b);
-    m.Bind(&a);
-    m.Return(m.Int32Constant(1));
-    m.Bind(&b);
-    m.Return(m.Int32Constant(0));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
-    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
-  }
-}
-
-
-TEST_F(InstructionSelectorTest, SubBranchWithImmediateOnRight) {
-  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    MLabel a, b;
-    m.Branch(m.Int32Sub(m.Parameter(0), m.Int32Constant(imm)), &a, &b);
-    m.Bind(&a);
-    m.Return(m.Int32Constant(1));
-    m.Bind(&b);
-    m.Return(m.Int32Constant(0));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
-    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
-  }
-}
-
-
-TEST_F(InstructionSelectorTest, AndBranchWithImmediateOnLeft) {
-  TRACED_FOREACH(int32_t, imm, kLogicalImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    MLabel a, b;
-    m.Branch(m.Word32And(m.Int32Constant(imm), m.Parameter(0)), &a, &b);
-    m.Bind(&a);
-    m.Return(m.Int32Constant(1));
-    m.Bind(&b);
-    m.Return(m.Int32Constant(0));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
-    ASSERT_LE(1U, s[0]->InputCount());
-    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
-  }
-}
-
-
-TEST_F(InstructionSelectorTest, AddBranchWithImmediateOnLeft) {
-  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    MLabel a, b;
-    m.Branch(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)), &a, &b);
-    m.Bind(&a);
-    m.Return(m.Int32Constant(1));
-    m.Bind(&b);
-    m.Return(m.Int32Constant(0));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
-    ASSERT_LE(1U, s[0]->InputCount());
-    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// Add and subtract instructions with overflow.
-
-
-typedef InstructionSelectorTestWithParam<MachInst2>
-    InstructionSelectorOvfAddSubTest;
-
-
-TEST_P(InstructionSelectorOvfAddSubTest, OvfParameter) {
-  const MachInst2 dpi = GetParam();
-  const MachineType type = dpi.machine_type;
-  StreamBuilder m(this, type, type, type);
-  m.Return(
-      m.Projection(1, (m.*dpi.constructor)(m.Parameter(0), m.Parameter(1))));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_LE(1U, s[0]->OutputCount());
-  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-  EXPECT_EQ(kOverflow, s[0]->flags_condition());
-}
-
-
-TEST_P(InstructionSelectorOvfAddSubTest, OvfImmediateOnRight) {
-  const MachInst2 dpi = GetParam();
-  const MachineType type = dpi.machine_type;
-  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
-    StreamBuilder m(this, type, type);
-    m.Return(m.Projection(
-        1, (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_LE(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kOverflow, s[0]->flags_condition());
-  }
-}
-
-
-TEST_P(InstructionSelectorOvfAddSubTest, ValParameter) {
-  const MachInst2 dpi = GetParam();
-  const MachineType type = dpi.machine_type;
-  StreamBuilder m(this, type, type, type);
-  m.Return(
-      m.Projection(0, (m.*dpi.constructor)(m.Parameter(0), m.Parameter(1))));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_LE(1U, s[0]->OutputCount());
-  EXPECT_EQ(kFlags_none, s[0]->flags_mode());
-}
-
-
-TEST_P(InstructionSelectorOvfAddSubTest, ValImmediateOnRight) {
-  const MachInst2 dpi = GetParam();
-  const MachineType type = dpi.machine_type;
-  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
-    StreamBuilder m(this, type, type);
-    m.Return(m.Projection(
-        0, (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_LE(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
-  }
-}
-
-
-TEST_P(InstructionSelectorOvfAddSubTest, BothParameter) {
-  const MachInst2 dpi = GetParam();
-  const MachineType type = dpi.machine_type;
-  StreamBuilder m(this, type, type, type);
-  Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Parameter(1));
-  m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
-  Stream s = m.Build();
-  ASSERT_LE(1U, s.size());
-  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(2U, s[0]->OutputCount());
-  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-  EXPECT_EQ(kOverflow, s[0]->flags_condition());
-}
-
-
-TEST_P(InstructionSelectorOvfAddSubTest, BothImmediateOnRight) {
-  const MachInst2 dpi = GetParam();
-  const MachineType type = dpi.machine_type;
-  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
-    StreamBuilder m(this, type, type);
-    Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
-    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
-    Stream s = m.Build();
-    ASSERT_LE(1U, s.size());
-    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(2U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kOverflow, s[0]->flags_condition());
-  }
-}
-
-
-TEST_P(InstructionSelectorOvfAddSubTest, BranchWithParameters) {
-  const MachInst2 dpi = GetParam();
-  const MachineType type = dpi.machine_type;
-  StreamBuilder m(this, type, type, type);
-  MLabel a, b;
-  Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Parameter(1));
-  m.Branch(m.Projection(1, n), &a, &b);
-  m.Bind(&a);
-  m.Return(m.Int32Constant(0));
-  m.Bind(&b);
-  m.Return(m.Projection(0, n));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(4U, s[0]->InputCount());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-  EXPECT_EQ(kOverflow, s[0]->flags_condition());
-}
-
-
-TEST_P(InstructionSelectorOvfAddSubTest, BranchWithImmediateOnRight) {
-  const MachInst2 dpi = GetParam();
-  const MachineType type = dpi.machine_type;
-  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
-    StreamBuilder m(this, type, type);
-    MLabel a, b;
-    Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
-    m.Branch(m.Projection(1, n), &a, &b);
-    m.Bind(&a);
-    m.Return(m.Int32Constant(0));
-    m.Bind(&b);
-    m.Return(m.Projection(0, n));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
-    ASSERT_EQ(4U, s[0]->InputCount());
-    EXPECT_EQ(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-    EXPECT_EQ(kOverflow, s[0]->flags_condition());
-  }
-}
-
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
-                        InstructionSelectorOvfAddSubTest,
-                        ::testing::ValuesIn(kOvfAddSubInstructions));
-
-
-TEST_F(InstructionSelectorTest, OvfFlagAddImmediateOnLeft) {
-  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    m.Return(m.Projection(
-        1, m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0))));
-    Stream s = m.Build();
-
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
-    EXPECT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_LE(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kOverflow, s[0]->flags_condition());
-  }
-}
-
-
-TEST_F(InstructionSelectorTest, OvfValAddImmediateOnLeft) {
-  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    m.Return(m.Projection(
-        0, m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0))));
-    Stream s = m.Build();
-
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_LE(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
-  }
-}
-
-
-TEST_F(InstructionSelectorTest, OvfBothAddImmediateOnLeft) {
-  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    Node* n = m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0));
-    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
-    Stream s = m.Build();
-
-    ASSERT_LE(1U, s.size());
-    EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(2U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kOverflow, s[0]->flags_condition());
-  }
-}
-
-
-TEST_F(InstructionSelectorTest, OvfBranchWithImmediateOnLeft) {
-  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    MLabel a, b;
-    Node* n = m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0));
-    m.Branch(m.Projection(1, n), &a, &b);
-    m.Bind(&a);
-    m.Return(m.Int32Constant(0));
-    m.Bind(&b);
-    m.Return(m.Projection(0, n));
-    Stream s = m.Build();
-
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
-    ASSERT_EQ(4U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
-    EXPECT_EQ(kOverflow, s[0]->flags_condition());
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// Shift instructions.
-
-
-typedef InstructionSelectorTestWithParam<MachInst2>
-    InstructionSelectorShiftTest;
-
-
-TEST_P(InstructionSelectorShiftTest, Parameter) {
-  const MachInst2 dpi = GetParam();
-  const MachineType type = dpi.machine_type;
-  StreamBuilder m(this, type, type, type);
-  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-}
-
-
-TEST_P(InstructionSelectorShiftTest, Immediate) {
-  const MachInst2 dpi = GetParam();
-  const MachineType type = dpi.machine_type;
-  TRACED_FORRANGE(int32_t, imm, 0, (ElementSizeOf(type) * 8) - 1) {
-    StreamBuilder m(this, type, type);
-    m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(2U, s[0]->InputCount());
-    EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-  }
-}
-
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
-                        ::testing::ValuesIn(kShiftInstructions));
-
-
-// -----------------------------------------------------------------------------
-// Mul and Div instructions.
-
-
-typedef InstructionSelectorTestWithParam<MachInst2>
-    InstructionSelectorMulDivTest;
-
-
-TEST_P(InstructionSelectorMulDivTest, Parameter) {
-  const MachInst2 dpi = GetParam();
-  const MachineType type = dpi.machine_type;
-  StreamBuilder m(this, type, type, type);
-  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorMulDivTest,
-                        ::testing::ValuesIn(kMulDivInstructions));
-
-
-// -----------------------------------------------------------------------------
-// Floating point instructions.
-
-typedef InstructionSelectorTestWithParam<MachInst2>
-    InstructionSelectorFPArithTest;
-
-
-TEST_P(InstructionSelectorFPArithTest, Parameter) {
-  const MachInst2 fpa = GetParam();
-  StreamBuilder m(this, fpa.machine_type, fpa.machine_type, fpa.machine_type);
-  m.Return((m.*fpa.constructor)(m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(fpa.arch_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-}
-
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPArithTest,
-                        ::testing::ValuesIn(kFPArithInstructions));
-
-
-typedef InstructionSelectorTestWithParam<FPCmp> InstructionSelectorFPCmpTest;
-
-
-TEST_P(InstructionSelectorFPCmpTest, Parameter) {
-  const FPCmp cmp = GetParam();
-  StreamBuilder m(this, kMachInt32, cmp.mi.machine_type, cmp.mi.machine_type);
-  m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-  EXPECT_EQ(cmp.cond, s[0]->flags_condition());
-}
-
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
-                        ::testing::ValuesIn(kFPCmpInstructions));
-
-
-// -----------------------------------------------------------------------------
-// Conversions.
-
-typedef InstructionSelectorTestWithParam<Conversion>
-    InstructionSelectorConversionTest;
-
-
-TEST_P(InstructionSelectorConversionTest, Parameter) {
-  const Conversion conv = GetParam();
-  StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
-  m.Return((m.*conv.mi.constructor)(m.Parameter(0)));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(1U, s[0]->InputCount());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-}
-
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
-                        InstructionSelectorConversionTest,
-                        ::testing::ValuesIn(kConversionInstructions));
-
-
-// -----------------------------------------------------------------------------
-// Memory access instructions.
-
-
-namespace {
-
-struct MemoryAccess {
-  MachineType type;
-  ArchOpcode ldr_opcode;
-  ArchOpcode str_opcode;
-  const int32_t immediates[20];
-};
-
-
-std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
-  OStringStream ost;
-  ost << memacc.type;
-  return os << ost.c_str();
-}
-
-}  // namespace
-
-
-static const MemoryAccess kMemoryAccesses[] = {
-    {kMachInt8, kArm64Ldrsb, kArm64Strb,
-     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 257, 258, 1000, 1001,
-      2121, 2442, 4093, 4094, 4095}},
-    {kMachUint8, kArm64Ldrb, kArm64Strb,
-     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 257, 258, 1000, 1001,
-      2121, 2442, 4093, 4094, 4095}},
-    {kMachInt16, kArm64Ldrsh, kArm64Strh,
-     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 258, 260, 4096, 4098,
-      4100, 4242, 6786, 8188, 8190}},
-    {kMachUint16, kArm64Ldrh, kArm64Strh,
-     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 258, 260, 4096, 4098,
-      4100, 4242, 6786, 8188, 8190}},
-    {kMachInt32, kArm64LdrW, kArm64StrW,
-     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 260, 4096, 4100, 8192,
-      8196, 3276, 3280, 16376, 16380}},
-    {kMachUint32, kArm64LdrW, kArm64StrW,
-     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 260, 4096, 4100, 8192,
-      8196, 3276, 3280, 16376, 16380}},
-    {kMachInt64, kArm64Ldr, kArm64Str,
-     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 264, 4096, 4104, 8192,
-      8200, 16384, 16392, 32752, 32760}},
-    {kMachUint64, kArm64Ldr, kArm64Str,
-     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 264, 4096, 4104, 8192,
-      8200, 16384, 16392, 32752, 32760}},
-    {kMachFloat32, kArm64LdrS, kArm64StrS,
-     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 260, 4096, 4100, 8192,
-      8196, 3276, 3280, 16376, 16380}},
-    {kMachFloat64, kArm64LdrD, kArm64StrD,
-     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 264, 4096, 4104, 8192,
-      8200, 16384, 16392, 32752, 32760}}};
-
-
-typedef InstructionSelectorTestWithParam<MemoryAccess>
-    InstructionSelectorMemoryAccessTest;
-
-
-TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
-  const MemoryAccess memacc = GetParam();
-  StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
-  m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-}
-
-
-TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
-  const MemoryAccess memacc = GetParam();
-  TRACED_FOREACH(int32_t, index, memacc.immediates) {
-    StreamBuilder m(this, memacc.type, kMachPtr);
-    m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
-    EXPECT_EQ(2U, s[0]->InputCount());
-    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
-    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
-    ASSERT_EQ(1U, s[0]->OutputCount());
-  }
-}
-
-
-TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
-  const MemoryAccess memacc = GetParam();
-  StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
-  m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
-  m.Return(m.Int32Constant(0));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
-  EXPECT_EQ(3U, s[0]->InputCount());
-  EXPECT_EQ(0U, s[0]->OutputCount());
-}
-
-
-TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
-  const MemoryAccess memacc = GetParam();
-  TRACED_FOREACH(int32_t, index, memacc.immediates) {
-    StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
-    m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
-            m.Parameter(1));
-    m.Return(m.Int32Constant(0));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
-    EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
-    ASSERT_EQ(3U, s[0]->InputCount());
-    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
-    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(0U, s[0]->OutputCount());
-  }
-}
-
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
-                        InstructionSelectorMemoryAccessTest,
-                        ::testing::ValuesIn(kMemoryAccesses));
-
-
-// -----------------------------------------------------------------------------
-// Comparison instructions.
-
-static const MachInst2 kComparisonInstructions[] = {
-    {&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32, kMachInt32},
-    {&RawMachineAssembler::Word64Equal, "Word64Equal", kArm64Cmp, kMachInt64},
-};
-
-
-typedef InstructionSelectorTestWithParam<MachInst2>
-    InstructionSelectorComparisonTest;
-
-
-TEST_P(InstructionSelectorComparisonTest, WithParameters) {
-  const MachInst2 cmp = GetParam();
-  const MachineType type = cmp.machine_type;
-  StreamBuilder m(this, type, type, type);
-  m.Return((m.*cmp.constructor)(m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-  EXPECT_EQ(kEqual, s[0]->flags_condition());
-}
-
-
-TEST_P(InstructionSelectorComparisonTest, WithImmediate) {
-  const MachInst2 cmp = GetParam();
-  const MachineType type = cmp.machine_type;
-  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
-    // Compare with 0 are turned into tst instruction.
-    if (imm == 0) continue;
-    StreamBuilder m(this, type, type);
-    m.Return((m.*cmp.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
-    EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kEqual, s[0]->flags_condition());
-  }
-  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
-    // Compare with 0 are turned into tst instruction.
-    if (imm == 0) continue;
-    StreamBuilder m(this, type, type);
-    m.Return((m.*cmp.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
-    EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kEqual, s[0]->flags_condition());
-  }
-}
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
-                        InstructionSelectorComparisonTest,
-                        ::testing::ValuesIn(kComparisonInstructions));
-
-
-TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
-  {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kEqual, s[0]->flags_condition());
-  }
-  {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kEqual, s[0]->flags_condition());
-  }
-}
-
-
-TEST_F(InstructionSelectorTest, Word64EqualWithZero) {
-  {
-    StreamBuilder m(this, kMachInt64, kMachInt64);
-    m.Return(m.Word64Equal(m.Parameter(0), m.Int64Constant(0)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Tst, s[0]->arch_opcode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kEqual, s[0]->flags_condition());
-  }
-  {
-    StreamBuilder m(this, kMachInt64, kMachInt64);
-    m.Return(m.Word64Equal(m.Int64Constant(0), m.Parameter(0)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Tst, s[0]->arch_opcode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-    EXPECT_EQ(kEqual, s[0]->flags_condition());
-  }
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
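
The removed unittest file's kLogicalImmediates table documents the ARM64
logical-immediate encoding: a contiguous run of set bits, rotated within a
power-of-two-sized element and replicated across the word. A brute-force
checker for the 32-bit case, written as an independent sketch rather than
V8's Assembler::IsImmLogical decoder:

    #include <cstdint>

    // v must be a replicated 2/4/8/16/32-bit element, where the element is a
    // rotation of a contiguous run of ones. All-zeros and all-ones are not
    // encodable.
    bool IsLogicalImmediate32(uint32_t v) {
      if (v == 0 || v == 0xFFFFFFFFu) return false;
      for (unsigned size = 2; size <= 32; size *= 2) {
        uint32_t mask = (size == 32) ? 0xFFFFFFFFu : ((1u << size) - 1);
        uint32_t elem = v & mask;
        bool replicated = true;
        for (unsigned i = size; i < 32; i += size) {
          if (((v >> i) & mask) != elem) { replicated = false; break; }
        }
        if (!replicated) continue;
        for (unsigned ones = 1; ones < size; ++ones) {
          uint32_t run = (1u << ones) - 1;
          for (unsigned rot = 0; rot < size; ++rot) {
            uint32_t rotated = run;
            if (rot != 0) {
              rotated = ((run << rot) | (run >> (size - rot))) & mask;
            }
            if (elem == rotated) return true;
          }
        }
      }
      return false;
    }

For example, 0x0ffe0ffe from the table passes (a run of 11 ones rotated by
one inside a 16-bit element), while 0x12345678 does not.
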
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index 472ce6f..72661af 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -44,6 +44,10 @@
       value = OpParameter<int64_t>(node);
     else
       return false;
+    return CanBeImmediate(value, mode);
+  }
+
+  bool CanBeImmediate(int64_t value, ImmediateMode mode) {
     unsigned ignored;
     switch (mode) {
       case kLogical32Imm:
@@ -55,7 +59,6 @@
         return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
                                        &ignored, &ignored, &ignored);
       case kArithmeticImm:
-        // TODO(dcarney): -values can be handled by instruction swapping
         return Assembler::IsImmAddSub(value);
       case kShift32Imm:
         return 0 <= value && value < 32;
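
The refactoring above extracts a CanBeImmediate(int64_t, ImmediateMode)
overload so callers can query synthesized values, not just constant nodes;
VisitAddSub further down uses it on the negated constant. For kArithmeticImm
the check defers to Assembler::IsImmAddSub, which (as I understand the
AArch64 encoding) accepts a 12-bit unsigned immediate, optionally shifted
left by 12:

    #include <cstdint>

    // Sketch assumed to match Assembler::IsImmAddSub: imm12 or imm12 << 12.
    bool IsImmAddSubSketch(int64_t v) {
      return v >= 0 && (((v & ~int64_t{0xFFF}) == 0) ||
                        ((v & ~(int64_t{0xFFF} << 12)) == 0));
    }
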
@@ -83,6 +86,14 @@
 };
 
 
+static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+                           Node* node) {
+  Arm64OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)));
+}
+
+
 static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
                      Node* node) {
   Arm64OperandGenerator g(selector);
@@ -110,6 +121,51 @@
 }
 
 
+template <typename Matcher>
+static bool TryMatchShift(InstructionSelector* selector, Node* node,
+                          InstructionCode* opcode, IrOpcode::Value shift_opcode,
+                          ImmediateMode imm_mode,
+                          AddressingMode addressing_mode) {
+  if (node->opcode() != shift_opcode) return false;
+  Arm64OperandGenerator g(selector);
+  Matcher m(node);
+  if (g.CanBeImmediate(m.right().node(), imm_mode)) {
+    *opcode |= AddressingModeField::encode(addressing_mode);
+    return true;
+  }
+  return false;
+}
+
+
+static bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
+                             InstructionCode* opcode, bool try_ror) {
+  return TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord32Shl, kShift32Imm,
+                                          kMode_Operand2_R_LSL_I) ||
+         TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord32Shr, kShift32Imm,
+                                          kMode_Operand2_R_LSR_I) ||
+         TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord32Sar, kShift32Imm,
+                                          kMode_Operand2_R_ASR_I) ||
+         (try_ror && TryMatchShift<Int32BinopMatcher>(
+                         selector, node, opcode, IrOpcode::kWord32Ror,
+                         kShift32Imm, kMode_Operand2_R_ROR_I)) ||
+         TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord64Shl, kShift64Imm,
+                                          kMode_Operand2_R_LSL_I) ||
+         TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord64Shr, kShift64Imm,
+                                          kMode_Operand2_R_LSR_I) ||
+         TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord64Sar, kShift64Imm,
+                                          kMode_Operand2_R_ASR_I) ||
+         (try_ror && TryMatchShift<Int64BinopMatcher>(
+                         selector, node, opcode, IrOpcode::kWord64Ror,
+                         kShift64Imm, kMode_Operand2_R_ROR_I));
+}
+
+
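TryMatchAnyShift above is what lets a binop absorb a shift-by-constant operand into the instruction's addressing mode. A hypothetical illustration of the fused shape:

#include <cstdint>

// Int32Add(x, Word32Shl(y, 3)) can select as one instruction:
//   add w0, w_x, w_y, lsl #3
int32_t AddShifted(int32_t x, int32_t y) {
  return x + (y << 3);  // shift folded into the add after fusion
}
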
 // Shared routine for multiple binary operations.
 template <typename Matcher>
 static void VisitBinop(InstructionSelector* selector, Node* node,
@@ -121,9 +177,32 @@
   size_t input_count = 0;
   InstructionOperand* outputs[2];
   size_t output_count = 0;
+  bool try_ror_operand = true;
 
-  inputs[input_count++] = g.UseRegister(m.left().node());
-  inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);
+  if (m.IsInt32Add() || m.IsInt64Add() || m.IsInt32Sub() || m.IsInt64Sub()) {
+    try_ror_operand = false;
+  }
+
+  if (g.CanBeImmediate(m.right().node(), operand_mode)) {
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.UseImmediate(m.right().node());
+  } else if (TryMatchAnyShift(selector, m.right().node(), &opcode,
+                              try_ror_operand)) {
+    Matcher m_shift(m.right().node());
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.UseRegister(m_shift.left().node());
+    inputs[input_count++] = g.UseImmediate(m_shift.right().node());
+  } else if (m.HasProperty(Operator::kCommutative) &&
+             TryMatchAnyShift(selector, m.left().node(), &opcode,
+                              try_ror_operand)) {
+    Matcher m_shift(m.left().node());
+    inputs[input_count++] = g.UseRegister(m.right().node());
+    inputs[input_count++] = g.UseRegister(m_shift.left().node());
+    inputs[input_count++] = g.UseImmediate(m_shift.right().node());
+  } else {
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.UseRegister(m.right().node());
+  }
 
   if (cont->IsBranch()) {
     inputs[input_count++] = g.Label(cont->true_block());
@@ -155,6 +234,22 @@
 }
 
 
+template <typename Matcher>
+static void VisitAddSub(InstructionSelector* selector, Node* node,
+                        ArchOpcode opcode, ArchOpcode negate_opcode) {
+  Arm64OperandGenerator g(selector);
+  Matcher m(node);
+  if (m.right().HasValue() && (m.right().Value() < 0) &&
+      g.CanBeImmediate(-m.right().Value(), kArithmeticImm)) {
+    selector->Emit(negate_opcode, g.DefineAsRegister(node),
+                   g.UseRegister(m.left().node()),
+                   g.TempImmediate(-m.right().Value()));
+  } else {
+    VisitBinop<Matcher>(selector, node, opcode, kArithmeticImm);
+  }
+}
+
+
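VisitAddSub rescues negative immediates that IsImmAddSub rejects by using the identity x + (-imm) == x - imm and emitting the opposite opcode. A sketch of the decision, reusing the hypothetical helper from earlier:

#include <cstdint>

// Int32Add(x, -5) selects as "sub w0, w_x, #5" instead of materializing -5.
bool ShouldNegateImmediate(int64_t imm) {
  return imm < 0 && imm != INT64_MIN &&  // negating INT64_MIN would overflow
         IsImmAddSubSketch(-imm);
}
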
 void InstructionSelector::VisitLoad(Node* node) {
   MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
   MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
@@ -267,75 +362,339 @@
 }
 
 
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  MachineType typ = TypeOf(OpParameter<MachineType>(node));
+  Arm64OperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedLoadWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedLoadFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedLoadFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
+       g.UseRegister(offset), g.UseOperand(length, kArithmeticImm));
+}
+
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  Arm64OperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  Node* const value = node->InputAt(3);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = kCheckedStoreWord8;
+      break;
+    case kRepWord16:
+      opcode = kCheckedStoreWord16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedStoreWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedStoreFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedStoreFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  Emit(opcode, nullptr, g.UseRegister(buffer), g.UseRegister(offset),
+       g.UseOperand(length, kArithmeticImm), g.UseRegister(value));
+}
+
+
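The checked load/store selections above pass the length as an extra operand so the generated code can bounds-guard the access. As a semantic sketch only (assuming the asm.js-style convention that an out-of-bounds integer load yields 0 and an out-of-bounds store is dropped):

#include <cstdint>

int32_t CheckedLoadSketch(const int32_t* buffer, uint32_t offset,
                          uint32_t length) {
  return offset < length ? buffer[offset] : 0;  // OOB read yields 0
}
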
+template <typename Matcher>
+static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
+                         ArchOpcode opcode, bool left_can_cover,
+                         bool right_can_cover, ImmediateMode imm_mode) {
+  Arm64OperandGenerator g(selector);
+
+  // Map instruction to equivalent operation with inverted right input.
+  ArchOpcode inv_opcode = opcode;
+  switch (opcode) {
+    case kArm64And32:
+      inv_opcode = kArm64Bic32;
+      break;
+    case kArm64And:
+      inv_opcode = kArm64Bic;
+      break;
+    case kArm64Or32:
+      inv_opcode = kArm64Orn32;
+      break;
+    case kArm64Or:
+      inv_opcode = kArm64Orn;
+      break;
+    case kArm64Eor32:
+      inv_opcode = kArm64Eon32;
+      break;
+    case kArm64Eor:
+      inv_opcode = kArm64Eon;
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
+  if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
+    Matcher mleft(m->left().node());
+    if (mleft.right().Is(-1)) {
+      // TODO(all): support shifted operand on right.
+      selector->Emit(inv_opcode, g.DefineAsRegister(node),
+                     g.UseRegister(m->right().node()),
+                     g.UseRegister(mleft.left().node()));
+      return;
+    }
+  }
+
+  // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
+  if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
+      right_can_cover) {
+    Matcher mright(m->right().node());
+    if (mright.right().Is(-1)) {
+      // TODO(all): support shifted operand on right.
+      selector->Emit(inv_opcode, g.DefineAsRegister(node),
+                     g.UseRegister(m->left().node()),
+                     g.UseRegister(mright.left().node()));
+      return;
+    }
+  }
+
+  if (m->IsWord32Xor() && m->right().Is(-1)) {
+    selector->Emit(kArm64Not32, g.DefineAsRegister(node),
+                   g.UseRegister(m->left().node()));
+  } else if (m->IsWord64Xor() && m->right().Is(-1)) {
+    selector->Emit(kArm64Not, g.DefineAsRegister(node),
+                   g.UseRegister(m->left().node()));
+  } else {
+    VisitBinop<Matcher>(selector, node, opcode, imm_mode);
+  }
+}
+
+
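The inverted-operand table in VisitLogical rests on Xor(x, -1) == ~x, so each logical op has a single-instruction form taking a negated input. The identities in plain C++ (32-bit shown; the 64-bit opcodes are analogous):

#include <cstdint>

uint32_t BicSketch(uint32_t a, uint32_t b) { return a & ~b; }  // kArm64Bic32
uint32_t OrnSketch(uint32_t a, uint32_t b) { return a | ~b; }  // kArm64Orn32
uint32_t EonSketch(uint32_t a, uint32_t b) { return a ^ ~b; }  // kArm64Eon32
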
 void InstructionSelector::VisitWord32And(Node* node) {
-  VisitBinop<Int32BinopMatcher>(this, node, kArm64And32, kLogical32Imm);
+  Arm64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
+      m.right().HasValue()) {
+    uint32_t mask = m.right().Value();
+    uint32_t mask_width = base::bits::CountPopulation32(mask);
+    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+      // The mask must be contiguous and occupy the least-significant bits.
+      DCHECK_EQ(0, base::bits::CountTrailingZeros32(mask));
+
+      // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
+      // significant bits.
+      Int32BinopMatcher mleft(m.left().node());
+      if (mleft.right().IsInRange(0, 31)) {
+        // Ubfx cannot extract bits past the register size; however, since
+        // shifting the original value would have introduced some zeros, we
+        // can still use ubfx with a smaller mask and the remaining bits will
+        // be zeros.
+        uint32_t lsb = mleft.right().Value();
+        if (lsb + mask_width > 32) mask_width = 32 - lsb;
+
+        Emit(kArm64Ubfx32, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()),
+             g.UseImmediate(mleft.right().node()), g.TempImmediate(mask_width));
+        return;
+      }
+      // Other cases fall through to the normal And operation.
+    }
+  }
+  VisitLogical<Int32BinopMatcher>(
+      this, node, &m, kArm64And32, CanCover(node, m.left().node()),
+      CanCover(node, m.right().node()), kLogical32Imm);
 }
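
The mask test above, popcount plus leading zeros summing to 32, holds exactly when the set bits form one contiguous run anchored at bit 0, which is what ubfx needs. A standalone sketch using GCC/Clang builtins (an assumption; V8's base::bits wraps the same operations):

#include <cstdint>

bool IsContiguousLowMask32(uint32_t mask) {
  if (mask == 0) return false;                // __builtin_clz(0) is undefined
  unsigned width = __builtin_popcount(mask);  // CountPopulation32
  unsigned msb = __builtin_clz(mask);         // CountLeadingZeros32
  return msb + width == 32;                   // e.g. 0x0000FFFF, 0x7FFFFFFF
}
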
 
 
 void InstructionSelector::VisitWord64And(Node* node) {
-  VisitBinop<Int64BinopMatcher>(this, node, kArm64And, kLogical64Imm);
+  Arm64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
+      m.right().HasValue()) {
+    uint64_t mask = m.right().Value();
+    uint64_t mask_width = base::bits::CountPopulation64(mask);
+    uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
+    if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+      // The mask must be contiguous and occupy the least-significant bits.
+      DCHECK_EQ(0, base::bits::CountTrailingZeros64(mask));
+
+      // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
+      // significant bits.
+      Int64BinopMatcher mleft(m.left().node());
+      if (mleft.right().IsInRange(0, 63)) {
+        // Ubfx cannot extract bits past the register size; however, since
+        // shifting the original value would have introduced some zeros, we
+        // can still use ubfx with a smaller mask and the remaining bits will
+        // be zeros.
+        uint64_t lsb = mleft.right().Value();
+        if (lsb + mask_width > 64) mask_width = 64 - lsb;
+
+        Emit(kArm64Ubfx, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()),
+             g.UseImmediate(mleft.right().node()), g.TempImmediate(mask_width));
+        return;
+      }
+      // Other cases fall through to the normal And operation.
+    }
+  }
+  VisitLogical<Int64BinopMatcher>(
+      this, node, &m, kArm64And, CanCover(node, m.left().node()),
+      CanCover(node, m.right().node()), kLogical64Imm);
 }
 
 
 void InstructionSelector::VisitWord32Or(Node* node) {
-  VisitBinop<Int32BinopMatcher>(this, node, kArm64Or32, kLogical32Imm);
+  Int32BinopMatcher m(node);
+  VisitLogical<Int32BinopMatcher>(
+      this, node, &m, kArm64Or32, CanCover(node, m.left().node()),
+      CanCover(node, m.right().node()), kLogical32Imm);
 }
 
 
 void InstructionSelector::VisitWord64Or(Node* node) {
-  VisitBinop<Int64BinopMatcher>(this, node, kArm64Or, kLogical64Imm);
+  Int64BinopMatcher m(node);
+  VisitLogical<Int64BinopMatcher>(
+      this, node, &m, kArm64Or, CanCover(node, m.left().node()),
+      CanCover(node, m.right().node()), kLogical64Imm);
 }
 
 
 void InstructionSelector::VisitWord32Xor(Node* node) {
-  Arm64OperandGenerator g(this);
   Int32BinopMatcher m(node);
-  if (m.right().Is(-1)) {
-    Emit(kArm64Not32, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
-  } else {
-    VisitBinop<Int32BinopMatcher>(this, node, kArm64Xor32, kLogical32Imm);
-  }
+  VisitLogical<Int32BinopMatcher>(
+      this, node, &m, kArm64Eor32, CanCover(node, m.left().node()),
+      CanCover(node, m.right().node()), kLogical32Imm);
 }
 
 
 void InstructionSelector::VisitWord64Xor(Node* node) {
-  Arm64OperandGenerator g(this);
   Int64BinopMatcher m(node);
-  if (m.right().Is(-1)) {
-    Emit(kArm64Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
-  } else {
-    VisitBinop<Int64BinopMatcher>(this, node, kArm64Xor, kLogical32Imm);
-  }
+  VisitLogical<Int64BinopMatcher>(
+      this, node, &m, kArm64Eor, CanCover(node, m.left().node()),
+      CanCover(node, m.right().node()), kLogical64Imm);
 }
 
 
 void InstructionSelector::VisitWord32Shl(Node* node) {
-  VisitRRO(this, kArm64Shl32, node, kShift32Imm);
+  VisitRRO(this, kArm64Lsl32, node, kShift32Imm);
 }
 
 
 void InstructionSelector::VisitWord64Shl(Node* node) {
-  VisitRRO(this, kArm64Shl, node, kShift64Imm);
+  Arm64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
+      m.right().IsInRange(32, 63)) {
+    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
+    // 32 bits anyway.
+    Emit(kArm64Lsl, g.DefineAsRegister(node),
+         g.UseRegister(m.left().node()->InputAt(0)),
+         g.UseImmediate(m.right().node()));
+    return;
+  }
+  VisitRRO(this, kArm64Lsl, node, kShift64Imm);
 }
 
 
 void InstructionSelector::VisitWord32Shr(Node* node) {
-  VisitRRO(this, kArm64Shr32, node, kShift32Imm);
+  Arm64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
+    int32_t lsb = m.right().Value();
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().HasValue()) {
+      uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
+      uint32_t mask_width = base::bits::CountPopulation32(mask);
+      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+      // Select Ubfx for Shr(And(x, mask), imm) where the masked bits are
+      // shifted into the least-significant bits.
+      if ((mask_msb + mask_width + lsb) == 32) {
+        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
+        Emit(kArm64Ubfx32, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+             g.TempImmediate(mask_width));
+        return;
+      }
+    }
+  }
+  VisitRRO(this, kArm64Lsr32, node, kShift32Imm);
 }
 
 
 void InstructionSelector::VisitWord64Shr(Node* node) {
-  VisitRRO(this, kArm64Shr, node, kShift64Imm);
+  Arm64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
+    int64_t lsb = m.right().Value();
+    Int64BinopMatcher mleft(m.left().node());
+    if (mleft.right().HasValue()) {
+      // Select Ubfx for Shr(And(x, mask), imm) where the masked bits are
+      // shifted into the least-significant bits.
+      uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
+      uint64_t mask_width = base::bits::CountPopulation64(mask);
+      uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
+      if ((mask_msb + mask_width + lsb) == 64) {
+        DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
+        Emit(kArm64Ubfx, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+             g.TempImmediate(mask_width));
+        return;
+      }
+    }
+  }
+  VisitRRO(this, kArm64Lsr, node, kShift64Imm);
 }
 
 
 void InstructionSelector::VisitWord32Sar(Node* node) {
-  VisitRRO(this, kArm64Sar32, node, kShift32Imm);
+  Arm64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  // Select Sxth/Sxtb for (x << K) >> K where K is 16 or 24.
+  if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().Is(16) && m.right().Is(16)) {
+      Emit(kArm64Sxth32, g.DefineAsRegister(node),
+           g.UseRegister(mleft.left().node()));
+      return;
+    } else if (mleft.right().Is(24) && m.right().Is(24)) {
+      Emit(kArm64Sxtb32, g.DefineAsRegister(node),
+           g.UseRegister(mleft.left().node()));
+      return;
+    }
+  }
+  VisitRRO(this, kArm64Asr32, node, kShift32Imm);
 }
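
The Sxth/Sxtb match works because an arithmetic right shift that exactly undoes a left shift by 16 (or 24) is a 16-bit (or 8-bit) sign extension. Portable equivalents of the selected instructions, assuming two's-complement narrowing:

#include <cstdint>

int32_t SxtbSketch(int32_t x) { return static_cast<int8_t>(x); }   // (x << 24) >> 24
int32_t SxthSketch(int32_t x) { return static_cast<int16_t>(x); }  // (x << 16) >> 16
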
 
 
 void InstructionSelector::VisitWord64Sar(Node* node) {
-  VisitRRO(this, kArm64Sar, node, kShift64Imm);
+  VisitRRO(this, kArm64Asr, node, kShift64Imm);
 }
 
 
@@ -350,23 +709,69 @@
 
 
 void InstructionSelector::VisitInt32Add(Node* node) {
-  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm);
+  Arm64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  // Select Madd(x, y, z) for Add(Mul(x, y), z).
+  if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
+    Int32BinopMatcher mleft(m.left().node());
+    Emit(kArm64Madd32, g.DefineAsRegister(node),
+         g.UseRegister(mleft.left().node()),
+         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
+    return;
+  }
+  // Select Madd(x, y, z) for Add(z, Mul(x, y)).
+  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+    Emit(kArm64Madd32, g.DefineAsRegister(node),
+         g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+    return;
+  }
+  VisitAddSub<Int32BinopMatcher>(this, node, kArm64Add32, kArm64Sub32);
 }
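
The Madd fusion here, and the Msub fusion in the Sub visitors below, fold a multiply feeding an add or sub into a single instruction. The arithmetic being matched:

#include <cstdint>

int32_t MaddSketch(int32_t x, int32_t y, int32_t z) { return x * y + z; }  // madd
int32_t MsubSketch(int32_t x, int32_t y, int32_t a) { return a - x * y; }  // msub
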
 
 
 void InstructionSelector::VisitInt64Add(Node* node) {
-  VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm);
+  Arm64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  // Select Madd(x, y, z) for Add(Mul(x, y), z).
+  if (m.left().IsInt64Mul() && CanCover(node, m.left().node())) {
+    Int64BinopMatcher mleft(m.left().node());
+    Emit(kArm64Madd, g.DefineAsRegister(node),
+         g.UseRegister(mleft.left().node()),
+         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
+    return;
+  }
+  // Select Madd(x, y, z) for Add(z, Mul(x, y)).
+  if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
+    Int64BinopMatcher mright(m.right().node());
+    Emit(kArm64Madd, g.DefineAsRegister(node),
+         g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+    return;
+  }
+  VisitAddSub<Int64BinopMatcher>(this, node, kArm64Add, kArm64Sub);
 }
 
 
 void InstructionSelector::VisitInt32Sub(Node* node) {
   Arm64OperandGenerator g(this);
   Int32BinopMatcher m(node);
+
+  // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
+  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+    Emit(kArm64Msub32, g.DefineAsRegister(node),
+         g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+    return;
+  }
+
   if (m.left().Is(0)) {
     Emit(kArm64Neg32, g.DefineAsRegister(node),
          g.UseRegister(m.right().node()));
   } else {
-    VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm);
+    VisitAddSub<Int32BinopMatcher>(this, node, kArm64Sub32, kArm64Add32);
   }
 }
 
@@ -374,24 +779,107 @@
 void InstructionSelector::VisitInt64Sub(Node* node) {
   Arm64OperandGenerator g(this);
   Int64BinopMatcher m(node);
+
+  // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
+  if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
+    Int64BinopMatcher mright(m.right().node());
+    Emit(kArm64Msub, g.DefineAsRegister(node),
+         g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+    return;
+  }
+
   if (m.left().Is(0)) {
     Emit(kArm64Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
   } else {
-    VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm);
+    VisitAddSub<Int64BinopMatcher>(this, node, kArm64Sub, kArm64Add);
   }
 }
 
 
 void InstructionSelector::VisitInt32Mul(Node* node) {
+  Arm64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+
+  if (m.left().IsInt32Sub() && CanCover(node, m.left().node())) {
+    Int32BinopMatcher mleft(m.left().node());
+
+    // Select Mneg(x, y) for Mul(Sub(0, x), y).
+    if (mleft.left().Is(0)) {
+      Emit(kArm64Mneg32, g.DefineAsRegister(node),
+           g.UseRegister(mleft.right().node()),
+           g.UseRegister(m.right().node()));
+      return;
+    }
+  }
+
+  if (m.right().IsInt32Sub() && CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+
+    // Select Mneg(x, y) for Mul(x, Sub(0, y)).
+    if (mright.left().Is(0)) {
+      Emit(kArm64Mneg32, g.DefineAsRegister(node),
+           g.UseRegister(m.left().node()),
+           g.UseRegister(mright.right().node()));
+      return;
+    }
+  }
+
   VisitRRR(this, kArm64Mul32, node);
 }
 
 
 void InstructionSelector::VisitInt64Mul(Node* node) {
+  Arm64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+
+  if (m.left().IsInt64Sub() && CanCover(node, m.left().node())) {
+    Int64BinopMatcher mleft(m.left().node());
+
+    // Select Mneg(x, y) for Mul(Sub(0, x), y).
+    if (mleft.left().Is(0)) {
+      Emit(kArm64Mneg, g.DefineAsRegister(node),
+           g.UseRegister(mleft.right().node()),
+           g.UseRegister(m.right().node()));
+      return;
+    }
+  }
+
+  if (m.right().IsInt64Sub() && CanCover(node, m.right().node())) {
+    Int64BinopMatcher mright(m.right().node());
+
+    // Select Mneg(x, y) for Mul(x, Sub(0, y)).
+    if (mright.left().Is(0)) {
+      Emit(kArm64Mneg, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.UseRegister(mright.right().node()));
+      return;
+    }
+  }
+
   VisitRRR(this, kArm64Mul, node);
 }
 
 
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+  // TODO(arm64): Can we do better here?
+  Arm64OperandGenerator g(this);
+  InstructionOperand* const smull_operand = g.TempRegister();
+  Emit(kArm64Smull, smull_operand, g.UseRegister(node->InputAt(0)),
+       g.UseRegister(node->InputAt(1)));
+  Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
+}
+
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+  // TODO(arm64): Can we do better here?
+  Arm64OperandGenerator g(this);
+  InstructionOperand* const smull_operand = g.TempRegister();
+  Emit(kArm64Umull, smull_operand, g.UseRegister(node->InputAt(0)),
+       g.UseRegister(node->InputAt(1)));
+  Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
+}
+
+
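Both MulHigh visitors form the full 64-bit product with smull/umull and shift the upper half down. The equivalent C++:

#include <cstdint>

int32_t Int32MulHighSketch(int32_t a, int32_t b) {  // smull; asr #32
  return static_cast<int32_t>((static_cast<int64_t>(a) * b) >> 32);
}
uint32_t Uint32MulHighSketch(uint32_t a, uint32_t b) {  // umull; lsr #32
  return static_cast<uint32_t>((static_cast<uint64_t>(a) * b) >> 32);
}
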
 void InstructionSelector::VisitInt32Div(Node* node) {
   VisitRRR(this, kArm64Idiv32, node);
 }
@@ -402,12 +890,12 @@
 }
 
 
-void InstructionSelector::VisitInt32UDiv(Node* node) {
+void InstructionSelector::VisitUint32Div(Node* node) {
   VisitRRR(this, kArm64Udiv32, node);
 }
 
 
-void InstructionSelector::VisitInt64UDiv(Node* node) {
+void InstructionSelector::VisitUint64Div(Node* node) {
   VisitRRR(this, kArm64Udiv, node);
 }
 
@@ -422,16 +910,23 @@
 }
 
 
-void InstructionSelector::VisitInt32UMod(Node* node) {
+void InstructionSelector::VisitUint32Mod(Node* node) {
   VisitRRR(this, kArm64Umod32, node);
 }
 
 
-void InstructionSelector::VisitInt64UMod(Node* node) {
+void InstructionSelector::VisitUint64Mod(Node* node) {
   VisitRRR(this, kArm64Umod, node);
 }
 
 
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+  Arm64OperandGenerator g(this);
+  Emit(kArm64Float32ToFloat64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
   Arm64OperandGenerator g(this);
   Emit(kArm64Int32ToFloat64, g.DefineAsRegister(node),
@@ -468,12 +963,65 @@
 
 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
   Arm64OperandGenerator g(this);
-  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+  Node* value = node->InputAt(0);
+  switch (value->opcode()) {
+    case IrOpcode::kWord32And:
+    case IrOpcode::kWord32Or:
+    case IrOpcode::kWord32Xor:
+    case IrOpcode::kWord32Shl:
+    case IrOpcode::kWord32Shr:
+    case IrOpcode::kWord32Sar:
+    case IrOpcode::kWord32Ror:
+    case IrOpcode::kWord32Equal:
+    case IrOpcode::kInt32Add:
+    case IrOpcode::kInt32AddWithOverflow:
+    case IrOpcode::kInt32Sub:
+    case IrOpcode::kInt32SubWithOverflow:
+    case IrOpcode::kInt32Mul:
+    case IrOpcode::kInt32MulHigh:
+    case IrOpcode::kInt32Div:
+    case IrOpcode::kInt32Mod:
+    case IrOpcode::kInt32LessThan:
+    case IrOpcode::kInt32LessThanOrEqual:
+    case IrOpcode::kUint32Div:
+    case IrOpcode::kUint32LessThan:
+    case IrOpcode::kUint32LessThanOrEqual:
+    case IrOpcode::kUint32Mod:
+    case IrOpcode::kUint32MulHigh: {
+      // 32-bit operations will write their result in a W register (implicitly
+      // clearing the top 32 bits of the corresponding X register), so the
+      // zero-extension is a no-op.
+      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+      return;
+    }
+    default:
+      break;
+  }
+  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(value));
+}
+
+
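The kArchNop case leans on an AArch64 architectural guarantee: writing a W register zero-extends into the full X register, so the 32-bit producers listed already deliver the zero-extended value. Illustration:

#include <cstdint>

// "add w0, w1, w2" clears x0's upper 32 bits by itself, so the widening
// below needs no extra instruction.
uint64_t ZeroExtendSketch(uint32_t a, uint32_t b) {
  uint32_t sum = a + b;  // 32-bit op, W-register write
  return sum;            // zero-extension is free
}
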
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+  Arm64OperandGenerator g(this);
+  Emit(kArm64Float64ToFloat32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
 }
 
 
 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   Arm64OperandGenerator g(this);
+  Node* value = node->InputAt(0);
+  if (CanCover(node, value)) {
+    Int64BinopMatcher m(value);
+    if ((m.IsWord64Sar() && m.right().HasValue() &&
+         (m.right().Value() == 32)) ||
+        (m.IsWord64Shr() && m.right().IsInRange(32, 63))) {
+      Emit(kArm64Lsr, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.UseImmediate(m.right().node()));
+      return;
+    }
+  }
+
   Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
 }
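
The truncation shortcut above is sound because only the low 32 bits of the shift survive: at a shift of exactly 32, Sar and Shr agree on those bits (both deliver the high word), while larger Sar amounts would sign-fill surviving bits, which is why Sar is matched only at 32 but Shr anywhere in 32-63. Equivalent C++ for the 32 case:

#include <cstdint>

int32_t HighWordSketch(int64_t x) {  // single lsr #32 after truncation
  return static_cast<int32_t>(static_cast<uint64_t>(x) >> 32);
}
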
 
@@ -507,125 +1055,38 @@
 
 
 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+  VisitRRFloat64(this, kArm64Float64Sqrt, node);
+}
+
+
+void InstructionSelector::VisitFloat64Floor(Node* node) {
+  VisitRRFloat64(this, kArm64Float64Floor, node);
+}
+
+
+void InstructionSelector::VisitFloat64Ceil(Node* node) {
+  VisitRRFloat64(this, kArm64Float64Ceil, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+  VisitRRFloat64(this, kArm64Float64RoundTruncate, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+  VisitRRFloat64(this, kArm64Float64RoundTiesAway, node);
+}
+
+
+void InstructionSelector::VisitCall(Node* node) {
   Arm64OperandGenerator g(this);
-  Emit(kArm64Float64Sqrt, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
-                                                    FlagsContinuation* cont) {
-  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, cont);
-}
-
-
-void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
-                                                    FlagsContinuation* cont) {
-  VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, cont);
-}
-
-
-// Shared routine for multiple compare operations.
-static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
-                         InstructionOperand* left, InstructionOperand* right,
-                         FlagsContinuation* cont) {
-  Arm64OperandGenerator g(selector);
-  opcode = cont->Encode(opcode);
-  if (cont->IsBranch()) {
-    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
-                   g.Label(cont->false_block()))->MarkAsControl();
-  } else {
-    DCHECK(cont->IsSet());
-    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
-  }
-}
-
-
-// Shared routine for multiple word compare operations.
-static void VisitWordCompare(InstructionSelector* selector, Node* node,
-                             InstructionCode opcode, FlagsContinuation* cont,
-                             bool commutative) {
-  Arm64OperandGenerator g(selector);
-  Node* left = node->InputAt(0);
-  Node* right = node->InputAt(1);
-
-  // Match immediates on left or right side of comparison.
-  if (g.CanBeImmediate(right, kArithmeticImm)) {
-    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
-                 cont);
-  } else if (g.CanBeImmediate(left, kArithmeticImm)) {
-    if (!commutative) cont->Commute();
-    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
-                 cont);
-  } else {
-    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
-                 cont);
-  }
-}
-
-
-void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
-  switch (node->opcode()) {
-    case IrOpcode::kInt32Add:
-      return VisitWordCompare(this, node, kArm64Cmn32, cont, true);
-    case IrOpcode::kInt32Sub:
-      return VisitWordCompare(this, node, kArm64Cmp32, cont, false);
-    case IrOpcode::kWord32And:
-      return VisitWordCompare(this, node, kArm64Tst32, cont, true);
-    default:
-      break;
-  }
-
-  Arm64OperandGenerator g(this);
-  VisitCompare(this, kArm64Tst32, g.UseRegister(node), g.UseRegister(node),
-               cont);
-}
-
-
-void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
-  switch (node->opcode()) {
-    case IrOpcode::kWord64And:
-      return VisitWordCompare(this, node, kArm64Tst, cont, true);
-    default:
-      break;
-  }
-
-  Arm64OperandGenerator g(this);
-  VisitCompare(this, kArm64Tst, g.UseRegister(node), g.UseRegister(node), cont);
-}
-
-
-void InstructionSelector::VisitWord32Compare(Node* node,
-                                             FlagsContinuation* cont) {
-  VisitWordCompare(this, node, kArm64Cmp32, cont, false);
-}
-
-
-void InstructionSelector::VisitWord64Compare(Node* node,
-                                             FlagsContinuation* cont) {
-  VisitWordCompare(this, node, kArm64Cmp, cont, false);
-}
-
-
-void InstructionSelector::VisitFloat64Compare(Node* node,
-                                              FlagsContinuation* cont) {
-  Arm64OperandGenerator g(this);
-  Node* left = node->InputAt(0);
-  Node* right = node->InputAt(1);
-  VisitCompare(this, kArm64Float64Cmp, g.UseRegister(left),
-               g.UseRegister(right), cont);
-}
-
-
-void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
-                                    BasicBlock* deoptimization) {
-  Arm64OperandGenerator g(this);
-  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+  const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
 
   FrameStateDescriptor* frame_state_descriptor = NULL;
   if (descriptor->NeedsFrameState()) {
     frame_state_descriptor =
-        GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
+        GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
   }
 
   CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
@@ -634,7 +1095,7 @@
   // TODO(turbofan): on ARM64 it's probably better to use the code object in a
   // register if there are multiple uses of it. Improve constant pool and the
   // heuristics in the register allocator for where to emit constants.
-  InitializeCallBuffer(call, &buffer, true, false);
+  InitializeCallBuffer(node, &buffer, true, false);
 
   // Push the arguments to the stack.
   bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
@@ -681,17 +1142,389 @@
   opcode |= MiscField::encode(descriptor->flags());
 
   // Emit the call instruction.
+  InstructionOperand** first_output =
+      buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
   Instruction* call_instr =
-      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+      Emit(opcode, buffer.outputs.size(), first_output,
            buffer.instruction_args.size(), &buffer.instruction_args.front());
-
   call_instr->MarkAsCall();
-  if (deoptimization != NULL) {
-    DCHECK(continuation != NULL);
-    call_instr->MarkAsControl();
+}
+
+
+// Shared routine for multiple compare operations.
+static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+                         InstructionOperand* left, InstructionOperand* right,
+                         FlagsContinuation* cont) {
+  Arm64OperandGenerator g(selector);
+  opcode = cont->Encode(opcode);
+  if (cont->IsBranch()) {
+    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
+                   g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    DCHECK(cont->IsSet());
+    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
   }
 }
 
+
+// Shared routine for multiple word compare operations.
+static void VisitWordCompare(InstructionSelector* selector, Node* node,
+                             InstructionCode opcode, FlagsContinuation* cont,
+                             bool commutative, ImmediateMode immediate_mode) {
+  Arm64OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // Match immediates on left or right side of comparison.
+  if (g.CanBeImmediate(right, immediate_mode)) {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+                 cont);
+  } else if (g.CanBeImmediate(left, immediate_mode)) {
+    if (!commutative) cont->Commute();
+    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+                 cont);
+  } else {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+                 cont);
+  }
+}
+
+
+static void VisitWord32Compare(InstructionSelector* selector, Node* node,
+                               FlagsContinuation* cont) {
+  VisitWordCompare(selector, node, kArm64Cmp32, cont, false, kArithmeticImm);
+}
+
+
+static void VisitWordTest(InstructionSelector* selector, Node* node,
+                          InstructionCode opcode, FlagsContinuation* cont) {
+  Arm64OperandGenerator g(selector);
+  VisitCompare(selector, opcode, g.UseRegister(node), g.UseRegister(node),
+               cont);
+}
+
+
+static void VisitWord32Test(InstructionSelector* selector, Node* node,
+                            FlagsContinuation* cont) {
+  VisitWordTest(selector, node, kArm64Tst32, cont);
+}
+
+
+static void VisitWord64Test(InstructionSelector* selector, Node* node,
+                            FlagsContinuation* cont) {
+  VisitWordTest(selector, node, kArm64Tst, cont);
+}
+
+
+// Shared routine for multiple float compare operations.
+static void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+                                FlagsContinuation* cont) {
+  Arm64OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(left),
+               g.UseRegister(right), cont);
+}
+
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+                                      BasicBlock* fbranch) {
+  OperandGenerator g(this);
+  Node* user = branch;
+  Node* value = branch->InputAt(0);
+
+  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+
+  // Try to combine with comparisons against 0 by simply inverting the branch.
+  while (CanCover(user, value)) {
+    if (value->opcode() == IrOpcode::kWord32Equal) {
+      Int32BinopMatcher m(value);
+      if (m.right().Is(0)) {
+        user = value;
+        value = m.left().node();
+        cont.Negate();
+      } else {
+        break;
+      }
+    } else if (value->opcode() == IrOpcode::kWord64Equal) {
+      Int64BinopMatcher m(value);
+      if (m.right().Is(0)) {
+        user = value;
+        value = m.left().node();
+        cont.Negate();
+      } else {
+        break;
+      }
+    } else {
+      break;
+    }
+  }
+
+  // Try to combine the branch with a comparison.
+  if (CanCover(user, value)) {
+    switch (value->opcode()) {
+      case IrOpcode::kWord32Equal:
+        cont.OverwriteAndNegateIfEqual(kEqual);
+        return VisitWord32Compare(this, value, &cont);
+      case IrOpcode::kInt32LessThan:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWord32Compare(this, value, &cont);
+      case IrOpcode::kInt32LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWord32Compare(this, value, &cont);
+      case IrOpcode::kUint32LessThan:
+        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWord32Compare(this, value, &cont);
+      case IrOpcode::kUint32LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWord32Compare(this, value, &cont);
+      case IrOpcode::kWord64Equal:
+        cont.OverwriteAndNegateIfEqual(kEqual);
+        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+                                kArithmeticImm);
+      case IrOpcode::kInt64LessThan:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+                                kArithmeticImm);
+      case IrOpcode::kInt64LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+                                kArithmeticImm);
+      case IrOpcode::kUint64LessThan:
+        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+                                kArithmeticImm);
+      case IrOpcode::kFloat64Equal:
+        cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
+        return VisitFloat64Compare(this, value, &cont);
+      case IrOpcode::kFloat64LessThan:
+        cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
+        return VisitFloat64Compare(this, value, &cont);
+      case IrOpcode::kFloat64LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+        return VisitFloat64Compare(this, value, &cont);
+      case IrOpcode::kProjection:
+        // Check if this is the overflow output projection of an
+        // <Operation>WithOverflow node.
+        if (OpParameter<size_t>(value) == 1u) {
+          // We cannot combine the <Operation>WithOverflow with this branch
+          // unless the 0th projection (the use of the actual value of the
+          // <Operation>) is either NULL, which means there's no use of the
+          // actual value, or was already defined, which means it is scheduled
+          // *AFTER* this branch.
+          Node* node = value->InputAt(0);
+          Node* result = node->FindProjection(0);
+          if (result == NULL || IsDefined(result)) {
+            switch (node->opcode()) {
+              case IrOpcode::kInt32AddWithOverflow:
+                cont.OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
+                                                     kArithmeticImm, &cont);
+              case IrOpcode::kInt32SubWithOverflow:
+                cont.OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
+                                                     kArithmeticImm, &cont);
+              default:
+                break;
+            }
+          }
+        }
+        break;
+      case IrOpcode::kInt32Add:
+        return VisitWordCompare(this, value, kArm64Cmn32, &cont, true,
+                                kArithmeticImm);
+      case IrOpcode::kInt32Sub:
+        return VisitWordCompare(this, value, kArm64Cmp32, &cont, false,
+                                kArithmeticImm);
+      case IrOpcode::kWord32And: {
+        Int32BinopMatcher m(value);
+        if (m.right().HasValue() &&
+            (base::bits::CountPopulation32(m.right().Value()) == 1)) {
+          // If the mask has only one bit set, we can use tbz/tbnz.
+          DCHECK((cont.condition() == kEqual) ||
+                 (cont.condition() == kNotEqual));
+          Emit(cont.Encode(kArm64TestAndBranch32), NULL,
+               g.UseRegister(m.left().node()),
+               g.TempImmediate(
+                   base::bits::CountTrailingZeros32(m.right().Value())),
+               g.Label(cont.true_block()),
+               g.Label(cont.false_block()))->MarkAsControl();
+          return;
+        }
+        return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
+                                kLogical32Imm);
+      }
+      case IrOpcode::kWord64And: {
+        Int64BinopMatcher m(value);
+        if (m.right().HasValue() &&
+            (base::bits::CountPopulation64(m.right().Value()) == 1)) {
+          // If the mask has only one bit set, we can use tbz/tbnz.
+          DCHECK((cont.condition() == kEqual) ||
+                 (cont.condition() == kNotEqual));
+          Emit(cont.Encode(kArm64TestAndBranch), NULL,
+               g.UseRegister(m.left().node()),
+               g.TempImmediate(
+                   base::bits::CountTrailingZeros64(m.right().Value())),
+               g.Label(cont.true_block()),
+               g.Label(cont.false_block()))->MarkAsControl();
+          return;
+        }
+        return VisitWordCompare(this, value, kArm64Tst, &cont, true,
+                                kLogical64Imm);
+      }
+      default:
+        break;
+    }
+  }
+
+  // Branch could not be combined with a compare; compare against 0 and branch.
+  Emit(cont.Encode(kArm64CompareAndBranch32), NULL, g.UseRegister(value),
+       g.Label(cont.true_block()),
+       g.Label(cont.false_block()))->MarkAsControl();
+}
+
+
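The Word32And/Word64And branch cases above turn a single-bit mask test into tbz/tbnz, using the trailing-zero count as the bit index. The qualifying predicate and index, sketched with GCC/Clang builtins:

#include <cstdint>

// "if (x & 0x10) goto target" can select as "tbnz w0, #4, target".
bool UseTestAndBranch(uint32_t mask) { return __builtin_popcount(mask) == 1; }
unsigned TestBitIndex(uint32_t mask) {  // mask must be nonzero
  return __builtin_ctz(mask);           // e.g. #4 for 0x10
}
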
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+  Node* const user = node;
+  FlagsContinuation cont(kEqual, node);
+  Int32BinopMatcher m(user);
+  if (m.right().Is(0)) {
+    Node* const value = m.left().node();
+    if (CanCover(user, value)) {
+      switch (value->opcode()) {
+        case IrOpcode::kInt32Add:
+          return VisitWordCompare(this, value, kArm64Cmn32, &cont, true,
+                                  kArithmeticImm);
+        case IrOpcode::kInt32Sub:
+          return VisitWordCompare(this, value, kArm64Cmp32, &cont, false,
+                                  kArithmeticImm);
+        case IrOpcode::kWord32And:
+          return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
+                                  kLogical32Imm);
+        default:
+          break;
+      }
+      return VisitWord32Test(this, value, &cont);
+    }
+  }
+  VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+  FlagsContinuation cont(kSignedLessThan, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThan, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitWord64Equal(Node* const node) {
+  Node* const user = node;
+  FlagsContinuation cont(kEqual, node);
+  Int64BinopMatcher m(user);
+  if (m.right().Is(0)) {
+    Node* const value = m.left().node();
+    if (CanCover(user, value)) {
+      switch (value->opcode()) {
+        case IrOpcode::kWord64And:
+          return VisitWordCompare(this, value, kArm64Tst, &cont, true,
+                                  kLogical64Imm);
+        default:
+          break;
+      }
+      return VisitWord64Test(this, value, &cont);
+    }
+  }
+  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+  if (Node* ovf = node->FindProjection(1)) {
+    FlagsContinuation cont(kOverflow, ovf);
+    return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
+                                         kArithmeticImm, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, &cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+  if (Node* ovf = node->FindProjection(1)) {
+    FlagsContinuation cont(kOverflow, ovf);
+    return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
+                                         kArithmeticImm, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+  FlagsContinuation cont(kSignedLessThan, node);
+  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
+}
+
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
+}
+
+
+void InstructionSelector::VisitUint64LessThan(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThan, node);
+  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
+}
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+  FlagsContinuation cont(kUnorderedEqual, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+  FlagsContinuation cont(kUnorderedLessThan, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+  return MachineOperatorBuilder::kFloat64Floor |
+         MachineOperatorBuilder::kFloat64Ceil |
+         MachineOperatorBuilder::kFloat64RoundTruncate |
+         MachineOperatorBuilder::kFloat64RoundTiesAway |
+         MachineOperatorBuilder::kWord32ShiftIsSafe |
+         MachineOperatorBuilder::kInt32DivIsSafe |
+         MachineOperatorBuilder::kUint32DivIsSafe;
+}
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/arm64/linkage-arm64.cc b/src/compiler/arm64/linkage-arm64.cc
index 2be2cb1..291b552 100644
--- a/src/compiler/arm64/linkage-arm64.cc
+++ b/src/compiler/arm64/linkage-arm64.cc
@@ -35,8 +35,9 @@
 
 typedef LinkageHelper<Arm64LinkageHelperTraits> LH;
 
-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
-  return LH::GetJSCallDescriptor(zone, parameter_count);
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
+                                             CallDescriptor::Flags flags) {
+  return LH::GetJSCallDescriptor(zone, parameter_count, flags);
 }
 
 
@@ -49,10 +50,10 @@
 
 
 CallDescriptor* Linkage::GetStubCallDescriptor(
-    CallInterfaceDescriptor descriptor, int stack_parameter_count,
-    CallDescriptor::Flags flags, Zone* zone) {
+    const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
+    CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
   return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
-                                   flags);
+                                   flags, properties);
 }
 
 
diff --git a/src/compiler/ast-graph-builder.cc b/src/compiler/ast-graph-builder.cc
index 74fb0ae..cde5e71 100644
--- a/src/compiler/ast-graph-builder.cc
+++ b/src/compiler/ast-graph-builder.cc
@@ -5,10 +5,12 @@
 #include "src/compiler/ast-graph-builder.h"
 
 #include "src/compiler.h"
+#include "src/compiler/ast-loop-assignment-analyzer.h"
 #include "src/compiler/control-builders.h"
 #include "src/compiler/machine-operator.h"
-#include "src/compiler/node-properties.h"
+#include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
 #include "src/full-codegen.h"
 #include "src/parser.h"
 #include "src/scopes.h"
@@ -17,14 +19,16 @@
 namespace internal {
 namespace compiler {
 
-AstGraphBuilder::AstGraphBuilder(CompilationInfo* info, JSGraph* jsgraph)
-    : StructuredGraphBuilder(jsgraph->graph(), jsgraph->common()),
+AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
+                                 JSGraph* jsgraph, LoopAssignmentAnalysis* loop)
+    : StructuredGraphBuilder(local_zone, jsgraph->graph(), jsgraph->common()),
       info_(info),
       jsgraph_(jsgraph),
-      globals_(0, info->zone()),
+      globals_(0, local_zone),
       breakable_(NULL),
-      execution_context_(NULL) {
-  InitializeAstVisitor(info->zone());
+      execution_context_(NULL),
+      loop_assignment_analysis_(loop) {
+  InitializeAstVisitor(local_zone);
 }
 
 
@@ -62,20 +66,29 @@
   Environment env(this, scope, graph()->start());
   set_environment(&env);
 
+  // Initialize the incoming context.
+  Node* outer_context = GetFunctionContext();
+  set_current_context(outer_context);
+
+  // Build receiver check for sloppy mode if necessary.
+  // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+  Node* original_receiver = env.Lookup(scope->receiver());
+  Node* patched_receiver = BuildPatchReceiverToGlobalProxy(original_receiver);
+  env.Bind(scope->receiver(), patched_receiver);
+
   // Build node to initialize local function context.
   Node* closure = GetFunctionClosure();
-  Node* outer = GetFunctionContext();
-  Node* inner = BuildLocalFunctionContext(outer, closure);
+  Node* inner_context = BuildLocalFunctionContext(outer_context, closure);
 
   // Push top-level function scope for the function body.
-  ContextScope top_context(this, scope, inner);
+  ContextScope top_context(this, scope, inner_context);
 
   // Build the arguments object if it is used.
   BuildArgumentsObject(scope->arguments());
 
   // Emit tracing call if requested to do so.
   if (FLAG_trace) {
-    NewNode(javascript()->Runtime(Runtime::kTraceEnter, 0));
+    NewNode(javascript()->CallRuntime(Runtime::kTraceEnter, 0));
   }
 
   // Visit implicit declaration of the function name.
@@ -86,8 +99,8 @@
   // Visit declarations within the function scope.
   VisitDeclarations(scope->declarations());
 
-  // TODO(mstarzinger): This should do an inlined stack check.
-  Node* node = NewNode(javascript()->Runtime(Runtime::kStackGuard, 0));
+  // Build a stack-check before the body.
+  Node* node = BuildStackCheck();
   PrepareFrameState(node, BailoutId::FunctionEntry());
 
   // Visit statements in the function body.
@@ -98,7 +111,7 @@
   if (FLAG_trace) {
     // TODO(mstarzinger): Only traces implicit return.
     Node* return_value = jsgraph()->UndefinedConstant();
-    NewNode(javascript()->Runtime(Runtime::kTraceExit, 1), return_value);
+    NewNode(javascript()->CallRuntime(Runtime::kTraceExit, 1), return_value);
   }
 
   // Return 'undefined' in case we can fall off the end.
@@ -129,26 +142,6 @@
 }
 
 
-// Helper to find an existing shared function info in the baseline code for the
-// given function literal. Used to canonicalize SharedFunctionInfo objects.
-static Handle<SharedFunctionInfo> SearchSharedFunctionInfo(
-    Code* unoptimized_code, FunctionLiteral* expr) {
-  int start_position = expr->start_position();
-  for (RelocIterator it(unoptimized_code); !it.done(); it.next()) {
-    RelocInfo* rinfo = it.rinfo();
-    if (rinfo->rmode() != RelocInfo::EMBEDDED_OBJECT) continue;
-    Object* obj = rinfo->target_object();
-    if (obj->IsSharedFunctionInfo()) {
-      SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
-      if (shared->start_position() == start_position) {
-        return Handle<SharedFunctionInfo>(shared);
-      }
-    }
-  }
-  return Handle<SharedFunctionInfo>();
-}
-
-
 StructuredGraphBuilder::Environment* AstGraphBuilder::CopyEnvironment(
     StructuredGraphBuilder::Environment* env) {
   return new (zone()) Environment(*reinterpret_cast<Environment*>(env));
@@ -329,24 +322,40 @@
 
 void AstGraphBuilder::VisitForValue(Expression* expr) {
   AstValueContext for_value(this);
-  if (!HasStackOverflow()) {
+  if (!CheckStackOverflow()) {
     expr->Accept(this);
+  } else {
+    ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
   }
 }
 
 
 void AstGraphBuilder::VisitForEffect(Expression* expr) {
   AstEffectContext for_effect(this);
-  if (!HasStackOverflow()) {
+  if (!CheckStackOverflow()) {
     expr->Accept(this);
+  } else {
+    ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
   }
 }
 
 
 void AstGraphBuilder::VisitForTest(Expression* expr) {
   AstTestContext for_condition(this);
-  if (!HasStackOverflow()) {
+  if (!CheckStackOverflow()) {
     expr->Accept(this);
+  } else {
+    ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
+  }
+}
+
+
+void AstGraphBuilder::Visit(Expression* expr) {
+  // Reuses enclosing AstContext.
+  if (!CheckStackOverflow()) {
+    expr->Accept(this);
+  } else {
+    ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
   }
 }
 
@@ -360,8 +369,8 @@
       Handle<Oddball> value = variable->binding_needs_init()
                                   ? isolate()->factory()->the_hole_value()
                                   : isolate()->factory()->undefined_value();
-      globals()->Add(variable->name(), zone());
-      globals()->Add(value, zone());
+      globals()->push_back(variable->name());
+      globals()->push_back(value);
       break;
     }
     case Variable::PARAMETER:
@@ -392,8 +401,8 @@
           Compiler::BuildFunctionInfo(decl->fun(), info()->script(), info());
       // Check for stack-overflow exception.
       if (function.is_null()) return SetStackOverflow();
-      globals()->Add(variable->name(), zone());
-      globals()->Add(function, zone());
+      globals()->push_back(variable->name());
+      globals()->push_back(function);
       break;
     }
     case Variable::PARAMETER:
@@ -580,7 +589,7 @@
 
 void AstGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
   LoopBuilder while_loop(this);
-  while_loop.BeginLoop();
+  while_loop.BeginLoop(GetVariablesAssignedInLoop(stmt));
   VisitIterationBody(stmt, &while_loop, 0);
   while_loop.EndBody();
   VisitForTest(stmt->cond());
@@ -592,7 +601,7 @@
 
 void AstGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
   LoopBuilder while_loop(this);
-  while_loop.BeginLoop();
+  while_loop.BeginLoop(GetVariablesAssignedInLoop(stmt));
   VisitForTest(stmt->cond());
   Node* condition = environment()->Pop();
   while_loop.BreakUnless(condition);
@@ -605,11 +614,13 @@
 void AstGraphBuilder::VisitForStatement(ForStatement* stmt) {
   LoopBuilder for_loop(this);
   VisitIfNotNull(stmt->init());
-  for_loop.BeginLoop();
+  for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt));
   if (stmt->cond() != NULL) {
     VisitForTest(stmt->cond());
     Node* condition = environment()->Pop();
     for_loop.BreakUnless(condition);
+  } else {
+    for_loop.BreakUnless(jsgraph()->TrueConstant());
   }
   VisitIterationBody(stmt, &for_loop, 0);
   for_loop.EndBody();
@@ -639,24 +650,27 @@
     // Convert object to jsobject.
     // PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
     obj = NewNode(javascript()->ToObject(), obj);
+    PrepareFrameState(obj, stmt->ToObjectId(), OutputFrameStateCombine::Push());
     environment()->Push(obj);
     // TODO(dcarney): should do a fast enum cache check here to skip runtime.
     environment()->Push(obj);
     Node* cache_type = ProcessArguments(
-        javascript()->Runtime(Runtime::kGetPropertyNamesFast, 1), 1);
+        javascript()->CallRuntime(Runtime::kGetPropertyNamesFast, 1), 1);
+    PrepareFrameState(cache_type, stmt->EnumId(),
+                      OutputFrameStateCombine::Push());
     // TODO(dcarney): these next runtime calls should be removed in favour of
     //                a few simplified instructions.
     environment()->Push(obj);
     environment()->Push(cache_type);
     Node* cache_pair =
-        ProcessArguments(javascript()->Runtime(Runtime::kForInInit, 2), 2);
+        ProcessArguments(javascript()->CallRuntime(Runtime::kForInInit, 2), 2);
     // cache_type may have been replaced.
     Node* cache_array = NewNode(common()->Projection(0), cache_pair);
     cache_type = NewNode(common()->Projection(1), cache_pair);
     environment()->Push(cache_type);
     environment()->Push(cache_array);
     Node* cache_length = ProcessArguments(
-        javascript()->Runtime(Runtime::kForInCacheArrayLength, 2), 2);
+        javascript()->CallRuntime(Runtime::kForInCacheArrayLength, 2), 2);
     {
       // TODO(dcarney): this check is actually supposed to be for the
       //                empty enum case only.
@@ -676,7 +690,7 @@
         environment()->Push(jsgraph()->ZeroConstant());
         // PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
         LoopBuilder for_loop(this);
-        for_loop.BeginLoop();
+        for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt));
         // Check loop termination condition.
         Node* index = environment()->Peek(0);
         Node* exit_cond =
@@ -692,8 +706,8 @@
         environment()->Push(cache_array);
         environment()->Push(cache_type);
         environment()->Push(index);
-        Node* pair =
-            ProcessArguments(javascript()->Runtime(Runtime::kForInNext, 4), 4);
+        Node* pair = ProcessArguments(
+            javascript()->CallRuntime(Runtime::kForInNext, 4), 4);
         Node* value = NewNode(common()->Projection(0), pair);
         Node* should_filter = NewNode(common()->Projection(1), pair);
         environment()->Push(value);
@@ -719,7 +733,7 @@
           // result is either the string key or Smi(0) indicating the property
           // is gone.
           Node* res = ProcessArguments(
-              javascript()->Call(3, NO_CALL_FUNCTION_FLAGS), 3);
+              javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS), 3);
           // TODO(jarin): provide real bailout id.
           PrepareFrameState(res, BailoutId::None());
           Node* property_missing = NewNode(javascript()->StrictEqual(), res,
@@ -785,7 +799,7 @@
 
 void AstGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
   // TODO(turbofan): Do we really need a separate reloc-info for this?
-  Node* node = NewNode(javascript()->Runtime(Runtime::kDebugBreak, 0));
+  Node* node = NewNode(javascript()->CallRuntime(Runtime::kDebugBreak, 0));
   PrepareFrameState(node, stmt->DebugBreakId());
 }
 
@@ -795,8 +809,8 @@
 
   // Build a new shared function info if we cannot find one in the baseline
   // code. We also have a stack overflow if the recursive compilation did.
-  Handle<SharedFunctionInfo> shared_info =
-      SearchSharedFunctionInfo(info()->shared_info()->code(), expr);
+  expr->InitializeSharedInfo(handle(info()->shared_info()->code()));
+  Handle<SharedFunctionInfo> shared_info = expr->shared_info();
   if (shared_info.is_null()) {
     shared_info = Compiler::BuildFunctionInfo(expr, info()->script(), info());
     CHECK(!shared_info.is_null());  // TODO(mstarzinger): Set stack overflow?
@@ -804,16 +818,14 @@
 
   // Create node to instantiate a new closure.
   Node* info = jsgraph()->Constant(shared_info);
-  Node* pretenure = expr->pretenure() ? jsgraph()->TrueConstant()
-                                      : jsgraph()->FalseConstant();
-  const Operator* op = javascript()->Runtime(Runtime::kNewClosure, 3);
+  Node* pretenure = jsgraph()->BooleanConstant(expr->pretenure());
+  const Operator* op = javascript()->CallRuntime(Runtime::kNewClosure, 3);
   Node* value = NewNode(op, context, info, pretenure);
   ast_context()->ProduceValue(value);
 }
 
 
 void AstGraphBuilder::VisitClassLiteral(ClassLiteral* expr) {
-  // TODO(arv): Implement.
   UNREACHABLE();
 }
 
@@ -838,7 +850,8 @@
 
 
 void AstGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
-  Node* value = BuildVariableLoad(expr->var(), expr->id());
+  VectorSlotPair pair = CreateVectorSlotPair(expr->VariableFeedbackSlot());
+  Node* value = BuildVariableLoad(expr->var(), expr->id(), pair);
   ast_context()->ProduceValue(value);
 }
 
@@ -859,8 +872,9 @@
   Node* pattern = jsgraph()->Constant(expr->pattern());
   Node* flags = jsgraph()->Constant(expr->flags());
   const Operator* op =
-      javascript()->Runtime(Runtime::kMaterializeRegExpLiteral, 4);
+      javascript()->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
   Node* literal = NewNode(op, literals_array, literal_index, pattern, flags);
+  PrepareFrameState(literal, expr->id(), ast_context()->GetStateCombine());
   ast_context()->ProduceValue(literal);
 }
 
@@ -875,8 +889,11 @@
   Node* literal_index = jsgraph()->Constant(expr->literal_index());
   Node* constants = jsgraph()->Constant(expr->constant_properties());
   Node* flags = jsgraph()->Constant(expr->ComputeFlags());
-  const Operator* op = javascript()->Runtime(Runtime::kCreateObjectLiteral, 4);
+  const Operator* op =
+      javascript()->CallRuntime(Runtime::kCreateObjectLiteral, 4);
   Node* literal = NewNode(op, literals_array, literal_index, constants, flags);
+  PrepareFrameState(literal, expr->CreateLiteralId(),
+                    OutputFrameStateCombine::Push());
 
   // The object is expected on the operand stack during computation of the
   // property values and is the value of the entire expression.
@@ -924,7 +941,8 @@
         Node* receiver = environment()->Pop();
         if (property->emit_store()) {
           Node* strict = jsgraph()->Constant(SLOPPY);
-          const Operator* op = javascript()->Runtime(Runtime::kSetProperty, 4);
+          const Operator* op =
+              javascript()->CallRuntime(Runtime::kSetProperty, 4);
           NewNode(op, receiver, key, value, strict);
         }
         break;
@@ -935,8 +953,12 @@
         Node* value = environment()->Pop();
         Node* receiver = environment()->Pop();
         if (property->emit_store()) {
-          const Operator* op = javascript()->Runtime(Runtime::kSetPrototype, 2);
-          NewNode(op, receiver, value);
+          const Operator* op =
+              javascript()->CallRuntime(Runtime::kInternalSetPrototype, 2);
+          Node* set_prototype = NewNode(op, receiver, value);
+          // SetPrototype should not lazy deopt on an object
+          // literal.
+          PrepareFrameState(set_prototype, BailoutId::None());
         }
         break;
       }
@@ -961,14 +983,16 @@
     Node* name = environment()->Pop();
     Node* attr = jsgraph()->Constant(NONE);
     const Operator* op =
-        javascript()->Runtime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+        javascript()->CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
     Node* call = NewNode(op, literal, name, getter, setter, attr);
-    PrepareFrameState(call, it->first->id());
+    // This should not lazy deopt on a new literal.
+    PrepareFrameState(call, BailoutId::None());
   }
 
   // Transform literals that contain functions to fast properties.
   if (expr->has_function()) {
-    const Operator* op = javascript()->Runtime(Runtime::kToFastProperties, 1);
+    const Operator* op =
+        javascript()->CallRuntime(Runtime::kToFastProperties, 1);
     NewNode(op, literal);
   }
 
@@ -986,7 +1010,8 @@
   Node* literal_index = jsgraph()->Constant(expr->literal_index());
   Node* constants = jsgraph()->Constant(expr->constant_elements());
   Node* flags = jsgraph()->Constant(expr->ComputeFlags());
-  const Operator* op = javascript()->Runtime(Runtime::kCreateArrayLiteral, 4);
+  const Operator* op =
+      javascript()->CallRuntime(Runtime::kCreateArrayLiteral, 4);
   Node* literal = NewNode(op, literals_array, literal_index, constants, flags);
 
   // The array and the literal index are both expected on the operand stack
@@ -1086,23 +1111,31 @@
     Node* old_value = NULL;
     switch (assign_type) {
       case VARIABLE: {
-        Variable* variable = expr->target()->AsVariableProxy()->var();
-        old_value = BuildVariableLoad(variable, expr->target()->id());
+        VariableProxy* proxy = expr->target()->AsVariableProxy();
+        VectorSlotPair pair =
+            CreateVectorSlotPair(proxy->VariableFeedbackSlot());
+        old_value = BuildVariableLoad(proxy->var(), expr->target()->id(), pair);
         break;
       }
       case NAMED_PROPERTY: {
         Node* object = environment()->Top();
         Unique<Name> name =
             MakeUnique(property->key()->AsLiteral()->AsPropertyName());
-        old_value = NewNode(javascript()->LoadNamed(name), object);
-        PrepareFrameState(old_value, property->LoadId(), kPushOutput);
+        VectorSlotPair pair =
+            CreateVectorSlotPair(property->PropertyFeedbackSlot());
+        old_value = NewNode(javascript()->LoadNamed(name, pair), object);
+        PrepareFrameState(old_value, property->LoadId(),
+                          OutputFrameStateCombine::Push());
         break;
       }
       case KEYED_PROPERTY: {
         Node* key = environment()->Top();
         Node* object = environment()->Peek(1);
-        old_value = NewNode(javascript()->LoadProperty(), object, key);
-        PrepareFrameState(old_value, property->LoadId(), kPushOutput);
+        VectorSlotPair pair =
+            CreateVectorSlotPair(property->PropertyFeedbackSlot());
+        old_value = NewNode(javascript()->LoadProperty(pair), object, key);
+        PrepareFrameState(old_value, property->LoadId(),
+                          OutputFrameStateCombine::Push());
         break;
       }
     }
@@ -1111,7 +1144,8 @@
     Node* right = environment()->Pop();
     Node* left = environment()->Pop();
     Node* value = BuildBinaryOp(left, right, expr->binary_op());
-    PrepareFrameState(value, expr->binary_operation()->id(), kPushOutput);
+    PrepareFrameState(value, expr->binary_operation()->id(),
+                      OutputFrameStateCombine::Push());
     environment()->Push(value);
   } else {
     VisitForValue(expr->value());
@@ -1122,8 +1156,8 @@
   switch (assign_type) {
     case VARIABLE: {
       Variable* variable = expr->target()->AsVariableProxy()->var();
-      BuildVariableAssignment(variable, value, expr->op(),
-                              expr->AssignmentId());
+      BuildVariableAssignment(variable, value, expr->op(), expr->AssignmentId(),
+                              ast_context()->GetStateCombine());
       break;
     }
     case NAMED_PROPERTY: {
@@ -1132,7 +1166,8 @@
           MakeUnique(property->key()->AsLiteral()->AsPropertyName());
       Node* store =
           NewNode(javascript()->StoreNamed(strict_mode(), name), object, value);
-      PrepareFrameState(store, expr->AssignmentId());
+      PrepareFrameState(store, expr->AssignmentId(),
+                        ast_context()->GetStateCombine());
       break;
     }
     case KEYED_PROPERTY: {
@@ -1140,7 +1175,8 @@
       Node* object = environment()->Pop();
       Node* store = NewNode(javascript()->StoreProperty(strict_mode()), object,
                             key, value);
-      PrepareFrameState(store, expr->AssignmentId());
+      PrepareFrameState(store, expr->AssignmentId(),
+                        ast_context()->GetStateCombine());
       break;
     }
   }
@@ -1162,25 +1198,27 @@
 void AstGraphBuilder::VisitThrow(Throw* expr) {
   VisitForValue(expr->exception());
   Node* exception = environment()->Pop();
-  const Operator* op = javascript()->Runtime(Runtime::kThrow, 1);
+  const Operator* op = javascript()->CallRuntime(Runtime::kThrow, 1);
   Node* value = NewNode(op, exception);
+  PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
   ast_context()->ProduceValue(value);
 }
 
 
 void AstGraphBuilder::VisitProperty(Property* expr) {
   Node* value;
+  VectorSlotPair pair = CreateVectorSlotPair(expr->PropertyFeedbackSlot());
   if (expr->key()->IsPropertyName()) {
     VisitForValue(expr->obj());
     Node* object = environment()->Pop();
     Unique<Name> name = MakeUnique(expr->key()->AsLiteral()->AsPropertyName());
-    value = NewNode(javascript()->LoadNamed(name), object);
+    value = NewNode(javascript()->LoadNamed(name, pair), object);
   } else {
     VisitForValue(expr->obj());
     VisitForValue(expr->key());
     Node* key = environment()->Pop();
     Node* object = environment()->Pop();
-    value = NewNode(javascript()->LoadProperty(), object, key);
+    value = NewNode(javascript()->LoadProperty(pair), object, key);
   }
   PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
   ast_context()->ProduceValue(value);
@@ -1199,8 +1237,10 @@
   bool possibly_eval = false;
   switch (call_type) {
     case Call::GLOBAL_CALL: {
-      Variable* variable = callee->AsVariableProxy()->var();
-      callee_value = BuildVariableLoad(variable, expr->expression()->id());
+      VariableProxy* proxy = callee->AsVariableProxy();
+      VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
+      callee_value =
+          BuildVariableLoad(proxy->var(), expr->expression()->id(), pair);
       receiver_value = jsgraph()->UndefinedConstant();
       break;
     }
@@ -1208,26 +1248,33 @@
       Variable* variable = callee->AsVariableProxy()->var();
       DCHECK(variable->location() == Variable::LOOKUP);
       Node* name = jsgraph()->Constant(variable->name());
-      const Operator* op = javascript()->Runtime(Runtime::kLoadLookupSlot, 2);
+      const Operator* op =
+          javascript()->CallRuntime(Runtime::kLoadLookupSlot, 2);
       Node* pair = NewNode(op, current_context(), name);
       callee_value = NewNode(common()->Projection(0), pair);
       receiver_value = NewNode(common()->Projection(1), pair);
+
+      PrepareFrameState(pair, expr->EvalOrLookupId(),
+                        OutputFrameStateCombine::Push(2));
       break;
     }
     case Call::PROPERTY_CALL: {
       Property* property = callee->AsProperty();
       VisitForValue(property->obj());
       Node* object = environment()->Top();
+      VectorSlotPair pair =
+          CreateVectorSlotPair(property->PropertyFeedbackSlot());
       if (property->key()->IsPropertyName()) {
         Unique<Name> name =
             MakeUnique(property->key()->AsLiteral()->AsPropertyName());
-        callee_value = NewNode(javascript()->LoadNamed(name), object);
+        callee_value = NewNode(javascript()->LoadNamed(name, pair), object);
       } else {
         VisitForValue(property->key());
         Node* key = environment()->Pop();
-        callee_value = NewNode(javascript()->LoadProperty(), object, key);
+        callee_value = NewNode(javascript()->LoadProperty(pair), object, key);
       }
-      PrepareFrameState(callee_value, property->LoadId(), kPushOutput);
+      PrepareFrameState(callee_value, property->LoadId(),
+                        OutputFrameStateCombine::Push());
       receiver_value = environment()->Pop();
       // Note that a PROPERTY_CALL requires the receiver to be wrapped into an
       // object for sloppy callees. This could also be modeled explicitly here,
@@ -1235,6 +1282,11 @@
       flags = CALL_AS_METHOD;
       break;
     }
+    case Call::SUPER_CALL: {
+      // TODO(dslomov): Implement super calls in TurboFan.
+      UNIMPLEMENTED();
+      break;
+    }
     case Call::POSSIBLY_EVAL_CALL:
       possibly_eval = true;
     // Fall through.
@@ -1270,9 +1322,11 @@
     Node* strict = jsgraph()->Constant(strict_mode());
     Node* position = jsgraph()->Constant(info()->scope()->start_position());
     const Operator* op =
-        javascript()->Runtime(Runtime::kResolvePossiblyDirectEval, 6);
+        javascript()->CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
     Node* pair =
         NewNode(op, callee, source, function, receiver, strict, position);
+    PrepareFrameState(pair, expr->EvalOrLookupId(),
+                      OutputFrameStateCombine::PokeAt(arg_count + 1));
     Node* new_callee = NewNode(common()->Projection(0), pair);
     Node* new_receiver = NewNode(common()->Projection(1), pair);
 
@@ -1282,7 +1336,7 @@
   }
 
   // Create node to perform the function call.
-  const Operator* call = javascript()->Call(args->length() + 2, flags);
+  const Operator* call = javascript()->CallFunction(args->length() + 2, flags);
   Node* value = ProcessArguments(call, args->length() + 2);
   PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
   ast_context()->ProduceValue(value);
@@ -1297,7 +1351,7 @@
   VisitForValues(args);
 
   // Create node to perform the construct call.
-  const Operator* call = javascript()->CallNew(args->length() + 1);
+  const Operator* call = javascript()->CallConstruct(args->length() + 1);
   Node* value = ProcessArguments(call, args->length() + 1);
   PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
   ast_context()->ProduceValue(value);
@@ -1312,10 +1366,13 @@
   CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS;
   Node* receiver_value = BuildLoadBuiltinsObject();
   Unique<String> unique = MakeUnique(name);
-  Node* callee_value = NewNode(javascript()->LoadNamed(unique), receiver_value);
+  VectorSlotPair pair = CreateVectorSlotPair(expr->CallRuntimeFeedbackSlot());
+  Node* callee_value =
+      NewNode(javascript()->LoadNamed(unique, pair), receiver_value);
   // TODO(jarin): Find/create a bailout id to deoptimize to (crankshaft
   // refuses to optimize functions with jsruntime calls).
-  PrepareFrameState(callee_value, BailoutId::None(), kPushOutput);
+  PrepareFrameState(callee_value, BailoutId::None(),
+                    OutputFrameStateCombine::Push());
   environment()->Push(callee_value);
   environment()->Push(receiver_value);
 
@@ -1324,7 +1381,7 @@
   VisitForValues(args);
 
   // Create node to perform the JS runtime call.
-  const Operator* call = javascript()->Call(args->length() + 2, flags);
+  const Operator* call = javascript()->CallFunction(args->length() + 2, flags);
   Node* value = ProcessArguments(call, args->length() + 2);
   PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
   ast_context()->ProduceValue(value);
@@ -1347,7 +1404,7 @@
 
   // Create node to perform the runtime call.
   Runtime::FunctionId functionId = function->function_id;
-  const Operator* call = javascript()->Runtime(functionId, args->length());
+  const Operator* call = javascript()->CallRuntime(functionId, args->length());
   Node* value = ProcessArguments(call, args->length());
   PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
   ast_context()->ProduceValue(value);
@@ -1386,8 +1443,10 @@
   int stack_depth = -1;
   switch (assign_type) {
     case VARIABLE: {
-      Variable* variable = expr->expression()->AsVariableProxy()->var();
-      old_value = BuildVariableLoad(variable, expr->expression()->id());
+      VariableProxy* proxy = expr->expression()->AsVariableProxy();
+      VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
+      old_value =
+          BuildVariableLoad(proxy->var(), expr->expression()->id(), pair);
       stack_depth = 0;
       break;
     }
@@ -1396,8 +1455,11 @@
       Node* object = environment()->Top();
       Unique<Name> name =
           MakeUnique(property->key()->AsLiteral()->AsPropertyName());
-      old_value = NewNode(javascript()->LoadNamed(name), object);
-      PrepareFrameState(old_value, property->LoadId(), kPushOutput);
+      VectorSlotPair pair =
+          CreateVectorSlotPair(property->PropertyFeedbackSlot());
+      old_value = NewNode(javascript()->LoadNamed(name, pair), object);
+      PrepareFrameState(old_value, property->LoadId(),
+                        OutputFrameStateCombine::Push());
       stack_depth = 1;
       break;
     }
@@ -1406,8 +1468,11 @@
       VisitForValue(property->key());
       Node* key = environment()->Top();
       Node* object = environment()->Peek(1);
-      old_value = NewNode(javascript()->LoadProperty(), object, key);
-      PrepareFrameState(old_value, property->LoadId(), kPushOutput);
+      VectorSlotPair pair =
+          CreateVectorSlotPair(property->PropertyFeedbackSlot());
+      old_value = NewNode(javascript()->LoadProperty(pair), object, key);
+      PrepareFrameState(old_value, property->LoadId(),
+                        OutputFrameStateCombine::Push());
       stack_depth = 2;
       break;
     }
@@ -1548,20 +1613,21 @@
 
 
 void AstGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
-  DCHECK(globals()->is_empty());
+  DCHECK(globals()->empty());
   AstVisitor::VisitDeclarations(declarations);
-  if (globals()->is_empty()) return;
-  Handle<FixedArray> data =
-      isolate()->factory()->NewFixedArray(globals()->length(), TENURED);
-  for (int i = 0; i < globals()->length(); ++i) data->set(i, *globals()->at(i));
+  if (globals()->empty()) return;
+  int array_index = 0;
+  Handle<FixedArray> data = isolate()->factory()->NewFixedArray(
+      static_cast<int>(globals()->size()), TENURED);
+  for (Handle<Object> obj : *globals()) data->set(array_index++, *obj);
   int encoded_flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
                       DeclareGlobalsNativeFlag::encode(info()->is_native()) |
                       DeclareGlobalsStrictMode::encode(strict_mode());
   Node* flags = jsgraph()->Constant(encoded_flags);
   Node* pairs = jsgraph()->Constant(data);
-  const Operator* op = javascript()->Runtime(Runtime::kDeclareGlobals, 3);
+  const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals, 3);
   NewNode(op, current_context(), pairs, flags);
-  globals()->Rewind(0);
+  globals()->clear();
 }
 
 
@@ -1585,7 +1651,8 @@
     // deleting "this" is allowed in all language modes.
     Variable* variable = expr->expression()->AsVariableProxy()->var();
     DCHECK(strict_mode() == SLOPPY || variable->is_this());
-    value = BuildVariableDelete(variable);
+    value = BuildVariableDelete(variable, expr->id(),
+                                ast_context()->GetStateCombine());
   } else if (expr->expression()->IsProperty()) {
     Property* property = expr->expression()->AsProperty();
     VisitForValue(property->obj());
@@ -1593,6 +1660,7 @@
     Node* key = environment()->Pop();
     Node* object = environment()->Pop();
     value = NewNode(javascript()->DeleteProperty(strict_mode()), object, key);
+    PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
   } else {
     VisitForEffect(expr->expression());
     value = jsgraph()->TrueConstant();
@@ -1613,9 +1681,10 @@
   if (expr->expression()->IsVariableProxy()) {
     // Typeof does not throw a reference error on global variables, hence we
     // perform a non-contextual load in case the operand is a variable proxy.
-    Variable* variable = expr->expression()->AsVariableProxy()->var();
-    operand =
-        BuildVariableLoad(variable, expr->expression()->id(), NOT_CONTEXTUAL);
+    VariableProxy* proxy = expr->expression()->AsVariableProxy();
+    VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
+    operand = BuildVariableLoad(proxy->var(), expr->expression()->id(), pair,
+                                NOT_CONTEXTUAL);
   } else {
     VisitForValue(expr->expression());
     operand = environment()->Pop();
@@ -1666,6 +1735,17 @@
 }
 
 
+StrictMode AstGraphBuilder::strict_mode() const {
+  return info()->strict_mode();
+}
+
+
+VectorSlotPair AstGraphBuilder::CreateVectorSlotPair(
+    FeedbackVectorICSlot slot) const {
+  return VectorSlotPair(handle(info()->shared_info()->feedback_vector()), slot);
+}
+
+
 Node* AstGraphBuilder::ProcessArguments(const Operator* op, int arity) {
   DCHECK(environment()->stack_height() >= arity);
   Node** all = info()->zone()->NewArray<Node*>(arity);
@@ -1677,10 +1757,36 @@
 }
 
 
+Node* AstGraphBuilder::BuildPatchReceiverToGlobalProxy(Node* receiver) {
+  // Sloppy mode functions and builtins need to replace the receiver with the
+  // global proxy when called as functions (without an explicit receiver
+  // object). Otherwise there is nothing left to do here.
+  if (info()->strict_mode() != SLOPPY || info()->is_native()) return receiver;
+
+  // There is no need to perform patching if the receiver is never used. Note
+  // that scope predicates are purely syntactic; a call to eval might still
+  // inspect the receiver value.
+  if (!info()->scope()->uses_this() && !info()->scope()->inner_uses_this() &&
+      !info()->scope()->calls_sloppy_eval()) {
+    return receiver;
+  }
+
+  IfBuilder receiver_check(this);
+  Node* undefined = jsgraph()->UndefinedConstant();
+  Node* check = NewNode(javascript()->StrictEqual(), receiver, undefined);
+  receiver_check.If(check);
+  receiver_check.Then();
+  environment()->Push(BuildLoadGlobalProxy());
+  receiver_check.Else();
+  environment()->Push(receiver);
+  receiver_check.End();
+  return environment()->Pop();
+}
+
+
 Node* AstGraphBuilder::BuildLocalFunctionContext(Node* context, Node* closure) {
   int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots <= 0) return context;
-  set_current_context(context);
 
   // Allocate a new local context.
   const Operator* op = javascript()->CreateFunctionContext();
@@ -1710,7 +1816,7 @@
 
   // Allocate and initialize a new arguments object.
   Node* callee = GetFunctionClosure();
-  const Operator* op = javascript()->Runtime(Runtime::kNewArguments, 1);
+  const Operator* op = javascript()->CallRuntime(Runtime::kNewArguments, 1);
   Node* object = NewNode(op, callee);
 
   // Assign the object to the arguments variable.
@@ -1738,13 +1844,14 @@
 
 
 Node* AstGraphBuilder::BuildHoleCheckThrow(Node* value, Variable* variable,
-                                           Node* not_hole) {
+                                           Node* not_hole,
+                                           BailoutId bailout_id) {
   IfBuilder hole_check(this);
   Node* the_hole = jsgraph()->TheHoleConstant();
   Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
   hole_check.If(check);
   hole_check.Then();
-  environment()->Push(BuildThrowReferenceError(variable));
+  environment()->Push(BuildThrowReferenceError(variable, bailout_id));
   hole_check.Else();
   environment()->Push(not_hole);
   hole_check.End();
@@ -1754,6 +1861,7 @@
 
 Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
                                          BailoutId bailout_id,
+                                         const VectorSlotPair& feedback,
                                          ContextualMode contextual_mode) {
   Node* the_hole = jsgraph()->TheHoleConstant();
   VariableMode mode = variable->mode();
@@ -1762,9 +1870,10 @@
       // Global var, const, or let variable.
       Node* global = BuildLoadGlobalObject();
       Unique<Name> name = MakeUnique(variable->name());
-      const Operator* op = javascript()->LoadNamed(name, contextual_mode);
+      const Operator* op =
+          javascript()->LoadNamed(name, feedback, contextual_mode);
       Node* node = NewNode(op, global);
-      PrepareFrameState(node, bailout_id, kPushOutput);
+      PrepareFrameState(node, bailout_id, OutputFrameStateCombine::Push());
       return node;
     }
     case Variable::PARAMETER:
@@ -1782,9 +1891,9 @@
       } else if (mode == LET || mode == CONST) {
         // Perform check for uninitialized let/const variables.
         if (value->op() == the_hole->op()) {
-          value = BuildThrowReferenceError(variable);
+          value = BuildThrowReferenceError(variable, bailout_id);
         } else if (value->opcode() == IrOpcode::kPhi) {
-          value = BuildHoleCheckThrow(value, variable, value);
+          value = BuildHoleCheckThrow(value, variable, value, bailout_id);
         }
       }
       return value;
@@ -1805,7 +1914,7 @@
         value = BuildHoleCheckSilent(value, undefined, value);
       } else if (mode == LET || mode == CONST) {
         // Perform check for uninitialized let/const variables.
-        value = BuildHoleCheckThrow(value, variable, value);
+        value = BuildHoleCheckThrow(value, variable, value, bailout_id);
       }
       return value;
     }
@@ -1816,8 +1925,9 @@
           (contextual_mode == CONTEXTUAL)
               ? Runtime::kLoadLookupSlot
               : Runtime::kLoadLookupSlotNoReferenceError;
-      const Operator* op = javascript()->Runtime(function_id, 2);
+      const Operator* op = javascript()->CallRuntime(function_id, 2);
       Node* pair = NewNode(op, current_context(), name);
+      PrepareFrameState(pair, bailout_id, OutputFrameStateCombine::Push(1));
       return NewNode(common()->Projection(0), pair);
     }
   }
@@ -1826,26 +1936,32 @@
 }
 
 
-Node* AstGraphBuilder::BuildVariableDelete(Variable* variable) {
+Node* AstGraphBuilder::BuildVariableDelete(
+    Variable* variable, BailoutId bailout_id,
+    OutputFrameStateCombine state_combine) {
   switch (variable->location()) {
     case Variable::UNALLOCATED: {
       // Global var, const, or let variable.
       Node* global = BuildLoadGlobalObject();
       Node* name = jsgraph()->Constant(variable->name());
       const Operator* op = javascript()->DeleteProperty(strict_mode());
-      return NewNode(op, global, name);
+      Node* result = NewNode(op, global, name);
+      PrepareFrameState(result, bailout_id, state_combine);
+      return result;
     }
     case Variable::PARAMETER:
     case Variable::LOCAL:
     case Variable::CONTEXT:
       // Local var, const, or let variable or context variable.
-      return variable->is_this() ? jsgraph()->TrueConstant()
-                                 : jsgraph()->FalseConstant();
+      return jsgraph()->BooleanConstant(variable->is_this());
     case Variable::LOOKUP: {
       // Dynamic lookup of context variable (anywhere in the chain).
       Node* name = jsgraph()->Constant(variable->name());
-      const Operator* op = javascript()->Runtime(Runtime::kDeleteLookupSlot, 2);
-      return NewNode(op, current_context(), name);
+      const Operator* op =
+          javascript()->CallRuntime(Runtime::kDeleteLookupSlot, 2);
+      Node* result = NewNode(op, current_context(), name);
+      PrepareFrameState(result, bailout_id, state_combine);
+      return result;
     }
   }
   UNREACHABLE();
@@ -1853,9 +1969,9 @@
 }
 
 
-Node* AstGraphBuilder::BuildVariableAssignment(Variable* variable, Node* value,
-                                               Token::Value op,
-                                               BailoutId bailout_id) {
+Node* AstGraphBuilder::BuildVariableAssignment(
+    Variable* variable, Node* value, Token::Value op, BailoutId bailout_id,
+    OutputFrameStateCombine combine) {
   Node* the_hole = jsgraph()->TheHoleConstant();
   VariableMode mode = variable->mode();
   switch (variable->location()) {
@@ -1865,7 +1981,7 @@
       Unique<Name> name = MakeUnique(variable->name());
       const Operator* op = javascript()->StoreNamed(strict_mode(), name);
       Node* store = NewNode(op, global, value);
-      PrepareFrameState(store, bailout_id);
+      PrepareFrameState(store, bailout_id, combine);
       return store;
     }
     case Variable::PARAMETER:
@@ -1878,7 +1994,12 @@
           value = BuildHoleCheckSilent(current, value, current);
         }
       } else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) {
-        // Non-initializing assignments to legacy const is ignored.
+        // Non-initializing assignments to legacy const
+        // - throw in strict mode.
+        // - are ignored in sloppy mode.
+        if (strict_mode() == STRICT) {
+          return BuildThrowConstAssignError(bailout_id);
+        }
         return value;
       } else if (mode == LET && op != Token::INIT_LET) {
         // Perform an initialization check for let declared variables.
@@ -1887,13 +2008,13 @@
         // temporal dead zone of a let declared variable.
         Node* current = environment()->Lookup(variable);
         if (current->op() == the_hole->op()) {
-          value = BuildThrowReferenceError(variable);
+          value = BuildThrowReferenceError(variable, bailout_id);
         } else if (value->opcode() == IrOpcode::kPhi) {
-          value = BuildHoleCheckThrow(current, variable, value);
+          value = BuildHoleCheckThrow(current, variable, value, bailout_id);
         }
       } else if (mode == CONST && op != Token::INIT_CONST) {
-        // All assignments to const variables are early errors.
-        UNREACHABLE();
+        // Non-initializing assignments to const throw in all modes.
+        return BuildThrowConstAssignError(bailout_id);
       }
       environment()->Bind(variable, value);
       return value;
@@ -1907,17 +2028,22 @@
         Node* current = NewNode(op, current_context());
         value = BuildHoleCheckSilent(current, value, current);
       } else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) {
-        // Non-initializing assignments to legacy const is ignored.
+        // Non-initializing assignments to legacy const
+        // - throw in strict mode.
+        // - are ignored in sloppy mode.
+        if (strict_mode() == STRICT) {
+          return BuildThrowConstAssignError(bailout_id);
+        }
         return value;
       } else if (mode == LET && op != Token::INIT_LET) {
         // Perform an initialization check for let declared variables.
         const Operator* op =
             javascript()->LoadContext(depth, variable->index(), false);
         Node* current = NewNode(op, current_context());
-        value = BuildHoleCheckThrow(current, variable, value);
+        value = BuildHoleCheckThrow(current, variable, value, bailout_id);
       } else if (mode == CONST && op != Token::INIT_CONST) {
-        // All assignments to const variables are early errors.
-        UNREACHABLE();
+        // Non-initializing assignments to const throw in all modes.
+        return BuildThrowConstAssignError(bailout_id);
       }
       const Operator* op = javascript()->StoreContext(depth, variable->index());
       return NewNode(op, current_context(), value);
@@ -1928,8 +2054,11 @@
       Node* strict = jsgraph()->Constant(strict_mode());
       // TODO(mstarzinger): Use Runtime::kInitializeLegacyConstLookupSlot for
       // initializations of const declarations.
-      const Operator* op = javascript()->Runtime(Runtime::kStoreLookupSlot, 4);
-      return NewNode(op, value, current_context(), name, strict);
+      const Operator* op =
+          javascript()->CallRuntime(Runtime::kStoreLookupSlot, 4);
+      Node* store = NewNode(op, value, current_context(), name, strict);
+      PrepareFrameState(store, bailout_id, combine);
+      return store;
     }
   }
   UNREACHABLE();
@@ -1938,7 +2067,6 @@
 
 
 Node* AstGraphBuilder::BuildLoadObjectField(Node* object, int offset) {
-  // TODO(sigurds) Use simplified load here once it is ready.
   Node* field_load = NewNode(jsgraph()->machine()->Load(kMachAnyTagged), object,
                              jsgraph()->Int32Constant(offset - kHeapObjectTag));
   return field_load;
@@ -1961,17 +2089,61 @@
 }
 
 
-Node* AstGraphBuilder::BuildToBoolean(Node* value) {
-  // TODO(mstarzinger): Possible optimization is to NOP for boolean values.
-  return NewNode(javascript()->ToBoolean(), value);
+Node* AstGraphBuilder::BuildLoadGlobalProxy() {
+  Node* global = BuildLoadGlobalObject();
+  Node* proxy =
+      BuildLoadObjectField(global, JSGlobalObject::kGlobalProxyOffset);
+  return proxy;
 }
 
 
-Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable) {
+Node* AstGraphBuilder::BuildToBoolean(Node* input) {
+  // TODO(titzer): this should be in a JSOperatorReducer.
+  switch (input->opcode()) {
+    case IrOpcode::kInt32Constant:
+      return jsgraph_->BooleanConstant(!Int32Matcher(input).Is(0));
+    case IrOpcode::kFloat64Constant:
+      return jsgraph_->BooleanConstant(!Float64Matcher(input).Is(0));
+    case IrOpcode::kNumberConstant:
+      return jsgraph_->BooleanConstant(!NumberMatcher(input).Is(0));
+    case IrOpcode::kHeapConstant: {
+      Handle<Object> object = HeapObjectMatcher<Object>(input).Value().handle();
+      if (object->IsTrue()) return jsgraph_->TrueConstant();
+      if (object->IsFalse()) return jsgraph_->FalseConstant();
+      // TODO(turbofan): other constants.
+      break;
+    }
+    default:
+      break;
+  }
+  if (NodeProperties::IsTyped(input)) {
+    Type* upper = NodeProperties::GetBounds(input).upper;
+    if (upper->Is(Type::Boolean())) return input;
+  }
+
+  return NewNode(javascript()->ToBoolean(), input);
+}
+
+
+Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable,
+                                                BailoutId bailout_id) {
   // TODO(mstarzinger): Should be unified with the VisitThrow implementation.
   Node* variable_name = jsgraph()->Constant(variable->name());
-  const Operator* op = javascript()->Runtime(Runtime::kThrowReferenceError, 1);
-  return NewNode(op, variable_name);
+  const Operator* op =
+      javascript()->CallRuntime(Runtime::kThrowReferenceError, 1);
+  Node* call = NewNode(op, variable_name);
+  PrepareFrameState(call, bailout_id);
+  return call;
+}
+
+
+Node* AstGraphBuilder::BuildThrowConstAssignError(BailoutId bailout_id) {
+  // TODO(mstarzinger): Should be unified with the VisitThrow implementation.
+  const Operator* op =
+      javascript()->CallRuntime(Runtime::kThrowConstAssignError, 0);
+  Node* call = NewNode(op);
+  PrepareFrameState(call, bailout_id);
+  return call;
 }
 
 
@@ -2019,6 +2191,24 @@
 }
 
 
+Node* AstGraphBuilder::BuildStackCheck() {
+  IfBuilder stack_check(this);
+  Node* limit =
+      NewNode(jsgraph()->machine()->Load(kMachPtr),
+              jsgraph()->ExternalConstant(
+                  ExternalReference::address_of_stack_limit(isolate())),
+              jsgraph()->ZeroConstant());
+  Node* stack = NewNode(jsgraph()->machine()->LoadStackPointer());
+  Node* tag = NewNode(jsgraph()->machine()->UintLessThan(), limit, stack);
+  stack_check.If(tag, BranchHint::kTrue);
+  stack_check.Then();
+  stack_check.Else();
+  Node* guard = NewNode(javascript()->CallRuntime(Runtime::kStackGuard, 0));
+  stack_check.End();
+  return guard;
+}
+
+
 void AstGraphBuilder::PrepareFrameState(Node* node, BailoutId ast_id,
                                         OutputFrameStateCombine combine) {
   if (OperatorProperties::HasFrameStateInput(node->op())) {
@@ -2029,6 +2219,13 @@
   }
 }
 
+
+BitVector* AstGraphBuilder::GetVariablesAssignedInLoop(
+    IterationStatement* stmt) {
+  if (loop_assignment_analysis_ == NULL) return NULL;
+  return loop_assignment_analysis_->GetVariablesAssignedInLoop(stmt);
 }
-}
-}  // namespace v8::internal::compiler
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
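
The new BuildToBoolean above folds the conversion when its input is a known
constant or is already typed as Boolean, instead of always emitting a
JSToBoolean node. A minimal standalone sketch of that folding idea follows;
the types are simplified stand-ins, not the real TurboFan Node/JSGraph
classes, and NaN handling is omitted for brevity:

#include <cassert>
#include <optional>

enum class Kind { kInt32Constant, kFloat64Constant, kTrue, kFalse, kOther };

struct Node {
  Kind kind;
  double number = 0;  // payload for the constant kinds
};

// Returns the folded boolean when the value is statically known, or
// std::nullopt when a real conversion node is still required.
std::optional<bool> FoldToBoolean(const Node& input) {
  switch (input.kind) {
    case Kind::kInt32Constant:
    case Kind::kFloat64Constant:
      return input.number != 0;  // zero is falsy (NaN ignored in this sketch)
    case Kind::kTrue:
      return true;
    case Kind::kFalse:
      return false;
    default:
      return std::nullopt;  // emit the generic ToBoolean
  }
}

int main() {
  assert(*FoldToBoolean({Kind::kInt32Constant, 7}) == true);
  assert(*FoldToBoolean({Kind::kFloat64Constant, 0}) == false);
  assert(!FoldToBoolean({Kind::kOther}).has_value());
}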
diff --git a/src/compiler/ast-graph-builder.h b/src/compiler/ast-graph-builder.h
index 6a7e3db..0337c81 100644
--- a/src/compiler/ast-graph-builder.h
+++ b/src/compiler/ast-graph-builder.h
@@ -16,8 +16,9 @@
 namespace compiler {
 
 class ControlBuilder;
-class LoopBuilder;
 class Graph;
+class LoopAssignmentAnalysis;
+class LoopBuilder;
 
 // The AstGraphBuilder produces a high-level IR graph, based on an
 // underlying AST. The produced graph can either be compiled into a
@@ -25,7 +26,8 @@
 // of function inlining.
 class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
  public:
-  AstGraphBuilder(CompilationInfo* info, JSGraph* jsgraph);
+  AstGraphBuilder(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
+                  LoopAssignmentAnalysis* loop_assignment = NULL);
 
   // Creates a graph by visiting the entire AST.
   bool CreateGraph();
@@ -55,11 +57,7 @@
   // Support for control flow builders. The concrete type of the environment
   // depends on the graph builder, but environments themselves are not virtual.
   typedef StructuredGraphBuilder::Environment BaseEnvironment;
-  virtual BaseEnvironment* CopyEnvironment(BaseEnvironment* env);
-
-  // TODO(mstarzinger): The pipeline only needs to be a friend to access the
-  // function context. Remove as soon as the context is a parameter.
-  friend class Pipeline;
+  BaseEnvironment* CopyEnvironment(BaseEnvironment* env) OVERRIDE;
 
   // Getters for values in the activation record.
   Node* GetFunctionClosure();
@@ -71,6 +69,9 @@
   // other dependencies tracked by the environment might be mutated though.
   //
 
+  // Builder to create a receiver check for sloppy mode.
+  Node* BuildPatchReceiverToGlobalProxy(Node* receiver);
+
   // Builder to create a local function context.
   Node* BuildLocalFunctionContext(Node* context, Node* closure);
 
@@ -79,14 +80,19 @@
 
   // Builders for variable load and assignment.
   Node* BuildVariableAssignment(Variable* var, Node* value, Token::Value op,
-                                BailoutId bailout_id);
-  Node* BuildVariableDelete(Variable* var);
+                                BailoutId bailout_id,
+                                OutputFrameStateCombine state_combine =
+                                    OutputFrameStateCombine::Ignore());
+  Node* BuildVariableDelete(Variable* var, BailoutId bailout_id,
+                            OutputFrameStateCombine state_combine);
   Node* BuildVariableLoad(Variable* var, BailoutId bailout_id,
+                          const VectorSlotPair& feedback,
                           ContextualMode mode = CONTEXTUAL);
 
   // Builders for accessing the function context.
   Node* BuildLoadBuiltinsObject();
   Node* BuildLoadGlobalObject();
+  Node* BuildLoadGlobalProxy();
   Node* BuildLoadClosure();
   Node* BuildLoadObjectField(Node* object, int offset);
 
@@ -94,22 +100,27 @@
   Node* BuildToBoolean(Node* value);
 
   // Builders for error reporting at runtime.
-  Node* BuildThrowReferenceError(Variable* var);
+  Node* BuildThrowReferenceError(Variable* var, BailoutId bailout_id);
+  Node* BuildThrowConstAssignError(BailoutId bailout_id);
 
   // Builders for dynamic hole-checks at runtime.
   Node* BuildHoleCheckSilent(Node* value, Node* for_hole, Node* not_hole);
-  Node* BuildHoleCheckThrow(Node* value, Variable* var, Node* not_hole);
+  Node* BuildHoleCheckThrow(Node* value, Variable* var, Node* not_hole,
+                            BailoutId bailout_id);
 
   // Builders for binary operations.
   Node* BuildBinaryOp(Node* left, Node* right, Token::Value op);
 
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  // Builder for stack-check guards.
+  Node* BuildStackCheck();
+
+#define DECLARE_VISIT(type) void Visit##type(type* node) OVERRIDE;
   // Visiting functions for AST nodes make this an AstVisitor.
   AST_NODE_LIST(DECLARE_VISIT)
 #undef DECLARE_VISIT
 
   // Visiting function for declarations list is overridden.
-  virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
+  void VisitDeclarations(ZoneList<Declaration*>* declarations) OVERRIDE;
 
  private:
   CompilationInfo* info_;
@@ -117,7 +128,7 @@
   JSGraph* jsgraph_;
 
   // List of global declarations for functions and variables.
-  ZoneList<Handle<Object> > globals_;
+  ZoneVector<Handle<Object>> globals_;
 
   // Stack of breakable statements entered by the visitor.
   BreakableScope* breakable_;
@@ -129,15 +140,21 @@
   SetOncePointer<Node> function_closure_;
   SetOncePointer<Node> function_context_;
 
-  CompilationInfo* info() { return info_; }
-  StrictMode strict_mode() { return info()->strict_mode(); }
+  // Result of loop assignment analysis performed before graph creation.
+  LoopAssignmentAnalysis* loop_assignment_analysis_;
+
+  CompilationInfo* info() const { return info_; }
+  inline StrictMode strict_mode() const;
   JSGraph* jsgraph() { return jsgraph_; }
   JSOperatorBuilder* javascript() { return jsgraph_->javascript(); }
-  ZoneList<Handle<Object> >* globals() { return &globals_; }
+  ZoneVector<Handle<Object>>* globals() { return &globals_; }
 
   // Current scope during visitation.
   inline Scope* current_scope() const;
 
+  // Named and keyed loads require a VectorSlotPair for successful lowering.
+  VectorSlotPair CreateVectorSlotPair(FeedbackVectorICSlot slot) const;
+
   // Process arguments to a call by popping {arity} elements off the operand
   // stack and build a call node using the given call operator.
   Node* ProcessArguments(const Operator* op, int arity);
@@ -146,6 +163,7 @@
   void VisitIfNotNull(Statement* stmt);
 
   // Visit expressions.
+  void Visit(Expression* expr);
   void VisitForTest(Expression* expr);
   void VisitForEffect(Expression* expr);
   void VisitForValue(Expression* expr);
@@ -173,10 +191,11 @@
   void VisitForInAssignment(Expression* expr, Node* value);
 
   // Builds deoptimization for a given node.
-  void PrepareFrameState(Node* node, BailoutId ast_id,
-                         OutputFrameStateCombine combine = kIgnoreOutput);
+  void PrepareFrameState(
+      Node* node, BailoutId ast_id,
+      OutputFrameStateCombine combine = OutputFrameStateCombine::Ignore());
 
-  OutputFrameStateCombine StateCombineFromAstContext();
+  BitVector* GetVariablesAssignedInLoop(IterationStatement* stmt);
 
   DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
   DISALLOW_COPY_AND_ASSIGN(AstGraphBuilder);
@@ -288,7 +307,8 @@
   // Determines how to combine the frame state with the value
   // that is about to be plugged into this AstContext.
   OutputFrameStateCombine GetStateCombine() {
-    return IsEffect() ? kIgnoreOutput : kPushOutput;
+    return IsEffect() ? OutputFrameStateCombine::Ignore()
+                      : OutputFrameStateCombine::Push();
   }
 
   // Plug a node into this expression context.  Call this function in tail
@@ -327,9 +347,9 @@
  public:
   explicit AstEffectContext(AstGraphBuilder* owner)
       : AstContext(owner, Expression::kEffect) {}
-  virtual ~AstEffectContext();
-  virtual void ProduceValue(Node* value) OVERRIDE;
-  virtual Node* ConsumeValue() OVERRIDE;
+  ~AstEffectContext() FINAL;
+  void ProduceValue(Node* value) FINAL;
+  Node* ConsumeValue() FINAL;
 };
 
 
@@ -338,9 +358,9 @@
  public:
   explicit AstValueContext(AstGraphBuilder* owner)
       : AstContext(owner, Expression::kValue) {}
-  virtual ~AstValueContext();
-  virtual void ProduceValue(Node* value) OVERRIDE;
-  virtual Node* ConsumeValue() OVERRIDE;
+  ~AstValueContext() FINAL;
+  void ProduceValue(Node* value) FINAL;
+  Node* ConsumeValue() FINAL;
 };
 
 
@@ -349,9 +369,9 @@
  public:
   explicit AstTestContext(AstGraphBuilder* owner)
       : AstContext(owner, Expression::kTest) {}
-  virtual ~AstTestContext();
-  virtual void ProduceValue(Node* value) OVERRIDE;
-  virtual Node* ConsumeValue() OVERRIDE;
+  ~AstTestContext() FINAL;
+  void ProduceValue(Node* value) FINAL;
+  Node* ConsumeValue() FINAL;
 };
 
 
@@ -423,8 +443,9 @@
 Scope* AstGraphBuilder::current_scope() const {
   return execution_context_->scope();
 }
-}
-}
-}  // namespace v8::internal::compiler
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_COMPILER_AST_GRAPH_BUILDER_H_
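
As the header shows, the builder now takes the loop assignment analysis as an
optional constructor argument, and GetVariablesAssignedInLoop returns NULL
when no analysis was run, in which case loop construction must conservatively
assume any variable may be assigned in the loop. A toy model of that
plumbing, with illustrative names rather than the real V8 interfaces:

#include <cassert>
#include <map>
#include <vector>

struct IterationStatement {};  // stand-in for the AST node

struct LoopAssignmentAnalysis {
  std::map<const IterationStatement*, std::vector<int>> assigned;
  const std::vector<int>* Get(const IterationStatement* loop) const {
    auto it = assigned.find(loop);
    return it == assigned.end() ? nullptr : &it->second;
  }
};

class GraphBuilder {
 public:
  explicit GraphBuilder(const LoopAssignmentAnalysis* analysis = nullptr)
      : analysis_(analysis) {}

  // Mirrors GetVariablesAssignedInLoop: a null result tells the loop builder
  // to assume every variable may be assigned inside the loop.
  const std::vector<int>* AssignedInLoop(const IterationStatement* loop) const {
    return analysis_ ? analysis_->Get(loop) : nullptr;
  }

 private:
  const LoopAssignmentAnalysis* analysis_;
};

int main() {
  IterationStatement loop;
  LoopAssignmentAnalysis analysis;
  analysis.assigned[&loop] = {0, 2};
  GraphBuilder with(&analysis);
  GraphBuilder without;
  assert(with.AssignedInLoop(&loop) != nullptr);
  assert(without.AssignedInLoop(&loop) == nullptr);  // conservative fallback
}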
diff --git a/src/compiler/ast-loop-assignment-analyzer.cc b/src/compiler/ast-loop-assignment-analyzer.cc
new file mode 100644
index 0000000..7adac56
--- /dev/null
+++ b/src/compiler/ast-loop-assignment-analyzer.cc
@@ -0,0 +1,305 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/ast-loop-assignment-analyzer.h"
+#include "src/parser.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef class AstLoopAssignmentAnalyzer ALAA;  // for brevity.
+
+ALAA::AstLoopAssignmentAnalyzer(Zone* zone, CompilationInfo* info)
+    : info_(info), loop_stack_(zone) {
+  InitializeAstVisitor(zone);
+}
+
+
+LoopAssignmentAnalysis* ALAA::Analyze() {
+  LoopAssignmentAnalysis* a = new (zone()) LoopAssignmentAnalysis(zone());
+  result_ = a;
+  VisitStatements(info()->function()->body());
+  result_ = NULL;
+  return a;
+}
+
+
+void ALAA::Enter(IterationStatement* loop) {
+  int num_variables = 1 + info()->scope()->num_parameters() +
+                      info()->scope()->num_stack_slots();
+  BitVector* bits = new (zone()) BitVector(num_variables, zone());
+  loop_stack_.push_back(bits);
+}
+
+
+void ALAA::Exit(IterationStatement* loop) {
+  DCHECK(loop_stack_.size() > 0);
+  BitVector* bits = loop_stack_.back();
+  loop_stack_.pop_back();
+  if (!loop_stack_.empty()) {
+    loop_stack_.back()->Union(*bits);
+  }
+  result_->list_.push_back(
+      std::pair<IterationStatement*, BitVector*>(loop, bits));
+}
+
+
+// ---------------------------------------------------------------------------
+// -- Leaf nodes -------------------------------------------------------------
+// ---------------------------------------------------------------------------
+
+void ALAA::VisitVariableDeclaration(VariableDeclaration* leaf) {}
+void ALAA::VisitFunctionDeclaration(FunctionDeclaration* leaf) {}
+void ALAA::VisitModuleDeclaration(ModuleDeclaration* leaf) {}
+void ALAA::VisitImportDeclaration(ImportDeclaration* leaf) {}
+void ALAA::VisitExportDeclaration(ExportDeclaration* leaf) {}
+void ALAA::VisitModuleVariable(ModuleVariable* leaf) {}
+void ALAA::VisitModulePath(ModulePath* leaf) {}
+void ALAA::VisitModuleUrl(ModuleUrl* leaf) {}
+void ALAA::VisitEmptyStatement(EmptyStatement* leaf) {}
+void ALAA::VisitContinueStatement(ContinueStatement* leaf) {}
+void ALAA::VisitBreakStatement(BreakStatement* leaf) {}
+void ALAA::VisitDebuggerStatement(DebuggerStatement* leaf) {}
+void ALAA::VisitFunctionLiteral(FunctionLiteral* leaf) {}
+void ALAA::VisitNativeFunctionLiteral(NativeFunctionLiteral* leaf) {}
+void ALAA::VisitVariableProxy(VariableProxy* leaf) {}
+void ALAA::VisitLiteral(Literal* leaf) {}
+void ALAA::VisitRegExpLiteral(RegExpLiteral* leaf) {}
+void ALAA::VisitThisFunction(ThisFunction* leaf) {}
+void ALAA::VisitSuperReference(SuperReference* leaf) {}
+
+
+// ---------------------------------------------------------------------------
+// -- Pass-through nodes -----------------------------------------------------
+// ---------------------------------------------------------------------------
+void ALAA::VisitModuleLiteral(ModuleLiteral* e) { Visit(e->body()); }
+
+
+void ALAA::VisitBlock(Block* stmt) { VisitStatements(stmt->statements()); }
+
+
+void ALAA::VisitExpressionStatement(ExpressionStatement* stmt) {
+  Visit(stmt->expression());
+}
+
+
+void ALAA::VisitIfStatement(IfStatement* stmt) {
+  Visit(stmt->condition());
+  Visit(stmt->then_statement());
+  Visit(stmt->else_statement());
+}
+
+
+void ALAA::VisitReturnStatement(ReturnStatement* stmt) {
+  Visit(stmt->expression());
+}
+
+
+void ALAA::VisitWithStatement(WithStatement* stmt) {
+  Visit(stmt->expression());
+  Visit(stmt->statement());
+}
+
+
+void ALAA::VisitSwitchStatement(SwitchStatement* stmt) {
+  Visit(stmt->tag());
+  ZoneList<CaseClause*>* clauses = stmt->cases();
+  for (int i = 0; i < clauses->length(); i++) {
+    Visit(clauses->at(i));
+  }
+}
+
+
+void ALAA::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+  Visit(stmt->try_block());
+  Visit(stmt->finally_block());
+}
+
+
+void ALAA::VisitClassLiteral(ClassLiteral* e) {
+  VisitIfNotNull(e->extends());
+  VisitIfNotNull(e->constructor());
+  ZoneList<ObjectLiteralProperty*>* properties = e->properties();
+  for (int i = 0; i < properties->length(); i++) {
+    Visit(properties->at(i)->value());
+  }
+}
+
+
+void ALAA::VisitConditional(Conditional* e) {
+  Visit(e->condition());
+  Visit(e->then_expression());
+  Visit(e->else_expression());
+}
+
+
+void ALAA::VisitObjectLiteral(ObjectLiteral* e) {
+  ZoneList<ObjectLiteralProperty*>* properties = e->properties();
+  for (int i = 0; i < properties->length(); i++) {
+    Visit(properties->at(i)->value());
+  }
+}
+
+
+void ALAA::VisitArrayLiteral(ArrayLiteral* e) { VisitExpressions(e->values()); }
+
+
+void ALAA::VisitYield(Yield* stmt) {
+  Visit(stmt->generator_object());
+  Visit(stmt->expression());
+}
+
+
+void ALAA::VisitThrow(Throw* stmt) { Visit(stmt->exception()); }
+
+
+void ALAA::VisitProperty(Property* e) {
+  Visit(e->obj());
+  Visit(e->key());
+}
+
+
+void ALAA::VisitCall(Call* e) {
+  Visit(e->expression());
+  VisitExpressions(e->arguments());
+}
+
+
+void ALAA::VisitCallNew(CallNew* e) {
+  Visit(e->expression());
+  VisitExpressions(e->arguments());
+}
+
+
+void ALAA::VisitCallRuntime(CallRuntime* e) {
+  VisitExpressions(e->arguments());
+}
+
+
+void ALAA::VisitUnaryOperation(UnaryOperation* e) { Visit(e->expression()); }
+
+
+void ALAA::VisitBinaryOperation(BinaryOperation* e) {
+  Visit(e->left());
+  Visit(e->right());
+}
+
+
+void ALAA::VisitCompareOperation(CompareOperation* e) {
+  Visit(e->left());
+  Visit(e->right());
+}
+
+
+void ALAA::VisitCaseClause(CaseClause* cc) {
+  if (!cc->is_default()) Visit(cc->label());
+  VisitStatements(cc->statements());
+}
+
+
+// ---------------------------------------------------------------------------
+// -- Interesting nodes ------------------------------------------------------
+// ---------------------------------------------------------------------------
+void ALAA::VisitModuleStatement(ModuleStatement* stmt) {
+  Visit(stmt->body());
+  // TODO(turbofan): can a module appear in a loop?
+  AnalyzeAssignment(stmt->proxy()->var());
+}
+
+
+void ALAA::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  Visit(stmt->try_block());
+  Visit(stmt->catch_block());
+  // TODO(turbofan): are catch variables well-scoped?
+  AnalyzeAssignment(stmt->variable());
+}
+
+
+void ALAA::VisitDoWhileStatement(DoWhileStatement* loop) {
+  Enter(loop);
+  Visit(loop->body());
+  Visit(loop->cond());
+  Exit(loop);
+}
+
+
+void ALAA::VisitWhileStatement(WhileStatement* loop) {
+  Enter(loop);
+  Visit(loop->cond());
+  Visit(loop->body());
+  Exit(loop);
+}
+
+
+void ALAA::VisitForStatement(ForStatement* loop) {
+  VisitIfNotNull(loop->init());
+  Enter(loop);
+  VisitIfNotNull(loop->cond());
+  Visit(loop->body());
+  VisitIfNotNull(loop->next());
+  Exit(loop);
+}
+
+
+void ALAA::VisitForInStatement(ForInStatement* loop) {
+  Enter(loop);
+  Visit(loop->each());
+  Visit(loop->subject());
+  Visit(loop->body());
+  Exit(loop);
+}
+
+
+void ALAA::VisitForOfStatement(ForOfStatement* loop) {
+  Enter(loop);
+  Visit(loop->each());
+  Visit(loop->subject());
+  Visit(loop->body());
+  Exit(loop);
+}
+
+
+void ALAA::VisitAssignment(Assignment* stmt) {
+  Expression* l = stmt->target();
+  Visit(l);
+  Visit(stmt->value());
+  if (l->IsVariableProxy()) AnalyzeAssignment(l->AsVariableProxy()->var());
+}
+
+
+void ALAA::VisitCountOperation(CountOperation* e) {
+  Expression* l = e->expression();
+  Visit(l);
+  if (l->IsVariableProxy()) AnalyzeAssignment(l->AsVariableProxy()->var());
+}
+
+
+void ALAA::AnalyzeAssignment(Variable* var) {
+  if (!loop_stack_.empty() && var->IsStackAllocated()) {
+    loop_stack_.back()->Add(GetVariableIndex(info()->scope(), var));
+  }
+}
+
+
+int ALAA::GetVariableIndex(Scope* scope, Variable* var) {
+  CHECK(var->IsStackAllocated());
+  if (var->is_this()) return 0;
+  if (var->IsParameter()) return 1 + var->index();
+  return 1 + scope->num_parameters() + var->index();
+}
+
+
+int LoopAssignmentAnalysis::GetAssignmentCountForTesting(Scope* scope,
+                                                         Variable* var) {
+  int count = 0;
+  int var_index = AstLoopAssignmentAnalyzer::GetVariableIndex(scope, var);
+  for (size_t i = 0; i < list_.size(); i++) {
+    if (list_[i].second->Contains(var_index)) count++;
+  }
+  return count;
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
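
GetVariableIndex above lays the bit vector out as: slot 0 for "this", slots
1..num_parameters for parameters, and stack-allocated locals after that,
matching the 1 + num_parameters + num_stack_slots sizing in Enter. A
standalone check of that layout, with plain structs standing in for V8's
Scope and Variable:

#include <cassert>

struct Variable {
  bool is_this;
  bool is_parameter;
  int index;  // parameter index or stack-slot index
};

int GetVariableIndex(int num_parameters, const Variable& var) {
  if (var.is_this) return 0;
  if (var.is_parameter) return 1 + var.index;
  return 1 + num_parameters + var.index;
}

int main() {
  const int params = 2;
  assert(GetVariableIndex(params, {true, false, 0}) == 0);   // this
  assert(GetVariableIndex(params, {false, true, 1}) == 2);   // 2nd parameter
  assert(GetVariableIndex(params, {false, false, 0}) == 3);  // 1st local
}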
diff --git a/src/compiler/ast-loop-assignment-analyzer.h b/src/compiler/ast-loop-assignment-analyzer.h
new file mode 100644
index 0000000..00a7f2d
--- /dev/null
+++ b/src/compiler/ast-loop-assignment-analyzer.h
@@ -0,0 +1,78 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_AST_LOOP_ASSIGNMENT_ANALYZER_H_
+#define V8_COMPILER_AST_LOOP_ASSIGNMENT_ANALYZER_H_
+
+#include "src/ast.h"
+#include "src/bit-vector.h"
+#include "src/v8.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class Variable;
+class Scope;
+
+namespace compiler {
+
+// The result of analyzing loop assignments.
+class LoopAssignmentAnalysis : public ZoneObject {
+ public:
+  BitVector* GetVariablesAssignedInLoop(IterationStatement* loop) {
+    for (size_t i = 0; i < list_.size(); i++) {
+      // TODO(turbofan): hashmap or binary search for loop assignments.
+      if (list_[i].first == loop) return list_[i].second;
+    }
+    UNREACHABLE();  // Should never be asked about a loop that wasn't analyzed.
+    return NULL;
+  }
+
+  int GetAssignmentCountForTesting(Scope* scope, Variable* var);
+
+ private:
+  friend class AstLoopAssignmentAnalyzer;
+  explicit LoopAssignmentAnalysis(Zone* zone) : list_(zone) {}
+  ZoneVector<std::pair<IterationStatement*, BitVector*>> list_;
+};
+
+
+// The class that performs loop assignment analysis by walking the AST.
+class AstLoopAssignmentAnalyzer : public AstVisitor {
+ public:
+  AstLoopAssignmentAnalyzer(Zone* zone, CompilationInfo* info);
+
+  LoopAssignmentAnalysis* Analyze();
+
+#define DECLARE_VISIT(type) void Visit##type(type* node) OVERRIDE;
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  static int GetVariableIndex(Scope* scope, Variable* var);
+
+ private:
+  CompilationInfo* info_;
+  ZoneDeque<BitVector*> loop_stack_;
+  LoopAssignmentAnalysis* result_;
+
+  CompilationInfo* info() { return info_; }
+
+  void Enter(IterationStatement* loop);
+  void Exit(IterationStatement* loop);
+
+  void VisitIfNotNull(AstNode* node) {
+    if (node != NULL) Visit(node);
+  }
+
+  void AnalyzeAssignment(Variable* var);
+
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+  DISALLOW_COPY_AND_ASSIGN(AstLoopAssignmentAnalyzer);
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_AST_LOOP_ASSIGNMENT_ANALYZER_H_
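For reference, the index that AnalyzeAssignment records in a loop's bit vector follows a fixed layout: slot 0 is the receiver, parameters occupy slots 1 through num_parameters, and stack-allocated locals come after. A minimal standalone sketch of that layout (hypothetical Scope/Variable stand-ins, not the V8 classes):

#include <cassert>

// Hypothetical stand-ins for v8::internal::Scope and Variable.
struct Variable {
  bool is_this;
  bool is_parameter;
  int index;  // Index within its own category (parameter or local).
};

struct Scope {
  int num_parameters;
};

// Mirrors ALAA::GetVariableIndex: receiver first, then parameters,
// then stack-allocated locals.
int GetVariableIndex(const Scope& scope, const Variable& var) {
  if (var.is_this) return 0;
  if (var.is_parameter) return 1 + var.index;
  return 1 + scope.num_parameters + var.index;
}

int main() {
  Scope scope{2};  // e.g. function f(a, b) { var x; ... }
  assert(GetVariableIndex(scope, Variable{true, false, 0}) == 0);   // this
  assert(GetVariableIndex(scope, Variable{false, true, 1}) == 2);   // b
  assert(GetVariableIndex(scope, Variable{false, false, 0}) == 3);  // x
  return 0;
}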
diff --git a/src/compiler/basic-block-instrumentor.cc b/src/compiler/basic-block-instrumentor.cc
new file mode 100644
index 0000000..d7d3ade
--- /dev/null
+++ b/src/compiler/basic-block-instrumentor.cc
@@ -0,0 +1,106 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/basic-block-instrumentor.h"
+
+#include <sstream>
+
+#include "src/compiler.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/schedule.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Find the first place in an already-scheduled block where new nodes can be
+// inserted without upsetting the register allocator.
+static NodeVector::iterator FindInsertionPoint(BasicBlock* block) {
+  NodeVector::iterator i = block->begin();
+  for (; i != block->end(); ++i) {
+    const Operator* op = (*i)->op();
+    if (OperatorProperties::IsBasicBlockBegin(op)) continue;
+    switch (op->opcode()) {
+      case IrOpcode::kParameter:
+      case IrOpcode::kPhi:
+      case IrOpcode::kEffectPhi:
+        continue;
+    }
+    break;
+  }
+  return i;
+}
+
+
+// TODO(dcarney): need to mark code as non-serializable.
+static const Operator* PointerConstant(CommonOperatorBuilder* common,
+                                       void* ptr) {
+  return kPointerSize == 8
+             ? common->Int64Constant(reinterpret_cast<intptr_t>(ptr))
+             : common->Int32Constant(
+                   static_cast<int32_t>(reinterpret_cast<intptr_t>(ptr)));
+}
+
+
+BasicBlockProfiler::Data* BasicBlockInstrumentor::Instrument(
+    CompilationInfo* info, Graph* graph, Schedule* schedule) {
+  // Skip the exit block in profiles, since the register allocator can't handle
+  // it and entry into it means falling off the end of the function anyway.
+  size_t n_blocks = static_cast<size_t>(schedule->RpoBlockCount()) - 1;
+  BasicBlockProfiler::Data* data =
+      info->isolate()->GetOrCreateBasicBlockProfiler()->NewData(n_blocks);
+  // Set the function name.
+  if (!info->shared_info().is_null() &&
+      info->shared_info()->name()->IsString()) {
+    std::ostringstream os;
+    String::cast(info->shared_info()->name())->PrintUC16(os);
+    data->SetFunctionName(&os);
+  }
+  // Capture the schedule string before instrumentation.
+  {
+    std::ostringstream os;
+    os << *schedule;
+    data->SetSchedule(&os);
+  }
+  // Add the increment instructions to the start of every block.
+  CommonOperatorBuilder common(graph->zone());
+  Node* zero = graph->NewNode(common.Int32Constant(0));
+  Node* one = graph->NewNode(common.Int32Constant(1));
+  MachineOperatorBuilder machine(graph->zone());
+  BasicBlockVector* blocks = schedule->rpo_order();
+  size_t block_number = 0;
+  for (BasicBlockVector::iterator it = blocks->begin(); block_number < n_blocks;
+       ++it, ++block_number) {
+    BasicBlock* block = (*it);
+    data->SetBlockId(block_number, block->id().ToSize());
+    // TODO(dcarney): wire effect and control deps for load and store.
+    // Construct increment operation.
+    Node* base = graph->NewNode(
+        PointerConstant(&common, data->GetCounterAddress(block_number)));
+    Node* load = graph->NewNode(machine.Load(kMachUint32), base, zero);
+    Node* inc = graph->NewNode(machine.Int32Add(), load, one);
+    Node* store = graph->NewNode(
+        machine.Store(StoreRepresentation(kMachUint32, kNoWriteBarrier)), base,
+        zero, inc);
+    // Insert the new nodes.
+    static const int kArraySize = 6;
+    Node* to_insert[kArraySize] = {zero, one, base, load, inc, store};
+    int insertion_start = block_number == 0 ? 0 : 2;
+    NodeVector::iterator insertion_point = FindInsertionPoint(block);
+    block->InsertNodes(insertion_point, &to_insert[insertion_start],
+                       &to_insert[kArraySize]);
+    // Tell the scheduler about the new nodes.
+    for (int i = insertion_start; i < kArraySize; ++i) {
+      schedule->SetBlockForNode(block, to_insert[i]);
+    }
+  }
+  return data;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
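The node sequence built above is an ordinary unsynchronized counter bump at the start of each block. As straight-line C++, the inserted Load/Int32Add/Store triple is equivalent to the following sketch (the counter table here is hypothetical; in V8 the cells live in BasicBlockProfiler::Data and are addressed via PointerConstant):

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // Hypothetical per-block counters standing in for the profiler data.
  std::vector<uint32_t> counters(4, 0);

  auto on_block_entry = [&](size_t block_number) {
    // Equivalent of the inserted Load / Int32Add / Store triple:
    uint32_t* base = &counters[block_number];
    uint32_t load = *base;    // machine.Load(kMachUint32)
    uint32_t inc = load + 1;  // machine.Int32Add()
    *base = inc;              // machine.Store(kMachUint32, kNoWriteBarrier)
  };

  on_block_entry(0);
  on_block_entry(2);
  on_block_entry(2);
  for (size_t i = 0; i < counters.size(); ++i) {
    std::printf("block %zu executed %u times\n", i, counters[i]);
  }
  return 0;
}

No write barrier is needed because the counter cells hold raw uint32 values rather than tagged pointers.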
diff --git a/src/compiler/basic-block-instrumentor.h b/src/compiler/basic-block-instrumentor.h
new file mode 100644
index 0000000..7edac0d
--- /dev/null
+++ b/src/compiler/basic-block-instrumentor.h
@@ -0,0 +1,32 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BASIC_BLOCK_INSTRUMENTOR_H_
+#define V8_COMPILER_BASIC_BLOCK_INSTRUMENTOR_H_
+
+#include "src/v8.h"
+
+#include "src/basic-block-profiler.h"
+
+namespace v8 {
+namespace internal {
+
+class CompilationInfo;
+
+namespace compiler {
+
+class Graph;
+class Schedule;
+
+class BasicBlockInstrumentor : public AllStatic {
+ public:
+  static BasicBlockProfiler::Data* Instrument(CompilationInfo* info,
+                                              Graph* graph, Schedule* schedule);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_BASIC_BLOCK_INSTRUMENTOR_H_
diff --git a/src/compiler/change-lowering-unittest.cc b/src/compiler/change-lowering-unittest.cc
deleted file mode 100644
index 994027a..0000000
--- a/src/compiler/change-lowering-unittest.cc
+++ /dev/null
@@ -1,476 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/change-lowering.h"
-#include "src/compiler/compiler-test-utils.h"
-#include "src/compiler/graph-unittest.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/node-properties-inl.h"
-#include "src/compiler/simplified-operator.h"
-#include "src/compiler/typer.h"
-#include "testing/gmock-support.h"
-
-using testing::_;
-using testing::AllOf;
-using testing::Capture;
-using testing::CaptureEq;
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// TODO(bmeurer): Find a new home for these functions.
-inline std::ostream& operator<<(std::ostream& os, const MachineType& type) {
-  OStringStream ost;
-  ost << type;
-  return os << ost.c_str();
-}
-
-
-class ChangeLoweringTest : public GraphTest {
- public:
-  ChangeLoweringTest() : simplified_(zone()) {}
-  virtual ~ChangeLoweringTest() {}
-
-  virtual MachineType WordRepresentation() const = 0;
-
- protected:
-  int HeapNumberValueOffset() const {
-    STATIC_ASSERT(HeapNumber::kValueOffset % kApiPointerSize == 0);
-    return (HeapNumber::kValueOffset / kApiPointerSize) * PointerSize() -
-           kHeapObjectTag;
-  }
-  bool Is32() const { return WordRepresentation() == kRepWord32; }
-  int PointerSize() const {
-    switch (WordRepresentation()) {
-      case kRepWord32:
-        return 4;
-      case kRepWord64:
-        return 8;
-      default:
-        break;
-    }
-    UNREACHABLE();
-    return 0;
-  }
-  int SmiMaxValue() const { return -(SmiMinValue() + 1); }
-  int SmiMinValue() const {
-    return static_cast<int>(0xffffffffu << (SmiValueSize() - 1));
-  }
-  int SmiShiftAmount() const { return kSmiTagSize + SmiShiftSize(); }
-  int SmiShiftSize() const {
-    return Is32() ? SmiTagging<4>::SmiShiftSize()
-                  : SmiTagging<8>::SmiShiftSize();
-  }
-  int SmiValueSize() const {
-    return Is32() ? SmiTagging<4>::SmiValueSize()
-                  : SmiTagging<8>::SmiValueSize();
-  }
-
-  Node* Parameter(int32_t index = 0) {
-    return graph()->NewNode(common()->Parameter(index), graph()->start());
-  }
-
-  Reduction Reduce(Node* node) {
-    Typer typer(zone());
-    MachineOperatorBuilder machine(WordRepresentation());
-    JSOperatorBuilder javascript(zone());
-    JSGraph jsgraph(graph(), common(), &javascript, &typer, &machine);
-    CompilationInfo info(isolate(), zone());
-    Linkage linkage(&info);
-    ChangeLowering reducer(&jsgraph, &linkage);
-    return reducer.Reduce(node);
-  }
-
-  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
-
-  Matcher<Node*> IsAllocateHeapNumber(const Matcher<Node*>& effect_matcher,
-                                      const Matcher<Node*>& control_matcher) {
-    return IsCall(
-        _, IsHeapConstant(Unique<HeapObject>::CreateImmovable(
-               CEntryStub(isolate(), 1).GetCode())),
-        IsExternalConstant(ExternalReference(
-            Runtime::FunctionForId(Runtime::kAllocateHeapNumber), isolate())),
-        IsInt32Constant(0), IsNumberConstant(0.0), effect_matcher,
-        control_matcher);
-  }
-  Matcher<Node*> IsWordEqual(const Matcher<Node*>& lhs_matcher,
-                             const Matcher<Node*>& rhs_matcher) {
-    return Is32() ? IsWord32Equal(lhs_matcher, rhs_matcher)
-                  : IsWord64Equal(lhs_matcher, rhs_matcher);
-  }
-
- private:
-  SimplifiedOperatorBuilder simplified_;
-};
-
-
-// -----------------------------------------------------------------------------
-// Common.
-
-
-class ChangeLoweringCommonTest
-    : public ChangeLoweringTest,
-      public ::testing::WithParamInterface<MachineType> {
- public:
-  virtual ~ChangeLoweringCommonTest() {}
-
-  virtual MachineType WordRepresentation() const FINAL OVERRIDE {
-    return GetParam();
-  }
-};
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, ChangeBitToBool) {
-  Node* val = Parameter(0);
-  Node* node = graph()->NewNode(simplified()->ChangeBitToBool(), val);
-  Reduction reduction = Reduce(node);
-  ASSERT_TRUE(reduction.Changed());
-
-  Node* phi = reduction.replacement();
-  Capture<Node*> branch;
-  EXPECT_THAT(phi,
-              IsPhi(static_cast<MachineType>(kTypeBool | kRepTagged),
-                    IsTrueConstant(), IsFalseConstant(),
-                    IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
-                                           IsBranch(val, graph()->start()))),
-                            IsIfFalse(CaptureEq(&branch)))));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, ChangeBoolToBit) {
-  Node* val = Parameter(0);
-  Node* node = graph()->NewNode(simplified()->ChangeBoolToBit(), val);
-  Reduction reduction = Reduce(node);
-  ASSERT_TRUE(reduction.Changed());
-
-  EXPECT_THAT(reduction.replacement(), IsWordEqual(val, IsTrueConstant()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, ChangeFloat64ToTagged) {
-  Node* val = Parameter(0);
-  Node* node = graph()->NewNode(simplified()->ChangeFloat64ToTagged(), val);
-  Reduction reduction = Reduce(node);
-  ASSERT_TRUE(reduction.Changed());
-
-  Node* finish = reduction.replacement();
-  Capture<Node*> heap_number;
-  EXPECT_THAT(
-      finish,
-      IsFinish(
-          AllOf(CaptureEq(&heap_number),
-                IsAllocateHeapNumber(IsValueEffect(val), graph()->start())),
-          IsStore(kMachFloat64, kNoWriteBarrier, CaptureEq(&heap_number),
-                  IsInt32Constant(HeapNumberValueOffset()), val,
-                  CaptureEq(&heap_number), graph()->start())));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, StringAdd) {
-  Node* node =
-      graph()->NewNode(simplified()->StringAdd(), Parameter(0), Parameter(1));
-  Reduction reduction = Reduce(node);
-  EXPECT_FALSE(reduction.Changed());
-}
-
-
-INSTANTIATE_TEST_CASE_P(ChangeLoweringTest, ChangeLoweringCommonTest,
-                        ::testing::Values(kRepWord32, kRepWord64));
-
-
-// -----------------------------------------------------------------------------
-// 32-bit
-
-
-class ChangeLowering32Test : public ChangeLoweringTest {
- public:
-  virtual ~ChangeLowering32Test() {}
-  virtual MachineType WordRepresentation() const FINAL OVERRIDE {
-    return kRepWord32;
-  }
-};
-
-
-TARGET_TEST_F(ChangeLowering32Test, ChangeInt32ToTagged) {
-  Node* val = Parameter(0);
-  Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), val);
-  Reduction reduction = Reduce(node);
-  ASSERT_TRUE(reduction.Changed());
-
-  Node* phi = reduction.replacement();
-  Capture<Node*> add, branch, heap_number, if_true;
-  EXPECT_THAT(
-      phi,
-      IsPhi(kMachAnyTagged,
-            IsFinish(
-                AllOf(CaptureEq(&heap_number),
-                      IsAllocateHeapNumber(_, CaptureEq(&if_true))),
-                IsStore(kMachFloat64, kNoWriteBarrier, CaptureEq(&heap_number),
-                        IsInt32Constant(HeapNumberValueOffset()),
-                        IsChangeInt32ToFloat64(val), CaptureEq(&heap_number),
-                        CaptureEq(&if_true))),
-            IsProjection(
-                0, AllOf(CaptureEq(&add), IsInt32AddWithOverflow(val, val))),
-            IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
-                    IsIfFalse(AllOf(CaptureEq(&branch),
-                                    IsBranch(IsProjection(1, CaptureEq(&add)),
-                                             graph()->start()))))));
-}
-
-
-TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToFloat64) {
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize == 1);
-
-  Node* val = Parameter(0);
-  Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), val);
-  Reduction reduction = Reduce(node);
-  ASSERT_TRUE(reduction.Changed());
-
-  Node* phi = reduction.replacement();
-  Capture<Node*> branch, if_true;
-  EXPECT_THAT(
-      phi,
-      IsPhi(
-          kMachFloat64,
-          IsLoad(kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
-                 IsControlEffect(CaptureEq(&if_true))),
-          IsChangeInt32ToFloat64(
-              IsWord32Sar(val, IsInt32Constant(SmiShiftAmount()))),
-          IsMerge(
-              AllOf(CaptureEq(&if_true),
-                    IsIfTrue(AllOf(
-                        CaptureEq(&branch),
-                        IsBranch(IsWord32And(val, IsInt32Constant(kSmiTagMask)),
-                                 graph()->start())))),
-              IsIfFalse(CaptureEq(&branch)))));
-}
-
-
-TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToInt32) {
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize == 1);
-
-  Node* val = Parameter(0);
-  Node* node = graph()->NewNode(simplified()->ChangeTaggedToInt32(), val);
-  Reduction reduction = Reduce(node);
-  ASSERT_TRUE(reduction.Changed());
-
-  Node* phi = reduction.replacement();
-  Capture<Node*> branch, if_true;
-  EXPECT_THAT(
-      phi,
-      IsPhi(kMachInt32,
-            IsChangeFloat64ToInt32(IsLoad(
-                kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
-                IsControlEffect(CaptureEq(&if_true)))),
-            IsWord32Sar(val, IsInt32Constant(SmiShiftAmount())),
-            IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
-                    IsIfFalse(AllOf(
-                        CaptureEq(&branch),
-                        IsBranch(IsWord32And(val, IsInt32Constant(kSmiTagMask)),
-                                 graph()->start()))))));
-}
-
-
-TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToUint32) {
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize == 1);
-
-  Node* val = Parameter(0);
-  Node* node = graph()->NewNode(simplified()->ChangeTaggedToUint32(), val);
-  Reduction reduction = Reduce(node);
-  ASSERT_TRUE(reduction.Changed());
-
-  Node* phi = reduction.replacement();
-  Capture<Node*> branch, if_true;
-  EXPECT_THAT(
-      phi,
-      IsPhi(kMachUint32,
-            IsChangeFloat64ToUint32(IsLoad(
-                kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
-                IsControlEffect(CaptureEq(&if_true)))),
-            IsWord32Sar(val, IsInt32Constant(SmiShiftAmount())),
-            IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
-                    IsIfFalse(AllOf(
-                        CaptureEq(&branch),
-                        IsBranch(IsWord32And(val, IsInt32Constant(kSmiTagMask)),
-                                 graph()->start()))))));
-}
-
-
-TARGET_TEST_F(ChangeLowering32Test, ChangeUint32ToTagged) {
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize == 1);
-
-  Node* val = Parameter(0);
-  Node* node = graph()->NewNode(simplified()->ChangeUint32ToTagged(), val);
-  Reduction reduction = Reduce(node);
-  ASSERT_TRUE(reduction.Changed());
-
-  Node* phi = reduction.replacement();
-  Capture<Node*> branch, heap_number, if_false;
-  EXPECT_THAT(
-      phi,
-      IsPhi(
-          kMachAnyTagged, IsWord32Shl(val, IsInt32Constant(SmiShiftAmount())),
-          IsFinish(
-              AllOf(CaptureEq(&heap_number),
-                    IsAllocateHeapNumber(_, CaptureEq(&if_false))),
-              IsStore(kMachFloat64, kNoWriteBarrier, CaptureEq(&heap_number),
-                      IsInt32Constant(HeapNumberValueOffset()),
-                      IsChangeUint32ToFloat64(val), CaptureEq(&heap_number),
-                      CaptureEq(&if_false))),
-          IsMerge(
-              IsIfTrue(AllOf(CaptureEq(&branch),
-                             IsBranch(IsUint32LessThanOrEqual(
-                                          val, IsInt32Constant(SmiMaxValue())),
-                                      graph()->start()))),
-              AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
-}
-
-
-// -----------------------------------------------------------------------------
-// 64-bit
-
-
-class ChangeLowering64Test : public ChangeLoweringTest {
- public:
-  virtual ~ChangeLowering64Test() {}
-  virtual MachineType WordRepresentation() const FINAL OVERRIDE {
-    return kRepWord64;
-  }
-};
-
-
-TARGET_TEST_F(ChangeLowering64Test, ChangeInt32ToTagged) {
-  Node* val = Parameter(0);
-  Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), val);
-  Reduction reduction = Reduce(node);
-  ASSERT_TRUE(reduction.Changed());
-
-  EXPECT_THAT(reduction.replacement(),
-              IsWord64Shl(IsChangeInt32ToInt64(val),
-                          IsInt32Constant(SmiShiftAmount())));
-}
-
-
-TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToFloat64) {
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize == 1);
-
-  Node* val = Parameter(0);
-  Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), val);
-  Reduction reduction = Reduce(node);
-  ASSERT_TRUE(reduction.Changed());
-
-  Node* phi = reduction.replacement();
-  Capture<Node*> branch, if_true;
-  EXPECT_THAT(
-      phi,
-      IsPhi(
-          kMachFloat64,
-          IsLoad(kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
-                 IsControlEffect(CaptureEq(&if_true))),
-          IsChangeInt32ToFloat64(IsTruncateInt64ToInt32(
-              IsWord64Sar(val, IsInt32Constant(SmiShiftAmount())))),
-          IsMerge(
-              AllOf(CaptureEq(&if_true),
-                    IsIfTrue(AllOf(
-                        CaptureEq(&branch),
-                        IsBranch(IsWord64And(val, IsInt32Constant(kSmiTagMask)),
-                                 graph()->start())))),
-              IsIfFalse(CaptureEq(&branch)))));
-}
-
-
-TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToInt32) {
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize == 1);
-
-  Node* val = Parameter(0);
-  Node* node = graph()->NewNode(simplified()->ChangeTaggedToInt32(), val);
-  Reduction reduction = Reduce(node);
-  ASSERT_TRUE(reduction.Changed());
-
-  Node* phi = reduction.replacement();
-  Capture<Node*> branch, if_true;
-  EXPECT_THAT(
-      phi,
-      IsPhi(kMachInt32,
-            IsChangeFloat64ToInt32(IsLoad(
-                kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
-                IsControlEffect(CaptureEq(&if_true)))),
-            IsTruncateInt64ToInt32(
-                IsWord64Sar(val, IsInt32Constant(SmiShiftAmount()))),
-            IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
-                    IsIfFalse(AllOf(
-                        CaptureEq(&branch),
-                        IsBranch(IsWord64And(val, IsInt32Constant(kSmiTagMask)),
-                                 graph()->start()))))));
-}
-
-
-TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToUint32) {
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize == 1);
-
-  Node* val = Parameter(0);
-  Node* node = graph()->NewNode(simplified()->ChangeTaggedToUint32(), val);
-  Reduction reduction = Reduce(node);
-  ASSERT_TRUE(reduction.Changed());
-
-  Node* phi = reduction.replacement();
-  Capture<Node*> branch, if_true;
-  EXPECT_THAT(
-      phi,
-      IsPhi(kMachUint32,
-            IsChangeFloat64ToUint32(IsLoad(
-                kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
-                IsControlEffect(CaptureEq(&if_true)))),
-            IsTruncateInt64ToInt32(
-                IsWord64Sar(val, IsInt32Constant(SmiShiftAmount()))),
-            IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
-                    IsIfFalse(AllOf(
-                        CaptureEq(&branch),
-                        IsBranch(IsWord64And(val, IsInt32Constant(kSmiTagMask)),
-                                 graph()->start()))))));
-}
-
-
-TARGET_TEST_F(ChangeLowering64Test, ChangeUint32ToTagged) {
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize == 1);
-
-  Node* val = Parameter(0);
-  Node* node = graph()->NewNode(simplified()->ChangeUint32ToTagged(), val);
-  Reduction reduction = Reduce(node);
-  ASSERT_TRUE(reduction.Changed());
-
-  Node* phi = reduction.replacement();
-  Capture<Node*> branch, heap_number, if_false;
-  EXPECT_THAT(
-      phi,
-      IsPhi(
-          kMachAnyTagged, IsWord64Shl(IsChangeUint32ToUint64(val),
-                                      IsInt32Constant(SmiShiftAmount())),
-          IsFinish(
-              AllOf(CaptureEq(&heap_number),
-                    IsAllocateHeapNumber(_, CaptureEq(&if_false))),
-              IsStore(kMachFloat64, kNoWriteBarrier, CaptureEq(&heap_number),
-                      IsInt32Constant(HeapNumberValueOffset()),
-                      IsChangeUint32ToFloat64(val), CaptureEq(&heap_number),
-                      CaptureEq(&if_false))),
-          IsMerge(
-              IsIfTrue(AllOf(CaptureEq(&branch),
-                             IsBranch(IsUint32LessThanOrEqual(
-                                          val, IsInt32Constant(SmiMaxValue())),
-                                      graph()->start()))),
-              AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/change-lowering.cc b/src/compiler/change-lowering.cc
index b13db4c..7ddc751 100644
--- a/src/compiler/change-lowering.cc
+++ b/src/compiler/change-lowering.cc
@@ -3,9 +3,13 @@
 // found in the LICENSE file.
 
 #include "src/compiler/change-lowering.h"
-#include "src/compiler/machine-operator.h"
 
+#include "src/code-factory.h"
+#include "src/compiler/diamond.h"
 #include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-properties-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -45,7 +49,7 @@
   STATIC_ASSERT(HeapNumber::kValueOffset % kPointerSize == 0);
   const int heap_number_value_offset =
       ((HeapNumber::kValueOffset / kPointerSize) * (machine()->Is64() ? 8 : 4));
-  return jsgraph()->Int32Constant(heap_number_value_offset - kHeapObjectTag);
+  return jsgraph()->IntPtrConstant(heap_number_value_offset - kHeapObjectTag);
 }
 
 
@@ -60,24 +64,21 @@
 Node* ChangeLowering::SmiShiftBitsConstant() {
   const int smi_shift_size = machine()->Is32() ? SmiTagging<4>::SmiShiftSize()
                                                : SmiTagging<8>::SmiShiftSize();
-  return jsgraph()->Int32Constant(smi_shift_size + kSmiTagSize);
+  return jsgraph()->IntPtrConstant(smi_shift_size + kSmiTagSize);
 }
 
 
 Node* ChangeLowering::AllocateHeapNumberWithValue(Node* value, Node* control) {
-  // The AllocateHeapNumber() runtime function does not use the context, so we
-  // can safely pass in Smi zero here.
-  Node* context = jsgraph()->ZeroConstant();
+  // The AllocateHeapNumberStub does not use the context, so we can safely pass
+  // in Smi zero here.
+  Callable callable = CodeFactory::AllocateHeapNumber(isolate());
+  CallDescriptor* descriptor = linkage()->GetStubCallDescriptor(
+      callable.descriptor(), 0, CallDescriptor::kNoFlags);
+  Node* target = jsgraph()->HeapConstant(callable.code());
+  Node* context = jsgraph()->NoContextConstant();
   Node* effect = graph()->NewNode(common()->ValueEffect(1), value);
-  const Runtime::Function* function =
-      Runtime::FunctionForId(Runtime::kAllocateHeapNumber);
-  DCHECK_EQ(0, function->nargs);
-  CallDescriptor* desc = linkage()->GetRuntimeCallDescriptor(
-      function->function_id, 0, Operator::kNoProperties);
-  Node* heap_number = graph()->NewNode(
-      common()->Call(desc), jsgraph()->CEntryStubConstant(),
-      jsgraph()->ExternalConstant(ExternalReference(function, isolate())),
-      jsgraph()->Int32Constant(function->nargs), context, effect, control);
+  Node* heap_number = graph()->NewNode(common()->Call(descriptor), target,
+                                       context, effect, control);
   Node* store = graph()->NewNode(
       machine()->Store(StoreRepresentation(kMachFloat64, kNoWriteBarrier)),
       heap_number, HeapNumberValueIndexConstant(), value, heap_number, control);
@@ -85,6 +86,16 @@
 }
 
 
+Node* ChangeLowering::ChangeInt32ToFloat64(Node* value) {
+  return graph()->NewNode(machine()->ChangeInt32ToFloat64(), value);
+}
+
+
+Node* ChangeLowering::ChangeSmiToFloat64(Node* value) {
+  return ChangeInt32ToFloat64(ChangeSmiToInt32(value));
+}
+
+
 Node* ChangeLowering::ChangeSmiToInt32(Node* value) {
   value = graph()->NewNode(machine()->WordSar(), value, SmiShiftBitsConstant());
   if (machine()->Is64()) {
@@ -94,28 +105,44 @@
 }
 
 
+Node* ChangeLowering::ChangeUint32ToFloat64(Node* value) {
+  return graph()->NewNode(machine()->ChangeUint32ToFloat64(), value);
+}
+
+
+Node* ChangeLowering::ChangeUint32ToSmi(Node* value) {
+  if (machine()->Is64()) {
+    value = graph()->NewNode(machine()->ChangeUint32ToUint64(), value);
+  }
+  return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
+}
+
+
 Node* ChangeLowering::LoadHeapNumberValue(Node* value, Node* control) {
   return graph()->NewNode(machine()->Load(kMachFloat64), value,
-                          HeapNumberValueIndexConstant(),
-                          graph()->NewNode(common()->ControlEffect(), control));
+                          HeapNumberValueIndexConstant(), graph()->start(),
+                          control);
+}
+
+
+Node* ChangeLowering::TestNotSmi(Node* value) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagMask == 1);
+  return graph()->NewNode(machine()->WordAnd(), value,
+                          jsgraph()->IntPtrConstant(kSmiTagMask));
+}
+
+
+Node* ChangeLowering::Uint32LessThanOrEqual(Node* lhs, Node* rhs) {
+  return graph()->NewNode(machine()->Uint32LessThanOrEqual(), lhs, rhs);
 }
 
 
 Reduction ChangeLowering::ChangeBitToBool(Node* val, Node* control) {
-  Node* branch = graph()->NewNode(common()->Branch(), val, control);
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* true_value = jsgraph()->TrueConstant();
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* false_value = jsgraph()->FalseConstant();
-
-  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  Node* phi = graph()->NewNode(
-      common()->Phi(static_cast<MachineType>(kTypeBool | kRepTagged), 2),
-      true_value, false_value, merge);
-
-  return Replace(phi);
+  MachineType const type = static_cast<MachineType>(kTypeBool | kRepTagged);
+  return Replace(graph()->NewNode(common()->Select(type), val,
+                                  jsgraph()->TrueConstant(),
+                                  jsgraph()->FalseConstant()));
 }
 
 
@@ -130,109 +157,109 @@
 }
 
 
-Reduction ChangeLowering::ChangeInt32ToTagged(Node* val, Node* control) {
+Reduction ChangeLowering::ChangeInt32ToTagged(Node* value, Node* control) {
   if (machine()->Is64()) {
+    return Replace(graph()->NewNode(
+        machine()->Word64Shl(),
+        graph()->NewNode(machine()->ChangeInt32ToInt64(), value),
+        SmiShiftBitsConstant()));
+  } else if (NodeProperties::GetBounds(value).upper->Is(Type::SignedSmall())) {
     return Replace(
-        graph()->NewNode(machine()->Word64Shl(),
-                         graph()->NewNode(machine()->ChangeInt32ToInt64(), val),
-                         SmiShiftBitsConstant()));
+        graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant()));
   }
 
-  Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), val, val);
+  Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value);
   Node* ovf = graph()->NewNode(common()->Projection(1), add);
 
-  Node* branch = graph()->NewNode(common()->Branch(), ovf, control);
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* heap_number = AllocateHeapNumberWithValue(
-      graph()->NewNode(machine()->ChangeInt32ToFloat64(), val), if_true);
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* smi = graph()->NewNode(common()->Projection(0), add);
-
-  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), heap_number,
-                               smi, merge);
-
-  return Replace(phi);
+  Diamond d(graph(), common(), ovf, BranchHint::kFalse);
+  d.Chain(control);
+  return Replace(
+      d.Phi(kMachAnyTagged,
+            AllocateHeapNumberWithValue(ChangeInt32ToFloat64(value), d.if_true),
+            graph()->NewNode(common()->Projection(0), add)));
 }
 
 
-Reduction ChangeLowering::ChangeTaggedToUI32(Node* val, Node* control,
+Reduction ChangeLowering::ChangeTaggedToUI32(Node* value, Node* control,
                                              Signedness signedness) {
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagMask == 1);
-
-  Node* tag = graph()->NewNode(machine()->WordAnd(), val,
-                               jsgraph()->Int32Constant(kSmiTagMask));
-  Node* branch = graph()->NewNode(common()->Branch(), tag, control);
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  const MachineType type = (signedness == kSigned) ? kMachInt32 : kMachUint32;
   const Operator* op = (signedness == kSigned)
                            ? machine()->ChangeFloat64ToInt32()
                            : machine()->ChangeFloat64ToUint32();
-  Node* change = graph()->NewNode(op, LoadHeapNumberValue(val, if_true));
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* number = ChangeSmiToInt32(val);
-
-  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  Node* phi = graph()->NewNode(
-      common()->Phi((signedness == kSigned) ? kMachInt32 : kMachUint32, 2),
-      change, number, merge);
-
-  return Replace(phi);
+  Diamond d(graph(), common(), TestNotSmi(value), BranchHint::kFalse);
+  d.Chain(control);
+  return Replace(
+      d.Phi(type, graph()->NewNode(op, LoadHeapNumberValue(value, d.if_true)),
+            ChangeSmiToInt32(value)));
 }
 
 
-Reduction ChangeLowering::ChangeTaggedToFloat64(Node* val, Node* control) {
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagMask == 1);
+namespace {
 
-  Node* tag = graph()->NewNode(machine()->WordAnd(), val,
-                               jsgraph()->Int32Constant(kSmiTagMask));
-  Node* branch = graph()->NewNode(common()->Branch(), tag, control);
+bool CanCover(Node* value, IrOpcode::Value opcode) {
+  if (value->opcode() != opcode) return false;
+  bool first = true;
+  for (Edge const edge : value->use_edges()) {
+    if (NodeProperties::IsEffectEdge(edge)) continue;
+    DCHECK(NodeProperties::IsValueEdge(edge));
+    if (!first) return false;
+    first = false;
+  }
+  return true;
+}
 
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* load = LoadHeapNumberValue(val, if_true);
+}  // namespace
 
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* number = graph()->NewNode(machine()->ChangeInt32ToFloat64(),
-                                  ChangeSmiToInt32(val));
 
-  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  Node* phi =
-      graph()->NewNode(common()->Phi(kMachFloat64, 2), load, number, merge);
+Reduction ChangeLowering::ChangeTaggedToFloat64(Node* value, Node* control) {
+  if (CanCover(value, IrOpcode::kJSToNumber)) {
+    // ChangeTaggedToFloat64(JSToNumber(x)) =>
+    //   if IsSmi(x) then ChangeSmiToFloat64(x)
+    //   else let y = JSToNumber(x) in
+    //     if IsSmi(y) then ChangeSmiToFloat64(y)
+    //     else LoadHeapNumberValue(y)
+    Node* const object = NodeProperties::GetValueInput(value, 0);
+    Node* const context = NodeProperties::GetContextInput(value);
+    Node* const effect = NodeProperties::GetEffectInput(value);
+    Node* const control = NodeProperties::GetControlInput(value);
 
-  return Replace(phi);
+    Diamond d1(graph(), common(), TestNotSmi(object), BranchHint::kFalse);
+    d1.Chain(control);
+
+    Node* number =
+        graph()->NewNode(value->op(), object, context, effect, d1.if_true);
+    Diamond d2(graph(), common(), TestNotSmi(number));
+    d2.Nest(d1, true);
+    Node* phi2 = d2.Phi(kMachFloat64, LoadHeapNumberValue(number, d2.if_true),
+                        ChangeSmiToFloat64(number));
+
+    Node* phi1 = d1.Phi(kMachFloat64, phi2, ChangeSmiToFloat64(object));
+    Node* ephi1 = d1.EffectPhi(number, effect);
+
+    for (Edge edge : value->use_edges()) {
+      if (NodeProperties::IsEffectEdge(edge)) {
+        edge.UpdateTo(ephi1);
+      }
+    }
+    return Replace(phi1);
+  }
+
+  Diamond d(graph(), common(), TestNotSmi(value), BranchHint::kFalse);
+  d.Chain(control);
+  Node* load = LoadHeapNumberValue(value, d.if_true);
+  Node* number = ChangeSmiToFloat64(value);
+  return Replace(d.Phi(kMachFloat64, load, number));
 }
 
 
-Reduction ChangeLowering::ChangeUint32ToTagged(Node* val, Node* control) {
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagMask == 1);
-
-  Node* cmp = graph()->NewNode(machine()->Uint32LessThanOrEqual(), val,
-                               SmiMaxValueConstant());
-  Node* branch = graph()->NewNode(common()->Branch(), cmp, control);
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* smi = graph()->NewNode(
-      machine()->WordShl(),
-      machine()->Is64()
-          ? graph()->NewNode(machine()->ChangeUint32ToUint64(), val)
-          : val,
-      SmiShiftBitsConstant());
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* heap_number = AllocateHeapNumberWithValue(
-      graph()->NewNode(machine()->ChangeUint32ToFloat64(), val), if_false);
-
-  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), smi,
-                               heap_number, merge);
-
-  return Replace(phi);
+Reduction ChangeLowering::ChangeUint32ToTagged(Node* value, Node* control) {
+  Diamond d(graph(), common(),
+            Uint32LessThanOrEqual(value, SmiMaxValueConstant()),
+            BranchHint::kTrue);
+  d.Chain(control);
+  return Replace(d.Phi(
+      kMachAnyTagged, ChangeUint32ToSmi(value),
+      AllocateHeapNumberWithValue(ChangeUint32ToFloat64(value), d.if_false)));
 }
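At the value level, ChangeUint32ToTagged chooses between a left-shifted Smi and a freshly allocated HeapNumber, depending on whether the input fits in the Smi range. A standalone sketch of that decision for the 31-bit Smi encoding (assuming a one-bit tag; the real shift also includes SmiShiftSize on 64-bit targets):

#include <cassert>
#include <cstdint>

// Sketch for a 32-bit target with kSmiTagSize == 1 (illustration only,
// not the real object representation).
constexpr uint32_t kSmiMaxValue = 0x3fffffff;  // 2^30 - 1

bool FitsInSmi(uint32_t value) {
  return value <= kSmiMaxValue;  // Uint32LessThanOrEqual(value, SmiMaxValue)
}

int32_t ChangeUint32ToSmi(uint32_t value) {
  return static_cast<int32_t>(value << 1);  // WordShl by SmiShiftBits
}

int main() {
  assert(FitsInSmi(42) && ChangeUint32ToSmi(42) == 84);
  // Anything above kSmiMaxValue takes the AllocateHeapNumberWithValue path.
  assert(!FitsInSmi(0x80000000u));
  return 0;
}

The BranchHint::kTrue above reflects that the Smi path is expected to be the common case.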
 
 
diff --git a/src/compiler/change-lowering.h b/src/compiler/change-lowering.h
index 5d7ab41..773fd08 100644
--- a/src/compiler/change-lowering.h
+++ b/src/compiler/change-lowering.h
@@ -21,9 +21,9 @@
  public:
   ChangeLowering(JSGraph* jsgraph, Linkage* linkage)
       : jsgraph_(jsgraph), linkage_(linkage) {}
-  virtual ~ChangeLowering();
+  ~ChangeLowering() FINAL;
 
-  virtual Reduction Reduce(Node* node) OVERRIDE;
+  Reduction Reduce(Node* node) FINAL;
 
  private:
   Node* HeapNumberValueIndexConstant();
@@ -31,16 +31,23 @@
   Node* SmiShiftBitsConstant();
 
   Node* AllocateHeapNumberWithValue(Node* value, Node* control);
+  Node* ChangeInt32ToFloat64(Node* value);
+  Node* ChangeSmiToFloat64(Node* value);
   Node* ChangeSmiToInt32(Node* value);
+  Node* ChangeUint32ToFloat64(Node* value);
+  Node* ChangeUint32ToSmi(Node* value);
   Node* LoadHeapNumberValue(Node* value, Node* control);
+  Node* TestNotSmi(Node* value);
+  Node* Uint32LessThanOrEqual(Node* lhs, Node* rhs);
 
-  Reduction ChangeBitToBool(Node* val, Node* control);
-  Reduction ChangeBoolToBit(Node* val);
-  Reduction ChangeFloat64ToTagged(Node* val, Node* control);
-  Reduction ChangeInt32ToTagged(Node* val, Node* control);
-  Reduction ChangeTaggedToFloat64(Node* val, Node* control);
-  Reduction ChangeTaggedToUI32(Node* val, Node* control, Signedness signedness);
-  Reduction ChangeUint32ToTagged(Node* val, Node* control);
+  Reduction ChangeBitToBool(Node* value, Node* control);
+  Reduction ChangeBoolToBit(Node* value);
+  Reduction ChangeFloat64ToTagged(Node* value, Node* control);
+  Reduction ChangeInt32ToTagged(Node* value, Node* control);
+  Reduction ChangeTaggedToFloat64(Node* value, Node* control);
+  Reduction ChangeTaggedToUI32(Node* value, Node* control,
+                               Signedness signedness);
+  Reduction ChangeUint32ToTagged(Node* value, Node* control);
 
   Graph* graph() const;
   Isolate* isolate() const;
diff --git a/src/compiler/code-generator-impl.h b/src/compiler/code-generator-impl.h
index a3f7e4c..7942344 100644
--- a/src/compiler/code-generator-impl.h
+++ b/src/compiler/code-generator-impl.h
@@ -5,15 +5,12 @@
 #ifndef V8_COMPILER_CODE_GENERATOR_IMPL_H_
 #define V8_COMPILER_CODE_GENERATOR_IMPL_H_
 
+#include "src/code-stubs.h"
 #include "src/compiler/code-generator.h"
-#include "src/compiler/common-operator.h"
-#include "src/compiler/generic-graph.h"
 #include "src/compiler/instruction.h"
 #include "src/compiler/linkage.h"
-#include "src/compiler/machine-operator.h"
-#include "src/compiler/node.h"
 #include "src/compiler/opcodes.h"
-#include "src/compiler/operator.h"
+#include "src/macro-assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -28,6 +25,8 @@
   InstructionOperandConverter(CodeGenerator* gen, Instruction* instr)
       : gen_(gen), instr_(instr) {}
 
+  // -- Instruction operand accesses with conversions --------------------------
+
   Register InputRegister(int index) {
     return ToRegister(instr_->InputAt(index));
   }
@@ -60,27 +59,31 @@
     return ToHeapObject(instr_->InputAt(index));
   }
 
-  Label* InputLabel(int index) {
-    return gen_->code()->GetLabel(InputBlock(index));
-  }
+  Label* InputLabel(int index) { return ToLabel(instr_->InputAt(index)); }
 
-  BasicBlock* InputBlock(int index) {
-    NodeId block_id = static_cast<NodeId>(InputInt32(index));
-    // operand should be a block id.
-    DCHECK(block_id >= 0);
-    DCHECK(block_id < gen_->schedule()->BasicBlockCount());
-    return gen_->schedule()->GetBlockById(block_id);
+  BasicBlock::RpoNumber InputRpo(int index) {
+    return ToRpoNumber(instr_->InputAt(index));
   }
 
   Register OutputRegister(int index = 0) {
     return ToRegister(instr_->OutputAt(index));
   }
 
+  Register TempRegister(int index) { return ToRegister(instr_->TempAt(index)); }
+
   DoubleRegister OutputDoubleRegister() {
     return ToDoubleRegister(instr_->Output());
   }
 
-  Register TempRegister(int index) { return ToRegister(instr_->TempAt(index)); }
+  // -- Conversions for operands -----------------------------------------------
+
+  Label* ToLabel(InstructionOperand* op) {
+    return gen_->GetLabel(ToRpoNumber(op));
+  }
+
+  BasicBlock::RpoNumber ToRpoNumber(InstructionOperand* op) {
+    return ToConstant(op).ToRpoNumber();
+  }
 
   Register ToRegister(InstructionOperand* op) {
     DCHECK(op->IsRegister());
@@ -92,19 +95,17 @@
     return DoubleRegister::FromAllocationIndex(op->index());
   }
 
-  Constant ToConstant(InstructionOperand* operand) {
-    if (operand->IsImmediate()) {
-      return gen_->code()->GetImmediate(operand->index());
+  Constant ToConstant(InstructionOperand* op) {
+    if (op->IsImmediate()) {
+      return gen_->code()->GetImmediate(op->index());
     }
-    return gen_->code()->GetConstant(operand->index());
+    return gen_->code()->GetConstant(op->index());
   }
 
-  double ToDouble(InstructionOperand* operand) {
-    return ToConstant(operand).ToFloat64();
-  }
+  double ToDouble(InstructionOperand* op) { return ToConstant(op).ToFloat64(); }
 
-  Handle<HeapObject> ToHeapObject(InstructionOperand* operand) {
-    return ToConstant(operand).ToHeapObject();
+  Handle<HeapObject> ToHeapObject(InstructionOperand* op) {
+    return ToConstant(op).ToHeapObject();
   }
 
   Frame* frame() const { return gen_->frame(); }
@@ -117,6 +118,27 @@
 };
 
 
+// Generator for out-of-line code that is emitted after the main code is done.
+class OutOfLineCode : public ZoneObject {
+ public:
+  explicit OutOfLineCode(CodeGenerator* gen);
+  virtual ~OutOfLineCode();
+
+  virtual void Generate() = 0;
+
+  Label* entry() { return &entry_; }
+  Label* exit() { return &exit_; }
+  MacroAssembler* masm() const { return masm_; }
+  OutOfLineCode* next() const { return next_; }
+
+ private:
+  Label entry_;
+  Label exit_;
+  MacroAssembler* const masm_;
+  OutOfLineCode* const next_;
+};
+
+
 // TODO(dcarney): generify this on bleeding_edge and replace this call
 // when merged.
 static inline void FinishCode(MacroAssembler* masm) {
diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc
index f22c479..cfe4f06 100644
--- a/src/compiler/code-generator.cc
+++ b/src/compiler/code-generator.cc
@@ -12,9 +12,14 @@
 namespace internal {
 namespace compiler {
 
-CodeGenerator::CodeGenerator(InstructionSequence* code)
-    : code_(code),
-      current_block_(NULL),
+CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
+                             InstructionSequence* code, CompilationInfo* info)
+    : frame_(frame),
+      linkage_(linkage),
+      code_(code),
+      info_(info),
+      labels_(zone()->NewArray<Label>(code->InstructionBlockCount())),
+      current_block_(BasicBlock::RpoNumber::Invalid()),
       current_source_position_(SourcePosition::Invalid()),
       masm_(code->zone()->isolate(), NULL, 0),
       resolver_(this),
@@ -22,11 +27,16 @@
       deoptimization_states_(code->zone()),
       deoptimization_literals_(code->zone()),
       translations_(code->zone()),
-      last_lazy_deopt_pc_(0) {}
+      last_lazy_deopt_pc_(0),
+      ools_(nullptr) {
+  for (int i = 0; i < code->InstructionBlockCount(); ++i) {
+    new (&labels_[i]) Label;
+  }
+}
 
 
 Handle<Code> CodeGenerator::GenerateCode() {
-  CompilationInfo* info = linkage()->info();
+  CompilationInfo* info = this->info();
 
   // Emit a code line info recording start event.
   PositionsRecorder* recorder = masm()->positions_recorder();
@@ -41,15 +51,42 @@
   info->set_prologue_offset(masm()->pc_offset());
   AssemblePrologue();
 
-  // Assemble all instructions.
-  for (InstructionSequence::const_iterator i = code()->begin();
-       i != code()->end(); ++i) {
-    AssembleInstruction(*i);
+  // Assemble all non-deferred blocks, followed by deferred ones.
+  for (int deferred = 0; deferred < 2; ++deferred) {
+    for (auto const block : code()->instruction_blocks()) {
+      if (block->IsDeferred() == (deferred == 0)) {
+        continue;
+      }
+      // Align loop headers on 16-byte boundaries.
+      if (block->IsLoopHeader()) masm()->Align(16);
+      // Bind a label for the current block.
+      current_block_ = block->rpo_number();
+      if (FLAG_code_comments) {
+        // TODO(titzer): these code comments are a giant memory leak.
+        Vector<char> buffer = Vector<char>::New(32);
+        SNPrintF(buffer, "-- B%d start --", block->id().ToInt());
+        masm()->RecordComment(buffer.start());
+      }
+      masm()->bind(GetLabel(current_block_));
+      for (int i = block->code_start(); i < block->code_end(); ++i) {
+        AssembleInstruction(code()->InstructionAt(i));
+      }
+    }
+  }
+
+  // Assemble all out-of-line code.
+  if (ools_) {
+    masm()->RecordComment("-- Out of line code --");
+    for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) {
+      masm()->bind(ool->entry());
+      ool->Generate();
+      masm()->jmp(ool->exit());
+    }
   }
 
   FinishCode(masm());
 
-  // Ensure there is space for lazy deopt.
+  // Ensure there is space for lazy deoptimization in the code.
   if (!info->IsStub()) {
     int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
     while (masm()->pc_offset() < target_offset) {
@@ -72,6 +109,11 @@
 
   PopulateDeoptimizationData(result);
 
+  // Ensure there is space for lazy deoptimization in the relocation info.
+  if (!info->IsStub()) {
+    Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(result);
+  }
+
   // Emit a code line info recording stop event.
   void* line_info = recorder->DetachJITHandlerData();
   LOG_CODE_EVENT(isolate(), CodeEndLinePosInfoRecordEvent(*result, line_info));
@@ -80,6 +122,12 @@
 }
 
 
+bool CodeGenerator::IsNextInAssemblyOrder(BasicBlock::RpoNumber block) const {
+  return code()->InstructionBlockAt(current_block_)->ao_number().IsNext(
+      code()->InstructionBlockAt(block)->ao_number());
+}
+
+
 void CodeGenerator::RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
                                     int arguments,
                                     Safepoint::DeoptMode deopt_mode) {
@@ -100,18 +148,6 @@
 
 
 void CodeGenerator::AssembleInstruction(Instruction* instr) {
-  if (instr->IsBlockStart()) {
-    // Bind a label for a block start and handle parallel moves.
-    BlockStartInstruction* block_start = BlockStartInstruction::cast(instr);
-    current_block_ = block_start->block();
-    if (FLAG_code_comments) {
-      // TODO(titzer): these code comments are a giant memory leak.
-      Vector<char> buffer = Vector<char>::New(32);
-      SNPrintF(buffer, "-- B%d start --", block_start->block()->id());
-      masm()->RecordComment(buffer.start());
-    }
-    masm()->bind(block_start->label());
-  }
   if (instr->IsGapMoves()) {
     // Handle parallel moves associated with the gap instruction.
     AssembleGap(GapInstruction::cast(instr));
@@ -121,18 +157,39 @@
     // Assemble architecture-specific code for the instruction.
     AssembleArchInstruction(instr);
 
-    // Assemble branches or boolean materializations after this instruction.
     FlagsMode mode = FlagsModeField::decode(instr->opcode());
     FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
-    switch (mode) {
-      case kFlags_none:
+    if (mode == kFlags_branch) {
+      // Assemble a branch after this instruction.
+      InstructionOperandConverter i(this, instr);
+      BasicBlock::RpoNumber true_rpo =
+          i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
+      BasicBlock::RpoNumber false_rpo =
+          i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
+
+      if (true_rpo == false_rpo) {
+        // Redundant branch: both targets are the same block.
+        if (!IsNextInAssemblyOrder(true_rpo)) {
+          AssembleArchJump(true_rpo);
+        }
         return;
-      case kFlags_set:
-        return AssembleArchBoolean(instr, condition);
-      case kFlags_branch:
-        return AssembleArchBranch(instr, condition);
+      }
+      if (IsNextInAssemblyOrder(true_rpo)) {
+        // The true block is next: negate the condition and fall through to it.
+        std::swap(true_rpo, false_rpo);
+        condition = NegateFlagsCondition(condition);
+      }
+      BranchInfo branch;
+      branch.condition = condition;
+      branch.true_label = GetLabel(true_rpo);
+      branch.false_label = GetLabel(false_rpo);
+      branch.fallthru = IsNextInAssemblyOrder(false_rpo);
+      // Assemble architecture-specific branch.
+      AssembleArchBranch(instr, &branch);
+    } else if (mode == kFlags_set) {
+      // Assemble a boolean materialization after this instruction.
+      AssembleArchBoolean(instr, condition);
     }
-    UNREACHABLE();
   }
 }
 
@@ -147,7 +204,7 @@
     masm()->positions_recorder()->WriteRecordedPositions();
     if (FLAG_code_comments) {
       Vector<char> buffer = Vector<char>::New(256);
-      CompilationInfo* info = linkage()->info();
+      CompilationInfo* info = this->info();
       int ln = Script::GetLineNumber(info->script(), code_pos);
       int cn = Script::GetColumnNumber(info->script(), code_pos);
       if (info->script()->name()->IsString()) {
@@ -177,7 +234,7 @@
 
 
 void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
-  CompilationInfo* info = linkage()->info();
+  CompilationInfo* info = this->info();
   int deopt_count = static_cast<int>(deoptimization_states_.size());
   if (deopt_count == 0) return;
   Handle<DeoptimizationInputData> data =
@@ -260,17 +317,17 @@
     // because it is only used to get locals and arguments (by the debugger and
     // f.arguments), and those are the same in the pre-call and post-call
     // states.
-    if (descriptor->state_combine() != kIgnoreOutput) {
-      deopt_state_id =
-          BuildTranslation(instr, -1, frame_state_offset, kIgnoreOutput);
+    if (!descriptor->state_combine().IsOutputIgnored()) {
+      deopt_state_id = BuildTranslation(instr, -1, frame_state_offset,
+                                        OutputFrameStateCombine::Ignore());
     }
 #if DEBUG
     // Make sure all the values live in stack slots or they are immediates.
     // (The values should not live in register because registers are clobbered
     // by calls.)
-    for (size_t i = 0; i < descriptor->size(); i++) {
+    for (size_t i = 0; i < descriptor->GetSize(); i++) {
       InstructionOperand* op = instr->InputAt(frame_state_offset + 1 + i);
-      CHECK(op->IsStackSlot() || op->IsImmediate());
+      CHECK(op->IsStackSlot() || op->IsDoubleStackSlot() || op->IsImmediate());
     }
 #endif
     safepoints()->RecordLazyDeoptimizationIndex(deopt_state_id);
@@ -296,6 +353,44 @@
   return code()->GetFrameStateDescriptor(state_id);
 }
 
+struct OperandAndType {
+  OperandAndType(InstructionOperand* operand, MachineType type)
+      : operand_(operand), type_(type) {}
+
+  InstructionOperand* operand_;
+  MachineType type_;
+};
+
+static OperandAndType TypedOperandForFrameState(
+    FrameStateDescriptor* descriptor, Instruction* instr,
+    size_t frame_state_offset, size_t index, OutputFrameStateCombine combine) {
+  DCHECK(index < descriptor->GetSize(combine));
+  switch (combine.kind()) {
+    case OutputFrameStateCombine::kPushOutput: {
+      DCHECK(combine.GetPushCount() <= instr->OutputCount());
+      size_t size_without_output =
+          descriptor->GetSize(OutputFrameStateCombine::Ignore());
+      // If the index is past the existing stack items, return the output.
+      if (index >= size_without_output) {
+        return OperandAndType(instr->OutputAt(index - size_without_output),
+                              kMachAnyTagged);
+      }
+      break;
+    }
+    case OutputFrameStateCombine::kPokeAt:
+      size_t index_from_top =
+          descriptor->GetSize(combine) - 1 - combine.GetOffsetToPokeAt();
+      if (index >= index_from_top &&
+          index < index_from_top + instr->OutputCount()) {
+        return OperandAndType(instr->OutputAt(index - index_from_top),
+                              kMachAnyTagged);
+      }
+      break;
+  }
+  return OperandAndType(instr->InputAt(frame_state_offset + index),
+                        descriptor->GetType(index));
+}
+
 
 void CodeGenerator::BuildTranslationForFrameStateDescriptor(
     FrameStateDescriptor* descriptor, Instruction* instr,
@@ -305,7 +400,7 @@
   if (descriptor->outer_state() != NULL) {
     BuildTranslationForFrameStateDescriptor(descriptor->outer_state(), instr,
                                             translation, frame_state_offset,
-                                            kIgnoreOutput);
+                                            OutputFrameStateCombine::Ignore());
   }
 
   int id = Translation::kSelfLiteralId;
@@ -318,7 +413,8 @@
     case JS_FRAME:
       translation->BeginJSFrame(
           descriptor->bailout_id(), id,
-          static_cast<unsigned int>(descriptor->GetHeight(state_combine)));
+          static_cast<unsigned int>(descriptor->GetSize(state_combine) -
+                                    descriptor->parameters_count()));
       break;
     case ARGUMENTS_ADAPTOR:
       translation->BeginArgumentsAdaptorFrame(
@@ -327,19 +423,10 @@
   }
 
   frame_state_offset += descriptor->outer_state()->GetTotalSize();
-  for (size_t i = 0; i < descriptor->size(); i++) {
-    AddTranslationForOperand(
-        translation, instr,
-        instr->InputAt(static_cast<int>(frame_state_offset + i)));
-  }
-
-  switch (state_combine) {
-    case kPushOutput:
-      DCHECK(instr->OutputCount() == 1);
-      AddTranslationForOperand(translation, instr, instr->OutputAt(0));
-      break;
-    case kIgnoreOutput:
-      break;
+  for (size_t i = 0; i < descriptor->GetSize(state_combine); i++) {
+    OperandAndType op = TypedOperandForFrameState(
+        descriptor, instr, frame_state_offset, i, state_combine);
+    AddTranslationForOperand(translation, instr, op.operand_, op.type_);
   }
 }
 
@@ -368,15 +455,38 @@
 
 void CodeGenerator::AddTranslationForOperand(Translation* translation,
                                              Instruction* instr,
-                                             InstructionOperand* op) {
+                                             InstructionOperand* op,
+                                             MachineType type) {
   if (op->IsStackSlot()) {
-    translation->StoreStackSlot(op->index());
+    if (type == kMachBool || type == kMachInt32 || type == kMachInt8 ||
+        type == kMachInt16) {
+      translation->StoreInt32StackSlot(op->index());
+    } else if (type == kMachUint32 || type == kMachUint16 ||
+               type == kMachUint8) {
+      translation->StoreUint32StackSlot(op->index());
+    } else if ((type & kRepMask) == kRepTagged) {
+      translation->StoreStackSlot(op->index());
+    } else {
+      CHECK(false);
+    }
   } else if (op->IsDoubleStackSlot()) {
+    DCHECK((type & (kRepFloat32 | kRepFloat64)) != 0);
     translation->StoreDoubleStackSlot(op->index());
   } else if (op->IsRegister()) {
     InstructionOperandConverter converter(this, instr);
-    translation->StoreRegister(converter.ToRegister(op));
+    if (type == kMachBool || type == kMachInt32 || type == kMachInt8 ||
+        type == kMachInt16) {
+      translation->StoreInt32Register(converter.ToRegister(op));
+    } else if (type == kMachUint32 || type == kMachUint16 ||
+               type == kMachUint8) {
+      translation->StoreUint32Register(converter.ToRegister(op));
+    } else if ((type & kRepMask) == kRepTagged) {
+      translation->StoreRegister(converter.ToRegister(op));
+    } else {
+      CHECK(false);
+    }
   } else if (op->IsDoubleRegister()) {
+    DCHECK((type & (kRepFloat32 | kRepFloat64)) != 0);
     InstructionOperandConverter converter(this, instr);
     translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
   } else if (op->IsImmediate()) {
@@ -385,22 +495,25 @@
     Handle<Object> constant_object;
     switch (constant.type()) {
       case Constant::kInt32:
+        DCHECK(type == kMachInt32 || type == kMachUint32);
         constant_object =
             isolate()->factory()->NewNumberFromInt(constant.ToInt32());
         break;
       case Constant::kFloat64:
+        DCHECK(type == kMachFloat64 || type == kMachAnyTagged);
         constant_object = isolate()->factory()->NewNumber(constant.ToFloat64());
         break;
       case Constant::kHeapObject:
+        DCHECK((type & kRepMask) == kRepTagged);
         constant_object = constant.ToHeapObject();
         break;
       default:
-        UNREACHABLE();
+        CHECK(false);
     }
     int literal_id = DefineDeoptimizationLiteral(constant_object);
     translation->StoreLiteral(literal_id);
   } else {
-    UNREACHABLE();
+    CHECK(false);
   }
 }
 
@@ -417,7 +530,7 @@
 
 
 void CodeGenerator::AssembleArchBranch(Instruction* instr,
-                                       FlagsCondition condition) {
+                                       BranchInfo* branch) {
   UNIMPLEMENTED();
 }
 
@@ -428,6 +541,11 @@
 }
 
 
+void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+  UNIMPLEMENTED();
+}
+
+
 void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
   UNIMPLEMENTED();
 }
@@ -455,6 +573,15 @@
 
 #endif  // !V8_TURBOFAN_BACKEND
 
+
+OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
+    : masm_(gen->masm()), next_(gen->ools_) {
+  gen->ools_ = this;
+}
+
+
+OutOfLineCode::~OutOfLineCode() {}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
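
For context on the code-generator changes above: the JS frame height is now
derived from the frame state descriptor as GetSize(state_combine) -
parameters_count() instead of a dedicated GetHeight(), and
AddTranslationForOperand receives the operand's MachineType so that signed,
unsigned, and tagged values get distinct Translation stores, with CHECK(false)
guarding every unexpected combination. A minimal standalone sketch of that
dispatch, assuming a flattened enum (StoreKind and SelectStore are
illustrative names, not V8 API):

    // Sketch of the MachineType dispatch in AddTranslationForOperand.
    // The real MachineType in src/machine-type.h is a bitfield and the
    // tagged case tests (type & kRepMask) == kRepTagged.
    #include <cassert>
    #include <iostream>

    enum MachineType {
      kMachBool, kMachInt8, kMachInt16, kMachInt32,  // signed family
      kMachUint8, kMachUint16, kMachUint32,          // unsigned family
      kMachAnyTagged                                 // tagged smi/pointer
    };

    enum class StoreKind { kInt32, kUint32, kTagged };

    StoreKind SelectStore(MachineType type) {
      if (type == kMachBool || type == kMachInt32 || type == kMachInt8 ||
          type == kMachInt16) {
        return StoreKind::kInt32;   // StoreInt32StackSlot / StoreInt32Register
      }
      if (type == kMachUint32 || type == kMachUint16 || type == kMachUint8) {
        return StoreKind::kUint32;  // StoreUint32StackSlot / StoreUint32Register
      }
      if (type == kMachAnyTagged) {
        return StoreKind::kTagged;  // StoreStackSlot / StoreRegister
      }
      assert(false && "CHECK(false) in the real code");
      return StoreKind::kTagged;
    }

    int main() {
      assert(SelectStore(kMachInt8) == StoreKind::kInt32);
      assert(SelectStore(kMachUint16) == StoreKind::kUint32);
      assert(SelectStore(kMachAnyTagged) == StoreKind::kTagged);
      std::cout << "dispatch ok\n";
    }
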
diff --git a/src/compiler/code-generator.h b/src/compiler/code-generator.h
index ddc2f9a..747bad2 100644
--- a/src/compiler/code-generator.h
+++ b/src/compiler/code-generator.h
@@ -5,8 +5,6 @@
 #ifndef V8_COMPILER_CODE_GENERATOR_H_
 #define V8_COMPILER_CODE_GENERATOR_H_
 
-#include <deque>
-
 #include "src/compiler/gap-resolver.h"
 #include "src/compiler/instruction.h"
 #include "src/deoptimizer.h"
@@ -17,33 +15,44 @@
 namespace internal {
 namespace compiler {
 
+// Forward declarations.
+class Linkage;
+class OutOfLineCode;
+
+struct BranchInfo {
+  FlagsCondition condition;
+  Label* true_label;
+  Label* false_label;
+  bool fallthru;
+};
+
+
 // Generates native code for a sequence of instructions.
 class CodeGenerator FINAL : public GapResolver::Assembler {
  public:
-  explicit CodeGenerator(InstructionSequence* code);
+  explicit CodeGenerator(Frame* frame, Linkage* linkage,
+                         InstructionSequence* code, CompilationInfo* info);
 
   // Generate native code.
   Handle<Code> GenerateCode();
 
   InstructionSequence* code() const { return code_; }
-  Frame* frame() const { return code()->frame(); }
-  Graph* graph() const { return code()->graph(); }
+  Frame* frame() const { return frame_; }
   Isolate* isolate() const { return zone()->isolate(); }
-  Linkage* linkage() const { return code()->linkage(); }
-  Schedule* schedule() const { return code()->schedule(); }
+  Linkage* linkage() const { return linkage_; }
+
+  Label* GetLabel(BasicBlock::RpoNumber rpo) { return &labels_[rpo.ToSize()]; }
 
  private:
   MacroAssembler* masm() { return &masm_; }
   GapResolver* resolver() { return &resolver_; }
   SafepointTableBuilder* safepoints() { return &safepoints_; }
   Zone* zone() const { return code()->zone(); }
+  CompilationInfo* info() const { return info_; }
 
   // Checks if {block} will appear directly after {current_block_} when
   // assembling code, in which case, a fall-through can be used.
-  bool IsNextInAssemblyOrder(const BasicBlock* block) const {
-    return block->rpo_number_ == (current_block_->rpo_number_ + 1) &&
-           block->deferred_ == current_block_->deferred_;
-  }
+  bool IsNextInAssemblyOrder(BasicBlock::RpoNumber block) const;
 
   // Record a safepoint with the given pointer map.
   void RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
@@ -59,7 +68,8 @@
   // ===========================================================================
 
   void AssembleArchInstruction(Instruction* instr);
-  void AssembleArchBranch(Instruction* instr, FlagsCondition condition);
+  void AssembleArchJump(BasicBlock::RpoNumber target);
+  void AssembleArchBranch(Instruction* instr, BranchInfo* branch);
   void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
 
   void AssembleDeoptimizerCall(int deoptimization_id);
@@ -76,10 +86,10 @@
   // ===========================================================================
 
   // Interface used by the gap resolver to emit moves and swaps.
-  virtual void AssembleMove(InstructionOperand* source,
-                            InstructionOperand* destination) OVERRIDE;
-  virtual void AssembleSwap(InstructionOperand* source,
-                            InstructionOperand* destination) OVERRIDE;
+  void AssembleMove(InstructionOperand* source,
+                    InstructionOperand* destination) FINAL;
+  void AssembleSwap(InstructionOperand* source,
+                    InstructionOperand* destination) FINAL;
 
   // ===========================================================================
   // Deoptimization table construction
@@ -96,7 +106,7 @@
       Translation* translation, size_t frame_state_offset,
       OutputFrameStateCombine state_combine);
   void AddTranslationForOperand(Translation* translation, Instruction* instr,
-                                InstructionOperand* op);
+                                InstructionOperand* op, MachineType type);
   void AddNopForSmiCodeInlining();
   void EnsureSpaceForLazyDeopt();
   void MarkLazyDeoptSite();
@@ -119,8 +129,14 @@
     int pc_offset_;
   };
 
-  InstructionSequence* code_;
-  BasicBlock* current_block_;
+  friend class OutOfLineCode;
+
+  Frame* const frame_;
+  Linkage* const linkage_;
+  InstructionSequence* const code_;
+  CompilationInfo* const info_;
+  Label* const labels_;
+  BasicBlock::RpoNumber current_block_;
   SourcePosition current_source_position_;
   MacroAssembler masm_;
   GapResolver resolver_;
@@ -129,6 +145,7 @@
   ZoneDeque<Handle<Object> > deoptimization_literals_;
   TranslationBuffer translations_;
   int last_lazy_deopt_pc_;
+  OutOfLineCode* ools_;
 };
 
 }  // namespace compiler
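
The new OutOfLineCode helper registers itself with the code generator from its
constructor: each instance captures the generator's masm() and prepends itself
to the intrusive ools_ list, so deferred code can be emitted after the main
instruction stream. A standalone sketch of that registration pattern with
hypothetical Generator/OutOfLine/AssembleOutOfLineCode names (how the real
CodeGenerator walks the list is outside this hunk):

    #include <iostream>

    class Generator;

    class OutOfLine {
     public:
      explicit OutOfLine(Generator* gen);  // links itself onto gen's list
      virtual ~OutOfLine() = default;
      virtual void Generate() = 0;         // body emitted out of line
      OutOfLine* next() const { return next_; }

     private:
      OutOfLine* next_;
    };

    class Generator {
     public:
      // Walks the intrusive list and emits every deferred fragment.
      void AssembleOutOfLineCode() {
        for (OutOfLine* ool = ools_; ool != nullptr; ool = ool->next()) {
          ool->Generate();
        }
      }

     private:
      friend class OutOfLine;
      OutOfLine* ools_ = nullptr;  // head of the intrusive list
    };

    OutOfLine::OutOfLine(Generator* gen) : next_(gen->ools_) {
      gen->ools_ = this;  // constructor prepends, exactly as above
    }

    struct PrintOol final : OutOfLine {
      PrintOol(Generator* g, int id) : OutOfLine(g), id_(id) {}
      void Generate() override { std::cout << "ool " << id_ << "\n"; }
      int id_;
    };

    int main() {
      Generator gen;
      PrintOol a(&gen, 1), b(&gen, 2);  // registered in construction order
      gen.AssembleOutOfLineCode();      // prints "ool 2" then "ool 1" (LIFO)
    }
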
diff --git a/src/compiler/common-node-cache.cc b/src/compiler/common-node-cache.cc
new file mode 100644
index 0000000..ee1fa0f
--- /dev/null
+++ b/src/compiler/common-node-cache.cc
@@ -0,0 +1,29 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-node-cache.h"
+
+#include "src/assembler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Node** CommonNodeCache::FindExternalConstant(ExternalReference value) {
+  return external_constants_.Find(zone(), bit_cast<intptr_t>(value.address()));
+}
+
+
+void CommonNodeCache::GetCachedNodes(ZoneVector<Node*>* nodes) {
+  int32_constants_.GetCachedNodes(nodes);
+  int64_constants_.GetCachedNodes(nodes);
+  float32_constants_.GetCachedNodes(nodes);
+  float64_constants_.GetCachedNodes(nodes);
+  external_constants_.GetCachedNodes(nodes);
+  number_constants_.GetCachedNodes(nodes);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/common-node-cache.h b/src/compiler/common-node-cache.h
index 1ed2b04..7ec70ae 100644
--- a/src/compiler/common-node-cache.h
+++ b/src/compiler/common-node-cache.h
@@ -5,47 +5,67 @@
 #ifndef V8_COMPILER_COMMON_NODE_CACHE_H_
 #define V8_COMPILER_COMMON_NODE_CACHE_H_
 
-#include "src/assembler.h"
 #include "src/compiler/node-cache.h"
 
 namespace v8 {
 namespace internal {
+
+// Forward declarations.
+class ExternalReference;
+
+
 namespace compiler {
 
 // Bundles various caches for common nodes.
-class CommonNodeCache FINAL : public ZoneObject {
+class CommonNodeCache FINAL {
  public:
   explicit CommonNodeCache(Zone* zone) : zone_(zone) {}
+  ~CommonNodeCache() {}
 
   Node** FindInt32Constant(int32_t value) {
-    return int32_constants_.Find(zone_, value);
+    return int32_constants_.Find(zone(), value);
+  }
+
+  Node** FindInt64Constant(int64_t value) {
+    return int64_constants_.Find(zone(), value);
+  }
+
+  Node** FindFloat32Constant(float value) {
+    // We canonicalize float constants at the bit representation level.
+    return float32_constants_.Find(zone(), bit_cast<int32_t>(value));
   }
 
   Node** FindFloat64Constant(double value) {
     // We canonicalize double constants at the bit representation level.
-    return float64_constants_.Find(zone_, bit_cast<int64_t>(value));
+    return float64_constants_.Find(zone(), bit_cast<int64_t>(value));
   }
 
-  Node** FindExternalConstant(ExternalReference reference) {
-    return external_constants_.Find(zone_, reference.address());
-  }
+  Node** FindExternalConstant(ExternalReference value);
 
   Node** FindNumberConstant(double value) {
     // We canonicalize double constants at the bit representation level.
-    return number_constants_.Find(zone_, bit_cast<int64_t>(value));
+    return number_constants_.Find(zone(), bit_cast<int64_t>(value));
   }
 
+  // Return all nodes from the cache.
+  void GetCachedNodes(ZoneVector<Node*>* nodes);
+
   Zone* zone() const { return zone_; }
 
  private:
   Int32NodeCache int32_constants_;
+  Int64NodeCache int64_constants_;
+  Int32NodeCache float32_constants_;
   Int64NodeCache float64_constants_;
-  PtrNodeCache external_constants_;
+  IntPtrNodeCache external_constants_;
   Int64NodeCache number_constants_;
   Zone* zone_;
+
+  DISALLOW_COPY_AND_ASSIGN(CommonNodeCache);
 };
-}
-}
-}  // namespace v8::internal::compiler
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_COMPILER_COMMON_NODE_CACHE_H_
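
The float32/float64/number caches key on bit patterns (bit_cast) rather than
on floating-point values. That keeps +0.0 and -0.0, which compare equal as
floats, as distinct constants, and it makes NaN, which compares unequal to
itself, cacheable at all. A standalone sketch of the idea, with a plain
unordered_map standing in for NodeCache (std::bit_cast needs C++20):

    #include <bit>
    #include <cstdint>
    #include <iostream>
    #include <unordered_map>

    std::unordered_map<int32_t, int> cache;  // bits -> node id (stand-in)

    int Find(float value) {
      int32_t key = std::bit_cast<int32_t>(value);  // canonicalize on bits
      auto it = cache.find(key);
      if (it == cache.end()) {
        it = cache.emplace(key, static_cast<int>(cache.size())).first;
      }
      return it->second;
    }

    int main() {
      // 0.0f == -0.0f as floats, but they must not share a cached node;
      // their bit patterns differ, so they do not.
      std::cout << (Find(0.0f) == Find(-0.0f)) << "\n";  // 0
      // NaN != NaN as floats, yet one NaN bit pattern caches to one node.
      float nan = std::bit_cast<float>(static_cast<int32_t>(0x7fc00000));
      std::cout << (Find(nan) == Find(nan)) << "\n";     // 1
    }
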
diff --git a/src/compiler/common-operator-reducer.cc b/src/compiler/common-operator-reducer.cc
new file mode 100644
index 0000000..cf597ea
--- /dev/null
+++ b/src/compiler/common-operator-reducer.cc
@@ -0,0 +1,41 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator-reducer.h"
+
+#include "src/compiler/common-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Reduction CommonOperatorReducer::Reduce(Node* node) {
+  switch (node->opcode()) {
+    case IrOpcode::kEffectPhi:
+    case IrOpcode::kPhi: {
+      int const input_count = node->InputCount();
+      if (input_count > 1) {
+        Node* const replacement = node->InputAt(0);
+        for (int i = 1; i < input_count - 1; ++i) {
+          if (node->InputAt(i) != replacement) return NoChange();
+        }
+        return Replace(replacement);
+      }
+      break;
+    }
+    case IrOpcode::kSelect: {
+      if (node->InputAt(1) == node->InputAt(2)) {
+        return Replace(node->InputAt(1));
+      }
+      break;
+    }
+    default:
+      break;
+  }
+  return NoChange();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
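
CommonOperatorReducer performs two simple strength reductions: a Phi or
EffectPhi whose value/effect inputs are all the same node collapses to that
node (the loop stops at input_count - 1 because the last input of a phi is
its control dependency), and a Select whose two value inputs agree collapses
to either of them. A standalone sketch of the two checks, with a stub Node
and a plain vector in place of the real input edges:

    #include <cassert>
    #include <vector>

    struct Node { int id; };  // stub; identity is what matters here

    // Phi(v, v, ..., v; control) ==> v
    Node* ReducePhi(const std::vector<Node*>& inputs) {
      int input_count = static_cast<int>(inputs.size());
      if (input_count <= 1) return nullptr;        // nothing to reduce
      Node* replacement = inputs[0];
      for (int i = 1; i < input_count - 1; ++i) {  // skip trailing control
        if (inputs[i] != replacement) return nullptr;
      }
      return replacement;
    }

    // Select(cond, v, v) ==> v
    Node* ReduceSelect(Node* cond, Node* vtrue, Node* vfalse) {
      (void)cond;  // the condition is irrelevant when both arms agree
      return vtrue == vfalse ? vtrue : nullptr;
    }

    int main() {
      Node a{1}, b{2}, control{0};
      assert(ReducePhi({&a, &a, &control}) == &a);  // both predecessors: a
      assert(ReducePhi({&a, &b, &control}) == nullptr);
      assert(ReduceSelect(&b, &a, &a) == &a);
    }
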
diff --git a/src/compiler/common-operator-reducer.h b/src/compiler/common-operator-reducer.h
new file mode 100644
index 0000000..10543db
--- /dev/null
+++ b/src/compiler/common-operator-reducer.h
@@ -0,0 +1,27 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_COMMON_OPERATOR_REDUCER_H_
+#define V8_COMPILER_COMMON_OPERATOR_REDUCER_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Performs strength reduction on nodes that have common operators.
+class CommonOperatorReducer FINAL : public Reducer {
+ public:
+  CommonOperatorReducer() {}
+  ~CommonOperatorReducer() FINAL {}
+
+  Reduction Reduce(Node* node) FINAL;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_COMMON_OPERATOR_REDUCER_H_
diff --git a/src/compiler/common-operator-unittest.cc b/src/compiler/common-operator-unittest.cc
deleted file mode 100644
index 5001770..0000000
--- a/src/compiler/common-operator-unittest.cc
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/common-operator.h"
-
-#include <limits>
-
-#include "src/compiler/operator-properties-inl.h"
-#include "src/test/test-utils.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
-// -----------------------------------------------------------------------------
-// Shared operators.
-
-
-namespace {
-
-struct SharedOperator {
-  const Operator* (CommonOperatorBuilder::*constructor)();
-  IrOpcode::Value opcode;
-  Operator::Properties properties;
-  int value_input_count;
-  int effect_input_count;
-  int control_input_count;
-  int effect_output_count;
-  int control_output_count;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const SharedOperator& fop) {
-  return os << IrOpcode::Mnemonic(fop.opcode);
-}
-
-
-const SharedOperator kSharedOperators[] = {
-#define SHARED(Name, properties, value_input_count, effect_input_count,        \
-               control_input_count, effect_output_count, control_output_count) \
-  {                                                                            \
-    &CommonOperatorBuilder::Name, IrOpcode::k##Name, properties,               \
-        value_input_count, effect_input_count, control_input_count,            \
-        effect_output_count, control_output_count                              \
-  }
-    SHARED(Dead, Operator::kFoldable, 0, 0, 0, 0, 1),
-    SHARED(End, Operator::kFoldable, 0, 0, 1, 0, 0),
-    SHARED(Branch, Operator::kFoldable, 1, 0, 1, 0, 2),
-    SHARED(IfTrue, Operator::kFoldable, 0, 0, 1, 0, 1),
-    SHARED(IfFalse, Operator::kFoldable, 0, 0, 1, 0, 1),
-    SHARED(Throw, Operator::kFoldable, 1, 0, 1, 0, 1),
-    SHARED(Return, Operator::kNoProperties, 1, 1, 1, 1, 1),
-    SHARED(ControlEffect, Operator::kPure, 0, 0, 1, 1, 0)
-#undef SHARED
-};
-
-
-class CommonSharedOperatorTest
-    : public TestWithZone,
-      public ::testing::WithParamInterface<SharedOperator> {};
-
-}  // namespace
-
-
-TEST_P(CommonSharedOperatorTest, InstancesAreGloballyShared) {
-  const SharedOperator& sop = GetParam();
-  CommonOperatorBuilder common1(zone());
-  CommonOperatorBuilder common2(zone());
-  EXPECT_EQ((common1.*sop.constructor)(), (common2.*sop.constructor)());
-}
-
-
-TEST_P(CommonSharedOperatorTest, NumberOfInputsAndOutputs) {
-  CommonOperatorBuilder common(zone());
-  const SharedOperator& sop = GetParam();
-  const Operator* op = (common.*sop.constructor)();
-
-  EXPECT_EQ(sop.value_input_count, OperatorProperties::GetValueInputCount(op));
-  EXPECT_EQ(sop.effect_input_count,
-            OperatorProperties::GetEffectInputCount(op));
-  EXPECT_EQ(sop.control_input_count,
-            OperatorProperties::GetControlInputCount(op));
-  EXPECT_EQ(
-      sop.value_input_count + sop.effect_input_count + sop.control_input_count,
-      OperatorProperties::GetTotalInputCount(op));
-
-  EXPECT_EQ(0, OperatorProperties::GetValueOutputCount(op));
-  EXPECT_EQ(sop.effect_output_count,
-            OperatorProperties::GetEffectOutputCount(op));
-  EXPECT_EQ(sop.control_output_count,
-            OperatorProperties::GetControlOutputCount(op));
-}
-
-
-TEST_P(CommonSharedOperatorTest, OpcodeIsCorrect) {
-  CommonOperatorBuilder common(zone());
-  const SharedOperator& sop = GetParam();
-  const Operator* op = (common.*sop.constructor)();
-  EXPECT_EQ(sop.opcode, op->opcode());
-}
-
-
-TEST_P(CommonSharedOperatorTest, Properties) {
-  CommonOperatorBuilder common(zone());
-  const SharedOperator& sop = GetParam();
-  const Operator* op = (common.*sop.constructor)();
-  EXPECT_EQ(sop.properties, op->properties());
-}
-
-
-INSTANTIATE_TEST_CASE_P(CommonOperatorTest, CommonSharedOperatorTest,
-                        ::testing::ValuesIn(kSharedOperators));
-
-
-// -----------------------------------------------------------------------------
-// Other operators.
-
-
-namespace {
-
-class CommonOperatorTest : public TestWithZone {
- public:
-  CommonOperatorTest() : common_(zone()) {}
-  virtual ~CommonOperatorTest() {}
-
-  CommonOperatorBuilder* common() { return &common_; }
-
- private:
-  CommonOperatorBuilder common_;
-};
-
-
-const int kArguments[] = {1, 5, 6, 42, 100, 10000, kMaxInt};
-
-const float kFloat32Values[] = {
-    std::numeric_limits<float>::min(), -1.0f, -0.0f, 0.0f, 1.0f,
-    std::numeric_limits<float>::max()};
-
-}  // namespace
-
-
-TEST_F(CommonOperatorTest, Float32Constant) {
-  TRACED_FOREACH(float, value, kFloat32Values) {
-    const Operator* op = common()->Float32Constant(value);
-    EXPECT_FLOAT_EQ(value, OpParameter<float>(op));
-    EXPECT_EQ(0, OperatorProperties::GetValueInputCount(op));
-    EXPECT_EQ(0, OperatorProperties::GetTotalInputCount(op));
-    EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
-    EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op));
-    EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
-  }
-}
-
-
-TEST_F(CommonOperatorTest, ValueEffect) {
-  TRACED_FOREACH(int, arguments, kArguments) {
-    const Operator* op = common()->ValueEffect(arguments);
-    EXPECT_EQ(arguments, OperatorProperties::GetValueInputCount(op));
-    EXPECT_EQ(arguments, OperatorProperties::GetTotalInputCount(op));
-    EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
-    EXPECT_EQ(1, OperatorProperties::GetEffectOutputCount(op));
-    EXPECT_EQ(0, OperatorProperties::GetValueOutputCount(op));
-  }
-}
-
-
-TEST_F(CommonOperatorTest, Finish) {
-  TRACED_FOREACH(int, arguments, kArguments) {
-    const Operator* op = common()->Finish(arguments);
-    EXPECT_EQ(1, OperatorProperties::GetValueInputCount(op));
-    EXPECT_EQ(arguments, OperatorProperties::GetEffectInputCount(op));
-    EXPECT_EQ(arguments + 1, OperatorProperties::GetTotalInputCount(op));
-    EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
-    EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op));
-    EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
-  }
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/common-operator.cc b/src/compiler/common-operator.cc
index 19792bd..a6cca45 100644
--- a/src/compiler/common-operator.cc
+++ b/src/compiler/common-operator.cc
@@ -7,6 +7,8 @@
 #include "src/assembler.h"
 #include "src/base/lazy-instance.h"
 #include "src/compiler/linkage.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
 #include "src/unique.h"
 #include "src/zone.h"
 
@@ -14,209 +16,430 @@
 namespace internal {
 namespace compiler {
 
-namespace {
-
-// TODO(turbofan): Use size_t instead of int here.
-class ControlOperator : public Operator1<int> {
- public:
-  ControlOperator(IrOpcode::Value opcode, Properties properties, int inputs,
-                  int outputs, int controls, const char* mnemonic)
-      : Operator1<int>(opcode, properties, inputs, outputs, mnemonic,
-                       controls) {}
-
-  virtual OStream& PrintParameter(OStream& os) const FINAL { return os; }
-};
-
-}  // namespace
-
-
-// Specialization for static parameters of type {ExternalReference}.
-template <>
-struct StaticParameterTraits<ExternalReference> {
-  static OStream& PrintTo(OStream& os, ExternalReference reference) {
-    os << reference.address();
-    // TODO(bmeurer): Move to operator<<(os, ExternalReference)
-    const Runtime::Function* function =
-        Runtime::FunctionForEntry(reference.address());
-    if (function) {
-      os << " <" << function->name << ".entry>";
-    }
-    return os;
+std::ostream& operator<<(std::ostream& os, BranchHint hint) {
+  switch (hint) {
+    case BranchHint::kNone:
+      return os << "None";
+    case BranchHint::kTrue:
+      return os << "True";
+    case BranchHint::kFalse:
+      return os << "False";
   }
-  static int HashCode(ExternalReference reference) {
-    return bit_cast<int>(static_cast<uint32_t>(
-        reinterpret_cast<uintptr_t>(reference.address())));
+  UNREACHABLE();
+  return os;
+}
+
+
+BranchHint BranchHintOf(const Operator* const op) {
+  DCHECK_EQ(IrOpcode::kBranch, op->opcode());
+  return OpParameter<BranchHint>(op);
+}
+
+
+bool operator==(SelectParameters const& lhs, SelectParameters const& rhs) {
+  return lhs.type() == rhs.type() && lhs.hint() == rhs.hint();
+}
+
+
+bool operator!=(SelectParameters const& lhs, SelectParameters const& rhs) {
+  return !(lhs == rhs);
+}
+
+
+size_t hash_value(SelectParameters const& p) {
+  return base::hash_combine(p.type(), p.hint());
+}
+
+
+std::ostream& operator<<(std::ostream& os, SelectParameters const& p) {
+  return os << p.type() << "|" << p.hint();
+}
+
+
+SelectParameters const& SelectParametersOf(const Operator* const op) {
+  DCHECK_EQ(IrOpcode::kSelect, op->opcode());
+  return OpParameter<SelectParameters>(op);
+}
+
+
+size_t hash_value(OutputFrameStateCombine const& sc) {
+  return base::hash_combine(sc.kind_, sc.parameter_);
+}
+
+
+std::ostream& operator<<(std::ostream& os, OutputFrameStateCombine const& sc) {
+  switch (sc.kind_) {
+    case OutputFrameStateCombine::kPushOutput:
+      if (sc.parameter_ == 0) return os << "Ignore";
+      return os << "Push(" << sc.parameter_ << ")";
+    case OutputFrameStateCombine::kPokeAt:
+      return os << "PokeAt(" << sc.parameter_ << ")";
   }
-  static bool Equals(ExternalReference lhs, ExternalReference rhs) {
-    return lhs == rhs;
-  }
-};
+  UNREACHABLE();
+  return os;
+}
 
 
-#define SHARED_OP_LIST(V)               \
-  V(Dead, Operator::kFoldable, 0, 0)    \
-  V(End, Operator::kFoldable, 0, 1)     \
-  V(Branch, Operator::kFoldable, 1, 1)  \
-  V(IfTrue, Operator::kFoldable, 0, 1)  \
-  V(IfFalse, Operator::kFoldable, 0, 1) \
-  V(Throw, Operator::kFoldable, 1, 1)   \
-  V(Return, Operator::kNoProperties, 1, 1)
+bool operator==(FrameStateCallInfo const& lhs, FrameStateCallInfo const& rhs) {
+  return lhs.type() == rhs.type() && lhs.bailout_id() == rhs.bailout_id() &&
+         lhs.state_combine() == rhs.state_combine();
+}
 
 
-struct CommonOperatorBuilderImpl FINAL {
-#define SHARED(Name, properties, value_input_count, control_input_count)       \
-  struct Name##Operator FINAL : public ControlOperator {                       \
-    Name##Operator()                                                           \
-        : ControlOperator(IrOpcode::k##Name, properties, value_input_count, 0, \
-                          control_input_count, #Name) {}                       \
-  };                                                                           \
+bool operator!=(FrameStateCallInfo const& lhs, FrameStateCallInfo const& rhs) {
+  return !(lhs == rhs);
+}
+
+
+size_t hash_value(FrameStateCallInfo const& info) {
+  return base::hash_combine(info.type(), info.bailout_id(),
+                            info.state_combine());
+}
+
+
+std::ostream& operator<<(std::ostream& os, FrameStateCallInfo const& info) {
+  return os << info.type() << ", " << info.bailout_id() << ", "
+            << info.state_combine();
+}
+
+
+#define CACHED_OP_LIST(V)                     \
+  V(Dead, Operator::kFoldable, 0, 0, 0, 1)    \
+  V(End, Operator::kFoldable, 0, 0, 1, 0)     \
+  V(IfTrue, Operator::kFoldable, 0, 0, 1, 1)  \
+  V(IfFalse, Operator::kFoldable, 0, 0, 1, 1) \
+  V(Throw, Operator::kFoldable, 1, 1, 1, 1)   \
+  V(Return, Operator::kNoProperties, 1, 1, 1, 1)
+
+
+#define CACHED_LOOP_LIST(V) \
+  V(1)                      \
+  V(2)
+
+
+#define CACHED_MERGE_LIST(V) \
+  V(1)                       \
+  V(2)                       \
+  V(3)                       \
+  V(4)                       \
+  V(5)                       \
+  V(6)                       \
+  V(7)                       \
+  V(8)
+
+
+#define CACHED_PARAMETER_LIST(V) \
+  V(0)                           \
+  V(1)                           \
+  V(2)                           \
+  V(3)                           \
+  V(4)                           \
+  V(5)                           \
+  V(6)
+
+
+struct CommonOperatorGlobalCache FINAL {
+#define CACHED(Name, properties, value_input_count, effect_input_count,     \
+               control_input_count, control_output_count)                   \
+  struct Name##Operator FINAL : public Operator {                           \
+    Name##Operator()                                                        \
+        : Operator(IrOpcode::k##Name, properties, #Name, value_input_count, \
+                   effect_input_count, control_input_count, 0, 0,           \
+                   control_output_count) {}                                 \
+  };                                                                        \
   Name##Operator k##Name##Operator;
-  SHARED_OP_LIST(SHARED)
-#undef SHARED
+  CACHED_OP_LIST(CACHED)
+#undef CACHED
 
-  struct ControlEffectOperator FINAL : public SimpleOperator {
-    ControlEffectOperator()
-        : SimpleOperator(IrOpcode::kControlEffect, Operator::kPure, 0, 0,
-                         "ControlEffect") {}
+  template <BranchHint kBranchHint>
+  struct BranchOperator FINAL : public Operator1<BranchHint> {
+    BranchOperator()
+        : Operator1<BranchHint>(                       // --
+              IrOpcode::kBranch, Operator::kFoldable,  // opcode
+              "Branch",                                // name
+              1, 0, 1, 0, 0, 2,                        // counts
+              kBranchHint) {}                          // parameter
   };
-  ControlEffectOperator kControlEffectOperator;
+  BranchOperator<BranchHint::kNone> kBranchNoneOperator;
+  BranchOperator<BranchHint::kTrue> kBranchTrueOperator;
+  BranchOperator<BranchHint::kFalse> kBranchFalseOperator;
+
+  template <size_t kInputCount>
+  struct LoopOperator FINAL : public Operator {
+    LoopOperator()
+        : Operator(                                  // --
+              IrOpcode::kLoop, Operator::kFoldable,  // opcode
+              "Loop",                                // name
+              0, 0, kInputCount, 0, 0, 1) {}         // counts
+  };
+#define CACHED_LOOP(input_count) \
+  LoopOperator<input_count> kLoop##input_count##Operator;
+  CACHED_LOOP_LIST(CACHED_LOOP)
+#undef CACHED_LOOP
+
+  template <size_t kInputCount>
+  struct MergeOperator FINAL : public Operator {
+    MergeOperator()
+        : Operator(                                   // --
+              IrOpcode::kMerge, Operator::kFoldable,  // opcode
+              "Merge",                                // name
+              0, 0, kInputCount, 0, 0, 1) {}          // counts
+  };
+#define CACHED_MERGE(input_count) \
+  MergeOperator<input_count> kMerge##input_count##Operator;
+  CACHED_MERGE_LIST(CACHED_MERGE)
+#undef CACHED_MERGE
+
+  template <int kIndex>
+  struct ParameterOperator FINAL : public Operator1<int> {
+    ParameterOperator()
+        : Operator1<int>(                             // --
+              IrOpcode::kParameter, Operator::kPure,  // opcode
+              "Parameter",                            // name
+              1, 0, 0, 1, 0, 0,                       // counts
+              kIndex) {}                              // parameter
+  };
+#define CACHED_PARAMETER(index) \
+  ParameterOperator<index> kParameter##index##Operator;
+  CACHED_PARAMETER_LIST(CACHED_PARAMETER)
+#undef CACHED_PARAMETER
 };
 
 
-static base::LazyInstance<CommonOperatorBuilderImpl>::type kImpl =
+static base::LazyInstance<CommonOperatorGlobalCache>::type kCache =
     LAZY_INSTANCE_INITIALIZER;
 
 
 CommonOperatorBuilder::CommonOperatorBuilder(Zone* zone)
-    : impl_(kImpl.Get()), zone_(zone) {}
+    : cache_(kCache.Get()), zone_(zone) {}
 
 
-#define SHARED(Name, properties, value_input_count, control_input_count) \
-  const Operator* CommonOperatorBuilder::Name() {                        \
-    return &impl_.k##Name##Operator;                                     \
+#define CACHED(Name, properties, value_input_count, effect_input_count, \
+               control_input_count, control_output_count)               \
+  const Operator* CommonOperatorBuilder::Name() {                       \
+    return &cache_.k##Name##Operator;                                   \
   }
-SHARED_OP_LIST(SHARED)
-#undef SHARED
+CACHED_OP_LIST(CACHED)
+#undef CACHED
+
+
+const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
+  switch (hint) {
+    case BranchHint::kNone:
+      return &cache_.kBranchNoneOperator;
+    case BranchHint::kTrue:
+      return &cache_.kBranchTrueOperator;
+    case BranchHint::kFalse:
+      return &cache_.kBranchFalseOperator;
+  }
+  UNREACHABLE();
+  return nullptr;
+}
 
 
 const Operator* CommonOperatorBuilder::Start(int num_formal_parameters) {
   // Outputs are formal parameters, plus context, receiver, and JSFunction.
   const int value_output_count = num_formal_parameters + 3;
-  return new (zone()) ControlOperator(IrOpcode::kStart, Operator::kFoldable, 0,
-                                      value_output_count, 0, "Start");
+  return new (zone()) Operator(               // --
+      IrOpcode::kStart, Operator::kFoldable,  // opcode
+      "Start",                                // name
+      0, 0, 0, value_output_count, 1, 1);     // counts
 }
 
 
-const Operator* CommonOperatorBuilder::Merge(int controls) {
-  return new (zone()) ControlOperator(IrOpcode::kMerge, Operator::kFoldable, 0,
-                                      0, controls, "Merge");
+const Operator* CommonOperatorBuilder::Loop(int control_input_count) {
+  switch (control_input_count) {
+#define CACHED_LOOP(input_count) \
+  case input_count:              \
+    return &cache_.kLoop##input_count##Operator;
+    CACHED_LOOP_LIST(CACHED_LOOP)
+#undef CACHED_LOOP
+    default:
+      break;
+  }
+  // Uncached.
+  return new (zone()) Operator(              // --
+      IrOpcode::kLoop, Operator::kFoldable,  // opcode
+      "Loop",                                // name
+      0, 0, control_input_count, 0, 0, 1);   // counts
 }
 
 
-const Operator* CommonOperatorBuilder::Loop(int controls) {
-  return new (zone()) ControlOperator(IrOpcode::kLoop, Operator::kFoldable, 0,
-                                      0, controls, "Loop");
+const Operator* CommonOperatorBuilder::Merge(int control_input_count) {
+  switch (control_input_count) {
+#define CACHED_MERGE(input_count) \
+  case input_count:               \
+    return &cache_.kMerge##input_count##Operator;
+    CACHED_MERGE_LIST(CACHED_MERGE)
+#undef CACHED_MERGE
+    default:
+      break;
+  }
+  // Uncached.
+  return new (zone()) Operator(               // --
+      IrOpcode::kMerge, Operator::kFoldable,  // opcode
+      "Merge",                                // name
+      0, 0, control_input_count, 0, 0, 1);    // counts
+}
+
+
+const Operator* CommonOperatorBuilder::Terminate(int effects) {
+  return new (zone()) Operator(               // --
+      IrOpcode::kTerminate, Operator::kPure,  // opcode
+      "Terminate",                            // name
+      0, effects, 1, 0, 0, 1);                // counts
 }
 
 
 const Operator* CommonOperatorBuilder::Parameter(int index) {
-  return new (zone()) Operator1<int>(IrOpcode::kParameter, Operator::kPure, 1,
-                                     1, "Parameter", index);
+  switch (index) {
+#define CACHED_PARAMETER(index) \
+  case index:                   \
+    return &cache_.kParameter##index##Operator;
+    CACHED_PARAMETER_LIST(CACHED_PARAMETER)
+#undef CACHED_PARAMETER
+    default:
+      break;
+  }
+  // Uncached.
+  return new (zone()) Operator1<int>(         // --
+      IrOpcode::kParameter, Operator::kPure,  // opcode
+      "Parameter",                            // name
+      1, 0, 0, 1, 0, 0,                       // counts
+      index);                                 // parameter
 }
 
 
 const Operator* CommonOperatorBuilder::Int32Constant(int32_t value) {
-  return new (zone()) Operator1<int32_t>(
-      IrOpcode::kInt32Constant, Operator::kPure, 0, 1, "Int32Constant", value);
+  return new (zone()) Operator1<int32_t>(         // --
+      IrOpcode::kInt32Constant, Operator::kPure,  // opcode
+      "Int32Constant",                            // name
+      0, 0, 0, 1, 0, 0,                           // counts
+      value);                                     // parameter
 }
 
 
 const Operator* CommonOperatorBuilder::Int64Constant(int64_t value) {
-  return new (zone()) Operator1<int64_t>(
-      IrOpcode::kInt64Constant, Operator::kPure, 0, 1, "Int64Constant", value);
+  return new (zone()) Operator1<int64_t>(         // --
+      IrOpcode::kInt64Constant, Operator::kPure,  // opcode
+      "Int64Constant",                            // name
+      0, 0, 0, 1, 0, 0,                           // counts
+      value);                                     // parameter
 }
 
 
 const Operator* CommonOperatorBuilder::Float32Constant(volatile float value) {
   return new (zone())
-      Operator1<float>(IrOpcode::kFloat32Constant, Operator::kPure, 0, 1,
-                       "Float32Constant", value);
+      Operator1<float, base::bit_equal_to<float>, base::bit_hash<float>>(  // --
+          IrOpcode::kFloat32Constant, Operator::kPure,  // opcode
+          "Float32Constant",                            // name
+          0, 0, 0, 1, 0, 0,                             // counts
+          value);                                       // parameter
 }
 
 
 const Operator* CommonOperatorBuilder::Float64Constant(volatile double value) {
-  return new (zone())
-      Operator1<double>(IrOpcode::kFloat64Constant, Operator::kPure, 0, 1,
-                        "Float64Constant", value);
+  return new (zone()) Operator1<double, base::bit_equal_to<double>,
+                                base::bit_hash<double>>(  // --
+      IrOpcode::kFloat64Constant, Operator::kPure,        // opcode
+      "Float64Constant",                                  // name
+      0, 0, 0, 1, 0, 0,                                   // counts
+      value);                                             // parameter
 }
 
 
 const Operator* CommonOperatorBuilder::ExternalConstant(
     const ExternalReference& value) {
-  return new (zone())
-      Operator1<ExternalReference>(IrOpcode::kExternalConstant, Operator::kPure,
-                                   0, 1, "ExternalConstant", value);
+  return new (zone()) Operator1<ExternalReference>(  // --
+      IrOpcode::kExternalConstant, Operator::kPure,  // opcode
+      "ExternalConstant",                            // name
+      0, 0, 0, 1, 0, 0,                              // counts
+      value);                                        // parameter
 }
 
 
 const Operator* CommonOperatorBuilder::NumberConstant(volatile double value) {
-  return new (zone())
-      Operator1<double>(IrOpcode::kNumberConstant, Operator::kPure, 0, 1,
-                        "NumberConstant", value);
+  return new (zone()) Operator1<double, base::bit_equal_to<double>,
+                                base::bit_hash<double>>(  // --
+      IrOpcode::kNumberConstant, Operator::kPure,         // opcode
+      "NumberConstant",                                   // name
+      0, 0, 0, 1, 0, 0,                                   // counts
+      value);                                             // parameter
 }
 
 
 const Operator* CommonOperatorBuilder::HeapConstant(
-    const Unique<Object>& value) {
-  return new (zone()) Operator1<Unique<Object> >(
-      IrOpcode::kHeapConstant, Operator::kPure, 0, 1, "HeapConstant", value);
+    const Unique<HeapObject>& value) {
+  return new (zone()) Operator1<Unique<HeapObject>>(  // --
+      IrOpcode::kHeapConstant, Operator::kPure,       // opcode
+      "HeapConstant",                                 // name
+      0, 0, 0, 1, 0, 0,                               // counts
+      value);                                         // parameter
+}
+
+
+const Operator* CommonOperatorBuilder::Select(MachineType type,
+                                              BranchHint hint) {
+  return new (zone()) Operator1<SelectParameters>(  // --
+      IrOpcode::kSelect, Operator::kPure,           // opcode
+      "Select",                                     // name
+      3, 0, 0, 1, 0, 0,                             // counts
+      SelectParameters(type, hint));                // parameter
 }
 
 
 const Operator* CommonOperatorBuilder::Phi(MachineType type, int arguments) {
-  DCHECK(arguments > 0);  // Disallow empty phis.
-  return new (zone()) Operator1<MachineType>(IrOpcode::kPhi, Operator::kPure,
-                                             arguments, 1, "Phi", type);
+  DCHECK(arguments > 0);                       // Disallow empty phis.
+  return new (zone()) Operator1<MachineType>(  // --
+      IrOpcode::kPhi, Operator::kPure,         // opcode
+      "Phi",                                   // name
+      arguments, 0, 1, 1, 0, 0,                // counts
+      type);                                   // parameter
 }
 
 
 const Operator* CommonOperatorBuilder::EffectPhi(int arguments) {
-  DCHECK(arguments > 0);  // Disallow empty phis.
-  return new (zone()) Operator1<int>(IrOpcode::kEffectPhi, Operator::kPure, 0,
-                                     0, "EffectPhi", arguments);
-}
-
-
-const Operator* CommonOperatorBuilder::ControlEffect() {
-  return &impl_.kControlEffectOperator;
+  DCHECK(arguments > 0);                      // Disallow empty phis.
+  return new (zone()) Operator(               // --
+      IrOpcode::kEffectPhi, Operator::kPure,  // opcode
+      "EffectPhi",                            // name
+      0, arguments, 1, 0, 1, 0);              // counts
 }
 
 
 const Operator* CommonOperatorBuilder::ValueEffect(int arguments) {
-  DCHECK(arguments > 0);  // Disallow empty value effects.
-  return new (zone()) SimpleOperator(IrOpcode::kValueEffect, Operator::kPure,
-                                     arguments, 0, "ValueEffect");
+  DCHECK(arguments > 0);                        // Disallow empty value effects.
+  return new (zone()) Operator(                 // --
+      IrOpcode::kValueEffect, Operator::kPure,  // opcode
+      "ValueEffect",                            // name
+      arguments, 0, 0, 0, 1, 0);                // counts
 }
 
 
 const Operator* CommonOperatorBuilder::Finish(int arguments) {
-  DCHECK(arguments > 0);  // Disallow empty finishes.
-  return new (zone()) Operator1<int>(IrOpcode::kFinish, Operator::kPure, 1, 1,
-                                     "Finish", arguments);
+  DCHECK(arguments > 0);                   // Disallow empty finishes.
+  return new (zone()) Operator(            // --
+      IrOpcode::kFinish, Operator::kPure,  // opcode
+      "Finish",                            // name
+      1, arguments, 0, 1, 0, 0);           // counts
 }
 
 
 const Operator* CommonOperatorBuilder::StateValues(int arguments) {
-  return new (zone()) Operator1<int>(IrOpcode::kStateValues, Operator::kPure,
-                                     arguments, 1, "StateValues", arguments);
+  return new (zone()) Operator(                 // --
+      IrOpcode::kStateValues, Operator::kPure,  // opcode
+      "StateValues",                            // name
+      arguments, 0, 0, 1, 0, 0);                // counts
 }
 
 
 const Operator* CommonOperatorBuilder::FrameState(
     FrameStateType type, BailoutId bailout_id,
     OutputFrameStateCombine state_combine, MaybeHandle<JSFunction> jsfunction) {
-  return new (zone()) Operator1<FrameStateCallInfo>(
-      IrOpcode::kFrameState, Operator::kPure, 4, 1, "FrameState",
+  return new (zone()) Operator1<FrameStateCallInfo>(  // --
+      IrOpcode::kFrameState, Operator::kPure,         // opcode
+      "FrameState",                                   // name
+      4, 0, 0, 1, 0, 0,                               // counts
       FrameStateCallInfo(type, bailout_id, state_combine, jsfunction));
 }
 
@@ -224,18 +447,17 @@
 const Operator* CommonOperatorBuilder::Call(const CallDescriptor* descriptor) {
   class CallOperator FINAL : public Operator1<const CallDescriptor*> {
    public:
-    // TODO(titzer): Operator still uses int, whereas CallDescriptor uses
-    // size_t.
     CallOperator(const CallDescriptor* descriptor, const char* mnemonic)
         : Operator1<const CallDescriptor*>(
-              IrOpcode::kCall, descriptor->properties(),
-              static_cast<int>(descriptor->InputCount() +
-                               descriptor->FrameStateCount()),
-              static_cast<int>(descriptor->ReturnCount()), mnemonic,
-              descriptor) {}
+              IrOpcode::kCall, descriptor->properties(), mnemonic,
+              descriptor->InputCount() + descriptor->FrameStateCount(),
+              Operator::ZeroIfPure(descriptor->properties()),
+              Operator::ZeroIfPure(descriptor->properties()),
+              descriptor->ReturnCount(),
+              Operator::ZeroIfPure(descriptor->properties()), 0, descriptor) {}
 
-    virtual OStream& PrintParameter(OStream& os) const OVERRIDE {
-      return os << "[" << *parameter() << "]";
+    void PrintParameter(std::ostream& os) const OVERRIDE {
+      os << "[" << *parameter() << "]";
     }
   };
   return new (zone()) CallOperator(descriptor, "Call");
@@ -243,8 +465,11 @@
 
 
 const Operator* CommonOperatorBuilder::Projection(size_t index) {
-  return new (zone()) Operator1<size_t>(IrOpcode::kProjection, Operator::kPure,
-                                        1, 1, "Projection", index);
+  return new (zone()) Operator1<size_t>(       // --
+      IrOpcode::kProjection, Operator::kPure,  // opcode
+      "Projection",                            // name
+      1, 0, 0, 1, 0, 0,                        // counts
+      index);                                  // parameter
 }
 
 }  // namespace compiler
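
The rewritten builder replaces per-zone allocation of the most common
operators with a process-wide CommonOperatorGlobalCache: shapes drawn from a
small fixed set (the ops in CACHED_OP_LIST, loops and merges with few inputs,
parameters 0 through 6, the three branch hints) are immortal template
instantiations behind base::LazyInstance, and only uncommon shapes fall back
to the zone. Since every Operator is immutable, one instance per shape can be
shared by all compilations. A standalone sketch of the scheme, with plain
static members in place of LazyInstance and plain new in place of zone
allocation:

    #include <iostream>

    struct Op {
      const char* name;
      int parameter;
    };

    template <int kIndex>
    struct CachedParameter {
      static const Op kInstance;  // immortal, shared across all users
    };
    template <int kIndex>
    const Op CachedParameter<kIndex>::kInstance = {"Parameter", kIndex};

    const Op* Parameter(int index) {
      switch (index) {
        case 0: return &CachedParameter<0>::kInstance;
        case 1: return &CachedParameter<1>::kInstance;
        case 2: return &CachedParameter<2>::kInstance;
        default:
          // Uncached; leaked here for brevity, zone-owned in the real code.
          return new Op{"Parameter", index};
      }
    }

    int main() {
      // Cached indices always yield the same instance; uncached ones don't.
      std::cout << (Parameter(1) == Parameter(1)) << "\n";  // 1
      std::cout << (Parameter(9) == Parameter(9)) << "\n";  // 0
    }
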
diff --git a/src/compiler/common-operator.h b/src/compiler/common-operator.h
index a3659ad..af6066b 100644
--- a/src/compiler/common-operator.h
+++ b/src/compiler/common-operator.h
@@ -13,22 +13,105 @@
 
 // Forward declarations.
 class ExternalReference;
-class OStream;
 
 
 namespace compiler {
 
 // Forward declarations.
 class CallDescriptor;
-struct CommonOperatorBuilderImpl;
+struct CommonOperatorGlobalCache;
 class Operator;
 
 
+// Prediction hint for branches.
+enum class BranchHint : uint8_t { kNone, kTrue, kFalse };
+
+inline size_t hash_value(BranchHint hint) { return static_cast<size_t>(hint); }
+
+std::ostream& operator<<(std::ostream&, BranchHint);
+
+BranchHint BranchHintOf(const Operator* const);
+
+
+class SelectParameters FINAL {
+ public:
+  explicit SelectParameters(MachineType type,
+                            BranchHint hint = BranchHint::kNone)
+      : type_(type), hint_(hint) {}
+
+  MachineType type() const { return type_; }
+  BranchHint hint() const { return hint_; }
+
+ private:
+  const MachineType type_;
+  const BranchHint hint_;
+};
+
+bool operator==(SelectParameters const&, SelectParameters const&);
+bool operator!=(SelectParameters const&, SelectParameters const&);
+
+size_t hash_value(SelectParameters const& p);
+
+std::ostream& operator<<(std::ostream&, SelectParameters const& p);
+
+SelectParameters const& SelectParametersOf(const Operator* const);
+
+
 // Flag that describes how to combine the current environment with
 // the output of a node to obtain a framestate for lazy bailout.
-enum OutputFrameStateCombine {
-  kPushOutput,   // Push the output on the expression stack.
-  kIgnoreOutput  // Use the frame state as-is.
+class OutputFrameStateCombine {
+ public:
+  enum Kind {
+    kPushOutput,  // Push the output on the expression stack.
+    kPokeAt       // Poke at the given environment location,
+                  // counting from the top of the stack.
+  };
+
+  static OutputFrameStateCombine Ignore() {
+    return OutputFrameStateCombine(kPushOutput, 0);
+  }
+  static OutputFrameStateCombine Push(size_t count = 1) {
+    return OutputFrameStateCombine(kPushOutput, count);
+  }
+  static OutputFrameStateCombine PokeAt(size_t index) {
+    return OutputFrameStateCombine(kPokeAt, index);
+  }
+
+  Kind kind() const { return kind_; }
+  size_t GetPushCount() const {
+    DCHECK_EQ(kPushOutput, kind());
+    return parameter_;
+  }
+  size_t GetOffsetToPokeAt() const {
+    DCHECK_EQ(kPokeAt, kind());
+    return parameter_;
+  }
+
+  bool IsOutputIgnored() const {
+    return kind_ == kPushOutput && parameter_ == 0;
+  }
+
+  size_t ConsumedOutputCount() const {
+    return kind_ == kPushOutput ? GetPushCount() : 1;
+  }
+
+  bool operator==(OutputFrameStateCombine const& other) const {
+    return kind_ == other.kind_ && parameter_ == other.parameter_;
+  }
+  bool operator!=(OutputFrameStateCombine const& other) const {
+    return !(*this == other);
+  }
+
+  friend size_t hash_value(OutputFrameStateCombine const&);
+  friend std::ostream& operator<<(std::ostream&,
+                                  OutputFrameStateCombine const&);
+
+ private:
+  OutputFrameStateCombine(Kind kind, size_t parameter)
+      : kind_(kind), parameter_(parameter) {}
+
+  Kind const kind_;
+  size_t const parameter_;
 };
 
 
@@ -62,24 +145,32 @@
   MaybeHandle<JSFunction> jsfunction_;
 };
 
+bool operator==(FrameStateCallInfo const&, FrameStateCallInfo const&);
+bool operator!=(FrameStateCallInfo const&, FrameStateCallInfo const&);
+
+size_t hash_value(FrameStateCallInfo const&);
+
+std::ostream& operator<<(std::ostream&, FrameStateCallInfo const&);
+
 
 // Interface for building common operators that can be used at any level of IR,
 // including JavaScript, mid-level, and low-level.
-class CommonOperatorBuilder FINAL {
+class CommonOperatorBuilder FINAL : public ZoneObject {
  public:
   explicit CommonOperatorBuilder(Zone* zone);
 
   const Operator* Dead();
   const Operator* End();
-  const Operator* Branch();
+  const Operator* Branch(BranchHint = BranchHint::kNone);
   const Operator* IfTrue();
   const Operator* IfFalse();
   const Operator* Throw();
+  const Operator* Terminate(int effects);
   const Operator* Return();
 
   const Operator* Start(int num_formal_parameters);
-  const Operator* Merge(int controls);
-  const Operator* Loop(int controls);
+  const Operator* Loop(int control_input_count);
+  const Operator* Merge(int control_input_count);
   const Operator* Parameter(int index);
 
   const Operator* Int32Constant(int32_t);
@@ -88,11 +179,11 @@
   const Operator* Float64Constant(volatile double);
   const Operator* ExternalConstant(const ExternalReference&);
   const Operator* NumberConstant(volatile double);
-  const Operator* HeapConstant(const Unique<Object>&);
+  const Operator* HeapConstant(const Unique<HeapObject>&);
 
+  const Operator* Select(MachineType, BranchHint = BranchHint::kNone);
   const Operator* Phi(MachineType type, int arguments);
   const Operator* EffectPhi(int arguments);
-  const Operator* ControlEffect();
   const Operator* ValueEffect(int arguments);
   const Operator* Finish(int arguments);
   const Operator* StateValues(int arguments);
@@ -106,8 +197,10 @@
  private:
   Zone* zone() const { return zone_; }
 
-  const CommonOperatorBuilderImpl& impl_;
+  const CommonOperatorGlobalCache& cache_;
   Zone* const zone_;
+
+  DISALLOW_COPY_AND_ASSIGN(CommonOperatorBuilder);
 };
 
 }  // namespace compiler
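
OutputFrameStateCombine replaces the old two-value enum with a
(kind, parameter) pair: Ignore() is literally Push with a count of zero,
which is why the printer above renders kPushOutput with parameter 0 as
"Ignore", and PokeAt(n) overwrites the environment slot n positions from the
top of the stack instead of pushing. A standalone restatement of just that
encoding, trimmed to the accessors exercised in this patch:

    #include <cassert>
    #include <cstddef>

    class Combine {
     public:
      enum Kind { kPushOutput, kPokeAt };

      static Combine Ignore() { return {kPushOutput, 0}; }
      static Combine Push(size_t count = 1) { return {kPushOutput, count}; }
      static Combine PokeAt(size_t index) { return {kPokeAt, index}; }

      bool IsOutputIgnored() const {
        return kind_ == kPushOutput && parameter_ == 0;  // Ignore == Push(0)
      }
      // Pushes consume as many slots as they push; a poke rewrites one.
      size_t ConsumedOutputCount() const {
        return kind_ == kPushOutput ? parameter_ : 1;
      }

     private:
      Combine(Kind kind, size_t parameter)
          : kind_(kind), parameter_(parameter) {}
      Kind kind_;
      size_t parameter_;
    };

    int main() {
      assert(Combine::Ignore().IsOutputIgnored());
      assert(!Combine::Push().IsOutputIgnored());  // Push() means Push(1)
      assert(Combine::Ignore().ConsumedOutputCount() == 0);
      assert(Combine::PokeAt(2).ConsumedOutputCount() == 1);
    }
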
diff --git a/src/compiler/compiler-test-utils.h b/src/compiler/compiler-test-utils.h
deleted file mode 100644
index 437abd6..0000000
--- a/src/compiler/compiler-test-utils.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_COMPILER_TEST_UTILS_H_
-#define V8_COMPILER_COMPILER_TEST_UTILS_H_
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// The TARGET_TEST(Case, Name) macro works just like
-// TEST(Case, Name), except that the test is disabled
-// if the platform is not a supported TurboFan target.
-#if V8_TURBOFAN_TARGET
-#define TARGET_TEST(Case, Name) TEST(Case, Name)
-#else
-#define TARGET_TEST(Case, Name) TEST(Case, DISABLED_##Name)
-#endif
-
-
-// The TARGET_TEST_F(Case, Name) macro works just like
-// TEST_F(Case, Name), except that the test is disabled
-// if the platform is not a supported TurboFan target.
-#if V8_TURBOFAN_TARGET
-#define TARGET_TEST_F(Case, Name) TEST_F(Case, Name)
-#else
-#define TARGET_TEST_F(Case, Name) TEST_F(Case, DISABLED_##Name)
-#endif
-
-
-// The TARGET_TEST_P(Case, Name) macro works just like
-// TEST_P(Case, Name), except that the test is disabled
-// if the platform is not a supported TurboFan target.
-#if V8_TURBOFAN_TARGET
-#define TARGET_TEST_P(Case, Name) TEST_P(Case, Name)
-#else
-#define TARGET_TEST_P(Case, Name) TEST_P(Case, DISABLED_##Name)
-#endif
-
-
-// The TARGET_TYPED_TEST(Case, Name) macro works just like
-// TYPED_TEST(Case, Name), except that the test is disabled
-// if the platform is not a supported TurboFan target.
-#if V8_TURBOFAN_TARGET
-#define TARGET_TYPED_TEST(Case, Name) TYPED_TEST(Case, Name)
-#else
-#define TARGET_TYPED_TEST(Case, Name) TYPED_TEST(Case, DISABLED_##Name)
-#endif
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_COMPILER_COMPILER_TEST_UTILS_H_
diff --git a/src/compiler/compiler.gyp b/src/compiler/compiler.gyp
deleted file mode 100644
index ec5ec28..0000000
--- a/src/compiler/compiler.gyp
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
-  'variables': {
-    'v8_code': 1,
-  },
-  'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
-  'targets': [
-    {
-      'target_name': 'compiler-unittests',
-      'type': 'executable',
-      'dependencies': [
-        '../test/test.gyp:run-all-unittests',
-      ],
-      'include_dirs': [
-        '../..',
-      ],
-      'sources': [  ### gcmole(all) ###
-        'change-lowering-unittest.cc',
-        'common-operator-unittest.cc',
-        'compiler-test-utils.h',
-        'graph-reducer-unittest.cc',
-        'graph-unittest.cc',
-        'graph-unittest.h',
-        'instruction-selector-unittest.cc',
-        'instruction-selector-unittest.h',
-        'js-builtin-reducer-unittest.cc',
-        'machine-operator-reducer-unittest.cc',
-        'machine-operator-unittest.cc',
-        'simplified-operator-reducer-unittest.cc',
-        'simplified-operator-unittest.cc',
-        'value-numbering-reducer-unittest.cc',
-      ],
-      'conditions': [
-        ['v8_target_arch=="arm"', {
-          'sources': [  ### gcmole(arch:arm) ###
-            'arm/instruction-selector-arm-unittest.cc',
-          ],
-        }],
-        ['v8_target_arch=="arm64"', {
-          'sources': [  ### gcmole(arch:arm64) ###
-            'arm64/instruction-selector-arm64-unittest.cc',
-          ],
-        }],
-        ['v8_target_arch=="ia32"', {
-          'sources': [  ### gcmole(arch:ia32) ###
-            'ia32/instruction-selector-ia32-unittest.cc',
-          ],
-        }],
-        ['v8_target_arch=="x64"', {
-          'sources': [  ### gcmole(arch:x64) ###
-            'x64/instruction-selector-x64-unittest.cc',
-          ],
-        }],
-      ],
-    },
-  ],
-}
diff --git a/src/compiler/control-builders.cc b/src/compiler/control-builders.cc
index 3b7d05b..8725244 100644
--- a/src/compiler/control-builders.cc
+++ b/src/compiler/control-builders.cc
@@ -9,8 +9,8 @@
 namespace compiler {
 
 
-void IfBuilder::If(Node* condition) {
-  builder_->NewBranch(condition);
+void IfBuilder::If(Node* condition, BranchHint hint) {
+  builder_->NewBranch(condition, hint);
   else_environment_ = environment()->CopyForConditional();
 }
 
@@ -32,9 +32,9 @@
 }
 
 
-void LoopBuilder::BeginLoop() {
+void LoopBuilder::BeginLoop(BitVector* assigned) {
   builder_->NewLoop();
-  loop_environment_ = environment()->CopyForLoop();
+  loop_environment_ = environment()->CopyForLoop(assigned);
   continue_environment_ = environment()->CopyAsUnreachable();
   break_environment_ = environment()->CopyAsUnreachable();
 }
@@ -78,7 +78,6 @@
   body_environment_ = environment()->CopyAsUnreachable();
   label_environment_ = environment()->CopyAsUnreachable();
   break_environment_ = environment()->CopyAsUnreachable();
-  body_environments_.AddBlock(NULL, case_count(), zone());
 }
 
 
diff --git a/src/compiler/control-builders.h b/src/compiler/control-builders.h
index 695282b..11adfdb 100644
--- a/src/compiler/control-builders.h
+++ b/src/compiler/control-builders.h
@@ -14,7 +14,6 @@
 namespace internal {
 namespace compiler {
 
-
 // Base class for all control builders. Also provides a common interface for
 // control builders to handle 'break' and 'continue' statements when they are
 // used to model breakable statements.
@@ -32,7 +31,7 @@
   typedef StructuredGraphBuilder Builder;
   typedef StructuredGraphBuilder::Environment Environment;
 
-  Zone* zone() const { return builder_->zone(); }
+  Zone* zone() const { return builder_->local_zone(); }
   Environment* environment() { return builder_->environment(); }
   void set_environment(Environment* env) { builder_->set_environment(env); }
 
@@ -41,7 +40,7 @@
 
 
 // Tracks control flow for a conditional statement.
-class IfBuilder : public ControlBuilder {
+class IfBuilder FINAL : public ControlBuilder {
  public:
   explicit IfBuilder(StructuredGraphBuilder* builder)
       : ControlBuilder(builder),
@@ -49,7 +48,7 @@
         else_environment_(NULL) {}
 
   // Primitive control commands.
-  void If(Node* condition);
+  void If(Node* condition, BranchHint hint = BranchHint::kNone);
   void Then();
   void Else();
   void End();
@@ -61,7 +60,7 @@
 
 
 // Tracks control flow for an iteration statement.
-class LoopBuilder : public ControlBuilder {
+class LoopBuilder FINAL : public ControlBuilder {
  public:
   explicit LoopBuilder(StructuredGraphBuilder* builder)
       : ControlBuilder(builder),
@@ -70,13 +69,13 @@
         break_environment_(NULL) {}
 
   // Primitive control commands.
-  void BeginLoop();
+  void BeginLoop(BitVector* assigned);
   void EndBody();
   void EndLoop();
 
   // Primitive support for break and continue.
-  virtual void Continue();
-  virtual void Break();
+  void Continue() FINAL;
+  void Break() FINAL;
 
   // Compound control command for conditional break.
   void BreakUnless(Node* condition);
@@ -89,7 +88,7 @@
 
 
 // Tracks control flow for a switch statement.
-class SwitchBuilder : public ControlBuilder {
+class SwitchBuilder FINAL : public ControlBuilder {
  public:
   explicit SwitchBuilder(StructuredGraphBuilder* builder, int case_count)
       : ControlBuilder(builder),
@@ -108,21 +107,21 @@
   void EndSwitch();
 
   // Primitive support for break.
-  virtual void Break();
+  void Break() FINAL;
 
   // The number of cases within a switch is statically known.
-  int case_count() const { return body_environments_.capacity(); }
+  size_t case_count() const { return body_environments_.size(); }
 
  private:
   Environment* body_environment_;   // Environment after last case body.
   Environment* label_environment_;  // Environment for next label condition.
   Environment* break_environment_;  // Environment after the switch exits.
-  ZoneList<Environment*> body_environments_;
+  ZoneVector<Environment*> body_environments_;
 };
 
 
 // Tracks control flow for a block statement.
-class BlockBuilder : public ControlBuilder {
+class BlockBuilder FINAL : public ControlBuilder {
  public:
   explicit BlockBuilder(StructuredGraphBuilder* builder)
       : ControlBuilder(builder), break_environment_(NULL) {}
@@ -132,13 +131,14 @@
   void EndBlock();
 
   // Primitive support for break.
-  virtual void Break();
+  void Break() FINAL;
 
  private:
   Environment* break_environment_;  // Environment after the block exits.
 };
-}
-}
-}  // namespace v8::internal::compiler
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_COMPILER_CONTROL_BUILDERS_H_
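
The control builders own the environment bookkeeping for structured control
flow; the signature changes above additionally thread a BranchHint into
IfBuilder::If and the set of loop-assigned variables (a BitVector) into
LoopBuilder::BeginLoop. The protocol itself is unchanged: snapshot the
environment at the split, build each arm against its own copy, then merge. A
deliberately tiny toy of the IfBuilder discipline, with one string standing
in for the whole SSA environment and an invented phi(...) notation for the
merge (none of this is the real builder API):

    #include <iostream>
    #include <string>
    #include <utility>

    struct Builder { std::string var; };  // one-variable "environment"

    class IfBuilder {
     public:
      explicit IfBuilder(Builder* b) : b_(b) {}
      void If() { else_env_ = b_->var; }              // copy env for else arm
      void Then() {}                                  // caller builds then arm
      void Else() { std::swap(b_->var, else_env_); }  // stash then, resume else
      void End() {                                    // merge both arms
        if (b_->var != else_env_)
          b_->var = "phi(" + else_env_ + ", " + b_->var + ")";
      }

     private:
      Builder* b_;
      std::string else_env_;
    };

    int main() {
      Builder b{"x0"};
      IfBuilder branch(&b);
      branch.If();    // condition elided in this toy
      branch.Then();
      b.var = "x1";   // the then arm assigns
      branch.Else();  // the else arm leaves x0 untouched
      branch.End();
      std::cout << b.var << "\n";  // phi(x1, x0)
    }
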
diff --git a/src/compiler/control-equivalence.h b/src/compiler/control-equivalence.h
new file mode 100644
index 0000000..cca087f
--- /dev/null
+++ b/src/compiler/control-equivalence.h
@@ -0,0 +1,361 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CONTROL_EQUIVALENCE_H_
+#define V8_COMPILER_CONTROL_EQUIVALENCE_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/graph.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Determines control dependence equivalence classes for control nodes. Any two
+// nodes having the same set of control dependences land in one class. These
+// classes can in turn be used to:
+//  - Build a program structure tree (PST) for controls in the graph.
+//  - Determine single-entry single-exit (SESE) regions within the graph.
+//
+// Note that this implementation actually uses cycle equivalence to establish
+// class numbers. Any two nodes are cycle equivalent if they occur in the same
+// set of cycles. It can be shown that control dependence equivalence reduces
+// to undirected cycle equivalence for strongly connected control flow graphs.
+//
+// The algorithm is based on the paper "The program structure tree: computing
+// control regions in linear time" by Johnson, Pearson & Pingali (PLDI94) which
+// also contains proofs for the aforementioned equivalence. References to line
+// numbers in the algorithm from figure 4 have been added [line:x].
+class ControlEquivalence : public ZoneObject {
+ public:
+  ControlEquivalence(Zone* zone, Graph* graph)
+      : zone_(zone),
+        graph_(graph),
+        dfs_number_(0),
+        class_number_(1),
+        node_data_(graph->NodeCount(), EmptyData(), zone) {}
+
+  // Run the main algorithm starting from the {exit} control node. This causes
+  // the following iterations over control edges of the graph:
+  //  1) A breadth-first backwards traversal to determine the set of nodes that
+  //     participate in the next step. Takes O(E) time and O(N) space.
+  //  2) An undirected depth-first backwards traversal that determines class
+  //     numbers for all participating nodes. Takes O(E) time and O(N) space.
+  void Run(Node* exit) {
+    if (GetClass(exit) != kInvalidClass) return;
+    DetermineParticipation(exit);
+    RunUndirectedDFS(exit);
+  }
+
+  // Retrieves a previously computed class number.
+  size_t ClassOf(Node* node) {
+    DCHECK(GetClass(node) != kInvalidClass);
+    return GetClass(node);
+  }
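+
+  // A hedged usage sketch (illustrative only; assumes a Graph* {graph} and a
+  // Zone* {zone} are already available, and that {a} and {b} are control
+  // nodes in {graph}):
+  //
+  //   ControlEquivalence equivalence(zone, graph);
+  //   equivalence.Run(graph->end());
+  //   bool same = equivalence.ClassOf(a) == equivalence.ClassOf(b);
+  //
+  // Two control nodes receive the same class number exactly when they have
+  // the same set of control dependences.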
+
+ private:
+  static const size_t kInvalidClass = static_cast<size_t>(-1);
+  typedef enum { kInputDirection, kUseDirection } DFSDirection;
+
+  struct Bracket {
+    DFSDirection direction;  // Direction in which this bracket was added.
+    size_t recent_class;     // Cached class when bracket was topmost.
+    size_t recent_size;      // Cached set-size when bracket was topmost.
+    Node* from;              // Node that this bracket originates from.
+    Node* to;                // Node that this bracket points to.
+  };
+
+  // The set of brackets for each node during the DFS walk.
+  typedef ZoneLinkedList<Bracket> BracketList;
+
+  struct DFSStackEntry {
+    DFSDirection direction;            // Direction currently used in DFS walk.
+    Node::InputEdges::iterator input;  // Iterator used for "input" direction.
+    Node::UseEdges::iterator use;      // Iterator used for "use" direction.
+    Node* parent_node;                 // Parent node of entry during DFS walk.
+    Node* node;                        // Node that this stack entry belongs to.
+  };
+
+  // The stack is used during the undirected DFS walk.
+  typedef ZoneStack<DFSStackEntry> DFSStack;
+
+  struct NodeData {
+    size_t class_number;  // Equivalence class number assigned to node.
+    size_t dfs_number;    // Pre-order DFS number assigned to node.
+    bool visited;         // Indicates node has already been visited.
+    bool on_stack;        // Indicates node is on DFS stack during walk.
+    bool participates;    // Indicates node participates in DFS walk.
+    BracketList blist;    // List of brackets per node.
+  };
+
+  // The per-node data computed during the DFS walk.
+  typedef ZoneVector<NodeData> Data;
+
+  // Called at pre-visit during DFS walk.
+  void VisitPre(Node* node) {
+    Trace("CEQ: Pre-visit of #%d:%s\n", node->id(), node->op()->mnemonic());
+
+    // Dispense a new pre-order number.
+    SetNumber(node, NewDFSNumber());
+    Trace("  Assigned DFS number is %d\n", GetNumber(node));
+  }
+
+  // Called at mid-visit during DFS walk.
+  void VisitMid(Node* node, DFSDirection direction) {
+    Trace("CEQ: Mid-visit of #%d:%s\n", node->id(), node->op()->mnemonic());
+    BracketList& blist = GetBracketList(node);
+
+    // Remove brackets pointing to this node [line:19].
+    BracketListDelete(blist, node, direction);
+
+    // Potentially introduce an artificial dependency from start to end.
+    if (blist.empty()) {
+      DCHECK_EQ(kInputDirection, direction);
+      VisitBackedge(node, graph_->end(), kInputDirection);
+    }
+
+    // Potentially start a new equivalence class [line:37].
+    BracketListTrace(blist);
+    Bracket* recent = &blist.back();
+    if (recent->recent_size != blist.size()) {
+      recent->recent_size = blist.size();
+      recent->recent_class = NewClassNumber();
+    }
+
+    // Assign equivalence class to node.
+    SetClass(node, recent->recent_class);
+    Trace("  Assigned class number is %d\n", GetClass(node));
+  }
+
+  // Called at post-visit during DFS walk.
+  void VisitPost(Node* node, Node* parent_node, DFSDirection direction) {
+    Trace("CEQ: Post-visit of #%d:%s\n", node->id(), node->op()->mnemonic());
+    BracketList& blist = GetBracketList(node);
+
+    // Remove brackets pointing to this node [line:19].
+    BracketListDelete(blist, node, direction);
+
+    // Propagate bracket list up the DFS tree [line:13].
+    if (parent_node != NULL) {
+      BracketList& parent_blist = GetBracketList(parent_node);
+      parent_blist.splice(parent_blist.end(), blist);
+    }
+  }
+
+  // Called when hitting a back edge in the DFS walk.
+  void VisitBackedge(Node* from, Node* to, DFSDirection direction) {
+    Trace("CEQ: Backedge from #%d:%s to #%d:%s\n", from->id(),
+          from->op()->mnemonic(), to->id(), to->op()->mnemonic());
+
+    // Push backedge onto the bracket list [line:25].
+    Bracket bracket = {direction, kInvalidClass, 0, from, to};
+    GetBracketList(from).push_back(bracket);
+  }
+
+  // Performs an undirected DFS walk of the graph. Conceptually all nodes are
+  // expanded, splitting "input" and "use" out into separate nodes. During the
+  // traversal, edges towards the representative nodes are preferred.
+  //
+  //   \ /        - Pre-visit: When N1 is visited in direction D the preferred
+  //    x   N1      edge towards N is taken next, calling VisitPre(N).
+  //    |         - Mid-visit: After all edges out of N2 in direction D have
+  //    |   N       been visited, we switch the direction and start considering
+  //    |           edges out of N1 now, and we call VisitMid(N).
+  //    x   N2    - Post-visit: After all edges out of N1 in direction opposite
+  //   / \          to D have been visited, we pop N and call VisitPost(N).
+  //
+  // This will yield a true spanning tree (without cross or forward edges) and
+  // also discover proper back edges in both directions.
+  void RunUndirectedDFS(Node* exit) {
+    ZoneStack<DFSStackEntry> stack(zone_);
+    DFSPush(stack, exit, NULL, kInputDirection);
+    VisitPre(exit);
+
+    while (!stack.empty()) {  // Undirected depth-first backwards traversal.
+      DFSStackEntry& entry = stack.top();
+      Node* node = entry.node;
+
+      if (entry.direction == kInputDirection) {
+        if (entry.input != node->input_edges().end()) {
+          Edge edge = *entry.input;
+          Node* input = edge.to();
+          ++(entry.input);
+          if (NodeProperties::IsControlEdge(edge) &&
+              NodeProperties::IsControl(input)) {
+            // Visit next control input.
+            if (!GetData(input)->participates) continue;
+            if (GetData(input)->visited) continue;
+            if (GetData(input)->on_stack) {
+              // Found backedge if input is on stack.
+              if (input != entry.parent_node) {
+                VisitBackedge(node, input, kInputDirection);
+              }
+            } else {
+              // Push input onto stack.
+              DFSPush(stack, input, node, kInputDirection);
+              VisitPre(input);
+            }
+          }
+          continue;
+        }
+        if (entry.use != node->use_edges().end()) {
+          // Switch direction to uses.
+          entry.direction = kUseDirection;
+          VisitMid(node, kInputDirection);
+          continue;
+        }
+      }
+
+      if (entry.direction == kUseDirection) {
+        if (entry.use != node->use_edges().end()) {
+          Edge edge = *entry.use;
+          Node* use = edge.from();
+          ++(entry.use);
+          if (NodeProperties::IsControlEdge(edge) &&
+              NodeProperties::IsControl(use)) {
+            // Visit next control use.
+            if (!GetData(use)->participates) continue;
+            if (GetData(use)->visited) continue;
+            if (GetData(use)->on_stack) {
+              // Found backedge if use is on stack.
+              if (use != entry.parent_node) {
+                VisitBackedge(node, use, kUseDirection);
+              }
+            } else {
+              // Push use onto stack.
+              DFSPush(stack, use, node, kUseDirection);
+              VisitPre(use);
+            }
+          }
+          continue;
+        }
+        if (entry.input != node->input_edges().end()) {
+          // Switch direction to inputs.
+          entry.direction = kInputDirection;
+          VisitMid(node, kUseDirection);
+          continue;
+        }
+      }
+
+      // Pop node from stack when done with all inputs and uses.
+      DCHECK(entry.input == node->input_edges().end());
+      DCHECK(entry.use == node->use_edges().end());
+      DFSPop(stack, node);
+      VisitPost(node, entry.parent_node, entry.direction);
+    }
+  }
+
+  void DetermineParticipationEnqueue(ZoneQueue<Node*>& queue, Node* node) {
+    if (!GetData(node)->participates) {
+      GetData(node)->participates = true;
+      queue.push(node);
+    }
+  }
+
+  void DetermineParticipation(Node* exit) {
+    ZoneQueue<Node*> queue(zone_);
+    DetermineParticipationEnqueue(queue, exit);
+    while (!queue.empty()) {  // Breadth-first backwards traversal.
+      Node* node = queue.front();
+      queue.pop();
+      int max = NodeProperties::PastControlIndex(node);
+      for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
+        DetermineParticipationEnqueue(queue, node->InputAt(i));
+      }
+    }
+  }
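+
+  // Note: this is the breadth-first backwards pass from step 1 of Run();
+  // only nodes marked as participating here are touched by the DFS above.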
+
+ private:
+  NodeData* GetData(Node* node) { return &node_data_[node->id()]; }
+  int NewClassNumber() { return class_number_++; }
+  int NewDFSNumber() { return dfs_number_++; }
+
+  // Template used to initialize per-node data.
+  NodeData EmptyData() {
+    return {kInvalidClass, 0, false, false, false, BracketList(zone_)};
+  }
+
+  // Accessors for the DFS number stored within the per-node data.
+  size_t GetNumber(Node* node) { return GetData(node)->dfs_number; }
+  void SetNumber(Node* node, size_t number) {
+    GetData(node)->dfs_number = number;
+  }
+
+  // Accessors for the equivalence class stored within the per-node data.
+  size_t GetClass(Node* node) { return GetData(node)->class_number; }
+  void SetClass(Node* node, size_t number) {
+    GetData(node)->class_number = number;
+  }
+
+  // Accessors for the bracket list stored within the per-node data.
+  BracketList& GetBracketList(Node* node) { return GetData(node)->blist; }
+  void SetBracketList(Node* node, BracketList& list) {
+    GetData(node)->blist = list;
+  }
+
+  // Mutates the DFS stack by pushing an entry.
+  void DFSPush(DFSStack& stack, Node* node, Node* from, DFSDirection dir) {
+    DCHECK(GetData(node)->participates);
+    DCHECK(!GetData(node)->visited);
+    GetData(node)->on_stack = true;
+    Node::InputEdges::iterator input = node->input_edges().begin();
+    Node::UseEdges::iterator use = node->use_edges().begin();
+    stack.push({dir, input, use, from, node});
+  }
+
+  // Mutates the DFS stack by popping an entry.
+  void DFSPop(DFSStack& stack, Node* node) {
+    DCHECK_EQ(stack.top().node, node);
+    GetData(node)->on_stack = false;
+    GetData(node)->visited = true;
+    stack.pop();
+  }
+
+  // TODO(mstarzinger): Optimize this to avoid linear search.
+  void BracketListDelete(BracketList& blist, Node* to, DFSDirection direction) {
+    for (BracketList::iterator i = blist.begin(); i != blist.end(); /*nop*/) {
+      if (i->to == to && i->direction != direction) {
+        Trace("  BList erased: {%d->%d}\n", i->from->id(), i->to->id());
+        i = blist.erase(i);
+      } else {
+        ++i;
+      }
+    }
+  }
+
+  void BracketListTrace(BracketList& blist) {
+    if (FLAG_trace_turbo_scheduler) {
+      Trace("  BList: ");
+      for (Bracket bracket : blist) {
+        Trace("{%d->%d} ", bracket.from->id(), bracket.to->id());
+      }
+      Trace("\n");
+    }
+  }
+
+  void Trace(const char* msg, ...) {
+    if (FLAG_trace_turbo_scheduler) {
+      va_list arguments;
+      va_start(arguments, msg);
+      base::OS::VPrint(msg, arguments);
+      va_end(arguments);
+    }
+  }
+
+  Zone* zone_;
+  Graph* graph_;
+  int dfs_number_;    // Generates new DFS pre-order numbers on demand.
+  int class_number_;  // Generates new equivalence class numbers on demand.
+  Data node_data_;    // Per-node data stored as a side-table.
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_CONTROL_EQUIVALENCE_H_
diff --git a/src/compiler/control-reducer.cc b/src/compiler/control-reducer.cc
new file mode 100644
index 0000000..eef8a49
--- /dev/null
+++ b/src/compiler/control-reducer.cc
@@ -0,0 +1,592 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/control-reducer.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+enum VisitState { kUnvisited = 0, kOnStack = 1, kRevisit = 2, kVisited = 3 };
+enum Decision { kFalse, kUnknown, kTrue };
+
+class ReachabilityMarker : public NodeMarker<uint8_t> {
+ public:
+  explicit ReachabilityMarker(Graph* graph) : NodeMarker<uint8_t>(graph, 8) {}
+  bool SetReachableFromEnd(Node* node) {
+    uint8_t before = Get(node);
+    Set(node, before | kFromEnd);
+    return before & kFromEnd;
+  }
+  bool IsReachableFromEnd(Node* node) { return Get(node) & kFromEnd; }
+  bool SetReachableFromStart(Node* node) {
+    uint8_t before = Get(node);
+    Set(node, before | kFromStart);
+    return before & kFromStart;
+  }
+  bool IsReachableFromStart(Node* node) { return Get(node) & kFromStart; }
+  void Push(Node* node) { Set(node, Get(node) | kFwStack); }
+  void Pop(Node* node) { Set(node, Get(node) & ~kFwStack); }
+  bool IsOnStack(Node* node) { return Get(node) & kFwStack; }
+
+ private:
+  enum Bit { kFromEnd = 1, kFromStart = 2, kFwStack = 4 };
+};
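+
+// Note that SetReachableFromEnd/SetReachableFromStart return the *previous*
+// bit value, so a false result means the node was newly marked; the reducer
+// below relies on this as a cheap "first visit" test.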
+
+
+#define TRACE(x) \
+  if (FLAG_trace_turbo_reduction) PrintF x
+
+class ControlReducerImpl {
+ public:
+  ControlReducerImpl(Zone* zone, JSGraph* jsgraph,
+                     CommonOperatorBuilder* common)
+      : zone_(zone),
+        jsgraph_(jsgraph),
+        common_(common),
+        state_(jsgraph->graph()->NodeCount(), kUnvisited, zone_),
+        stack_(zone_),
+        revisit_(zone_),
+        dead_(NULL) {}
+
+  Zone* zone_;
+  JSGraph* jsgraph_;
+  CommonOperatorBuilder* common_;
+  ZoneVector<VisitState> state_;
+  ZoneDeque<Node*> stack_;
+  ZoneDeque<Node*> revisit_;
+  Node* dead_;
+
+  void Reduce() {
+    Push(graph()->end());
+    do {
+      // Process the node on the top of the stack, potentially pushing more
+      // or popping the node off the stack.
+      ReduceTop();
+      // If the stack becomes empty, revisit any nodes in the revisit queue.
+      // If no nodes in the revisit queue, try removing dead loops.
+      // If no dead loops, then finish.
+    } while (!stack_.empty() || TryRevisit() || RepairAndRemoveLoops());
+  }
+
+  bool TryRevisit() {
+    while (!revisit_.empty()) {
+      Node* n = revisit_.back();
+      revisit_.pop_back();
+      if (state_[n->id()] == kRevisit) {  // state can change while in queue.
+        Push(n);
+        return true;
+      }
+    }
+    return false;
+  }
+
+  // Repair the graph after the possible creation of non-terminating or dead
+  // loops. Removing dead loops can produce more opportunities for reduction.
+  bool RepairAndRemoveLoops() {
+    // TODO(turbofan): we can skip this if the graph has no loops, but
+    // we have to be careful about proper loop detection during reduction.
+
+    // Gather all nodes backwards-reachable from end (through inputs).
+    ReachabilityMarker marked(graph());
+    NodeVector nodes(zone_);
+    AddNodesReachableFromEnd(marked, nodes);
+
+    // Walk forward through control nodes, looking for back edges to nodes
+    // that are not connected to end. Those are non-terminating loops (NTLs).
+    Node* start = graph()->start();
+    marked.Push(start);
+    marked.SetReachableFromStart(start);
+
+    // We use a stack of (Node, UseIter) pairs to avoid O(n^2) traversal.
+    typedef std::pair<Node*, UseIter> FwIter;
+    ZoneVector<FwIter> fw_stack(zone_);
+    fw_stack.push_back(FwIter(start, start->uses().begin()));
+
+    while (!fw_stack.empty()) {
+      Node* node = fw_stack.back().first;
+      TRACE(("ControlFw: #%d:%s\n", node->id(), node->op()->mnemonic()));
+      bool pop = true;
+      while (fw_stack.back().second != node->uses().end()) {
+        Node* succ = *(fw_stack.back().second);
+        if (marked.IsOnStack(succ) && !marked.IsReachableFromEnd(succ)) {
+          // {succ} is on stack and not reachable from end.
+          Node* added = ConnectNTL(succ);
+          nodes.push_back(added);
+          marked.SetReachableFromEnd(added);
+          AddBackwardsReachableNodes(marked, nodes, nodes.size() - 1);
+
+          // Reset the use iterators for the entire stack.
+          for (size_t i = 0; i < fw_stack.size(); i++) {
+            FwIter& iter = fw_stack[i];
+            fw_stack[i] = FwIter(iter.first, iter.first->uses().begin());
+          }
+          pop = false;  // restart traversing successors of this node.
+          break;
+        }
+        if (IrOpcode::IsControlOpcode(succ->opcode()) &&
+            !marked.IsReachableFromStart(succ)) {
+          // {succ} is a control node and not yet reached from start.
+          marked.Push(succ);
+          marked.SetReachableFromStart(succ);
+          fw_stack.push_back(FwIter(succ, succ->uses().begin()));
+          pop = false;  // "recurse" into successor control node.
+          break;
+        }
+        ++fw_stack.back().second;
+      }
+      if (pop) {
+        marked.Pop(node);
+        fw_stack.pop_back();
+      }
+    }
+
+    // Trim references from dead nodes to live nodes first.
+    jsgraph_->GetCachedNodes(&nodes);
+    TrimNodes(marked, nodes);
+
+    // Any control nodes not reachable from start are dead, even loops.
+    for (size_t i = 0; i < nodes.size(); i++) {
+      Node* node = nodes[i];
+      if (IrOpcode::IsControlOpcode(node->opcode()) &&
+          !marked.IsReachableFromStart(node)) {
+        ReplaceNode(node, dead());  // uses will be added to revisit queue.
+      }
+    }
+    return TryRevisit();  // try to push a node onto the stack.
+  }
+
+  // Connect {loop}, the header of a non-terminating loop, to the end node.
+  Node* ConnectNTL(Node* loop) {
+    TRACE(("ConnectNTL: #%d:%s\n", loop->id(), loop->op()->mnemonic()));
+
+    if (loop->opcode() != IrOpcode::kTerminate) {
+      // Insert a {Terminate} node if the loop has effects.
+      ZoneDeque<Node*> effects(zone_);
+      for (Node* const use : loop->uses()) {
+        if (use->opcode() == IrOpcode::kEffectPhi) effects.push_back(use);
+      }
+      int count = static_cast<int>(effects.size());
+      if (count > 0) {
+        Node** inputs = zone_->NewArray<Node*>(1 + count);
+        for (int i = 0; i < count; i++) inputs[i] = effects[i];
+        inputs[count] = loop;
+        loop = graph()->NewNode(common_->Terminate(count), 1 + count, inputs);
+        TRACE(("AddTerminate: #%d:%s[%d]\n", loop->id(), loop->op()->mnemonic(),
+               count));
+      }
+    }
+
+    Node* to_add = loop;
+    Node* end = graph()->end();
+    CHECK_EQ(IrOpcode::kEnd, end->opcode());
+    Node* merge = end->InputAt(0);
+    if (merge == NULL || merge->opcode() == IrOpcode::kDead) {
+      // The end node died; just connect end to {loop}.
+      end->ReplaceInput(0, loop);
+    } else if (merge->opcode() != IrOpcode::kMerge) {
+      // Introduce a final merge node for {end->InputAt(0)} and {loop}.
+      merge = graph()->NewNode(common_->Merge(2), merge, loop);
+      end->ReplaceInput(0, merge);
+      to_add = merge;
+      // Mark the node as visited so that we can revisit later.
+      EnsureStateSize(merge->id());
+      state_[merge->id()] = kVisited;
+    } else {
+      // Append a new input to the final merge at the end.
+      merge->AppendInput(graph()->zone(), loop);
+      merge->set_op(common_->Merge(merge->InputCount()));
+    }
+    return to_add;
+  }
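+
+  // For example (illustrative only): a JS loop like "while (true) {}" leaves
+  // a Loop header with no path to End; ConnectNTL links it (via a Terminate
+  // node if the loop has effect phis) into the final Merge before End.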
+
+  void AddNodesReachableFromEnd(ReachabilityMarker& marked, NodeVector& nodes) {
+    Node* end = graph()->end();
+    marked.SetReachableFromEnd(end);
+    if (!end->IsDead()) {
+      nodes.push_back(end);
+      AddBackwardsReachableNodes(marked, nodes, nodes.size() - 1);
+    }
+  }
+
+  void AddBackwardsReachableNodes(ReachabilityMarker& marked, NodeVector& nodes,
+                                  size_t cursor) {
+    while (cursor < nodes.size()) {
+      Node* node = nodes[cursor++];
+      for (Node* const input : node->inputs()) {
+        if (!marked.SetReachableFromEnd(input)) {
+          nodes.push_back(input);
+        }
+      }
+    }
+  }
+
+  void Trim() {
+    // Gather all nodes backwards-reachable from end through inputs.
+    ReachabilityMarker marked(graph());
+    NodeVector nodes(zone_);
+    AddNodesReachableFromEnd(marked, nodes);
+
+    // Process cached nodes in the JSGraph too.
+    jsgraph_->GetCachedNodes(&nodes);
+    TrimNodes(marked, nodes);
+  }
+
+  void TrimNodes(ReachabilityMarker& marked, NodeVector& nodes) {
+    // Remove dead->live edges.
+    for (size_t j = 0; j < nodes.size(); j++) {
+      Node* node = nodes[j];
+      for (Edge edge : node->use_edges()) {
+        Node* use = edge.from();
+        if (!marked.IsReachableFromEnd(use)) {
+          TRACE(("DeadLink: #%d:%s(%d) -> #%d:%s\n", use->id(),
+                 use->op()->mnemonic(), edge.index(), node->id(),
+                 node->op()->mnemonic()));
+          edge.UpdateTo(NULL);
+        }
+      }
+    }
+#if DEBUG
+    // Verify that no inputs to live nodes are NULL.
+    for (size_t j = 0; j < nodes.size(); j++) {
+      Node* node = nodes[j];
+      for (Node* const input : node->inputs()) {
+        CHECK_NE(NULL, input);
+      }
+      for (Node* const use : node->uses()) {
+        CHECK(marked.IsReachableFromEnd(use));
+      }
+    }
+#endif
+  }
+
+  // Reduce the node on the top of the stack.
+  // If an input {i} is not yet visited or needs to be revisited, push {i} onto
+  // the stack and return. Otherwise, all inputs are visited, so apply
+  // reductions for {node} and pop it off the stack.
+  void ReduceTop() {
+    size_t height = stack_.size();
+    Node* node = stack_.back();
+
+    if (node->IsDead()) return Pop();  // Node was killed while on stack.
+
+    TRACE(("ControlReduce: #%d:%s\n", node->id(), node->op()->mnemonic()));
+
+    // Recurse on an input if necessary.
+    for (Node* const input : node->inputs()) {
+      if (Recurse(input)) return;
+    }
+
+    // All inputs should be visited or on stack. Apply reductions to node.
+    Node* replacement = ReduceNode(node);
+    if (replacement != node) ReplaceNode(node, replacement);
+
+    // After reducing the node, pop it off the stack.
+    CHECK_EQ(static_cast<int>(height), static_cast<int>(stack_.size()));
+    Pop();
+
+    // If there was a replacement, reduce it after popping {node}.
+    if (replacement != node) Recurse(replacement);
+  }
+
+  void EnsureStateSize(size_t id) {
+    if (id >= state_.size()) {
+      state_.resize((3 * id) / 2, kUnvisited);
+    }
+  }
+
+  // Push a node onto the stack if its state is {kUnvisited} or {kRevisit}.
+  bool Recurse(Node* node) {
+    size_t id = static_cast<size_t>(node->id());
+    EnsureStateSize(id);
+    if (state_[id] != kRevisit && state_[id] != kUnvisited) return false;
+    Push(node);
+    return true;
+  }
+
+  void Push(Node* node) {
+    state_[node->id()] = kOnStack;
+    stack_.push_back(node);
+  }
+
+  void Pop() {
+    int pos = static_cast<int>(stack_.size()) - 1;
+    DCHECK_GE(pos, 0);
+    DCHECK_EQ(kOnStack, state_[stack_[pos]->id()]);
+    state_[stack_[pos]->id()] = kVisited;
+    stack_.pop_back();
+  }
+
+  // Queue a node to be revisited if it has been visited once already.
+  void Revisit(Node* node) {
+    size_t id = static_cast<size_t>(node->id());
+    if (id < state_.size() && state_[id] == kVisited) {
+      TRACE(("  Revisit #%d:%s\n", node->id(), node->op()->mnemonic()));
+      state_[id] = kRevisit;
+      revisit_.push_back(node);
+    }
+  }
+
+  Node* dead() {
+    if (dead_ == NULL) dead_ = graph()->NewNode(common_->Dead());
+    return dead_;
+  }
+
+  //===========================================================================
+  // Reducer implementation: perform reductions on a node.
+  //===========================================================================
+  Node* ReduceNode(Node* node) {
+    if (node->op()->ControlInputCount() == 1) {
+      // If a node has only one control input and it is dead, replace with dead.
+      Node* control = NodeProperties::GetControlInput(node);
+      if (control->opcode() == IrOpcode::kDead) {
+        TRACE(("ControlDead: #%d:%s\n", node->id(), node->op()->mnemonic()));
+        return control;
+      }
+    }
+
+    // Reduce branches, phis, and merges.
+    switch (node->opcode()) {
+      case IrOpcode::kBranch:
+        return ReduceBranch(node);
+      case IrOpcode::kLoop:
+      case IrOpcode::kMerge:
+        return ReduceMerge(node);
+      case IrOpcode::kSelect:
+        return ReduceSelect(node);
+      case IrOpcode::kPhi:
+      case IrOpcode::kEffectPhi:
+        return ReducePhi(node);
+      default:
+        return node;
+    }
+  }
+
+  // Try to statically fold a condition.
+  Decision DecideCondition(Node* cond) {
+    switch (cond->opcode()) {
+      case IrOpcode::kInt32Constant:
+        return Int32Matcher(cond).Is(0) ? kFalse : kTrue;
+      case IrOpcode::kInt64Constant:
+        return Int64Matcher(cond).Is(0) ? kFalse : kTrue;
+      case IrOpcode::kNumberConstant:
+        return NumberMatcher(cond).Is(0) ? kFalse : kTrue;
+      case IrOpcode::kHeapConstant: {
+        Handle<Object> object =
+            HeapObjectMatcher<Object>(cond).Value().handle();
+        if (object->IsTrue()) return kTrue;
+        if (object->IsFalse()) return kFalse;
+        // TODO(turbofan): decide more conditions for heap constants.
+        break;
+      }
+      default:
+        break;
+    }
+    return kUnknown;
+  }
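+
+  // For example (illustrative only): DecideCondition(Int32Constant(0)) yields
+  // kFalse, DecideCondition(NumberConstant(1)) yields kTrue, and any
+  // non-constant condition stays kUnknown.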
+
+  // Reduce redundant selects.
+  Node* ReduceSelect(Node* const node) {
+    Node* const tvalue = node->InputAt(1);
+    Node* const fvalue = node->InputAt(2);
+    if (tvalue == fvalue) return tvalue;
+    Decision result = DecideCondition(node->InputAt(0));
+    if (result == kTrue) return tvalue;
+    if (result == kFalse) return fvalue;
+    return node;
+  }
+
+  // Reduce redundant phis.
+  Node* ReducePhi(Node* node) {
+    int n = node->InputCount();
+    if (n <= 1) return dead();            // No non-control inputs.
+    if (n == 2) return node->InputAt(0);  // Only one non-control input.
+
+    // Never remove an effect phi from a (potentially non-terminating) loop.
+    // Otherwise, we might end up eliminating effect nodes, such as calls,
+    // before the loop.
+    if (node->opcode() == IrOpcode::kEffectPhi &&
+        NodeProperties::GetControlInput(node)->opcode() == IrOpcode::kLoop) {
+      return node;
+    }
+
+    Node* replacement = NULL;
+    Node::Inputs inputs = node->inputs();
+    for (InputIter it = inputs.begin(); n > 1; --n, ++it) {
+      Node* input = *it;
+      if (input->opcode() == IrOpcode::kDead) continue;  // ignore dead inputs.
+      if (input != node && input != replacement) {       // non-redundant input.
+        if (replacement != NULL) return node;
+        replacement = input;
+      }
+    }
+    return replacement == NULL ? dead() : replacement;
+  }
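+
+  // For example (illustrative only): Phi(a, a, merge) reduces to {a}, and a
+  // phi whose value inputs are all dead except one reduces to that single
+  // live input.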
+
+  // Reduce merges by trimming away dead inputs from the merge and phis.
+  Node* ReduceMerge(Node* node) {
+    // Count the number of live inputs.
+    int live = 0;
+    int index = 0;
+    int live_index = 0;
+    for (Node* const input : node->inputs()) {
+      if (input->opcode() != IrOpcode::kDead) {
+        live++;
+        live_index = index;
+      }
+      index++;
+    }
+
+    if (live > 1 && live == node->InputCount()) return node;  // nothing to do.
+
+    TRACE(("ReduceMerge: #%d:%s (%d live)\n", node->id(),
+           node->op()->mnemonic(), live));
+
+    if (live == 0) return dead();  // no remaining inputs.
+
+    // Gather phis and effect phis to be edited.
+    ZoneVector<Node*> phis(zone_);
+    for (Node* const use : node->uses()) {
+      if (use->opcode() == IrOpcode::kPhi ||
+          use->opcode() == IrOpcode::kEffectPhi) {
+        phis.push_back(use);
+      }
+    }
+
+    if (live == 1) {
+      // All phis are redundant. Replace them with their live input.
+      for (Node* const phi : phis) ReplaceNode(phi, phi->InputAt(live_index));
+      // The merge itself is redundant.
+      return node->InputAt(live_index);
+    }
+
+    // Edit phis in place, removing dead inputs and revisiting them.
+    for (Node* const phi : phis) {
+      TRACE(("  PhiInMerge: #%d:%s (%d live)\n", phi->id(),
+             phi->op()->mnemonic(), live));
+      RemoveDeadInputs(node, phi);
+      Revisit(phi);
+    }
+    // Edit the merge in place, removing dead inputs.
+    RemoveDeadInputs(node, node);
+    return node;
+  }
+
+  // Reduce branches if they have constant inputs.
+  Node* ReduceBranch(Node* node) {
+    Decision result = DecideCondition(node->InputAt(0));
+    if (result == kUnknown) return node;
+
+    TRACE(("BranchReduce: #%d:%s = %s\n", node->id(), node->op()->mnemonic(),
+           (result == kTrue) ? "true" : "false"));
+
+    // Replace IfTrue and IfFalse projections from this branch.
+    Node* control = NodeProperties::GetControlInput(node);
+    for (Edge edge : node->use_edges()) {
+      Node* use = edge.from();
+      if (use->opcode() == IrOpcode::kIfTrue) {
+        TRACE(("  IfTrue: #%d:%s\n", use->id(), use->op()->mnemonic()));
+        edge.UpdateTo(NULL);
+        ReplaceNode(use, (result == kTrue) ? control : dead());
+      } else if (use->opcode() == IrOpcode::kIfFalse) {
+        TRACE(("  IfFalse: #%d:%s\n", use->id(), use->op()->mnemonic()));
+        edge.UpdateTo(NULL);
+        ReplaceNode(use, (result == kTrue) ? dead() : control);
+      }
+    }
+    return control;
+  }
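+
+  // For example (illustrative only): Branch(Int32Constant(1), control) folds
+  // so that its IfTrue projection is replaced by {control}, its IfFalse
+  // projection is replaced by dead, and the branch itself reduces to
+  // {control}.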
+
+  // Remove inputs to {node} corresponding to the dead inputs to {merge}
+  // and compact the remaining inputs, updating the operator.
+  void RemoveDeadInputs(Node* merge, Node* node) {
+    int pos = 0;
+    for (int i = 0; i < node->InputCount(); i++) {
+      // skip dead inputs.
+      if (i < merge->InputCount() &&
+          merge->InputAt(i)->opcode() == IrOpcode::kDead)
+        continue;
+      // compact live inputs.
+      if (pos != i) node->ReplaceInput(pos, node->InputAt(i));
+      pos++;
+    }
+    node->TrimInputCount(pos);
+    if (node->opcode() == IrOpcode::kPhi) {
+      node->set_op(common_->Phi(OpParameter<MachineType>(node->op()), pos - 1));
+    } else if (node->opcode() == IrOpcode::kEffectPhi) {
+      node->set_op(common_->EffectPhi(pos - 1));
+    } else if (node->opcode() == IrOpcode::kMerge) {
+      node->set_op(common_->Merge(pos));
+    } else if (node->opcode() == IrOpcode::kLoop) {
+      node->set_op(common_->Loop(pos));
+    } else {
+      UNREACHABLE();
+    }
+  }
+
+  // Replace uses of {node} with {replacement} and revisit the uses.
+  void ReplaceNode(Node* node, Node* replacement) {
+    if (node == replacement) return;
+    TRACE(("  Replace: #%d:%s with #%d:%s\n", node->id(),
+           node->op()->mnemonic(), replacement->id(),
+           replacement->op()->mnemonic()));
+    for (Node* const use : node->uses()) {
+      // Don't revisit this node if it refers to itself.
+      if (use != node) Revisit(use);
+    }
+    node->ReplaceUses(replacement);
+    node->Kill();
+  }
+
+  Graph* graph() { return jsgraph_->graph(); }
+};
+
+
+void ControlReducer::ReduceGraph(Zone* zone, JSGraph* jsgraph,
+                                 CommonOperatorBuilder* common) {
+  ControlReducerImpl impl(zone, jsgraph, common);
+  impl.Reduce();
+}
+
+
+void ControlReducer::TrimGraph(Zone* zone, JSGraph* jsgraph) {
+  ControlReducerImpl impl(zone, jsgraph, NULL);
+  impl.Trim();
+}
+
+
+Node* ControlReducer::ReducePhiForTesting(JSGraph* jsgraph,
+                                          CommonOperatorBuilder* common,
+                                          Node* node) {
+  Zone zone(jsgraph->graph()->zone()->isolate());
+  ControlReducerImpl impl(&zone, jsgraph, common);
+  return impl.ReducePhi(node);
+}
+
+
+Node* ControlReducer::ReduceMergeForTesting(JSGraph* jsgraph,
+                                            CommonOperatorBuilder* common,
+                                            Node* node) {
+  Zone zone(jsgraph->graph()->zone()->isolate());
+  ControlReducerImpl impl(&zone, jsgraph, common);
+  return impl.ReduceMerge(node);
+}
+
+
+Node* ControlReducer::ReduceBranchForTesting(JSGraph* jsgraph,
+                                             CommonOperatorBuilder* common,
+                                             Node* node) {
+  Zone zone(jsgraph->graph()->zone()->isolate());
+  ControlReducerImpl impl(&zone, jsgraph, common);
+  return impl.ReduceBranch(node);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/control-reducer.h b/src/compiler/control-reducer.h
new file mode 100644
index 0000000..e25bb88
--- /dev/null
+++ b/src/compiler/control-reducer.h
@@ -0,0 +1,39 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CONTROL_REDUCER_H_
+#define V8_COMPILER_CONTROL_REDUCER_H_
+
+namespace v8 {
+namespace internal {
+
+// Forward declaration of Zone (lives in v8::internal); needed because this
+// header does not include zone.h.
+class Zone;
+
+namespace compiler {
+
+class JSGraph;
+class CommonOperatorBuilder;
+class Node;
+
+class ControlReducer {
+ public:
+  // Perform branch folding and dead code elimination on the graph.
+  static void ReduceGraph(Zone* zone, JSGraph* graph,
+                          CommonOperatorBuilder* builder);
+
+  // Trim nodes in the graph that are not reachable from end.
+  static void TrimGraph(Zone* zone, JSGraph* graph);
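+
+  // A hedged usage sketch (illustrative only; a Zone* {zone}, a JSGraph*
+  // {jsgraph} and a CommonOperatorBuilder* {common} are assumed):
+  //
+  //   ControlReducer::ReduceGraph(zone, jsgraph, common);  // fold + DCE.
+  //   ControlReducer::TrimGraph(zone, jsgraph);  // drop dead->live edges.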
+
+  // Testing interface.
+  static Node* ReducePhiForTesting(JSGraph* graph,
+                                   CommonOperatorBuilder* builder, Node* node);
+  static Node* ReduceBranchForTesting(JSGraph* graph,
+                                      CommonOperatorBuilder* builder,
+                                      Node* node);
+  static Node* ReduceMergeForTesting(JSGraph* graph,
+                                     CommonOperatorBuilder* builder,
+                                     Node* node);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_CONTROL_REDUCER_H_
diff --git a/src/compiler/diamond.h b/src/compiler/diamond.h
new file mode 100644
index 0000000..6133cc5
--- /dev/null
+++ b/src/compiler/diamond.h
@@ -0,0 +1,85 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_DIAMOND_H_
+#define V8_COMPILER_DIAMOND_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A helper to make it easier to build diamond-shaped control patterns.
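+//
+// A hedged usage sketch (illustrative only; {graph}, {common} and {cond} are
+// assumed to be in scope):
+//
+//   Diamond d(graph, common, cond);
+//   Node* phi = d.Phi(kMachAnyTagged, true_value, false_value);
+//
+// This builds Branch -> IfTrue/IfFalse -> Merge and a two-input phi that
+// selects between {true_value} and {false_value} at the merge.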
+struct Diamond {
+  Graph* graph;
+  CommonOperatorBuilder* common;
+  Node* branch;
+  Node* if_true;
+  Node* if_false;
+  Node* merge;
+
+  Diamond(Graph* g, CommonOperatorBuilder* b, Node* cond,
+          BranchHint hint = BranchHint::kNone) {
+    graph = g;
+    common = b;
+    branch = graph->NewNode(common->Branch(hint), cond, graph->start());
+    if_true = graph->NewNode(common->IfTrue(), branch);
+    if_false = graph->NewNode(common->IfFalse(), branch);
+    merge = graph->NewNode(common->Merge(2), if_true, if_false);
+  }
+
+  // Place {this} after {that} in control flow order.
+  void Chain(Diamond& that) { branch->ReplaceInput(1, that.merge); }
+
+  // Place {this} after {that} in control flow order.
+  void Chain(Node* that) { branch->ReplaceInput(1, that); }
+
+  // Nest {this} into either the if_true or if_false branch of {that}.
+  void Nest(Diamond& that, bool if_true) {
+    if (if_true) {
+      branch->ReplaceInput(1, that.if_true);
+      that.merge->ReplaceInput(0, merge);
+    } else {
+      branch->ReplaceInput(1, that.if_false);
+      that.merge->ReplaceInput(1, merge);
+    }
+  }
+
+  Node* Phi(MachineType machine_type, Node* tv, Node* fv) {
+    return graph->NewNode(common->Phi(machine_type, 2), tv, fv, merge);
+  }
+
+  Node* EffectPhi(Node* tv, Node* fv) {
+    return graph->NewNode(common->EffectPhi(2), tv, fv, merge);
+  }
+
+  void OverwriteWithPhi(Node* node, MachineType machine_type, Node* tv,
+                        Node* fv) {
+    DCHECK(node->InputCount() >= 3);
+    node->set_op(common->Phi(machine_type, 2));
+    node->ReplaceInput(0, tv);
+    node->ReplaceInput(1, fv);
+    node->ReplaceInput(2, merge);
+    node->TrimInputCount(3);
+  }
+
+  void OverwriteWithEffectPhi(Node* node, Node* te, Node* fe) {
+    DCHECK(node->InputCount() >= 3);
+    node->set_op(common->EffectPhi(2));
+    node->ReplaceInput(0, te);
+    node->ReplaceInput(1, fe);
+    node->ReplaceInput(2, merge);
+    node->TrimInputCount(3);
+  }
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_DIAMOND_H_
diff --git a/src/compiler/frame.h b/src/compiler/frame.h
index afcbc37..f99d7bd 100644
--- a/src/compiler/frame.h
+++ b/src/compiler/frame.h
@@ -7,7 +7,7 @@
 
 #include "src/v8.h"
 
-#include "src/data-flow.h"
+#include "src/bit-vector.h"
 
 namespace v8 {
 namespace internal {
@@ -17,7 +17,7 @@
 // registers for a compiled function. Frames are usually populated by the
 // register allocator and are used by Linkage to generate code for the prologue
 // and epilogue to compiled code.
-class Frame {
+class Frame : public ZoneObject {
  public:
   Frame()
       : register_save_area_size_(0),
@@ -69,6 +69,8 @@
   int double_spill_slot_count_;
   BitVector* allocated_registers_;
   BitVector* allocated_double_registers_;
+
+  DISALLOW_COPY_AND_ASSIGN(Frame);
 };
 
 
diff --git a/src/compiler/gap-resolver.h b/src/compiler/gap-resolver.h
index 98aaab2..4f4f4e4 100644
--- a/src/compiler/gap-resolver.h
+++ b/src/compiler/gap-resolver.h
@@ -39,8 +39,9 @@
   // Assembler used to emit moves and save registers.
   Assembler* const assembler_;
 };
-}
-}
-}  // namespace v8::internal::compiler
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_COMPILER_GAP_RESOLVER_H_
diff --git a/src/compiler/generic-algorithm-inl.h b/src/compiler/generic-algorithm-inl.h
deleted file mode 100644
index a25131f..0000000
--- a/src/compiler/generic-algorithm-inl.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_GENERIC_ALGORITHM_INL_H_
-#define V8_COMPILER_GENERIC_ALGORITHM_INL_H_
-
-#include <vector>
-
-#include "src/compiler/generic-algorithm.h"
-#include "src/compiler/generic-graph.h"
-#include "src/compiler/generic-node.h"
-#include "src/compiler/generic-node-inl.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-template <class N>
-class NodeInputIterationTraits {
- public:
-  typedef N Node;
-  typedef typename N::Inputs::iterator Iterator;
-
-  static Iterator begin(Node* node) { return node->inputs().begin(); }
-  static Iterator end(Node* node) { return node->inputs().end(); }
-  static int max_id(GenericGraphBase* graph) { return graph->NodeCount(); }
-  static Node* to(Iterator iterator) { return *iterator; }
-  static Node* from(Iterator iterator) { return iterator.edge().from(); }
-};
-
-template <class N>
-class NodeUseIterationTraits {
- public:
-  typedef N Node;
-  typedef typename N::Uses::iterator Iterator;
-
-  static Iterator begin(Node* node) { return node->uses().begin(); }
-  static Iterator end(Node* node) { return node->uses().end(); }
-  static int max_id(GenericGraphBase* graph) { return graph->NodeCount(); }
-  static Node* to(Iterator iterator) { return *iterator; }
-  static Node* from(Iterator iterator) { return iterator.edge().to(); }
-};
-}
-}
-}  // namespace v8::internal::compiler
-
-#endif  // V8_COMPILER_GENERIC_ALGORITHM_INL_H_
diff --git a/src/compiler/generic-algorithm.h b/src/compiler/generic-algorithm.h
index cd4984f..391757e 100644
--- a/src/compiler/generic-algorithm.h
+++ b/src/compiler/generic-algorithm.h
@@ -6,69 +6,60 @@
 #define V8_COMPILER_GENERIC_ALGORITHM_H_
 
 #include <stack>
+#include <vector>
 
-#include "src/compiler/generic-graph.h"
-#include "src/compiler/generic-node.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/node.h"
 #include "src/zone-containers.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
+class Graph;
+class Node;
+
 // GenericGraphVisit allows visitation of graphs of nodes and edges in pre- and
 // post-order. Visitation uses an explicitly allocated stack rather than the
-// execution stack to avoid stack overflow. Although GenericGraphVisit is
-// primarily intended to traverse networks of nodes through their
-// dependencies and uses, it also can be used to visit any graph-like network
-// by specifying custom traits.
+// execution stack to avoid stack overflow.
 class GenericGraphVisit {
  public:
-  enum Control {
-    CONTINUE = 0x0,  // Continue depth-first normally
-    SKIP = 0x1,      // Skip this node and its successors
-    REENTER = 0x2,   // Allow reentering this node
-    DEFER = SKIP | REENTER
-  };
-
   // struct Visitor {
-  //   Control Pre(Traits::Node* current);
-  //   Control Post(Traits::Node* current);
-  //   void PreEdge(Traits::Node* from, int index, Traits::Node* to);
-  //   void PostEdge(Traits::Node* from, int index, Traits::Node* to);
+  //   void Pre(Node* current);
+  //   void Post(Node* current);
+  //   void PreEdge(Node* from, int index, Node* to);
+  //   void PostEdge(Node* from, int index, Node* to);
   // }
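+  //
+  // A hedged example visitor (illustrative only) that counts visited nodes:
+  //
+  //   struct CountingVisitor {
+  //     int count = 0;
+  //     void Pre(Node* node) { count++; }
+  //     void Post(Node* node) {}
+  //     void PreEdge(Node* from, int index, Node* to) {}
+  //     void PostEdge(Node* from, int index, Node* to) {}
+  //   };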
-  template <class Visitor, class Traits, class RootIterator>
-  static void Visit(GenericGraphBase* graph, Zone* zone,
-                    RootIterator root_begin, RootIterator root_end,
-                    Visitor* visitor) {
-    typedef typename Traits::Node Node;
-    typedef typename Traits::Iterator Iterator;
+  template <class Visitor>
+  static void Visit(Graph* graph, Zone* zone, Node** root_begin,
+                    Node** root_end, Visitor* visitor) {
+    typedef typename Node::InputEdges::iterator Iterator;
     typedef std::pair<Iterator, Iterator> NodeState;
     typedef std::stack<NodeState, ZoneDeque<NodeState> > NodeStateStack;
     NodeStateStack stack((ZoneDeque<NodeState>(zone)));
-    BoolVector visited(Traits::max_id(graph), false, zone);
+    BoolVector visited(graph->NodeCount(), false, zone);
     Node* current = *root_begin;
     while (true) {
       DCHECK(current != NULL);
       const int id = current->id();
       DCHECK(id >= 0);
-      DCHECK(id < Traits::max_id(graph));  // Must be a valid id.
+      DCHECK(id < graph->NodeCount());  // Must be a valid id.
       bool visit = !GetVisited(&visited, id);
       if (visit) {
-        Control control = visitor->Pre(current);
-        visit = !IsSkip(control);
-        if (!IsReenter(control)) SetVisited(&visited, id, true);
+        visitor->Pre(current);
+        SetVisited(&visited, id);
       }
-      Iterator begin(visit ? Traits::begin(current) : Traits::end(current));
-      Iterator end(Traits::end(current));
+      Iterator begin(visit ? current->input_edges().begin()
+                           : current->input_edges().end());
+      Iterator end(current->input_edges().end());
       stack.push(NodeState(begin, end));
       Node* post_order_node = current;
       while (true) {
         NodeState top = stack.top();
         if (top.first == top.second) {
           if (visit) {
-            Control control = visitor->Post(post_order_node);
-            DCHECK(!IsSkip(control));
-            SetVisited(&visited, post_order_node->id(), !IsReenter(control));
+            visitor->Post(post_order_node);
+            SetVisited(&visited, post_order_node->id());
           }
           stack.pop();
           if (stack.empty()) {
@@ -76,48 +67,42 @@
             current = *root_begin;
             break;
           }
-          post_order_node = Traits::from(stack.top().first);
+          post_order_node = (*stack.top().first).from();
           visit = true;
         } else {
-          visitor->PreEdge(Traits::from(top.first), top.first.edge().index(),
-                           Traits::to(top.first));
-          current = Traits::to(top.first);
+          visitor->PreEdge((*top.first).from(), (*top.first).index(),
+                           (*top.first).to());
+          current = (*top.first).to();
           if (!GetVisited(&visited, current->id())) break;
         }
         top = stack.top();
-        visitor->PostEdge(Traits::from(top.first), top.first.edge().index(),
-                          Traits::to(top.first));
+        visitor->PostEdge((*top.first).from(), (*top.first).index(),
+                          (*top.first).to());
         ++stack.top().first;
       }
     }
   }
 
-  template <class Visitor, class Traits>
-  static void Visit(GenericGraphBase* graph, Zone* zone,
-                    typename Traits::Node* current, Visitor* visitor) {
-    typename Traits::Node* array[] = {current};
-    Visit<Visitor, Traits>(graph, zone, &array[0], &array[1], visitor);
+  template <class Visitor>
+  static void Visit(Graph* graph, Zone* zone, Node* current, Visitor* visitor) {
+    Node* array[] = {current};
+    Visit<Visitor>(graph, zone, &array[0], &array[1], visitor);
   }
 
-  template <class B, class S>
   struct NullNodeVisitor {
-    Control Pre(GenericNode<B, S>* node) { return CONTINUE; }
-    Control Post(GenericNode<B, S>* node) { return CONTINUE; }
-    void PreEdge(GenericNode<B, S>* from, int index, GenericNode<B, S>* to) {}
-    void PostEdge(GenericNode<B, S>* from, int index, GenericNode<B, S>* to) {}
+    void Pre(Node* node) {}
+    void Post(Node* node) {}
+    void PreEdge(Node* from, int index, Node* to) {}
+    void PostEdge(Node* from, int index, Node* to) {}
   };
 
  private:
-  static bool IsSkip(Control c) { return c & SKIP; }
-  static bool IsReenter(Control c) { return c & REENTER; }
-
-  // TODO(turbofan): resizing could be optionally templatized away.
-  static void SetVisited(BoolVector* visited, int id, bool value) {
+  static void SetVisited(BoolVector* visited, int id) {
     if (id >= static_cast<int>(visited->size())) {
       // Resize and set all values to unvisited.
       visited->resize((3 * id) / 2, false);
     }
-    visited->at(id) = value;
+    visited->at(id) = true;
   }
 
   static bool GetVisited(BoolVector* visited, int id) {
@@ -125,8 +110,11 @@
     return visited->at(id);
   }
 };
-}
-}
-}  // namespace v8::internal::compiler
+
+typedef GenericGraphVisit::NullNodeVisitor NullNodeVisitor;
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_COMPILER_GENERIC_ALGORITHM_H_
diff --git a/src/compiler/generic-graph.h b/src/compiler/generic-graph.h
deleted file mode 100644
index a555456..0000000
--- a/src/compiler/generic-graph.h
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_GENERIC_GRAPH_H_
-#define V8_COMPILER_GENERIC_GRAPH_H_
-
-#include "src/compiler/generic-node.h"
-
-namespace v8 {
-namespace internal {
-
-class Zone;
-
-namespace compiler {
-
-class GenericGraphBase : public ZoneObject {
- public:
-  explicit GenericGraphBase(Zone* zone) : zone_(zone), next_node_id_(0) {}
-
-  Zone* zone() const { return zone_; }
-
-  NodeId NextNodeID() { return next_node_id_++; }
-  NodeId NodeCount() const { return next_node_id_; }
-
- private:
-  Zone* zone_;
-  NodeId next_node_id_;
-};
-
-template <class V>
-class GenericGraph : public GenericGraphBase {
- public:
-  explicit GenericGraph(Zone* zone)
-      : GenericGraphBase(zone), start_(NULL), end_(NULL) {}
-
-  V* start() { return start_; }
-  V* end() { return end_; }
-
-  void SetStart(V* start) { start_ = start; }
-  void SetEnd(V* end) { end_ = end; }
-
- private:
-  V* start_;
-  V* end_;
-
-  DISALLOW_COPY_AND_ASSIGN(GenericGraph);
-};
-}
-}
-}  // namespace v8::internal::compiler
-
-#endif  // V8_COMPILER_GENERIC_GRAPH_H_
diff --git a/src/compiler/generic-node-inl.h b/src/compiler/generic-node-inl.h
deleted file mode 100644
index c2dc24e..0000000
--- a/src/compiler/generic-node-inl.h
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_GENERIC_NODE_INL_H_
-#define V8_COMPILER_GENERIC_NODE_INL_H_
-
-#include "src/v8.h"
-
-#include "src/compiler/generic-graph.h"
-#include "src/compiler/generic-node.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-template <class B, class S>
-GenericNode<B, S>::GenericNode(GenericGraphBase* graph, int input_count)
-    : BaseClass(graph->zone()),
-      input_count_(input_count),
-      has_appendable_inputs_(false),
-      use_count_(0),
-      first_use_(NULL),
-      last_use_(NULL) {
-  inputs_.static_ = reinterpret_cast<Input*>(this + 1), AssignUniqueID(graph);
-}
-
-template <class B, class S>
-inline void GenericNode<B, S>::AssignUniqueID(GenericGraphBase* graph) {
-  id_ = graph->NextNodeID();
-}
-
-template <class B, class S>
-inline typename GenericNode<B, S>::Inputs::iterator
-GenericNode<B, S>::Inputs::begin() {
-  return typename GenericNode<B, S>::Inputs::iterator(this->node_, 0);
-}
-
-template <class B, class S>
-inline typename GenericNode<B, S>::Inputs::iterator
-GenericNode<B, S>::Inputs::end() {
-  return typename GenericNode<B, S>::Inputs::iterator(
-      this->node_, this->node_->InputCount());
-}
-
-template <class B, class S>
-inline typename GenericNode<B, S>::Uses::iterator
-GenericNode<B, S>::Uses::begin() {
-  return typename GenericNode<B, S>::Uses::iterator(this->node_);
-}
-
-template <class B, class S>
-inline typename GenericNode<B, S>::Uses::iterator
-GenericNode<B, S>::Uses::end() {
-  return typename GenericNode<B, S>::Uses::iterator();
-}
-
-template <class B, class S>
-void GenericNode<B, S>::ReplaceUses(GenericNode* replace_to) {
-  for (Use* use = first_use_; use != NULL; use = use->next) {
-    use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
-  }
-  if (replace_to->last_use_ == NULL) {
-    DCHECK_EQ(NULL, replace_to->first_use_);
-    replace_to->first_use_ = first_use_;
-    replace_to->last_use_ = last_use_;
-  } else if (first_use_ != NULL) {
-    DCHECK_NE(NULL, replace_to->first_use_);
-    replace_to->last_use_->next = first_use_;
-    first_use_->prev = replace_to->last_use_;
-    replace_to->last_use_ = last_use_;
-  }
-  replace_to->use_count_ += use_count_;
-  use_count_ = 0;
-  first_use_ = NULL;
-  last_use_ = NULL;
-}
-
-template <class B, class S>
-template <class UnaryPredicate>
-void GenericNode<B, S>::ReplaceUsesIf(UnaryPredicate pred,
-                                      GenericNode* replace_to) {
-  for (Use* use = first_use_; use != NULL;) {
-    Use* next = use->next;
-    if (pred(static_cast<S*>(use->from))) {
-      RemoveUse(use);
-      replace_to->AppendUse(use);
-      use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
-    }
-    use = next;
-  }
-}
-
-template <class B, class S>
-void GenericNode<B, S>::RemoveAllInputs() {
-  for (typename Inputs::iterator iter(inputs().begin()); iter != inputs().end();
-       ++iter) {
-    iter.GetInput()->Update(NULL);
-  }
-}
-
-template <class B, class S>
-void GenericNode<B, S>::TrimInputCount(int new_input_count) {
-  if (new_input_count == input_count_) return;  // Nothing to do.
-
-  DCHECK(new_input_count < input_count_);
-
-  // Update inline inputs.
-  for (int i = new_input_count; i < input_count_; i++) {
-    typename GenericNode<B, S>::Input* input = GetInputRecordPtr(i);
-    input->Update(NULL);
-  }
-  input_count_ = new_input_count;
-}
-
-template <class B, class S>
-void GenericNode<B, S>::ReplaceInput(int index, GenericNode<B, S>* new_to) {
-  Input* input = GetInputRecordPtr(index);
-  input->Update(new_to);
-}
-
-template <class B, class S>
-void GenericNode<B, S>::Input::Update(GenericNode<B, S>* new_to) {
-  GenericNode* old_to = this->to;
-  if (new_to == old_to) return;  // Nothing to do.
-  // Snip out the use from where it used to be
-  if (old_to != NULL) {
-    old_to->RemoveUse(use);
-  }
-  to = new_to;
-  // And put it into the new node's use list.
-  if (new_to != NULL) {
-    new_to->AppendUse(use);
-  } else {
-    use->next = NULL;
-    use->prev = NULL;
-  }
-}
-
-template <class B, class S>
-void GenericNode<B, S>::EnsureAppendableInputs(Zone* zone) {
-  if (!has_appendable_inputs_) {
-    void* deque_buffer = zone->New(sizeof(InputDeque));
-    InputDeque* deque = new (deque_buffer) InputDeque(zone);
-    for (int i = 0; i < input_count_; ++i) {
-      deque->push_back(inputs_.static_[i]);
-    }
-    inputs_.appendable_ = deque;
-    has_appendable_inputs_ = true;
-  }
-}
-
-template <class B, class S>
-void GenericNode<B, S>::AppendInput(Zone* zone, GenericNode<B, S>* to_append) {
-  EnsureAppendableInputs(zone);
-  Use* new_use = new (zone) Use;
-  Input new_input;
-  new_input.to = to_append;
-  new_input.use = new_use;
-  inputs_.appendable_->push_back(new_input);
-  new_use->input_index = input_count_;
-  new_use->from = this;
-  to_append->AppendUse(new_use);
-  input_count_++;
-}
-
-template <class B, class S>
-void GenericNode<B, S>::InsertInput(Zone* zone, int index,
-                                    GenericNode<B, S>* to_insert) {
-  DCHECK(index >= 0 && index < InputCount());
-  // TODO(turbofan): Optimize this implementation!
-  AppendInput(zone, InputAt(InputCount() - 1));
-  for (int i = InputCount() - 1; i > index; --i) {
-    ReplaceInput(i, InputAt(i - 1));
-  }
-  ReplaceInput(index, to_insert);
-}
-
-template <class B, class S>
-void GenericNode<B, S>::RemoveInput(int index) {
-  DCHECK(index >= 0 && index < InputCount());
-  // TODO(turbofan): Optimize this implementation!
-  for (; index < InputCount() - 1; ++index) {
-    ReplaceInput(index, InputAt(index + 1));
-  }
-  TrimInputCount(InputCount() - 1);
-}
-
-template <class B, class S>
-void GenericNode<B, S>::AppendUse(Use* use) {
-  use->next = NULL;
-  use->prev = last_use_;
-  if (last_use_ == NULL) {
-    first_use_ = use;
-  } else {
-    last_use_->next = use;
-  }
-  last_use_ = use;
-  ++use_count_;
-}
-
-template <class B, class S>
-void GenericNode<B, S>::RemoveUse(Use* use) {
-  if (last_use_ == use) {
-    last_use_ = use->prev;
-  }
-  if (use->prev != NULL) {
-    use->prev->next = use->next;
-  } else {
-    first_use_ = use->next;
-  }
-  if (use->next != NULL) {
-    use->next->prev = use->prev;
-  }
-  --use_count_;
-}
-
-template <class B, class S>
-inline bool GenericNode<B, S>::OwnedBy(GenericNode* owner) const {
-  return first_use_ != NULL && first_use_->from == owner &&
-         first_use_->next == NULL;
-}
-
-template <class B, class S>
-S* GenericNode<B, S>::New(GenericGraphBase* graph, int input_count,
-                          S** inputs) {
-  size_t node_size = sizeof(GenericNode);
-  size_t inputs_size = input_count * sizeof(Input);
-  size_t uses_size = input_count * sizeof(Use);
-  int size = static_cast<int>(node_size + inputs_size + uses_size);
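-  // The node, its input records and its use records share a single zone
-  // allocation, laid out as [GenericNode | Input[n] | Use[n]].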
-  Zone* zone = graph->zone();
-  void* buffer = zone->New(size);
-  S* result = new (buffer) S(graph, input_count);
-  Input* input =
-      reinterpret_cast<Input*>(reinterpret_cast<char*>(buffer) + node_size);
-  Use* use =
-      reinterpret_cast<Use*>(reinterpret_cast<char*>(input) + inputs_size);
-
-  for (int current = 0; current < input_count; ++current) {
-    GenericNode* to = *inputs++;
-    input->to = to;
-    input->use = use;
-    use->input_index = current;
-    use->from = result;
-    to->AppendUse(use);
-    ++use;
-    ++input;
-  }
-  return result;
-}
-}
-}
-}  // namespace v8::internal::compiler
-
-#endif  // V8_COMPILER_GENERIC_NODE_INL_H_
diff --git a/src/compiler/generic-node.h b/src/compiler/generic-node.h
deleted file mode 100644
index 3dc324d..0000000
--- a/src/compiler/generic-node.h
+++ /dev/null
@@ -1,272 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_GENERIC_NODE_H_
-#define V8_COMPILER_GENERIC_NODE_H_
-
-#include "src/v8.h"
-
-#include "src/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class GenericGraphBase;
-
-typedef int NodeId;
-
-// A GenericNode<> is the basic primitive of graphs. GenericNodes are
-// chained together by input/use chains but by default otherwise contain only an
-// identifying number which specific applications of graphs and nodes can use
-// to index auxiliary out-of-line data, especially transient data.
-// Specializations of the templatized GenericNode<> class must provide a base
-// class B that contains all of the members to be made available in each
-// specialized Node instance. GenericNode uses a mixin template pattern to
-// ensure that common accessors and methods expect the derived class S type
-// rather than the GenericNode<B, S> type.
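-//
-// A (hypothetical) specialization would look like
-//   class MyNode : public GenericNode<MyNodeBase, MyNode> {};
-// so that accessors such as InputAt() and UseAt() return MyNode*.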
-template <class B, class S>
-class GenericNode : public B {
- public:
-  typedef B BaseClass;
-  typedef S DerivedClass;
-
-  inline NodeId id() const { return id_; }
-
-  int InputCount() const { return input_count_; }
-  S* InputAt(int index) const {
-    return static_cast<S*>(GetInputRecordPtr(index)->to);
-  }
-  inline void ReplaceInput(int index, GenericNode* new_input);
-  inline void AppendInput(Zone* zone, GenericNode* new_input);
-  inline void InsertInput(Zone* zone, int index, GenericNode* new_input);
-  inline void RemoveInput(int index);
-
-  int UseCount() { return use_count_; }
-  S* UseAt(int index) {
-    DCHECK(index < use_count_);
-    Use* current = first_use_;
-    while (index-- != 0) {
-      current = current->next;
-    }
-    return static_cast<S*>(current->from);
-  }
-  inline void ReplaceUses(GenericNode* replace_to);
-  template <class UnaryPredicate>
-  inline void ReplaceUsesIf(UnaryPredicate pred, GenericNode* replace_to);
-  inline void RemoveAllInputs();
-
-  inline void TrimInputCount(int input_count);
-
-  class Inputs {
-   public:
-    class iterator;
-    iterator begin();
-    iterator end();
-
-    explicit Inputs(GenericNode* node) : node_(node) {}
-
-   private:
-    GenericNode* node_;
-  };
-
-  Inputs inputs() { return Inputs(this); }
-
-  class Uses {
-   public:
-    class iterator;
-    iterator begin();
-    iterator end();
-    bool empty() { return begin() == end(); }
-
-    explicit Uses(GenericNode* node) : node_(node) {}
-
-   private:
-    GenericNode* node_;
-  };
-
-  Uses uses() { return Uses(this); }
-
-  class Edge;
-
-  bool OwnedBy(GenericNode* owner) const;
-
-  static S* New(GenericGraphBase* graph, int input_count, S** inputs);
-
- protected:
-  friend class GenericGraphBase;
-
-  class Use : public ZoneObject {
-   public:
-    GenericNode* from;
-    Use* next;
-    Use* prev;
-    int input_index;
-  };
-
-  class Input {
-   public:
-    GenericNode* to;
-    Use* use;
-
-    void Update(GenericNode* new_to);
-  };
-
-  void EnsureAppendableInputs(Zone* zone);
-
-  Input* GetInputRecordPtr(int index) const {
-    if (has_appendable_inputs_) {
-      return &((*inputs_.appendable_)[index]);
-    } else {
-      return inputs_.static_ + index;
-    }
-  }
-
-  inline void AppendUse(Use* use);
-  inline void RemoveUse(Use* use);
-
-  void* operator new(size_t, void* location) { return location; }
-
-  GenericNode(GenericGraphBase* graph, int input_count);
-
- private:
-  void AssignUniqueID(GenericGraphBase* graph);
-
-  typedef ZoneDeque<Input> InputDeque;
-
-  NodeId id_;
-  int input_count_ : 31;
-  bool has_appendable_inputs_ : 1;
-  union {
-    // When a node is initially allocated, it uses a static buffer to hold its
-    // inputs under the assumption that the number of inputs will not increase.
-    // When the first input is appended, the static buffer is converted into a
-    // deque to allow for space-efficient growing.
-    Input* static_;
-    InputDeque* appendable_;
-  } inputs_;
-  int use_count_;
-  Use* first_use_;
-  Use* last_use_;
-
-  DISALLOW_COPY_AND_ASSIGN(GenericNode);
-};
-
-// An encapsulation for information associated with a single use of a node as
-// an input from another node, allowing access to both the defining node and
-// the node having the input.
-template <class B, class S>
-class GenericNode<B, S>::Edge {
- public:
-  S* from() const { return static_cast<S*>(input_->use->from); }
-  S* to() const { return static_cast<S*>(input_->to); }
-  int index() const {
-    int index = input_->use->input_index;
-    DCHECK(index < input_->use->from->input_count_);
-    return index;
-  }
-
- private:
-  friend class GenericNode<B, S>::Uses::iterator;
-  friend class GenericNode<B, S>::Inputs::iterator;
-
-  explicit Edge(typename GenericNode<B, S>::Input* input) : input_(input) {}
-
-  typename GenericNode<B, S>::Input* input_;
-};
-
-// A forward iterator to visit the nodes which are depended upon by a node
-// in the order of input.
-template <class B, class S>
-class GenericNode<B, S>::Inputs::iterator {
- public:
-  iterator(const typename GenericNode<B, S>::Inputs::iterator& other)  // NOLINT
-      : node_(other.node_),
-        index_(other.index_) {}
-
-  S* operator*() { return static_cast<S*>(GetInput()->to); }
-  typename GenericNode<B, S>::Edge edge() {
-    return typename GenericNode::Edge(GetInput());
-  }
-  bool operator==(const iterator& other) const {
-    return other.index_ == index_ && other.node_ == node_;
-  }
-  bool operator!=(const iterator& other) const { return !(other == *this); }
-  iterator& operator++() {
-    DCHECK(node_ != NULL);
-    DCHECK(index_ < node_->input_count_);
-    ++index_;
-    return *this;
-  }
-  iterator& UpdateToAndIncrement(GenericNode<B, S>* new_to) {
-    typename GenericNode<B, S>::Input* input = GetInput();
-    input->Update(new_to);
-    index_++;
-    return *this;
-  }
-  int index() { return index_; }
-
- private:
-  friend class GenericNode;
-
-  explicit iterator(GenericNode* node, int index)
-      : node_(node), index_(index) {}
-
-  Input* GetInput() const { return node_->GetInputRecordPtr(index_); }
-
-  GenericNode* node_;
-  int index_;
-};
-
-// A forward iterator to visit the uses of a node. The uses are returned in
-// the order in which they were added as inputs.
-template <class B, class S>
-class GenericNode<B, S>::Uses::iterator {
- public:
-  iterator(const typename GenericNode<B, S>::Uses::iterator& other)  // NOLINT
-      : current_(other.current_),
-        index_(other.index_) {}
-
-  S* operator*() { return static_cast<S*>(current_->from); }
-  typename GenericNode<B, S>::Edge edge() {
-    return typename GenericNode::Edge(CurrentInput());
-  }
-
-  bool operator==(const iterator& other) { return other.current_ == current_; }
-  bool operator!=(const iterator& other) { return other.current_ != current_; }
-  iterator& operator++() {
-    DCHECK(current_ != NULL);
-    index_++;
-    current_ = current_->next;
-    return *this;
-  }
-  iterator& UpdateToAndIncrement(GenericNode<B, S>* new_to) {
-    DCHECK(current_ != NULL);
-    index_++;
-    typename GenericNode<B, S>::Input* input = CurrentInput();
-    current_ = current_->next;
-    input->Update(new_to);
-    return *this;
-  }
-  int index() const { return index_; }
-
- private:
-  friend class GenericNode<B, S>::Uses;
-
-  iterator() : current_(NULL), index_(0) {}
-  explicit iterator(GenericNode<B, S>* node)
-      : current_(node->first_use_), index_(0) {}
-
-  Input* CurrentInput() const {
-    return current_->from->GetInputRecordPtr(current_->input_index);
-  }
-
-  typename GenericNode<B, S>::Use* current_;
-  int index_;
-};
-}
-}
-}  // namespace v8::internal::compiler
-
-#endif  // V8_COMPILER_GENERIC_NODE_H_
diff --git a/src/compiler/graph-builder.cc b/src/compiler/graph-builder.cc
index 8992881..6321aaa 100644
--- a/src/compiler/graph-builder.cc
+++ b/src/compiler/graph-builder.cc
@@ -4,53 +4,65 @@
 
 #include "src/compiler/graph-builder.h"
 
+#include "src/bit-vector.h"
 #include "src/compiler.h"
-#include "src/compiler/generic-graph.h"
-#include "src/compiler/generic-node.h"
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/graph-visualizer.h"
+#include "src/compiler/node.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/node-properties-inl.h"
 #include "src/compiler/operator-properties.h"
-#include "src/compiler/operator-properties-inl.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
 
-StructuredGraphBuilder::StructuredGraphBuilder(Graph* graph,
+StructuredGraphBuilder::StructuredGraphBuilder(Zone* local_zone, Graph* graph,
                                                CommonOperatorBuilder* common)
     : GraphBuilder(graph),
       common_(common),
       environment_(NULL),
+      local_zone_(local_zone),
+      input_buffer_size_(0),
+      input_buffer_(NULL),
       current_context_(NULL),
-      exit_control_(NULL) {}
+      exit_control_(NULL) {
+  EnsureInputBufferSize(kInputBufferSizeIncrement);
+}
+
+
+Node** StructuredGraphBuilder::EnsureInputBufferSize(int size) {
+  if (size > input_buffer_size_) {
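+    // Over-allocate so that the next few slightly larger requests can be
+    // served from the same zone allocation.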
+    size += kInputBufferSizeIncrement;
+    input_buffer_ = local_zone()->NewArray<Node*>(size);
+    input_buffer_size_ = size;
+  }
+  return input_buffer_;
+}
 
 
 Node* StructuredGraphBuilder::MakeNode(const Operator* op,
                                        int value_input_count,
-                                       Node** value_inputs) {
-  DCHECK(op->InputCount() == value_input_count);
+                                       Node** value_inputs, bool incomplete) {
+  DCHECK(op->ValueInputCount() == value_input_count);
 
   bool has_context = OperatorProperties::HasContextInput(op);
   bool has_framestate = OperatorProperties::HasFrameStateInput(op);
-  bool has_control = OperatorProperties::GetControlInputCount(op) == 1;
-  bool has_effect = OperatorProperties::GetEffectInputCount(op) == 1;
+  bool has_control = op->ControlInputCount() == 1;
+  bool has_effect = op->EffectInputCount() == 1;
 
-  DCHECK(OperatorProperties::GetControlInputCount(op) < 2);
-  DCHECK(OperatorProperties::GetEffectInputCount(op) < 2);
+  DCHECK(op->ControlInputCount() < 2);
+  DCHECK(op->EffectInputCount() < 2);
 
   Node* result = NULL;
   if (!has_context && !has_framestate && !has_control && !has_effect) {
-    result = graph()->NewNode(op, value_input_count, value_inputs);
+    result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
   } else {
     int input_count_with_deps = value_input_count;
     if (has_context) ++input_count_with_deps;
     if (has_framestate) ++input_count_with_deps;
     if (has_control) ++input_count_with_deps;
     if (has_effect) ++input_count_with_deps;
-    Node** buffer = zone()->NewArray<Node*>(input_count_with_deps);
+    Node** buffer = EnsureInputBufferSize(input_count_with_deps);
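+    // Buffer layout: the value inputs first, then context, frame state,
+    // effect and control inputs where the operator requires them.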
     memcpy(buffer, value_inputs, kPointerSize * value_input_count);
     Node** current_input = buffer + value_input_count;
     if (has_context) {
@@ -68,11 +80,11 @@
     if (has_control) {
       *current_input++ = environment_->GetControlDependency();
     }
-    result = graph()->NewNode(op, input_count_with_deps, buffer);
+    result = graph()->NewNode(op, input_count_with_deps, buffer, incomplete);
     if (has_effect) {
       environment_->UpdateEffectDependency(result);
     }
-    if (OperatorProperties::HasControlOutput(result->op()) &&
+    if (result->op()->ControlOutputCount() > 0 &&
         !environment()->IsMarkedAsUnreachable()) {
       environment_->UpdateControlDependency(result);
     }
@@ -95,7 +107,7 @@
 
 StructuredGraphBuilder::Environment* StructuredGraphBuilder::CopyEnvironment(
     Environment* env) {
-  return new (zone()) Environment(*env);
+  return new (local_zone()) Environment(*env);
 }
 
 
@@ -111,7 +123,11 @@
     : builder_(copy.builder()),
       control_dependency_(copy.control_dependency_),
       effect_dependency_(copy.effect_dependency_),
-      values_(copy.values_) {}
+      values_(copy.zone()) {
+  const size_t kStackEstimate = 7;  // optimum from experimentation!
+  values_.reserve(copy.values_.size() + kStackEstimate);
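+  // Reserving headroom up front lets the copied environment grow a little
+  // without reallocating.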
+  values_.insert(values_.begin(), copy.values_.begin(), copy.values_.end());
+}
 
 
 void StructuredGraphBuilder::Environment::Merge(Environment* other) {
@@ -124,7 +140,9 @@
   // placing a singleton merge as the new control dependency.
   if (this->IsMarkedAsUnreachable()) {
     Node* other_control = other->control_dependency_;
-    control_dependency_ = graph()->NewNode(common()->Merge(1), other_control);
+    Node* inputs[] = {other_control};
+    control_dependency_ =
+        graph()->NewNode(common()->Merge(1), arraysize(inputs), inputs, true);
     effect_dependency_ = other->effect_dependency_;
     values_ = other->values_;
     return;
@@ -150,11 +168,22 @@
 }
 
 
-void StructuredGraphBuilder::Environment::PrepareForLoop() {
+void StructuredGraphBuilder::Environment::PrepareForLoop(BitVector* assigned) {
   Node* control = GetControlDependency();
-  for (int i = 0; i < static_cast<int>(values()->size()); ++i) {
-    Node* phi = builder_->NewPhi(1, values()->at(i), control);
-    values()->at(i) = phi;
+  int size = static_cast<int>(values()->size());
+  if (assigned == NULL) {
+    // Assume that everything is updated in the loop.
+    for (int i = 0; i < size; ++i) {
+      Node* phi = builder_->NewPhi(1, values()->at(i), control);
+      values()->at(i) = phi;
+    }
+  } else {
+    // Only build phis for those locals assigned in this loop.
+    for (int i = 0; i < size; ++i) {
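+      // Values outside the range of the bit vector are conservatively given
+      // a phi as well.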
+      if (i < assigned->length() && !assigned->Contains(i)) continue;
+      Node* phi = builder_->NewPhi(1, values()->at(i), control);
+      values()->at(i) = phi;
+    }
   }
   Node* effect = builder_->NewEffectPhi(1, GetEffectDependency(), control);
   UpdateEffectDependency(effect);
@@ -163,10 +192,10 @@
 
 Node* StructuredGraphBuilder::NewPhi(int count, Node* input, Node* control) {
   const Operator* phi_op = common()->Phi(kMachAnyTagged, count);
-  Node** buffer = zone()->NewArray<Node*>(count + 1);
+  Node** buffer = EnsureInputBufferSize(count + 1);
   MemsetPointer(buffer, input, count);
   buffer[count] = control;
-  return graph()->NewNode(phi_op, count + 1, buffer);
+  return graph()->NewNode(phi_op, count + 1, buffer, true);
 }
 
 
@@ -174,29 +203,30 @@
 Node* StructuredGraphBuilder::NewEffectPhi(int count, Node* input,
                                            Node* control) {
   const Operator* phi_op = common()->EffectPhi(count);
-  Node** buffer = zone()->NewArray<Node*>(count + 1);
+  Node** buffer = EnsureInputBufferSize(count + 1);
   MemsetPointer(buffer, input, count);
   buffer[count] = control;
-  return graph()->NewNode(phi_op, count + 1, buffer);
+  return graph()->NewNode(phi_op, count + 1, buffer, true);
 }
 
 
 Node* StructuredGraphBuilder::MergeControl(Node* control, Node* other) {
-  int inputs = OperatorProperties::GetControlInputCount(control->op()) + 1;
+  int inputs = control->op()->ControlInputCount() + 1;
   if (control->opcode() == IrOpcode::kLoop) {
     // Control node for loop exists, add input.
     const Operator* op = common()->Loop(inputs);
-    control->AppendInput(zone(), other);
+    control->AppendInput(graph_zone(), other);
     control->set_op(op);
   } else if (control->opcode() == IrOpcode::kMerge) {
     // Control node for merge exists, add input.
     const Operator* op = common()->Merge(inputs);
-    control->AppendInput(zone(), other);
+    control->AppendInput(graph_zone(), other);
     control->set_op(op);
   } else {
     // Control node is a singleton, introduce a merge.
     const Operator* op = common()->Merge(inputs);
-    control = graph()->NewNode(op, control, other);
+    Node* inputs[] = {control, other};
+    control = graph()->NewNode(op, arraysize(inputs), inputs, true);
   }
   return control;
 }
@@ -204,12 +234,12 @@
 
 Node* StructuredGraphBuilder::MergeEffect(Node* value, Node* other,
                                           Node* control) {
-  int inputs = OperatorProperties::GetControlInputCount(control->op());
+  int inputs = control->op()->ControlInputCount();
   if (value->opcode() == IrOpcode::kEffectPhi &&
       NodeProperties::GetControlInput(value) == control) {
     // Phi already exists, add input.
     value->set_op(common()->EffectPhi(inputs));
-    value->InsertInput(zone(), inputs - 1, other);
+    value->InsertInput(graph_zone(), inputs - 1, other);
   } else if (value != other) {
     // Phi does not exist yet, introduce one.
     value = NewEffectPhi(inputs, value, control);
@@ -221,12 +251,12 @@
 
 Node* StructuredGraphBuilder::MergeValue(Node* value, Node* other,
                                          Node* control) {
-  int inputs = OperatorProperties::GetControlInputCount(control->op());
+  int inputs = control->op()->ControlInputCount();
   if (value->opcode() == IrOpcode::kPhi &&
       NodeProperties::GetControlInput(value) == control) {
     // Phi already exists, add input.
     value->set_op(common()->Phi(kMachAnyTagged, inputs));
-    value->InsertInput(zone(), inputs - 1, other);
+    value->InsertInput(graph_zone(), inputs - 1, other);
   } else if (value != other) {
     // Phi does not exist yet, introduce one.
     value = NewPhi(inputs, value, control);
diff --git a/src/compiler/graph-builder.h b/src/compiler/graph-builder.h
index c966c29..d88b125 100644
--- a/src/compiler/graph-builder.h
+++ b/src/compiler/graph-builder.h
@@ -14,6 +14,9 @@
 
 namespace v8 {
 namespace internal {
+
+class BitVector;
+
 namespace compiler {
 
 class Node;
@@ -24,42 +27,44 @@
   explicit GraphBuilder(Graph* graph) : graph_(graph) {}
   virtual ~GraphBuilder() {}
 
-  Node* NewNode(const Operator* op) {
-    return MakeNode(op, 0, static_cast<Node**>(NULL));
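+  // Passing {incomplete} marks a node whose inputs may still be appended
+  // (e.g. merges and loops built one predecessor at a time).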
+  Node* NewNode(const Operator* op, bool incomplete = false) {
+    return MakeNode(op, 0, static_cast<Node**>(NULL), incomplete);
   }
 
-  Node* NewNode(const Operator* op, Node* n1) { return MakeNode(op, 1, &n1); }
+  Node* NewNode(const Operator* op, Node* n1) {
+    return MakeNode(op, 1, &n1, false);
+  }
 
   Node* NewNode(const Operator* op, Node* n1, Node* n2) {
     Node* buffer[] = {n1, n2};
-    return MakeNode(op, arraysize(buffer), buffer);
+    return MakeNode(op, arraysize(buffer), buffer, false);
   }
 
   Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) {
     Node* buffer[] = {n1, n2, n3};
-    return MakeNode(op, arraysize(buffer), buffer);
+    return MakeNode(op, arraysize(buffer), buffer, false);
   }
 
   Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
     Node* buffer[] = {n1, n2, n3, n4};
-    return MakeNode(op, arraysize(buffer), buffer);
+    return MakeNode(op, arraysize(buffer), buffer, false);
   }
 
   Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
                 Node* n5) {
     Node* buffer[] = {n1, n2, n3, n4, n5};
-    return MakeNode(op, arraysize(buffer), buffer);
+    return MakeNode(op, arraysize(buffer), buffer, false);
   }
 
   Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
                 Node* n5, Node* n6) {
     Node* nodes[] = {n1, n2, n3, n4, n5, n6};
-    return MakeNode(op, arraysize(nodes), nodes);
+    return MakeNode(op, arraysize(nodes), nodes, false);
   }
 
-  Node* NewNode(const Operator* op, int value_input_count,
-                Node** value_inputs) {
-    return MakeNode(op, value_input_count, value_inputs);
+  Node* NewNode(const Operator* op, int value_input_count, Node** value_inputs,
+                bool incomplete = false) {
+    return MakeNode(op, value_input_count, value_inputs, incomplete);
   }
 
   Graph* graph() const { return graph_; }
@@ -67,7 +72,7 @@
  protected:
   // Base implementation used by all factory methods.
   virtual Node* MakeNode(const Operator* op, int value_input_count,
-                         Node** value_inputs) = 0;
+                         Node** value_inputs, bool incomplete) = 0;
 
  private:
   Graph* graph_;
@@ -79,8 +84,9 @@
 // StubGraphBuilder).
 class StructuredGraphBuilder : public GraphBuilder {
  public:
-  StructuredGraphBuilder(Graph* graph, CommonOperatorBuilder* common);
-  virtual ~StructuredGraphBuilder() {}
+  StructuredGraphBuilder(Zone* zone, Graph* graph,
+                         CommonOperatorBuilder* common);
+  ~StructuredGraphBuilder() OVERRIDE {}
 
   // Creates a new Phi node having {count} input values.
   Node* NewPhi(int count, Node* input, Node* control);
@@ -94,10 +100,10 @@
   // Helpers to create new control nodes.
   Node* NewIfTrue() { return NewNode(common()->IfTrue()); }
   Node* NewIfFalse() { return NewNode(common()->IfFalse()); }
-  Node* NewMerge() { return NewNode(common()->Merge(1)); }
-  Node* NewLoop() { return NewNode(common()->Loop(1)); }
-  Node* NewBranch(Node* condition) {
-    return NewNode(common()->Branch(), condition);
+  Node* NewMerge() { return NewNode(common()->Merge(1), true); }
+  Node* NewLoop() { return NewNode(common()->Loop(1), true); }
+  Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone) {
+    return NewNode(common()->Branch(hint), condition);
   }
 
  protected:
@@ -108,8 +114,8 @@
   // The following method creates a new node having the specified operator and
   // ensures effect and control dependencies are wired up. The dependencies
   // tracked by the environment might be mutated.
-  virtual Node* MakeNode(const Operator* op, int value_input_count,
-                         Node** value_inputs) FINAL;
+  Node* MakeNode(const Operator* op, int value_input_count, Node** value_inputs,
+                 bool incomplete) FINAL;
 
   Environment* environment() const { return environment_; }
   void set_environment(Environment* env) { environment_ = env; }
@@ -122,9 +128,9 @@
 
   Node* dead_control();
 
-  // TODO(mstarzinger): Use phase-local zone instead!
-  Zone* zone() const { return graph()->zone(); }
-  Isolate* isolate() const { return zone()->isolate(); }
+  Zone* graph_zone() const { return graph()->zone(); }
+  Zone* local_zone() const { return local_zone_; }
+  Isolate* isolate() const { return graph_zone()->isolate(); }
   CommonOperatorBuilder* common() const { return common_; }
 
   // Helper to wrap a Handle<T> into a Unique<T>.
@@ -144,6 +150,13 @@
   CommonOperatorBuilder* common_;
   Environment* environment_;
 
+  // Zone local to the builder for data not leaking into the graph.
+  Zone* local_zone_;
+
+  // Temporary storage for building node input lists.
+  int input_buffer_size_;
+  Node** input_buffer_;
+
   // Node representing the control dependency for dead code.
   SetOncePointer<Node> dead_control_;
 
@@ -153,6 +166,12 @@
   // Merge of all control nodes that exit the function body.
   Node* exit_control_;
 
+  // Growth increment for the temporary buffer used to construct input lists to
+  // new nodes.
+  static const int kInputBufferSizeIncrement = 64;
+
+  Node** EnsureInputBufferSize(int size);
+
   DISALLOW_COPY_AND_ASSIGN(StructuredGraphBuilder);
 };
 
@@ -199,23 +218,22 @@
   }
 
   // Copies this environment at a loop header control-flow point.
-  Environment* CopyForLoop() {
-    PrepareForLoop();
+  Environment* CopyForLoop(BitVector* assigned) {
+    PrepareForLoop(assigned);
     return builder()->CopyEnvironment(this);
   }
 
   Node* GetContext() { return builder_->current_context(); }
 
  protected:
-  // TODO(mstarzinger): Use phase-local zone instead!
-  Zone* zone() const { return graph()->zone(); }
+  Zone* zone() const { return builder_->local_zone(); }
   Graph* graph() const { return builder_->graph(); }
   StructuredGraphBuilder* builder() const { return builder_; }
   CommonOperatorBuilder* common() { return builder_->common(); }
   NodeVector* values() { return &values_; }
 
   // Prepare environment to be used as loop header.
-  void PrepareForLoop();
+  void PrepareForLoop(BitVector* assigned);
 
  private:
   StructuredGraphBuilder* builder_;
diff --git a/src/compiler/graph-inl.h b/src/compiler/graph-inl.h
index 571ffb3..c135ae5 100644
--- a/src/compiler/graph-inl.h
+++ b/src/compiler/graph-inl.h
@@ -5,7 +5,7 @@
 #ifndef V8_COMPILER_GRAPH_INL_H_
 #define V8_COMPILER_GRAPH_INL_H_
 
-#include "src/compiler/generic-algorithm-inl.h"
+#include "src/compiler/generic-algorithm.h"
 #include "src/compiler/graph.h"
 
 namespace v8 {
@@ -13,25 +13,13 @@
 namespace compiler {
 
 template <class Visitor>
-void Graph::VisitNodeUsesFrom(Node* node, Visitor* visitor) {
-  GenericGraphVisit::Visit<Visitor, NodeUseIterationTraits<Node> >(
-      this, zone(), node, visitor);
-}
-
-
-template <class Visitor>
-void Graph::VisitNodeUsesFromStart(Visitor* visitor) {
-  VisitNodeUsesFrom(start(), visitor);
-}
-
-
-template <class Visitor>
 void Graph::VisitNodeInputsFromEnd(Visitor* visitor) {
-  GenericGraphVisit::Visit<Visitor, NodeInputIterationTraits<Node> >(
-      this, zone(), end(), visitor);
+  Zone tmp_zone(zone()->isolate());
+  GenericGraphVisit::Visit<Visitor>(this, &tmp_zone, end(), visitor);
 }
-}
-}
-}  // namespace v8::internal::compiler
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_COMPILER_GRAPH_INL_H_
diff --git a/src/compiler/graph-reducer-unittest.cc b/src/compiler/graph-reducer-unittest.cc
deleted file mode 100644
index 6567203..0000000
--- a/src/compiler/graph-reducer-unittest.cc
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/graph.h"
-#include "src/compiler/graph-reducer.h"
-#include "src/compiler/operator.h"
-#include "src/test/test-utils.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-using testing::_;
-using testing::DefaultValue;
-using testing::Return;
-using testing::Sequence;
-using testing::StrictMock;
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-namespace {
-
-SimpleOperator OP0(0, Operator::kNoWrite, 0, 1, "op0");
-SimpleOperator OP1(1, Operator::kNoProperties, 1, 1, "op1");
-
-
-struct MockReducer : public Reducer {
-  MOCK_METHOD1(Reduce, Reduction(Node*));
-};
-
-}  // namespace
-
-
-class GraphReducerTest : public TestWithZone {
- public:
-  GraphReducerTest() : graph_(zone()) {}
-
-  static void SetUpTestCase() {
-    TestWithZone::SetUpTestCase();
-    DefaultValue<Reduction>::Set(Reducer::NoChange());
-  }
-
-  static void TearDownTestCase() {
-    DefaultValue<Reduction>::Clear();
-    TestWithZone::TearDownTestCase();
-  }
-
- protected:
-  void ReduceNode(Node* node, Reducer* r) {
-    GraphReducer reducer(graph());
-    reducer.AddReducer(r);
-    reducer.ReduceNode(node);
-  }
-
-  void ReduceNode(Node* node, Reducer* r1, Reducer* r2) {
-    GraphReducer reducer(graph());
-    reducer.AddReducer(r1);
-    reducer.AddReducer(r2);
-    reducer.ReduceNode(node);
-  }
-
-  void ReduceNode(Node* node, Reducer* r1, Reducer* r2, Reducer* r3) {
-    GraphReducer reducer(graph());
-    reducer.AddReducer(r1);
-    reducer.AddReducer(r2);
-    reducer.AddReducer(r3);
-    reducer.ReduceNode(node);
-  }
-
-  Graph* graph() { return &graph_; }
-
- private:
-  Graph graph_;
-};
-
-
-TEST_F(GraphReducerTest, NodeIsDeadAfterReplace) {
-  StrictMock<MockReducer> r;
-  Node* node0 = graph()->NewNode(&OP0);
-  Node* node1 = graph()->NewNode(&OP1, node0);
-  Node* node2 = graph()->NewNode(&OP1, node0);
-  EXPECT_CALL(r, Reduce(node1)).WillOnce(Return(Reducer::Replace(node2)));
-  ReduceNode(node1, &r);
-  EXPECT_FALSE(node0->IsDead());
-  EXPECT_TRUE(node1->IsDead());
-  EXPECT_FALSE(node2->IsDead());
-}
-
-
-TEST_F(GraphReducerTest, ReduceOnceForEveryReducer) {
-  StrictMock<MockReducer> r1, r2;
-  Node* node0 = graph()->NewNode(&OP0);
-  EXPECT_CALL(r1, Reduce(node0));
-  EXPECT_CALL(r2, Reduce(node0));
-  ReduceNode(node0, &r1, &r2);
-}
-
-
-TEST_F(GraphReducerTest, ReduceAgainAfterChanged) {
-  Sequence s1, s2;
-  StrictMock<MockReducer> r1, r2, r3;
-  Node* node0 = graph()->NewNode(&OP0);
-  EXPECT_CALL(r1, Reduce(node0));
-  EXPECT_CALL(r2, Reduce(node0));
-  EXPECT_CALL(r3, Reduce(node0)).InSequence(s1, s2).WillOnce(
-      Return(Reducer::Changed(node0)));
-  EXPECT_CALL(r1, Reduce(node0)).InSequence(s1);
-  EXPECT_CALL(r2, Reduce(node0)).InSequence(s2);
-  ReduceNode(node0, &r1, &r2, &r3);
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/graph-reducer.cc b/src/compiler/graph-reducer.cc
index 36a54e0..9a6b121 100644
--- a/src/compiler/graph-reducer.cc
+++ b/src/compiler/graph-reducer.cc
@@ -12,86 +12,190 @@
 namespace internal {
 namespace compiler {
 
-GraphReducer::GraphReducer(Graph* graph)
-    : graph_(graph), reducers_(graph->zone()) {}
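+// Traversal state of a node. The order matters: Recurse() only pushes nodes
+// whose state is at most kRevisit.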
+enum class GraphReducer::State : uint8_t {
+  kUnvisited,
+  kRevisit,
+  kOnStack,
+  kVisited
+};
 
 
-static bool NodeIdIsLessThan(const Node* node, NodeId id) {
-  return node->id() < id;
+GraphReducer::GraphReducer(Graph* graph, Zone* zone)
+    : graph_(graph),
+      state_(graph, 4),
+      reducers_(zone),
+      revisit_(zone),
+      stack_(zone) {}
+
+
+void GraphReducer::AddReducer(Reducer* reducer) {
+  reducers_.push_back(reducer);
 }
 
 
 void GraphReducer::ReduceNode(Node* node) {
-  ZoneVector<Reducer*>::iterator skip = reducers_.end();
-  static const unsigned kMaxAttempts = 16;
-  bool reduce = true;
-  for (unsigned attempts = 0; attempts <= kMaxAttempts; ++attempts) {
-    if (!reduce) return;
-    reduce = false;  // Assume we don't need to rerun any reducers.
-    int before = graph_->NodeCount();
-    for (ZoneVector<Reducer*>::iterator i = reducers_.begin();
-         i != reducers_.end(); ++i) {
-      if (i == skip) continue;  // Skip this reducer.
-      Reduction reduction = (*i)->Reduce(node);
-      Node* replacement = reduction.replacement();
-      if (replacement == NULL) {
-        // No change from this reducer.
-      } else if (replacement == node) {
-        // {replacement == node} represents an in-place reduction.
-        // Rerun all the reducers except the current one for this node,
-        // as now there may be more opportunities for reduction.
-        reduce = true;
-        skip = i;
-        break;
-      } else {
-        if (node == graph_->start()) graph_->SetStart(replacement);
-        if (node == graph_->end()) graph_->SetEnd(replacement);
-        // If {node} was replaced by an old node, unlink {node} and assume that
-        // {replacement} was already reduced and finish.
-        if (replacement->id() < before) {
-          node->ReplaceUses(replacement);
-          node->Kill();
-          return;
-        }
-        // Otherwise, {node} was replaced by a new node. Replace all old uses of
-        // {node} with {replacement}. New nodes created by this reduction can
-        // use {node}.
-        node->ReplaceUsesIf(
-            std::bind2nd(std::ptr_fun(&NodeIdIsLessThan), before), replacement);
-        // Unlink {node} if it's no longer used.
-        if (node->uses().empty()) {
-          node->Kill();
-        }
-        // Rerun all the reductions on the {replacement}.
-        skip = reducers_.end();
-        node = replacement;
-        reduce = true;
-        break;
+  DCHECK(stack_.empty());
+  DCHECK(revisit_.empty());
+  Push(node);
+  for (;;) {
+    if (!stack_.empty()) {
+      // Process the node on the top of the stack, potentially pushing more or
+      // popping the node off the stack.
+      ReduceTop();
+    } else if (!revisit_.empty()) {
+      // If the stack becomes empty, revisit any nodes in the revisit queue.
+      Node* const node = revisit_.top();
+      revisit_.pop();
+      if (state_.Get(node) == State::kRevisit) {
+        // State may have changed while the node sat in the revisit queue.
+        Push(node);
       }
+    } else {
+      break;
+    }
+  }
+  DCHECK(revisit_.empty());
+  DCHECK(stack_.empty());
+}
+
+
+void GraphReducer::ReduceGraph() { ReduceNode(graph()->end()); }
+
+
+Reduction GraphReducer::Reduce(Node* const node) {
+  auto skip = reducers_.end();
+  for (auto i = reducers_.begin(); i != reducers_.end();) {
+    if (i != skip) {
+      Reduction reduction = (*i)->Reduce(node);
+      if (!reduction.Changed()) {
+        // No change from this reducer.
+      } else if (reduction.replacement() == node) {
+        // {replacement} == {node} represents an in-place reduction. Rerun
+        // all the other reducers for this node, as now there may be more
+        // opportunities for reduction.
+        skip = i;
+        i = reducers_.begin();
+        continue;
+      } else {
+        // {node} was replaced by another node.
+        return reduction;
+      }
+    }
+    ++i;
+  }
+  if (skip == reducers_.end()) {
+    // No change from any reducer.
+    return Reducer::NoChange();
+  }
+  // At least one reducer did some in-place reduction.
+  return Reducer::Changed(node);
+}
+
+
+void GraphReducer::ReduceTop() {
+  NodeState& entry = stack_.top();
+  Node* node = entry.node;
+  DCHECK(state_.Get(node) == State::kOnStack);
+
+  if (node->IsDead()) return Pop();  // Node was killed while on stack.
+
+  // Recurse on an input if necessary.
+  int start = entry.input_index < node->InputCount() ? entry.input_index : 0;
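+  // Resume at the input that was next when this entry was last on top, then
+  // wrap around so earlier inputs are checked as well.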
+  for (int i = start; i < node->InputCount(); i++) {
+    Node* input = node->InputAt(i);
+    entry.input_index = i + 1;
+    if (input != node && Recurse(input)) return;
+  }
+  for (int i = 0; i < start; i++) {
+    Node* input = node->InputAt(i);
+    entry.input_index = i + 1;
+    if (input != node && Recurse(input)) return;
+  }
+
+  // Remember the node count before reduction.
+  const int node_count = graph()->NodeCount();
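+  // Nodes created by the reduction itself will have ids >= node_count and
+  // are allowed to keep {node} as an input.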
+
+  // All inputs should be visited or on stack. Apply reductions to node.
+  Reduction reduction = Reduce(node);
+
+  // If there was no reduction, pop {node} and continue.
+  if (!reduction.Changed()) return Pop();
+
+  // Check if the reduction is an in-place update of the {node}.
+  Node* const replacement = reduction.replacement();
+  if (replacement == node) {
+    // In-place update of {node}, may need to recurse on an input.
+    for (int i = 0; i < node->InputCount(); ++i) {
+      Node* input = node->InputAt(i);
+      entry.input_index = i + 1;
+      if (input != node && Recurse(input)) return;
+    }
+  }
+
+  // After reducing the node, pop it off the stack.
+  Pop();
+
+  // Revisit all uses of the node.
+  for (Node* const use : node->uses()) {
+    // Don't revisit this node if it refers to itself.
+    if (use != node) Revisit(use);
+  }
+
+  // Check if we have a new replacement.
+  if (replacement != node) {
+    if (node == graph()->start()) graph()->SetStart(replacement);
+    if (node == graph()->end()) graph()->SetEnd(replacement);
+    // If {node} was replaced by an old node, unlink {node} and assume that
+    // {replacement} was already reduced and finish.
+    if (replacement->id() < node_count) {
+      node->ReplaceUses(replacement);
+      node->Kill();
+    } else {
+      // Otherwise {node} was replaced by a new node. Replace all old uses of
+      // {node} with {replacement}. New nodes created by this reduction can
+      // use {node}.
+      node->ReplaceUsesIf(
+          [node_count](Node* const node) { return node->id() < node_count; },
+          replacement);
+      // Unlink {node} if it's no longer used.
+      if (node->uses().empty()) {
+        node->Kill();
+      }
+
+      // If there was a replacement, reduce it after popping {node}.
+      Recurse(replacement);
     }
   }
 }
 
 
-// A helper class to reuse the node traversal algorithm.
-struct GraphReducerVisitor FINAL : public NullNodeVisitor {
-  explicit GraphReducerVisitor(GraphReducer* reducer) : reducer_(reducer) {}
-  GenericGraphVisit::Control Post(Node* node) {
-    reducer_->ReduceNode(node);
-    return GenericGraphVisit::CONTINUE;
-  }
-  GraphReducer* reducer_;
-};
-
-
-void GraphReducer::ReduceGraph() {
-  GraphReducerVisitor visitor(this);
-  // Perform a post-order reduction of all nodes starting from the end.
-  graph()->VisitNodeInputsFromEnd(&visitor);
+void GraphReducer::Pop() {
+  Node* node = stack_.top().node;
+  state_.Set(node, State::kVisited);
+  stack_.pop();
 }
 
 
-// TODO(titzer): partial graph reductions.
+void GraphReducer::Push(Node* const node) {
+  DCHECK(state_.Get(node) != State::kOnStack);
+  state_.Set(node, State::kOnStack);
+  stack_.push({node, 0});
+}
+
+
+bool GraphReducer::Recurse(Node* node) {
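+  // Nodes already on the stack or fully visited are not pushed again.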
+  if (state_.Get(node) > State::kRevisit) return false;
+  Push(node);
+  return true;
+}
+
+
+void GraphReducer::Revisit(Node* node) {
+  if (state_.Get(node) == State::kVisited) {
+    state_.Set(node, State::kRevisit);
+    revisit_.push(node);
+  }
+}
 
 }  // namespace compiler
 }  // namespace internal
diff --git a/src/compiler/graph-reducer.h b/src/compiler/graph-reducer.h
index e0e4f7a..09a650c 100644
--- a/src/compiler/graph-reducer.h
+++ b/src/compiler/graph-reducer.h
@@ -5,17 +5,13 @@
 #ifndef V8_COMPILER_GRAPH_REDUCER_H_
 #define V8_COMPILER_GRAPH_REDUCER_H_
 
+#include "src/compiler/graph.h"
 #include "src/zone-containers.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-// Forward declarations.
-class Graph;
-class Node;
-
-
 // Represents the result of trying to reduce a node in the graph.
 class Reduction FINAL {
  public:
@@ -55,20 +51,42 @@
 // Performs an iterative reduction of a node graph.
 class GraphReducer FINAL {
  public:
-  explicit GraphReducer(Graph* graph);
+  GraphReducer(Graph* graph, Zone* zone);
 
   Graph* graph() const { return graph_; }
 
-  void AddReducer(Reducer* reducer) { reducers_.push_back(reducer); }
+  void AddReducer(Reducer* reducer);
 
   // Reduce a single node.
-  void ReduceNode(Node* node);
+  void ReduceNode(Node* const);
   // Reduce the whole graph.
   void ReduceGraph();
 
  private:
+  enum class State : uint8_t;
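+  // An entry of the traversal stack: a node plus the index of the next input
+  // to visit.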
+  struct NodeState {
+    Node* node;
+    int input_index;
+  };
+
+  // Reduce a single node.
+  Reduction Reduce(Node* const);
+  // Reduce the node on top of the stack.
+  void ReduceTop();
+
+  // Node stack operations.
+  void Pop();
+  void Push(Node* node);
+
+  // Revisit queue operations.
+  bool Recurse(Node* node);
+  void Revisit(Node* node);
+
   Graph* graph_;
+  NodeMarker<State> state_;
   ZoneVector<Reducer*> reducers_;
+  ZoneStack<Node*> revisit_;
+  ZoneStack<NodeState> stack_;
 
   DISALLOW_COPY_AND_ASSIGN(GraphReducer);
 };
diff --git a/src/compiler/graph-replay.cc b/src/compiler/graph-replay.cc
index 494d431..3a0b783 100644
--- a/src/compiler/graph-replay.cc
+++ b/src/compiler/graph-replay.cc
@@ -9,7 +9,7 @@
 #include "src/compiler/graph-inl.h"
 #include "src/compiler/node.h"
 #include "src/compiler/operator.h"
-#include "src/compiler/operator-properties-inl.h"
+#include "src/compiler/operator-properties.h"
 
 namespace v8 {
 namespace internal {
@@ -24,14 +24,13 @@
 }
 
 
-GenericGraphVisit::Control GraphReplayPrinter::Pre(Node* node) {
+void GraphReplayPrinter::Pre(Node* node) {
   PrintReplayOpCreator(node->op());
   PrintF("  Node* n%d = graph.NewNode(op", node->id());
   for (int i = 0; i < node->InputCount(); ++i) {
     PrintF(", nil");
   }
   PrintF("); USE(n%d);\n", node->id());
-  return GenericGraphVisit::CONTINUE;
 }
 
 
@@ -60,14 +59,14 @@
       PrintF("unique_constant");
       break;
     case IrOpcode::kPhi:
-      PrintF("%d", op->InputCount());
+      PrintF("%d", op->ValueInputCount());
       break;
     case IrOpcode::kEffectPhi:
-      PrintF("%d", OperatorProperties::GetEffectInputCount(op));
+      PrintF("%d", op->EffectInputCount());
       break;
     case IrOpcode::kLoop:
     case IrOpcode::kMerge:
-      PrintF("%d", OperatorProperties::GetControlInputCount(op));
+      PrintF("%d", op->ControlInputCount());
       break;
     default:
       break;
diff --git a/src/compiler/graph-replay.h b/src/compiler/graph-replay.h
index 53d5247..f41311e 100644
--- a/src/compiler/graph-replay.h
+++ b/src/compiler/graph-replay.h
@@ -5,6 +5,7 @@
 #ifndef V8_COMPILER_GRAPH_REPLAY_H_
 #define V8_COMPILER_GRAPH_REPLAY_H_
 
+#include "src/compiler/generic-algorithm.h"
 #include "src/compiler/node.h"
 
 namespace v8 {
@@ -25,7 +26,7 @@
   static void PrintReplay(Graph* graph) {}
 #endif
 
-  GenericGraphVisit::Control Pre(Node* node);
+  void Pre(Node* node);
   void PostEdge(Node* from, int index, Node* to);
 
  private:
diff --git a/src/compiler/graph-unittest.cc b/src/compiler/graph-unittest.cc
deleted file mode 100644
index 75e70cb..0000000
--- a/src/compiler/graph-unittest.cc
+++ /dev/null
@@ -1,779 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/graph-unittest.h"
-
-#include <ostream>  // NOLINT(readability/streams)
-
-#include "src/compiler/node-properties-inl.h"
-
-using testing::_;
-using testing::MakeMatcher;
-using testing::MatcherInterface;
-using testing::MatchResultListener;
-using testing::StringMatchResultListener;
-
-namespace v8 {
-namespace internal {
-
-// TODO(bmeurer): Find a new home for these functions.
-template <typename T>
-inline std::ostream& operator<<(std::ostream& os, const Unique<T>& value) {
-  return os << *value.handle();
-}
-inline std::ostream& operator<<(std::ostream& os,
-                                const ExternalReference& value) {
-  OStringStream ost;
-  compiler::StaticParameterTraits<ExternalReference>::PrintTo(ost, value);
-  return os << ost.c_str();
-}
-
-namespace compiler {
-
-GraphTest::GraphTest(int num_parameters) : common_(zone()), graph_(zone()) {
-  graph()->SetStart(graph()->NewNode(common()->Start(num_parameters)));
-}
-
-
-GraphTest::~GraphTest() {}
-
-
-Node* GraphTest::Parameter(int32_t index) {
-  return graph()->NewNode(common()->Parameter(index), graph()->start());
-}
-
-
-Node* GraphTest::Float32Constant(volatile float value) {
-  return graph()->NewNode(common()->Float32Constant(value));
-}
-
-
-Node* GraphTest::Float64Constant(volatile double value) {
-  return graph()->NewNode(common()->Float64Constant(value));
-}
-
-
-Node* GraphTest::Int32Constant(int32_t value) {
-  return graph()->NewNode(common()->Int32Constant(value));
-}
-
-
-Node* GraphTest::Int64Constant(int64_t value) {
-  return graph()->NewNode(common()->Int64Constant(value));
-}
-
-
-Node* GraphTest::NumberConstant(volatile double value) {
-  return graph()->NewNode(common()->NumberConstant(value));
-}
-
-
-Node* GraphTest::HeapConstant(const Unique<HeapObject>& value) {
-  return graph()->NewNode(common()->HeapConstant(value));
-}
-
-
-Node* GraphTest::FalseConstant() {
-  return HeapConstant(
-      Unique<HeapObject>::CreateImmovable(factory()->false_value()));
-}
-
-
-Node* GraphTest::TrueConstant() {
-  return HeapConstant(
-      Unique<HeapObject>::CreateImmovable(factory()->true_value()));
-}
-
-
-Matcher<Node*> GraphTest::IsFalseConstant() {
-  return IsHeapConstant(
-      Unique<HeapObject>::CreateImmovable(factory()->false_value()));
-}
-
-
-Matcher<Node*> GraphTest::IsTrueConstant() {
-  return IsHeapConstant(
-      Unique<HeapObject>::CreateImmovable(factory()->true_value()));
-}
-
-namespace {
-
-template <typename T>
-bool PrintMatchAndExplain(const T& value, const char* value_name,
-                          const Matcher<T>& value_matcher,
-                          MatchResultListener* listener) {
-  StringMatchResultListener value_listener;
-  if (!value_matcher.MatchAndExplain(value, &value_listener)) {
-    *listener << "whose " << value_name << " " << value << " doesn't match";
-    if (value_listener.str() != "") {
-      *listener << ", " << value_listener.str();
-    }
-    return false;
-  }
-  return true;
-}
-
-
-class NodeMatcher : public MatcherInterface<Node*> {
- public:
-  explicit NodeMatcher(IrOpcode::Value opcode) : opcode_(opcode) {}
-
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
-    *os << "is a " << IrOpcode::Mnemonic(opcode_) << " node";
-  }
-
-  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
-      OVERRIDE {
-    if (node == NULL) {
-      *listener << "which is NULL";
-      return false;
-    }
-    if (node->opcode() != opcode_) {
-      *listener << "whose opcode is " << IrOpcode::Mnemonic(node->opcode())
-                << " but should have been " << IrOpcode::Mnemonic(opcode_);
-      return false;
-    }
-    return true;
-  }
-
- private:
-  const IrOpcode::Value opcode_;
-};
-
-
-class IsBranchMatcher FINAL : public NodeMatcher {
- public:
-  IsBranchMatcher(const Matcher<Node*>& value_matcher,
-                  const Matcher<Node*>& control_matcher)
-      : NodeMatcher(IrOpcode::kBranch),
-        value_matcher_(value_matcher),
-        control_matcher_(control_matcher) {}
-
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
-    NodeMatcher::DescribeTo(os);
-    *os << " whose value (";
-    value_matcher_.DescribeTo(os);
-    *os << ") and control (";
-    control_matcher_.DescribeTo(os);
-    *os << ")";
-  }
-
-  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
-      OVERRIDE {
-    return (NodeMatcher::MatchAndExplain(node, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
-                                 "value", value_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
-                                 "control", control_matcher_, listener));
-  }
-
- private:
-  const Matcher<Node*> value_matcher_;
-  const Matcher<Node*> control_matcher_;
-};
-
-
-class IsMergeMatcher FINAL : public NodeMatcher {
- public:
-  IsMergeMatcher(const Matcher<Node*>& control0_matcher,
-                 const Matcher<Node*>& control1_matcher)
-      : NodeMatcher(IrOpcode::kMerge),
-        control0_matcher_(control0_matcher),
-        control1_matcher_(control1_matcher) {}
-
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
-    NodeMatcher::DescribeTo(os);
-    *os << " whose control0 (";
-    control0_matcher_.DescribeTo(os);
-    *os << ") and control1 (";
-    control1_matcher_.DescribeTo(os);
-    *os << ")";
-  }
-
-  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
-      OVERRIDE {
-    return (NodeMatcher::MatchAndExplain(node, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetControlInput(node, 0),
-                                 "control0", control0_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetControlInput(node, 1),
-                                 "control1", control1_matcher_, listener));
-  }
-
- private:
-  const Matcher<Node*> control0_matcher_;
-  const Matcher<Node*> control1_matcher_;
-};
-
-
-class IsControl1Matcher FINAL : public NodeMatcher {
- public:
-  IsControl1Matcher(IrOpcode::Value opcode,
-                    const Matcher<Node*>& control_matcher)
-      : NodeMatcher(opcode), control_matcher_(control_matcher) {}
-
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
-    NodeMatcher::DescribeTo(os);
-    *os << " whose control (";
-    control_matcher_.DescribeTo(os);
-    *os << ")";
-  }
-
-  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
-      OVERRIDE {
-    return (NodeMatcher::MatchAndExplain(node, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
-                                 "control", control_matcher_, listener));
-  }
-
- private:
-  const Matcher<Node*> control_matcher_;
-};
-
-
-class IsFinishMatcher FINAL : public NodeMatcher {
- public:
-  IsFinishMatcher(const Matcher<Node*>& value_matcher,
-                  const Matcher<Node*>& effect_matcher)
-      : NodeMatcher(IrOpcode::kFinish),
-        value_matcher_(value_matcher),
-        effect_matcher_(effect_matcher) {}
-
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
-    NodeMatcher::DescribeTo(os);
-    *os << " whose value (";
-    value_matcher_.DescribeTo(os);
-    *os << ") and effect (";
-    effect_matcher_.DescribeTo(os);
-    *os << ")";
-  }
-
-  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
-      OVERRIDE {
-    return (NodeMatcher::MatchAndExplain(node, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
-                                 "value", value_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
-                                 effect_matcher_, listener));
-  }
-
- private:
-  const Matcher<Node*> value_matcher_;
-  const Matcher<Node*> effect_matcher_;
-};
-
-
-template <typename T>
-class IsConstantMatcher FINAL : public NodeMatcher {
- public:
-  IsConstantMatcher(IrOpcode::Value opcode, const Matcher<T>& value_matcher)
-      : NodeMatcher(opcode), value_matcher_(value_matcher) {}
-
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
-    NodeMatcher::DescribeTo(os);
-    *os << " whose value (";
-    value_matcher_.DescribeTo(os);
-    *os << ")";
-  }
-
-  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
-      OVERRIDE {
-    return (NodeMatcher::MatchAndExplain(node, listener) &&
-            PrintMatchAndExplain(OpParameter<T>(node), "value", value_matcher_,
-                                 listener));
-  }
-
- private:
-  const Matcher<T> value_matcher_;
-};
-
-
-class IsPhiMatcher FINAL : public NodeMatcher {
- public:
-  IsPhiMatcher(const Matcher<MachineType>& type_matcher,
-               const Matcher<Node*>& value0_matcher,
-               const Matcher<Node*>& value1_matcher,
-               const Matcher<Node*>& control_matcher)
-      : NodeMatcher(IrOpcode::kPhi),
-        type_matcher_(type_matcher),
-        value0_matcher_(value0_matcher),
-        value1_matcher_(value1_matcher),
-        control_matcher_(control_matcher) {}
-
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
-    NodeMatcher::DescribeTo(os);
-    *os << " whose type (";
-    type_matcher_.DescribeTo(os);
-    *os << "), value0 (";
-    value0_matcher_.DescribeTo(os);
-    *os << "), value1 (";
-    value1_matcher_.DescribeTo(os);
-    *os << ") and control (";
-    control_matcher_.DescribeTo(os);
-    *os << ")";
-  }
-
-  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
-      OVERRIDE {
-    return (NodeMatcher::MatchAndExplain(node, listener) &&
-            PrintMatchAndExplain(OpParameter<MachineType>(node), "type",
-                                 type_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
-                                 "value0", value0_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
-                                 "value1", value1_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
-                                 "control", control_matcher_, listener));
-  }
-
- private:
-  const Matcher<MachineType> type_matcher_;
-  const Matcher<Node*> value0_matcher_;
-  const Matcher<Node*> value1_matcher_;
-  const Matcher<Node*> control_matcher_;
-};
-
-
-class IsProjectionMatcher FINAL : public NodeMatcher {
- public:
-  IsProjectionMatcher(const Matcher<size_t>& index_matcher,
-                      const Matcher<Node*>& base_matcher)
-      : NodeMatcher(IrOpcode::kProjection),
-        index_matcher_(index_matcher),
-        base_matcher_(base_matcher) {}
-
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
-    NodeMatcher::DescribeTo(os);
-    *os << " whose index (";
-    index_matcher_.DescribeTo(os);
-    *os << ") and base (";
-    base_matcher_.DescribeTo(os);
-    *os << ")";
-  }
-
-  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
-      OVERRIDE {
-    return (NodeMatcher::MatchAndExplain(node, listener) &&
-            PrintMatchAndExplain(OpParameter<size_t>(node), "index",
-                                 index_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
-                                 base_matcher_, listener));
-  }
-
- private:
-  const Matcher<size_t> index_matcher_;
-  const Matcher<Node*> base_matcher_;
-};
-
-
-class IsCallMatcher FINAL : public NodeMatcher {
- public:
-  IsCallMatcher(const Matcher<CallDescriptor*>& descriptor_matcher,
-                const Matcher<Node*>& value0_matcher,
-                const Matcher<Node*>& value1_matcher,
-                const Matcher<Node*>& value2_matcher,
-                const Matcher<Node*>& value3_matcher,
-                const Matcher<Node*>& effect_matcher,
-                const Matcher<Node*>& control_matcher)
-      : NodeMatcher(IrOpcode::kCall),
-        descriptor_matcher_(descriptor_matcher),
-        value0_matcher_(value0_matcher),
-        value1_matcher_(value1_matcher),
-        value2_matcher_(value2_matcher),
-        value3_matcher_(value3_matcher),
-        effect_matcher_(effect_matcher),
-        control_matcher_(control_matcher) {}
-
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
-    NodeMatcher::DescribeTo(os);
-    *os << " whose value0 (";
-    value0_matcher_.DescribeTo(os);
-    *os << ") and value1 (";
-    value1_matcher_.DescribeTo(os);
-    *os << ") and value2 (";
-    value2_matcher_.DescribeTo(os);
-    *os << ") and value3 (";
-    value3_matcher_.DescribeTo(os);
-    *os << ") and effect (";
-    effect_matcher_.DescribeTo(os);
-    *os << ") and control (";
-    control_matcher_.DescribeTo(os);
-    *os << ")";
-  }
-
-  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
-      OVERRIDE {
-    return (NodeMatcher::MatchAndExplain(node, listener) &&
-            PrintMatchAndExplain(OpParameter<CallDescriptor*>(node),
-                                 "descriptor", descriptor_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
-                                 "value0", value0_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
-                                 "value1", value1_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
-                                 "value2", value2_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 3),
-                                 "value3", value3_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
-                                 effect_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
-                                 "control", control_matcher_, listener));
-  }
-
- private:
-  const Matcher<CallDescriptor*> descriptor_matcher_;
-  const Matcher<Node*> value0_matcher_;
-  const Matcher<Node*> value1_matcher_;
-  const Matcher<Node*> value2_matcher_;
-  const Matcher<Node*> value3_matcher_;
-  const Matcher<Node*> effect_matcher_;
-  const Matcher<Node*> control_matcher_;
-};
-
-
-class IsLoadMatcher FINAL : public NodeMatcher {
- public:
-  IsLoadMatcher(const Matcher<LoadRepresentation>& rep_matcher,
-                const Matcher<Node*>& base_matcher,
-                const Matcher<Node*>& index_matcher,
-                const Matcher<Node*>& effect_matcher)
-      : NodeMatcher(IrOpcode::kLoad),
-        rep_matcher_(rep_matcher),
-        base_matcher_(base_matcher),
-        index_matcher_(index_matcher),
-        effect_matcher_(effect_matcher) {}
-
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
-    NodeMatcher::DescribeTo(os);
-    *os << " whose rep (";
-    rep_matcher_.DescribeTo(os);
-    *os << "), base (";
-    base_matcher_.DescribeTo(os);
-    *os << "), index (";
-    index_matcher_.DescribeTo(os);
-    *os << ") and effect (";
-    effect_matcher_.DescribeTo(os);
-    *os << ")";
-  }
-
-  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
-      OVERRIDE {
-    return (NodeMatcher::MatchAndExplain(node, listener) &&
-            PrintMatchAndExplain(OpParameter<LoadRepresentation>(node), "rep",
-                                 rep_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
-                                 base_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
-                                 "index", index_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
-                                 effect_matcher_, listener));
-  }
-
- private:
-  const Matcher<LoadRepresentation> rep_matcher_;
-  const Matcher<Node*> base_matcher_;
-  const Matcher<Node*> index_matcher_;
-  const Matcher<Node*> effect_matcher_;
-};
-
-
-class IsStoreMatcher FINAL : public NodeMatcher {
- public:
-  IsStoreMatcher(const Matcher<MachineType>& type_matcher,
-                 const Matcher<WriteBarrierKind> write_barrier_matcher,
-                 const Matcher<Node*>& base_matcher,
-                 const Matcher<Node*>& index_matcher,
-                 const Matcher<Node*>& value_matcher,
-                 const Matcher<Node*>& effect_matcher,
-                 const Matcher<Node*>& control_matcher)
-      : NodeMatcher(IrOpcode::kStore),
-        type_matcher_(type_matcher),
-        write_barrier_matcher_(write_barrier_matcher),
-        base_matcher_(base_matcher),
-        index_matcher_(index_matcher),
-        value_matcher_(value_matcher),
-        effect_matcher_(effect_matcher),
-        control_matcher_(control_matcher) {}
-
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
-    NodeMatcher::DescribeTo(os);
-    *os << " whose type (";
-    type_matcher_.DescribeTo(os);
-    *os << "), write barrier (";
-    write_barrier_matcher_.DescribeTo(os);
-    *os << "), base (";
-    base_matcher_.DescribeTo(os);
-    *os << "), index (";
-    index_matcher_.DescribeTo(os);
-    *os << "), value (";
-    value_matcher_.DescribeTo(os);
-    *os << "), effect (";
-    effect_matcher_.DescribeTo(os);
-    *os << ") and control (";
-    control_matcher_.DescribeTo(os);
-    *os << ")";
-  }
-
-  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
-      OVERRIDE {
-    return (NodeMatcher::MatchAndExplain(node, listener) &&
-            PrintMatchAndExplain(
-                OpParameter<StoreRepresentation>(node).machine_type(), "type",
-                type_matcher_, listener) &&
-            PrintMatchAndExplain(
-                OpParameter<StoreRepresentation>(node).write_barrier_kind(),
-                "write barrier", write_barrier_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
-                                 base_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
-                                 "index", index_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
-                                 "value", value_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
-                                 effect_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
-                                 "control", control_matcher_, listener));
-  }
-
- private:
-  const Matcher<MachineType> type_matcher_;
-  const Matcher<WriteBarrierKind> write_barrier_matcher_;
-  const Matcher<Node*> base_matcher_;
-  const Matcher<Node*> index_matcher_;
-  const Matcher<Node*> value_matcher_;
-  const Matcher<Node*> effect_matcher_;
-  const Matcher<Node*> control_matcher_;
-};
-
-
-class IsBinopMatcher FINAL : public NodeMatcher {
- public:
-  IsBinopMatcher(IrOpcode::Value opcode, const Matcher<Node*>& lhs_matcher,
-                 const Matcher<Node*>& rhs_matcher)
-      : NodeMatcher(opcode),
-        lhs_matcher_(lhs_matcher),
-        rhs_matcher_(rhs_matcher) {}
-
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
-    NodeMatcher::DescribeTo(os);
-    *os << " whose lhs (";
-    lhs_matcher_.DescribeTo(os);
-    *os << ") and rhs (";
-    rhs_matcher_.DescribeTo(os);
-    *os << ")";
-  }
-
-  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
-      OVERRIDE {
-    return (NodeMatcher::MatchAndExplain(node, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "lhs",
-                                 lhs_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), "rhs",
-                                 rhs_matcher_, listener));
-  }
-
- private:
-  const Matcher<Node*> lhs_matcher_;
-  const Matcher<Node*> rhs_matcher_;
-};
-
-
-class IsUnopMatcher FINAL : public NodeMatcher {
- public:
-  IsUnopMatcher(IrOpcode::Value opcode, const Matcher<Node*>& input_matcher)
-      : NodeMatcher(opcode), input_matcher_(input_matcher) {}
-
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
-    NodeMatcher::DescribeTo(os);
-    *os << " whose input (";
-    input_matcher_.DescribeTo(os);
-    *os << ")";
-  }
-
-  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
-      OVERRIDE {
-    return (NodeMatcher::MatchAndExplain(node, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
-                                 "input", input_matcher_, listener));
-  }
-
- private:
-  const Matcher<Node*> input_matcher_;
-};
-}
-
-
-Matcher<Node*> IsBranch(const Matcher<Node*>& value_matcher,
-                        const Matcher<Node*>& control_matcher) {
-  return MakeMatcher(new IsBranchMatcher(value_matcher, control_matcher));
-}
-
-
-Matcher<Node*> IsMerge(const Matcher<Node*>& control0_matcher,
-                       const Matcher<Node*>& control1_matcher) {
-  return MakeMatcher(new IsMergeMatcher(control0_matcher, control1_matcher));
-}
-
-
-Matcher<Node*> IsIfTrue(const Matcher<Node*>& control_matcher) {
-  return MakeMatcher(new IsControl1Matcher(IrOpcode::kIfTrue, control_matcher));
-}
-
-
-Matcher<Node*> IsIfFalse(const Matcher<Node*>& control_matcher) {
-  return MakeMatcher(
-      new IsControl1Matcher(IrOpcode::kIfFalse, control_matcher));
-}
-
-
-Matcher<Node*> IsControlEffect(const Matcher<Node*>& control_matcher) {
-  return MakeMatcher(
-      new IsControl1Matcher(IrOpcode::kControlEffect, control_matcher));
-}
-
-
-Matcher<Node*> IsValueEffect(const Matcher<Node*>& value_matcher) {
-  return MakeMatcher(new IsUnopMatcher(IrOpcode::kValueEffect, value_matcher));
-}
-
-
-Matcher<Node*> IsFinish(const Matcher<Node*>& value_matcher,
-                        const Matcher<Node*>& effect_matcher) {
-  return MakeMatcher(new IsFinishMatcher(value_matcher, effect_matcher));
-}
-
-
-Matcher<Node*> IsExternalConstant(
-    const Matcher<ExternalReference>& value_matcher) {
-  return MakeMatcher(new IsConstantMatcher<ExternalReference>(
-      IrOpcode::kExternalConstant, value_matcher));
-}
-
-
-Matcher<Node*> IsHeapConstant(
-    const Matcher<Unique<HeapObject> >& value_matcher) {
-  return MakeMatcher(new IsConstantMatcher<Unique<HeapObject> >(
-      IrOpcode::kHeapConstant, value_matcher));
-}
-
-
-Matcher<Node*> IsInt32Constant(const Matcher<int32_t>& value_matcher) {
-  return MakeMatcher(
-      new IsConstantMatcher<int32_t>(IrOpcode::kInt32Constant, value_matcher));
-}
-
-
-Matcher<Node*> IsInt64Constant(const Matcher<int64_t>& value_matcher) {
-  return MakeMatcher(
-      new IsConstantMatcher<int64_t>(IrOpcode::kInt64Constant, value_matcher));
-}
-
-
-Matcher<Node*> IsFloat32Constant(const Matcher<float>& value_matcher) {
-  return MakeMatcher(
-      new IsConstantMatcher<float>(IrOpcode::kFloat32Constant, value_matcher));
-}
-
-
-Matcher<Node*> IsFloat64Constant(const Matcher<double>& value_matcher) {
-  return MakeMatcher(
-      new IsConstantMatcher<double>(IrOpcode::kFloat64Constant, value_matcher));
-}
-
-
-Matcher<Node*> IsNumberConstant(const Matcher<double>& value_matcher) {
-  return MakeMatcher(
-      new IsConstantMatcher<double>(IrOpcode::kNumberConstant, value_matcher));
-}
-
-
-Matcher<Node*> IsPhi(const Matcher<MachineType>& type_matcher,
-                     const Matcher<Node*>& value0_matcher,
-                     const Matcher<Node*>& value1_matcher,
-                     const Matcher<Node*>& merge_matcher) {
-  return MakeMatcher(new IsPhiMatcher(type_matcher, value0_matcher,
-                                      value1_matcher, merge_matcher));
-}
-
-
-Matcher<Node*> IsProjection(const Matcher<size_t>& index_matcher,
-                            const Matcher<Node*>& base_matcher) {
-  return MakeMatcher(new IsProjectionMatcher(index_matcher, base_matcher));
-}
-
-
-Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
-                      const Matcher<Node*>& value0_matcher,
-                      const Matcher<Node*>& value1_matcher,
-                      const Matcher<Node*>& value2_matcher,
-                      const Matcher<Node*>& value3_matcher,
-                      const Matcher<Node*>& effect_matcher,
-                      const Matcher<Node*>& control_matcher) {
-  return MakeMatcher(new IsCallMatcher(
-      descriptor_matcher, value0_matcher, value1_matcher, value2_matcher,
-      value3_matcher, effect_matcher, control_matcher));
-}
-
-
-Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
-                      const Matcher<Node*>& base_matcher,
-                      const Matcher<Node*>& index_matcher,
-                      const Matcher<Node*>& effect_matcher) {
-  return MakeMatcher(new IsLoadMatcher(rep_matcher, base_matcher, index_matcher,
-                                       effect_matcher));
-}
-
-
-Matcher<Node*> IsStore(const Matcher<MachineType>& type_matcher,
-                       const Matcher<WriteBarrierKind>& write_barrier_matcher,
-                       const Matcher<Node*>& base_matcher,
-                       const Matcher<Node*>& index_matcher,
-                       const Matcher<Node*>& value_matcher,
-                       const Matcher<Node*>& effect_matcher,
-                       const Matcher<Node*>& control_matcher) {
-  return MakeMatcher(new IsStoreMatcher(
-      type_matcher, write_barrier_matcher, base_matcher, index_matcher,
-      value_matcher, effect_matcher, control_matcher));
-}
-
-
-#define IS_BINOP_MATCHER(Name)                                            \
-  Matcher<Node*> Is##Name(const Matcher<Node*>& lhs_matcher,              \
-                          const Matcher<Node*>& rhs_matcher) {            \
-    return MakeMatcher(                                                   \
-        new IsBinopMatcher(IrOpcode::k##Name, lhs_matcher, rhs_matcher)); \
-  }
-IS_BINOP_MATCHER(NumberLessThan)
-IS_BINOP_MATCHER(Word32And)
-IS_BINOP_MATCHER(Word32Sar)
-IS_BINOP_MATCHER(Word32Shl)
-IS_BINOP_MATCHER(Word32Ror)
-IS_BINOP_MATCHER(Word32Equal)
-IS_BINOP_MATCHER(Word64And)
-IS_BINOP_MATCHER(Word64Sar)
-IS_BINOP_MATCHER(Word64Shl)
-IS_BINOP_MATCHER(Word64Equal)
-IS_BINOP_MATCHER(Int32AddWithOverflow)
-IS_BINOP_MATCHER(Int32Mul)
-IS_BINOP_MATCHER(Uint32LessThanOrEqual)
-#undef IS_BINOP_MATCHER
-
-
-#define IS_UNOP_MATCHER(Name)                                                \
-  Matcher<Node*> Is##Name(const Matcher<Node*>& input_matcher) {             \
-    return MakeMatcher(new IsUnopMatcher(IrOpcode::k##Name, input_matcher)); \
-  }
-IS_UNOP_MATCHER(ChangeFloat64ToInt32)
-IS_UNOP_MATCHER(ChangeFloat64ToUint32)
-IS_UNOP_MATCHER(ChangeInt32ToFloat64)
-IS_UNOP_MATCHER(ChangeInt32ToInt64)
-IS_UNOP_MATCHER(ChangeUint32ToFloat64)
-IS_UNOP_MATCHER(ChangeUint32ToUint64)
-IS_UNOP_MATCHER(TruncateFloat64ToInt32)
-IS_UNOP_MATCHER(TruncateInt64ToInt32)
-IS_UNOP_MATCHER(Float64Sqrt)
-#undef IS_UNOP_MATCHER
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
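For reference, the matchers deleted above were consumed in gmock-style structural assertions against graph nodes. A minimal hypothetical sketch (the GraphTest fixture and its constant helpers come from the header deleted below; MachineOperatorBuilder construction details vary across V8 revisions and are an assumption here):

TEST_F(GraphTest, Word32AndMatches) {
  MachineOperatorBuilder machine;  // construction here is an assumption
  Node* p0 = Parameter(0);
  Node* n = graph()->NewNode(machine.Word32And(), p0, Int32Constant(0xff));
  // Matchers nest: each operand position takes its own Matcher<Node*>, and a
  // plain Node* converts to an exact-equality matcher.
  EXPECT_THAT(n, IsWord32And(p0, IsInt32Constant(0xff)));
}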
diff --git a/src/compiler/graph-unittest.h b/src/compiler/graph-unittest.h
deleted file mode 100644
index 1dc9c3d..0000000
--- a/src/compiler/graph-unittest.h
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_GRAPH_UNITTEST_H_
-#define V8_COMPILER_GRAPH_UNITTEST_H_
-
-#include "src/compiler/common-operator.h"
-#include "src/compiler/graph.h"
-#include "src/compiler/machine-operator.h"
-#include "src/test/test-utils.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class HeapObject;
-template <class T>
-class Unique;
-
-namespace compiler {
-
-using ::testing::Matcher;
-
-
-class GraphTest : public TestWithContext, public TestWithZone {
- public:
-  explicit GraphTest(int parameters = 1);
-  virtual ~GraphTest();
-
- protected:
-  Node* Parameter(int32_t index);
-  Node* Float32Constant(volatile float value);
-  Node* Float64Constant(volatile double value);
-  Node* Int32Constant(int32_t value);
-  Node* Int64Constant(int64_t value);
-  Node* NumberConstant(volatile double value);
-  Node* HeapConstant(const Unique<HeapObject>& value);
-  Node* FalseConstant();
-  Node* TrueConstant();
-
-  Matcher<Node*> IsFalseConstant();
-  Matcher<Node*> IsTrueConstant();
-
-  CommonOperatorBuilder* common() { return &common_; }
-  Graph* graph() { return &graph_; }
-
- private:
-  CommonOperatorBuilder common_;
-  Graph graph_;
-};
-
-
-Matcher<Node*> IsBranch(const Matcher<Node*>& value_matcher,
-                        const Matcher<Node*>& control_matcher);
-Matcher<Node*> IsMerge(const Matcher<Node*>& control0_matcher,
-                       const Matcher<Node*>& control1_matcher);
-Matcher<Node*> IsIfTrue(const Matcher<Node*>& control_matcher);
-Matcher<Node*> IsIfFalse(const Matcher<Node*>& control_matcher);
-Matcher<Node*> IsControlEffect(const Matcher<Node*>& control_matcher);
-Matcher<Node*> IsValueEffect(const Matcher<Node*>& value_matcher);
-Matcher<Node*> IsFinish(const Matcher<Node*>& value_matcher,
-                        const Matcher<Node*>& effect_matcher);
-Matcher<Node*> IsExternalConstant(
-    const Matcher<ExternalReference>& value_matcher);
-Matcher<Node*> IsHeapConstant(
-    const Matcher<Unique<HeapObject> >& value_matcher);
-Matcher<Node*> IsFloat32Constant(const Matcher<float>& value_matcher);
-Matcher<Node*> IsFloat64Constant(const Matcher<double>& value_matcher);
-Matcher<Node*> IsInt32Constant(const Matcher<int32_t>& value_matcher);
-Matcher<Node*> IsInt64Constant(const Matcher<int64_t>& value_matcher);
-Matcher<Node*> IsNumberConstant(const Matcher<double>& value_matcher);
-Matcher<Node*> IsPhi(const Matcher<MachineType>& type_matcher,
-                     const Matcher<Node*>& value0_matcher,
-                     const Matcher<Node*>& value1_matcher,
-                     const Matcher<Node*>& merge_matcher);
-Matcher<Node*> IsProjection(const Matcher<size_t>& index_matcher,
-                            const Matcher<Node*>& base_matcher);
-Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
-                      const Matcher<Node*>& value0_matcher,
-                      const Matcher<Node*>& value1_matcher,
-                      const Matcher<Node*>& value2_matcher,
-                      const Matcher<Node*>& value3_matcher,
-                      const Matcher<Node*>& effect_matcher,
-                      const Matcher<Node*>& control_matcher);
-
-Matcher<Node*> IsNumberLessThan(const Matcher<Node*>& lhs_matcher,
-                                const Matcher<Node*>& rhs_matcher);
-
-Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
-                      const Matcher<Node*>& base_matcher,
-                      const Matcher<Node*>& index_matcher,
-                      const Matcher<Node*>& effect_matcher);
-Matcher<Node*> IsStore(const Matcher<MachineType>& type_matcher,
-                       const Matcher<WriteBarrierKind>& write_barrier_matcher,
-                       const Matcher<Node*>& base_matcher,
-                       const Matcher<Node*>& index_matcher,
-                       const Matcher<Node*>& value_matcher,
-                       const Matcher<Node*>& effect_matcher,
-                       const Matcher<Node*>& control_matcher);
-Matcher<Node*> IsWord32And(const Matcher<Node*>& lhs_matcher,
-                           const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsWord32Sar(const Matcher<Node*>& lhs_matcher,
-                           const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsWord32Shl(const Matcher<Node*>& lhs_matcher,
-                           const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsWord32Ror(const Matcher<Node*>& lhs_matcher,
-                           const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsWord32Equal(const Matcher<Node*>& lhs_matcher,
-                             const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsWord64And(const Matcher<Node*>& lhs_matcher,
-                           const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsWord64Shl(const Matcher<Node*>& lhs_matcher,
-                           const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsWord64Sar(const Matcher<Node*>& lhs_matcher,
-                           const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsWord64Equal(const Matcher<Node*>& lhs_matcher,
-                             const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsInt32AddWithOverflow(const Matcher<Node*>& lhs_matcher,
-                                      const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsInt32Mul(const Matcher<Node*>& lhs_matcher,
-                          const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsUint32LessThanOrEqual(const Matcher<Node*>& lhs_matcher,
-                                       const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsChangeFloat64ToInt32(const Matcher<Node*>& input_matcher);
-Matcher<Node*> IsChangeFloat64ToUint32(const Matcher<Node*>& input_matcher);
-Matcher<Node*> IsChangeInt32ToFloat64(const Matcher<Node*>& input_matcher);
-Matcher<Node*> IsChangeInt32ToInt64(const Matcher<Node*>& input_matcher);
-Matcher<Node*> IsChangeUint32ToFloat64(const Matcher<Node*>& input_matcher);
-Matcher<Node*> IsChangeUint32ToUint64(const Matcher<Node*>& input_matcher);
-Matcher<Node*> IsTruncateFloat64ToInt32(const Matcher<Node*>& input_matcher);
-Matcher<Node*> IsTruncateInt64ToInt32(const Matcher<Node*>& input_matcher);
-Matcher<Node*> IsFloat64Sqrt(const Matcher<Node*>& input_matcher);
-
-}  //  namespace compiler
-}  //  namespace internal
-}  //  namespace v8
-
-#endif  // V8_COMPILER_GRAPH_UNITTEST_H_
diff --git a/src/compiler/graph-visualizer.cc b/src/compiler/graph-visualizer.cc
index 10d6698..e018c7a 100644
--- a/src/compiler/graph-visualizer.cc
+++ b/src/compiler/graph-visualizer.cc
@@ -4,9 +4,10 @@
 
 #include "src/compiler/graph-visualizer.h"
 
-#include "src/compiler/generic-algorithm.h"
-#include "src/compiler/generic-node.h"
-#include "src/compiler/generic-node-inl.h"
+#include <sstream>
+#include <string>
+
+#include "src/code-stubs.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/graph-inl.h"
 #include "src/compiler/node.h"
@@ -14,33 +15,223 @@
 #include "src/compiler/node-properties-inl.h"
 #include "src/compiler/opcodes.h"
 #include "src/compiler/operator.h"
+#include "src/compiler/register-allocator.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/scheduler.h"
 #include "src/ostreams.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
+static int SafeId(Node* node) { return node == NULL ? -1 : node->id(); }
+static const char* SafeMnemonic(Node* node) {
+  return node == NULL ? "null" : node->op()->mnemonic();
+}
+
 #define DEAD_COLOR "#999999"
 
-class GraphVisualizer : public NullNodeVisitor {
+class AllNodes {
  public:
-  GraphVisualizer(OStream& os, Zone* zone, const Graph* graph);  // NOLINT
+  enum State { kDead, kGray, kLive };
+
+  AllNodes(Zone* local_zone, const Graph* graph)
+      : state(graph->NodeCount(), kDead, local_zone),
+        live(local_zone),
+        gray(local_zone) {
+    Node* end = graph->end();
+    state[end->id()] = kLive;
+    live.push_back(end);
+    // Find all live nodes reachable from end.
+    for (size_t i = 0; i < live.size(); i++) {
+      for (Node* const input : live[i]->inputs()) {
+        if (input == NULL) {
+          // TODO(titzer): print a warning.
+          continue;
+        }
+        if (input->id() >= graph->NodeCount()) {
+          // TODO(titzer): print a warning.
+          continue;
+        }
+        if (state[input->id()] != kLive) {
+          live.push_back(input);
+          state[input->id()] = kLive;
+        }
+      }
+    }
+
+    // Find all nodes that are not reachable from end but use live nodes.
+    for (size_t i = 0; i < live.size(); i++) {
+      for (Node* const use : live[i]->uses()) {
+        if (state[use->id()] == kDead) {
+          gray.push_back(use);
+          state[use->id()] = kGray;
+        }
+      }
+    }
+  }
+
+  bool IsLive(Node* node) {
+    return node != NULL && node->id() < static_cast<int>(state.size()) &&
+           state[node->id()] == kLive;
+  }
+
+  ZoneVector<State> state;
+  NodeVector live;
+  NodeVector gray;
+};
+
+
+class Escaped {
+ public:
+  explicit Escaped(const std::ostringstream& os,
+                   const char* escaped_chars = "<>|{}")
+      : str_(os.str()), escaped_chars_(escaped_chars) {}
+
+  friend std::ostream& operator<<(std::ostream& os, const Escaped& e) {
+    for (std::string::const_iterator i = e.str_.begin(); i != e.str_.end();
+         ++i) {
+      if (e.needs_escape(*i)) os << "\\";
+      os << *i;
+    }
+    return os;
+  }
+
+ private:
+  bool needs_escape(char ch) const {
+    for (size_t i = 0; i < strlen(escaped_chars_); ++i) {
+      if (ch == escaped_chars_[i]) return true;
+    }
+    return false;
+  }
+
+  const std::string str_;
+  const char* const escaped_chars_;
+};
+
+class JSONGraphNodeWriter {
+ public:
+  JSONGraphNodeWriter(std::ostream& os, Zone* zone, const Graph* graph)
+      : os_(os), all_(zone, graph), first_node_(true) {}
+
+  void Print() {
+    for (Node* const node : all_.live) PrintNode(node);
+  }
+
+  void PrintNode(Node* node) {
+    if (first_node_) {
+      first_node_ = false;
+    } else {
+      os_ << ",";
+    }
+    std::ostringstream label;
+    label << *node->op();
+    os_ << "{\"id\":" << SafeId(node) << ",\"label\":\"" << Escaped(label, "\"")
+        << "\"";
+    IrOpcode::Value opcode = node->opcode();
+    if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
+      os_ << ",\"rankInputs\":[0," << NodeProperties::FirstControlIndex(node)
+          << "]";
+      os_ << ",\"rankWithInput\":[" << NodeProperties::FirstControlIndex(node)
+          << "]";
+    } else if (opcode == IrOpcode::kIfTrue || opcode == IrOpcode::kIfFalse ||
+               opcode == IrOpcode::kLoop) {
+      os_ << ",\"rankInputs\":[" << NodeProperties::FirstControlIndex(node)
+          << "]";
+    }
+    if (opcode == IrOpcode::kBranch) {
+      os_ << ",\"rankInputs\":[0]";
+    }
+    os_ << ",\"opcode\":\"" << IrOpcode::Mnemonic(node->opcode()) << "\"";
+    os_ << ",\"control\":" << (NodeProperties::IsControl(node) ? "true"
+                                                               : "false");
+    os_ << "}";
+  }
+
+ private:
+  std::ostream& os_;
+  AllNodes all_;
+  bool first_node_;
+
+  DISALLOW_COPY_AND_ASSIGN(JSONGraphNodeWriter);
+};
+
+
+class JSONGraphEdgeWriter {
+ public:
+  JSONGraphEdgeWriter(std::ostream& os, Zone* zone, const Graph* graph)
+      : os_(os), all_(zone, graph), first_edge_(true) {}
+
+  void Print() {
+    for (Node* const node : all_.live) PrintEdges(node);
+  }
+
+  void PrintEdges(Node* node) {
+    for (int i = 0; i < node->InputCount(); i++) {
+      Node* input = node->InputAt(i);
+      if (input == NULL) continue;
+      PrintEdge(node, i, input);
+    }
+  }
+
+  void PrintEdge(Node* from, int index, Node* to) {
+    if (first_edge_) {
+      first_edge_ = false;
+    } else {
+      os_ << ",";
+    }
+    const char* edge_type = NULL;
+    if (index < NodeProperties::FirstValueIndex(from)) {
+      edge_type = "unknown";
+    } else if (index < NodeProperties::FirstContextIndex(from)) {
+      edge_type = "value";
+    } else if (index < NodeProperties::FirstFrameStateIndex(from)) {
+      edge_type = "context";
+    } else if (index < NodeProperties::FirstEffectIndex(from)) {
+      edge_type = "frame-state";
+    } else if (index < NodeProperties::FirstControlIndex(from)) {
+      edge_type = "effect";
+    } else {
+      edge_type = "control";
+    }
+    os_ << "{\"source\":" << SafeId(to) << ",\"target\":" << SafeId(from)
+        << ",\"index\":" << index << ",\"type\":\"" << edge_type << "\"}";
+  }
+
+ private:
+  std::ostream& os_;
+  AllNodes all_;
+  bool first_edge_;
+
+  DISALLOW_COPY_AND_ASSIGN(JSONGraphEdgeWriter);
+};
+
+
+std::ostream& operator<<(std::ostream& os, const AsJSON& ad) {
+  Zone tmp_zone(ad.graph.zone()->isolate());
+  os << "{\"nodes\":[";
+  JSONGraphNodeWriter(os, &tmp_zone, &ad.graph).Print();
+  os << "],\"edges\":[";
+  JSONGraphEdgeWriter(os, &tmp_zone, &ad.graph).Print();
+  os << "]}";
+  return os;
+}
+
+
+class GraphVisualizer {
+ public:
+  GraphVisualizer(std::ostream& os, Zone* zone, const Graph* graph)
+      : all_(zone, graph), os_(os) {}
 
   void Print();
 
-  GenericGraphVisit::Control Pre(Node* node);
-  GenericGraphVisit::Control PreEdge(Node* from, int index, Node* to);
+  void PrintNode(Node* node, bool gray);
 
  private:
-  void AnnotateNode(Node* node);
-  void PrintEdge(Node::Edge edge);
+  void PrintEdge(Edge edge);
 
-  Zone* zone_;
-  NodeSet all_nodes_;
-  NodeSet white_nodes_;
-  bool use_to_def_;
-  OStream& os_;
-  const Graph* const graph_;
+  AllNodes all_;
+  std::ostream& os_;
 
   DISALLOW_COPY_AND_ASSIGN(GraphVisualizer);
 };
@@ -49,92 +240,24 @@
 static Node* GetControlCluster(Node* node) {
   if (OperatorProperties::IsBasicBlockBegin(node->op())) {
     return node;
-  } else if (OperatorProperties::GetControlInputCount(node->op()) == 1) {
+  } else if (node->op()->ControlInputCount() == 1) {
     Node* control = NodeProperties::GetControlInput(node, 0);
-    return OperatorProperties::IsBasicBlockBegin(control->op()) ? control
-                                                                : NULL;
+    return control != NULL &&
+                   OperatorProperties::IsBasicBlockBegin(control->op())
+               ? control
+               : NULL;
   } else {
     return NULL;
   }
 }
 
 
-GenericGraphVisit::Control GraphVisualizer::Pre(Node* node) {
-  if (all_nodes_.count(node) == 0) {
-    Node* control_cluster = GetControlCluster(node);
-    if (control_cluster != NULL) {
-      os_ << "  subgraph cluster_BasicBlock" << control_cluster->id() << " {\n";
-    }
-    os_ << "  ID" << node->id() << " [\n";
-    AnnotateNode(node);
-    os_ << "  ]\n";
-    if (control_cluster != NULL) os_ << "  }\n";
-    all_nodes_.insert(node);
-    if (use_to_def_) white_nodes_.insert(node);
+void GraphVisualizer::PrintNode(Node* node, bool gray) {
+  Node* control_cluster = GetControlCluster(node);
+  if (control_cluster != NULL) {
+    os_ << "  subgraph cluster_BasicBlock" << control_cluster->id() << " {\n";
   }
-  return GenericGraphVisit::CONTINUE;
-}
-
-
-GenericGraphVisit::Control GraphVisualizer::PreEdge(Node* from, int index,
-                                                    Node* to) {
-  if (use_to_def_) return GenericGraphVisit::CONTINUE;
-  // When going from def to use, only consider white -> other edges, which are
-  // the dead nodes that use live nodes. We're probably not interested in
-  // dead nodes that only use other dead nodes.
-  if (white_nodes_.count(from) > 0) return GenericGraphVisit::CONTINUE;
-  return GenericGraphVisit::SKIP;
-}
-
-
-class Escaped {
- public:
-  explicit Escaped(const OStringStream& os) : str_(os.c_str()) {}
-
-  friend OStream& operator<<(OStream& os, const Escaped& e) {
-    for (const char* s = e.str_; *s != '\0'; ++s) {
-      if (needs_escape(*s)) os << "\\";
-      os << *s;
-    }
-    return os;
-  }
-
- private:
-  static bool needs_escape(char ch) {
-    switch (ch) {
-      case '>':
-      case '<':
-      case '|':
-      case '}':
-      case '{':
-        return true;
-      default:
-        return false;
-    }
-  }
-
-  const char* const str_;
-};
-
-
-static bool IsLikelyBackEdge(Node* from, int index, Node* to) {
-  if (from->opcode() == IrOpcode::kPhi ||
-      from->opcode() == IrOpcode::kEffectPhi) {
-    Node* control = NodeProperties::GetControlInput(from, 0);
-    return control->opcode() != IrOpcode::kMerge && control != to && index != 0;
-  } else if (from->opcode() == IrOpcode::kLoop) {
-    return index != 0;
-  } else {
-    return false;
-  }
-}
-
-
-void GraphVisualizer::AnnotateNode(Node* node) {
-  if (!use_to_def_) {
-    os_ << "    style=\"filled\"\n"
-        << "    fillcolor=\"" DEAD_COLOR "\"\n";
-  }
+  os_ << "  ID" << SafeId(node) << " [\n";
 
   os_ << "    shape=\"record\"\n";
   switch (node->opcode()) {
@@ -153,68 +276,89 @@
       break;
   }
 
-  OStringStream label;
-  label << *node->op();
-  os_ << "    label=\"{{#" << node->id() << ":" << Escaped(label);
+  if (gray) {
+    os_ << "    style=\"filled\"\n"
+        << "    fillcolor=\"" DEAD_COLOR "\"\n";
+  }
 
-  InputIter i = node->inputs().begin();
-  for (int j = OperatorProperties::GetValueInputCount(node->op()); j > 0;
-       ++i, j--) {
-    os_ << "|<I" << i.index() << ">#" << (*i)->id();
+  std::ostringstream label;
+  label << *node->op();
+  os_ << "    label=\"{{#" << SafeId(node) << ":" << Escaped(label);
+
+  auto i = node->input_edges().begin();
+  for (int j = node->op()->ValueInputCount(); j > 0; ++i, j--) {
+    os_ << "|<I" << (*i).index() << ">#" << SafeId((*i).to());
   }
   for (int j = OperatorProperties::GetContextInputCount(node->op()); j > 0;
        ++i, j--) {
-    os_ << "|<I" << i.index() << ">X #" << (*i)->id();
+    os_ << "|<I" << (*i).index() << ">X #" << SafeId((*i).to());
   }
   for (int j = OperatorProperties::GetFrameStateInputCount(node->op()); j > 0;
        ++i, j--) {
-    os_ << "|<I" << i.index() << ">F #" << (*i)->id();
+    os_ << "|<I" << (*i).index() << ">F #" << SafeId((*i).to());
   }
-  for (int j = OperatorProperties::GetEffectInputCount(node->op()); j > 0;
-       ++i, j--) {
-    os_ << "|<I" << i.index() << ">E #" << (*i)->id();
+  for (int j = node->op()->EffectInputCount(); j > 0; ++i, j--) {
+    os_ << "|<I" << (*i).index() << ">E #" << SafeId((*i).to());
   }
 
-  if (!use_to_def_ || OperatorProperties::IsBasicBlockBegin(node->op()) ||
+  if (OperatorProperties::IsBasicBlockBegin(node->op()) ||
       GetControlCluster(node) == NULL) {
-    for (int j = OperatorProperties::GetControlInputCount(node->op()); j > 0;
-         ++i, j--) {
-      os_ << "|<I" << i.index() << ">C #" << (*i)->id();
+    for (int j = node->op()->ControlInputCount(); j > 0; ++i, j--) {
+      os_ << "|<I" << (*i).index() << ">C #" << SafeId((*i).to());
     }
   }
   os_ << "}";
 
-  if (FLAG_trace_turbo_types && !NodeProperties::IsControl(node)) {
+  if (FLAG_trace_turbo_types && NodeProperties::IsTyped(node)) {
     Bounds bounds = NodeProperties::GetBounds(node);
-    OStringStream upper;
+    std::ostringstream upper;
     bounds.upper->PrintTo(upper);
-    OStringStream lower;
+    std::ostringstream lower;
     bounds.lower->PrintTo(lower);
     os_ << "|" << Escaped(upper) << "|" << Escaped(lower);
   }
   os_ << "}\"\n";
+
+  os_ << "  ]\n";
+  if (control_cluster != NULL) os_ << "  }\n";
 }
 
 
-void GraphVisualizer::PrintEdge(Node::Edge edge) {
+static bool IsLikelyBackEdge(Node* from, int index, Node* to) {
+  if (from->opcode() == IrOpcode::kPhi ||
+      from->opcode() == IrOpcode::kEffectPhi) {
+    Node* control = NodeProperties::GetControlInput(from, 0);
+    return control != NULL && control->opcode() != IrOpcode::kMerge &&
+           control != to && index != 0;
+  } else if (from->opcode() == IrOpcode::kLoop) {
+    return index != 0;
+  } else {
+    return false;
+  }
+}
+
+
+void GraphVisualizer::PrintEdge(Edge edge) {
   Node* from = edge.from();
   int index = edge.index();
   Node* to = edge.to();
+
+  if (!all_.IsLive(to)) return;  // Skip inputs that are dead or NULL.
+
   bool unconstrained = IsLikelyBackEdge(from, index, to);
-  os_ << "  ID" << from->id();
-  if (all_nodes_.count(to) == 0) {
-    os_ << ":I" << index << ":n -> DEAD_INPUT";
-  } else if (OperatorProperties::IsBasicBlockBegin(from->op()) ||
-             GetControlCluster(from) == NULL ||
-             (OperatorProperties::GetControlInputCount(from->op()) > 0 &&
-              NodeProperties::GetControlInput(from) != to)) {
-    os_ << ":I" << index << ":n -> ID" << to->id() << ":s"
+  os_ << "  ID" << SafeId(from);
+
+  if (OperatorProperties::IsBasicBlockBegin(from->op()) ||
+      GetControlCluster(from) == NULL ||
+      (from->op()->ControlInputCount() > 0 &&
+       NodeProperties::GetControlInput(from) != to)) {
+    os_ << ":I" << index << ":n -> ID" << SafeId(to) << ":s"
         << "[" << (unconstrained ? "constraint=false, " : "")
         << (NodeProperties::IsControlEdge(edge) ? "style=bold, " : "")
         << (NodeProperties::IsEffectEdge(edge) ? "style=dotted, " : "")
         << (NodeProperties::IsContextEdge(edge) ? "style=dashed, " : "") << "]";
   } else {
-    os_ << " -> ID" << to->id() << ":s [color=transparent, "
+    os_ << " -> ID" << SafeId(to) << ":s [color=transparent, "
         << (unconstrained ? "constraint=false, " : "")
         << (NodeProperties::IsControlEdge(edge) ? "style=dashed, " : "") << "]";
   }
@@ -233,50 +377,456 @@
       << "  \n";
 
   // Make sure all nodes have been output before writing out the edges.
-  use_to_def_ = true;
-  // TODO(svenpanne) Remove the need for the const_casts.
-  const_cast<Graph*>(graph_)->VisitNodeInputsFromEnd(this);
-  white_nodes_.insert(const_cast<Graph*>(graph_)->start());
-
-  // Visit all uses of white nodes.
-  use_to_def_ = false;
-  GenericGraphVisit::Visit<GraphVisualizer, NodeUseIterationTraits<Node> >(
-      const_cast<Graph*>(graph_), zone_, white_nodes_.begin(),
-      white_nodes_.end(), this);
-
-  os_ << "  DEAD_INPUT [\n"
-      << "    style=\"filled\" \n"
-      << "    fillcolor=\"" DEAD_COLOR "\"\n"
-      << "  ]\n"
-      << "\n";
+  for (Node* const node : all_.live) PrintNode(node, false);
+  for (Node* const node : all_.gray) PrintNode(node, true);
 
   // With all the nodes written, add the edges.
-  for (NodeSetIter i = all_nodes_.begin(); i != all_nodes_.end(); ++i) {
-    Node::Inputs inputs = (*i)->inputs();
-    for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
-         ++iter) {
-      PrintEdge(iter.edge());
+  for (Node* const node : all_.live) {
+    for (Edge edge : node->use_edges()) {
+      PrintEdge(edge);
     }
   }
   os_ << "}\n";
 }
 
 
-GraphVisualizer::GraphVisualizer(OStream& os, Zone* zone,
-                                 const Graph* graph)  // NOLINT
-    : zone_(zone),
-      all_nodes_(NodeSet::key_compare(), NodeSet::allocator_type(zone)),
-      white_nodes_(NodeSet::key_compare(), NodeSet::allocator_type(zone)),
-      use_to_def_(true),
-      os_(os),
-      graph_(graph) {}
-
-
-OStream& operator<<(OStream& os, const AsDOT& ad) {
+std::ostream& operator<<(std::ostream& os, const AsDOT& ad) {
   Zone tmp_zone(ad.graph.zone()->isolate());
   GraphVisualizer(os, &tmp_zone, &ad.graph).Print();
   return os;
 }
+
+
+class GraphC1Visualizer {
+ public:
+  GraphC1Visualizer(std::ostream& os, Zone* zone);  // NOLINT
+
+  void PrintCompilation(const CompilationInfo* info);
+  void PrintSchedule(const char* phase, const Schedule* schedule,
+                     const SourcePositionTable* positions,
+                     const InstructionSequence* instructions);
+  void PrintAllocator(const char* phase, const RegisterAllocator* allocator);
+  Zone* zone() const { return zone_; }
+
+ private:
+  void PrintIndent();
+  void PrintStringProperty(const char* name, const char* value);
+  void PrintLongProperty(const char* name, int64_t value);
+  void PrintIntProperty(const char* name, int value);
+  void PrintBlockProperty(const char* name, BasicBlock::Id block_id);
+  void PrintNodeId(Node* n);
+  void PrintNode(Node* n);
+  void PrintInputs(Node* n);
+  void PrintInputs(InputIter* i, int count, const char* prefix);
+  void PrintType(Node* node);
+
+  void PrintLiveRange(LiveRange* range, const char* type);
+  class Tag FINAL BASE_EMBEDDED {
+   public:
+    Tag(GraphC1Visualizer* visualizer, const char* name) {
+      name_ = name;
+      visualizer_ = visualizer;
+      visualizer->PrintIndent();
+      visualizer_->os_ << "begin_" << name << "\n";
+      visualizer->indent_++;
+    }
+
+    ~Tag() {
+      visualizer_->indent_--;
+      visualizer_->PrintIndent();
+      visualizer_->os_ << "end_" << name_ << "\n";
+      DCHECK(visualizer_->indent_ >= 0);
+    }
+
+   private:
+    GraphC1Visualizer* visualizer_;
+    const char* name_;
+  };
+
+  std::ostream& os_;
+  int indent_;
+  Zone* zone_;
+
+  DISALLOW_COPY_AND_ASSIGN(GraphC1Visualizer);
+};
+
+
+void GraphC1Visualizer::PrintIndent() {
+  for (int i = 0; i < indent_; i++) {
+    os_ << "  ";
+  }
+}
+
+
+GraphC1Visualizer::GraphC1Visualizer(std::ostream& os, Zone* zone)
+    : os_(os), indent_(0), zone_(zone) {}
+
+
+void GraphC1Visualizer::PrintStringProperty(const char* name,
+                                            const char* value) {
+  PrintIndent();
+  os_ << name << " \"" << value << "\"\n";
+}
+
+
+void GraphC1Visualizer::PrintLongProperty(const char* name, int64_t value) {
+  PrintIndent();
+  os_ << name << " " << static_cast<int>(value / 1000) << "\n";
+}
+
+
+void GraphC1Visualizer::PrintBlockProperty(const char* name,
+                                           BasicBlock::Id block_id) {
+  PrintIndent();
+  os_ << name << " \"B" << block_id << "\"\n";
+}
+
+
+void GraphC1Visualizer::PrintIntProperty(const char* name, int value) {
+  PrintIndent();
+  os_ << name << " " << value << "\n";
+}
+
+
+void GraphC1Visualizer::PrintCompilation(const CompilationInfo* info) {
+  Tag tag(this, "compilation");
+  if (info->IsOptimizing()) {
+    Handle<String> name = info->function()->debug_name();
+    PrintStringProperty("name", name->ToCString().get());
+    PrintIndent();
+    os_ << "method \"" << name->ToCString().get() << ":"
+        << info->optimization_id() << "\"\n";
+  } else {
+    CodeStub::Major major_key = info->code_stub()->MajorKey();
+    PrintStringProperty("name", CodeStub::MajorName(major_key, false));
+    PrintStringProperty("method", "stub");
+  }
+  PrintLongProperty("date",
+                    static_cast<int64_t>(base::OS::TimeCurrentMillis()));
+}
+
+
+void GraphC1Visualizer::PrintNodeId(Node* n) { os_ << "n" << SafeId(n); }
+
+
+void GraphC1Visualizer::PrintNode(Node* n) {
+  PrintNodeId(n);
+  os_ << " " << *n->op() << " ";
+  PrintInputs(n);
+}
+
+
+void GraphC1Visualizer::PrintInputs(InputIter* i, int count,
+                                    const char* prefix) {
+  if (count > 0) {
+    os_ << prefix;
+  }
+  while (count > 0) {
+    os_ << " ";
+    PrintNodeId(**i);
+    ++(*i);
+    count--;
+  }
+}
+
+
+void GraphC1Visualizer::PrintInputs(Node* node) {
+  auto i = node->inputs().begin();
+  PrintInputs(&i, node->op()->ValueInputCount(), " ");
+  PrintInputs(&i, OperatorProperties::GetContextInputCount(node->op()),
+              " Ctx:");
+  PrintInputs(&i, OperatorProperties::GetFrameStateInputCount(node->op()),
+              " FS:");
+  PrintInputs(&i, node->op()->EffectInputCount(), " Eff:");
+  PrintInputs(&i, node->op()->ControlInputCount(), " Ctrl:");
+}
+
+
+void GraphC1Visualizer::PrintType(Node* node) {
+  if (NodeProperties::IsTyped(node)) {
+    Bounds bounds = NodeProperties::GetBounds(node);
+    os_ << " type:";
+    bounds.upper->PrintTo(os_);
+    os_ << "..";
+    bounds.lower->PrintTo(os_);
+  }
+}
+
+
+void GraphC1Visualizer::PrintSchedule(const char* phase,
+                                      const Schedule* schedule,
+                                      const SourcePositionTable* positions,
+                                      const InstructionSequence* instructions) {
+  Tag tag(this, "cfg");
+  PrintStringProperty("name", phase);
+  const BasicBlockVector* rpo = schedule->rpo_order();
+  for (size_t i = 0; i < rpo->size(); i++) {
+    BasicBlock* current = (*rpo)[i];
+    Tag block_tag(this, "block");
+    PrintBlockProperty("name", current->id());
+    PrintIntProperty("from_bci", -1);
+    PrintIntProperty("to_bci", -1);
+
+    PrintIndent();
+    os_ << "predecessors";
+    for (BasicBlock::Predecessors::iterator j = current->predecessors_begin();
+         j != current->predecessors_end(); ++j) {
+      os_ << " \"B" << (*j)->id() << "\"";
+    }
+    os_ << "\n";
+
+    PrintIndent();
+    os_ << "successors";
+    for (BasicBlock::Successors::iterator j = current->successors_begin();
+         j != current->successors_end(); ++j) {
+      os_ << " \"B" << (*j)->id() << "\"";
+    }
+    os_ << "\n";
+
+    PrintIndent();
+    os_ << "xhandlers\n";
+
+    PrintIndent();
+    os_ << "flags\n";
+
+    if (current->dominator() != NULL) {
+      PrintBlockProperty("dominator", current->dominator()->id());
+    }
+
+    PrintIntProperty("loop_depth", current->loop_depth());
+
+    const InstructionBlock* instruction_block =
+        instructions->InstructionBlockAt(current->GetRpoNumber());
+    if (instruction_block->code_start() >= 0) {
+      int first_index = instruction_block->first_instruction_index();
+      int last_index = instruction_block->last_instruction_index();
+      PrintIntProperty("first_lir_id", LifetimePosition::FromInstructionIndex(
+                                           first_index).Value());
+      PrintIntProperty("last_lir_id", LifetimePosition::FromInstructionIndex(
+                                          last_index).Value());
+    }
+
+    {
+      Tag states_tag(this, "states");
+      Tag locals_tag(this, "locals");
+      int total = 0;
+      for (BasicBlock::const_iterator i = current->begin(); i != current->end();
+           ++i) {
+        if ((*i)->opcode() == IrOpcode::kPhi) total++;
+      }
+      PrintIntProperty("size", total);
+      PrintStringProperty("method", "None");
+      int index = 0;
+      for (BasicBlock::const_iterator i = current->begin(); i != current->end();
+           ++i) {
+        if ((*i)->opcode() != IrOpcode::kPhi) continue;
+        PrintIndent();
+        os_ << index << " ";
+        PrintNodeId(*i);
+        os_ << " [";
+        PrintInputs(*i);
+        os_ << "]\n";
+        index++;
+      }
+    }
+
+    {
+      Tag HIR_tag(this, "HIR");
+      for (BasicBlock::const_iterator i = current->begin(); i != current->end();
+           ++i) {
+        Node* node = *i;
+        if (node->opcode() == IrOpcode::kPhi) continue;
+        int uses = node->UseCount();
+        PrintIndent();
+        os_ << "0 " << uses << " ";
+        PrintNode(node);
+        if (FLAG_trace_turbo_types) {
+          os_ << " ";
+          PrintType(node);
+        }
+        if (positions != NULL) {
+          SourcePosition position = positions->GetSourcePosition(node);
+          if (!position.IsUnknown()) {
+            DCHECK(!position.IsInvalid());
+            os_ << " pos:" << position.raw();
+          }
+        }
+        os_ << " <|@\n";
+      }
+
+      BasicBlock::Control control = current->control();
+      if (control != BasicBlock::kNone) {
+        PrintIndent();
+        os_ << "0 0 ";
+        if (current->control_input() != NULL) {
+          PrintNode(current->control_input());
+        } else {
+          os_ << -1 - current->id().ToInt() << " Goto";
+        }
+        os_ << " ->";
+        for (BasicBlock::Successors::iterator j = current->successors_begin();
+             j != current->successors_end(); ++j) {
+          os_ << " B" << (*j)->id();
+        }
+        if (FLAG_trace_turbo_types && current->control_input() != NULL) {
+          os_ << " ";
+          PrintType(current->control_input());
+        }
+        os_ << " <|@\n";
+      }
+    }
+
+    if (instructions != NULL) {
+      Tag LIR_tag(this, "LIR");
+      for (int j = instruction_block->first_instruction_index();
+           j <= instruction_block->last_instruction_index(); j++) {
+        PrintIndent();
+        PrintableInstruction printable = {RegisterConfiguration::ArchDefault(),
+                                          instructions->InstructionAt(j)};
+        os_ << j << " " << printable << " <|@\n";
+      }
+    }
+  }
+}
+
+
+void GraphC1Visualizer::PrintAllocator(const char* phase,
+                                       const RegisterAllocator* allocator) {
+  Tag tag(this, "intervals");
+  PrintStringProperty("name", phase);
+
+  for (auto range : allocator->fixed_double_live_ranges()) {
+    PrintLiveRange(range, "fixed");
+  }
+
+  for (auto range : allocator->fixed_live_ranges()) {
+    PrintLiveRange(range, "fixed");
+  }
+
+  for (auto range : allocator->live_ranges()) {
+    PrintLiveRange(range, "object");
+  }
+}
+
+
+void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type) {
+  if (range != NULL && !range->IsEmpty()) {
+    PrintIndent();
+    os_ << range->id() << " " << type;
+    if (range->HasRegisterAssigned()) {
+      InstructionOperand* op = range->CreateAssignedOperand(zone());
+      int assigned_reg = op->index();
+      if (op->IsDoubleRegister()) {
+        os_ << " \"" << DoubleRegister::AllocationIndexToString(assigned_reg)
+            << "\"";
+      } else {
+        DCHECK(op->IsRegister());
+        os_ << " \"" << Register::AllocationIndexToString(assigned_reg) << "\"";
+      }
+    } else if (range->IsSpilled()) {
+      int index = -1;
+      if (range->TopLevel()->HasSpillRange()) {
+        index = kMaxInt;  // This hasn't been set yet.
+      } else {
+        index = range->TopLevel()->GetSpillOperand()->index();
+      }
+      if (range->TopLevel()->Kind() == DOUBLE_REGISTERS) {
+        os_ << " \"double_stack:" << index << "\"";
+      } else if (range->TopLevel()->Kind() == GENERAL_REGISTERS) {
+        os_ << " \"stack:" << index << "\"";
+      } else {
+        os_ << " \"const(nostack):" << index << "\"";
+      }
+    }
+    int parent_index = -1;
+    if (range->IsChild()) {
+      parent_index = range->parent()->id();
+    } else {
+      parent_index = range->id();
+    }
+    InstructionOperand* op = range->FirstHint();
+    int hint_index = -1;
+    if (op != NULL && op->IsUnallocated()) {
+      hint_index = UnallocatedOperand::cast(op)->virtual_register();
+    }
+    os_ << " " << parent_index << " " << hint_index;
+    UseInterval* cur_interval = range->first_interval();
+    while (cur_interval != NULL && range->Covers(cur_interval->start())) {
+      os_ << " [" << cur_interval->start().Value() << ", "
+          << cur_interval->end().Value() << "[";
+      cur_interval = cur_interval->next();
+    }
+
+    UsePosition* current_pos = range->first_pos();
+    while (current_pos != NULL) {
+      if (current_pos->RegisterIsBeneficial() || FLAG_trace_all_uses) {
+        os_ << " " << current_pos->pos().Value() << " M";
+      }
+      current_pos = current_pos->next();
+    }
+
+    os_ << " \"\"\n";
+  }
+}
+
+
+std::ostream& operator<<(std::ostream& os, const AsC1VCompilation& ac) {
+  Zone tmp_zone(ac.info_->isolate());
+  GraphC1Visualizer(os, &tmp_zone).PrintCompilation(ac.info_);
+  return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const AsC1V& ac) {
+  Zone tmp_zone(ac.schedule_->zone()->isolate());
+  GraphC1Visualizer(os, &tmp_zone)
+      .PrintSchedule(ac.phase_, ac.schedule_, ac.positions_, ac.instructions_);
+  return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const AsC1VAllocator& ac) {
+  Zone tmp_zone(ac.allocator_->code()->zone()->isolate());
+  GraphC1Visualizer(os, &tmp_zone).PrintAllocator(ac.phase_, ac.allocator_);
+  return os;
+}
+
+const int kUnvisited = 0;
+const int kOnStack = 1;
+const int kVisited = 2;
+
+std::ostream& operator<<(std::ostream& os, const AsRPO& ar) {
+  Zone local_zone(ar.graph.zone()->isolate());
+  ZoneVector<byte> state(ar.graph.NodeCount(), kUnvisited, &local_zone);
+  ZoneStack<Node*> stack(&local_zone);
+
+  stack.push(ar.graph.end());
+  state[ar.graph.end()->id()] = kOnStack;
+  while (!stack.empty()) {
+    Node* n = stack.top();
+    bool pop = true;
+    for (Node* const i : n->inputs()) {
+      if (state[i->id()] == kUnvisited) {
+        state[i->id()] = kOnStack;
+        stack.push(i);
+        pop = false;
+        break;
+      }
+    }
+    if (pop) {
+      state[n->id()] = kVisited;
+      stack.pop();
+      os << "#" << SafeId(n) << ":" << SafeMnemonic(n) << "(";
+      int j = 0;
+      for (Node* const i : n->inputs()) {
+        if (j++ > 0) os << ", ";
+        os << "#" << SafeId(i) << ":" << SafeMnemonic(i);
+      }
+      os << ")" << std::endl;
+    }
+  }
+  return os;
+}
 }
 }
 }  // namespace v8::internal::compiler
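The JSON writers added above emit a single object with "nodes" and "edges" arrays, intended for an external graph viewer. A hypothetical driver showing the emitted shape (DumpGraphAsJson is an illustrative helper, not part of this patch):

#include <fstream>

void DumpGraphAsJson(const v8::internal::compiler::Graph& graph) {
  std::ofstream out("turbo-graph.json");
  out << v8::internal::compiler::AsJSON(graph);
  // Emits, roughly:
  //   {"nodes":[{"id":5,"label":"Int32Add","opcode":"Int32Add",
  //              "control":false}, ...],
  //    "edges":[{"source":3,"target":5,"index":0,"type":"value"}, ...]}
}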
diff --git a/src/compiler/graph-visualizer.h b/src/compiler/graph-visualizer.h
index 12532ba..3dd66ea 100644
--- a/src/compiler/graph-visualizer.h
+++ b/src/compiler/graph-visualizer.h
@@ -5,25 +5,80 @@
 #ifndef V8_COMPILER_GRAPH_VISUALIZER_H_
 #define V8_COMPILER_GRAPH_VISUALIZER_H_
 
-#include "src/v8.h"
+#include <iosfwd>
 
 namespace v8 {
 namespace internal {
 
-class OStream;
+class CompilationInfo;
 
 namespace compiler {
 
 class Graph;
+class InstructionSequence;
+class RegisterAllocator;
+class Schedule;
+class SourcePositionTable;
+
 
 struct AsDOT {
   explicit AsDOT(const Graph& g) : graph(g) {}
   const Graph& graph;
 };
 
-OStream& operator<<(OStream& os, const AsDOT& ad);
-}
-}
-}  // namespace v8::internal::compiler
+std::ostream& operator<<(std::ostream& os, const AsDOT& ad);
+
+
+struct AsJSON {
+  explicit AsJSON(const Graph& g) : graph(g) {}
+  const Graph& graph;
+};
+
+std::ostream& operator<<(std::ostream& os, const AsJSON& ad);
+
+struct AsRPO {
+  explicit AsRPO(const Graph& g) : graph(g) {}
+  const Graph& graph;
+};
+
+std::ostream& operator<<(std::ostream& os, const AsRPO& ad);
+
+
+struct AsC1VCompilation {
+  explicit AsC1VCompilation(const CompilationInfo* info) : info_(info) {}
+  const CompilationInfo* info_;
+};
+
+
+struct AsC1V {
+  AsC1V(const char* phase, const Schedule* schedule,
+        const SourcePositionTable* positions = NULL,
+        const InstructionSequence* instructions = NULL)
+      : schedule_(schedule),
+        instructions_(instructions),
+        positions_(positions),
+        phase_(phase) {}
+  const Schedule* schedule_;
+  const InstructionSequence* instructions_;
+  const SourcePositionTable* positions_;
+  const char* phase_;
+};
+
+struct AsC1VAllocator {
+  explicit AsC1VAllocator(const char* phase,
+                          const RegisterAllocator* allocator = NULL)
+      : phase_(phase), allocator_(allocator) {}
+  const char* phase_;
+  const RegisterAllocator* allocator_;
+};
+
+std::ostream& operator<<(std::ostream& os, const AsDOT& ad);
+std::ostream& operator<<(std::ostream& os, const AsC1VCompilation& ac);
+std::ostream& operator<<(std::ostream& os, const AsC1V& ac);
+std::ostream& operator<<(std::ostream& os, const AsC1VAllocator& ac);
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_COMPILER_GRAPH_VISUALIZER_H_
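A hypothetical sketch of the stream wrappers declared in this header (all types are the declarations above; the phase string and helper name are illustrative):

#include <iostream>

namespace v8 {
namespace internal {
namespace compiler {

void DumpForDebugging(const Graph& graph, const Schedule* schedule,
                      const SourcePositionTable* positions,
                      const InstructionSequence* instructions) {
  std::cout << AsDOT(graph);  // Graphviz dot text
  std::cout << AsRPO(graph);  // one line per node, inputs listed inline
  std::cout << AsC1V("codegen", schedule, positions, instructions);
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8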
diff --git a/src/compiler/graph.cc b/src/compiler/graph.cc
index 7b5f228..995046b 100644
--- a/src/compiler/graph.cc
+++ b/src/compiler/graph.cc
@@ -5,29 +5,42 @@
 #include "src/compiler/graph.h"
 
 #include "src/compiler/common-operator.h"
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/graph-inl.h"
 #include "src/compiler/node.h"
 #include "src/compiler/node-aux-data-inl.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/node-properties-inl.h"
+#include "src/compiler/opcodes.h"
 #include "src/compiler/operator-properties.h"
-#include "src/compiler/operator-properties-inl.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-Graph::Graph(Zone* zone) : GenericGraph<Node>(zone), decorators_(zone) {}
+Graph::Graph(Zone* zone)
+    : zone_(zone),
+      start_(NULL),
+      end_(NULL),
+      mark_max_(0),
+      next_node_id_(0),
+      decorators_(zone) {}
 
 
-Node* Graph::NewNode(const Operator* op, int input_count, Node** inputs) {
-  DCHECK_LE(op->InputCount(), input_count);
-  Node* result = Node::New(this, input_count, inputs);
-  result->Initialize(op);
+void Graph::Decorate(Node* node) {
   for (ZoneVector<GraphDecorator*>::iterator i = decorators_.begin();
        i != decorators_.end(); ++i) {
-    (*i)->Decorate(result);
+    (*i)->Decorate(node);
+  }
+}
+
+
+Node* Graph::NewNode(const Operator* op, int input_count, Node** inputs,
+                     bool incomplete) {
+  DCHECK_LE(op->ValueInputCount(), input_count);
+  Node* result = Node::New(this, input_count, inputs, incomplete);
+  result->Initialize(op);
+  if (!incomplete) {
+    Decorate(result);
   }
   return result;
 }
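The new incomplete flag defers decoration until a node's inputs are final, which is what loop construction needs. A hypothetical sketch (BuildLoopPhi and its patch-up protocol are illustrative, not part of this patch):

Node* BuildLoopPhi(Graph* graph, CommonOperatorBuilder* common, Node* init,
                   Node* loop) {
  // Slot 1 is a placeholder until the loop's back-edge value exists.
  Node* inputs[] = {init, init, loop};
  Node* phi = graph->NewNode(common->Phi(kMachAnyTagged, 2), 3, inputs,
                             /*incomplete=*/true);
  // The caller patches input 1 via phi->ReplaceInput(1, backedge) and then
  // calls graph->Decorate(phi), so decorators only see the finished node.
  return phi;
}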
diff --git a/src/compiler/graph.h b/src/compiler/graph.h
index 07eb02f..d619da2 100644
--- a/src/compiler/graph.h
+++ b/src/compiler/graph.h
@@ -8,7 +8,6 @@
 #include <map>
 #include <set>
 
-#include "src/compiler/generic-algorithm.h"
 #include "src/compiler/node.h"
 #include "src/compiler/node-aux-data.h"
 #include "src/compiler/source-position.h"
@@ -17,19 +16,21 @@
 namespace internal {
 namespace compiler {
 
+// Forward declarations.
 class GraphDecorator;
 
 
-class Graph : public GenericGraph<Node> {
+class Graph : public ZoneObject {
  public:
   explicit Graph(Zone* zone);
 
   // Base implementation used by all factory methods.
-  Node* NewNode(const Operator* op, int input_count, Node** inputs);
+  Node* NewNode(const Operator* op, int input_count, Node** inputs,
+                bool incomplete = false);
 
   // Factories for nodes with static input counts.
   Node* NewNode(const Operator* op) {
-    return NewNode(op, 0, static_cast<Node**>(NULL));
+    return NewNode(op, 0, static_cast<Node**>(nullptr));
   }
   Node* NewNode(const Operator* op, Node* n1) { return NewNode(op, 1, &n1); }
   Node* NewNode(const Operator* op, Node* n1, Node* n2) {
@@ -54,15 +55,26 @@
     Node* nodes[] = {n1, n2, n3, n4, n5, n6};
     return NewNode(op, arraysize(nodes), nodes);
   }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5, Node* n6, Node* n7) {
+    Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
 
   template <class Visitor>
-  void VisitNodeUsesFrom(Node* node, Visitor* visitor);
+  inline void VisitNodeInputsFromEnd(Visitor* visitor);
 
-  template <class Visitor>
-  void VisitNodeUsesFromStart(Visitor* visitor);
+  Zone* zone() const { return zone_; }
+  Node* start() const { return start_; }
+  Node* end() const { return end_; }
 
-  template <class Visitor>
-  void VisitNodeInputsFromEnd(Visitor* visitor);
+  void SetStart(Node* start) { start_ = start; }
+  void SetEnd(Node* end) { end_ = end; }
+
+  NodeId NextNodeID() { return next_node_id_++; }
+  NodeId NodeCount() const { return next_node_id_; }
+
+  void Decorate(Node* node);
 
   void AddDecorator(GraphDecorator* decorator) {
     decorators_.push_back(decorator);
@@ -76,10 +88,56 @@
   }
 
  private:
+  template <typename State>
+  friend class NodeMarker;
+
+  Zone* zone_;
+  Node* start_;
+  Node* end_;
+  Mark mark_max_;
+  NodeId next_node_id_;
   ZoneVector<GraphDecorator*> decorators_;
+
+  DISALLOW_COPY_AND_ASSIGN(Graph);
 };
 
 
+// A NodeMarker uses monotonically increasing marks to assign local "states"
+// to nodes. Only one NodeMarker per graph is valid at a given time.
+template <typename State>
+class NodeMarker BASE_EMBEDDED {
+ public:
+  NodeMarker(Graph* graph, uint32_t num_states)
+      : mark_min_(graph->mark_max_), mark_max_(graph->mark_max_ += num_states) {
+    DCHECK(num_states > 0);         // user error!
+    DCHECK(mark_max_ > mark_min_);  // check for wraparound.
+  }
+
+  State Get(Node* node) {
+    Mark mark = node->mark();
+    if (mark < mark_min_) {
+      mark = mark_min_;
+      node->set_mark(mark_min_);
+    }
+    DCHECK_LT(mark, mark_max_);
+    return static_cast<State>(mark - mark_min_);
+  }
+
+  void Set(Node* node, State state) {
+    Mark local = static_cast<Mark>(state);
+    DCHECK(local < (mark_max_ - mark_min_));
+    DCHECK_LT(node->mark(), mark_max_);
+    node->set_mark(local + mark_min_);
+  }
+
+ private:
+  Mark mark_min_;
+  Mark mark_max_;
+};
+
+
+// A graph decorator can be used to add behavior to the creation of nodes
+// in a graph.
 class GraphDecorator : public ZoneObject {
  public:
   virtual ~GraphDecorator() {}
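
A NodeMarker avoids clearing per-node state between passes: each pass
reserves a fresh window of num_states marks, and any node whose mark
predates the window reads back as state 0. A minimal usage sketch (the
VisitState enum and RunPass function are illustrative):

    enum VisitState : uint8_t { kUnvisited = 0, kOnStack = 1, kVisited = 2 };

    void RunPass(Graph* graph) {
      NodeMarker<VisitState> marker(graph, 3);  // reserve 3 states
      Node* node = graph->end();
      if (marker.Get(node) == kUnvisited) {  // stale marks read as state 0
        marker.Set(node, kVisited);
      }
    }
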
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
index deab7cd..55f7426 100644
--- a/src/compiler/ia32/code-generator-ia32.cc
+++ b/src/compiler/ia32/code-generator-ia32.cc
@@ -33,8 +33,6 @@
 
   Operand OutputOperand() { return ToOperand(instr_->Output()); }
 
-  Operand TempOperand(int index) { return ToOperand(instr_->TempAt(index)); }
-
   Operand ToOperand(InstructionOperand* op, int extra = 0) {
     if (op->IsRegister()) {
       DCHECK(extra == 0);
@@ -59,6 +57,9 @@
     switch (constant.type()) {
       case Constant::kInt32:
         return Immediate(constant.ToInt32());
+      case Constant::kFloat32:
+        return Immediate(
+            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
       case Constant::kFloat64:
         return Immediate(
             isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
@@ -68,44 +69,216 @@
         return Immediate(constant.ToHeapObject());
       case Constant::kInt64:
         break;
+      case Constant::kRpoNumber:
+        return Immediate::CodeRelativeOffset(ToLabel(operand));
     }
     UNREACHABLE();
     return Immediate(-1);
   }
 
-  Operand MemoryOperand(int* first_input) {
-    const int offset = *first_input;
-    switch (AddressingModeField::decode(instr_->opcode())) {
-      case kMode_MR1I:
-        *first_input += 2;
-        return Operand(InputRegister(offset + 0), InputRegister(offset + 1),
-                       times_1,
-                       0);  // TODO(dcarney): K != 0
-      case kMode_MRI:
-        *first_input += 2;
-        return Operand::ForRegisterPlusImmediate(InputRegister(offset + 0),
-                                                 InputImmediate(offset + 1));
-      case kMode_MI:
-        *first_input += 1;
-        return Operand(InputImmediate(offset + 0));
-      default:
-        UNREACHABLE();
-        return Operand(no_reg);
-    }
+  static int NextOffset(int* offset) {
+    int i = *offset;
+    (*offset)++;
+    return i;
   }
 
-  Operand MemoryOperand() {
-    int first_input = 0;
+  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
+    STATIC_ASSERT(0 == static_cast<int>(times_1));
+    STATIC_ASSERT(1 == static_cast<int>(times_2));
+    STATIC_ASSERT(2 == static_cast<int>(times_4));
+    STATIC_ASSERT(3 == static_cast<int>(times_8));
+    int scale = static_cast<int>(mode - one);
+    DCHECK(scale >= 0 && scale < 4);
+    return static_cast<ScaleFactor>(scale);
+  }
+
+  Operand MemoryOperand(int* offset) {
+    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
+    switch (mode) {
+      case kMode_MR: {
+        Register base = InputRegister(NextOffset(offset));
+        int32_t disp = 0;
+        return Operand(base, disp);
+      }
+      case kMode_MRI: {
+        Register base = InputRegister(NextOffset(offset));
+        int32_t disp = InputInt32(NextOffset(offset));
+        return Operand(base, disp);
+      }
+      case kMode_MR1:
+      case kMode_MR2:
+      case kMode_MR4:
+      case kMode_MR8: {
+        Register base = InputRegister(NextOffset(offset));
+        Register index = InputRegister(NextOffset(offset));
+        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
+        int32_t disp = 0;
+        return Operand(base, index, scale, disp);
+      }
+      case kMode_MR1I:
+      case kMode_MR2I:
+      case kMode_MR4I:
+      case kMode_MR8I: {
+        Register base = InputRegister(NextOffset(offset));
+        Register index = InputRegister(NextOffset(offset));
+        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
+        int32_t disp = InputInt32(NextOffset(offset));
+        return Operand(base, index, scale, disp);
+      }
+      case kMode_M1:
+      case kMode_M2:
+      case kMode_M4:
+      case kMode_M8: {
+        Register index = InputRegister(NextOffset(offset));
+        ScaleFactor scale = ScaleFor(kMode_M1, mode);
+        int32_t disp = 0;
+        return Operand(index, scale, disp);
+      }
+      case kMode_M1I:
+      case kMode_M2I:
+      case kMode_M4I:
+      case kMode_M8I: {
+        Register index = InputRegister(NextOffset(offset));
+        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
+        int32_t disp = InputInt32(NextOffset(offset));
+        return Operand(index, scale, disp);
+      }
+      case kMode_MI: {
+        int32_t disp = InputInt32(NextOffset(offset));
+        return Operand(Immediate(disp));
+      }
+      case kMode_None:
+        UNREACHABLE();
+        return Operand(no_reg, 0);
+    }
+    UNREACHABLE();
+    return Operand(no_reg, 0);
+  }
+
+  Operand MemoryOperand(int first_input = 0) {
     return MemoryOperand(&first_input);
   }
 };
 
 
-static bool HasImmediateInput(Instruction* instr, int index) {
+namespace {
+
+bool HasImmediateInput(Instruction* instr, int index) {
   return instr->InputAt(index)->IsImmediate();
 }
 
 
+class OutOfLineLoadInteger FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL { __ xor_(result_, result_); }
+
+ private:
+  Register const result_;
+};
+
+
+class OutOfLineLoadFloat FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL { __ pcmpeqd(result_, result_); }
+
+ private:
+  XMMRegister const result_;
+};
+
+
+class OutOfLineTruncateDoubleToI FINAL : public OutOfLineCode {
+ public:
+  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
+                             XMMRegister input)
+      : OutOfLineCode(gen), result_(result), input_(input) {}
+
+  void Generate() FINAL {
+    __ sub(esp, Immediate(kDoubleSize));
+    __ movsd(MemOperand(esp, 0), input_);
+    __ SlowTruncateToI(result_, esp, 0);
+    __ add(esp, Immediate(kDoubleSize));
+  }
+
+ private:
+  Register const result_;
+  XMMRegister const input_;
+};
+
+}  // namespace
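
These out-of-line classes keep rarely taken slow paths off the hot path.
OutOfLineTruncateDoubleToI pairs with the kArchTruncateDoubleToI case below:
cvttsd2si yields 0x80000000 ("integer indefinite") when the input is out of
range, and "cmp result, 1" sets the overflow flag exactly for that value.
In rough C++ terms (helper names here are illustrative):

    int32_t TruncateDoubleToI(double input) {
      int32_t result = FastCvttsd2si(input);    // cvttsd2si
      if (result == INT32_MIN) {                // 0x80000000: conversion failed
        // A genuine INT32_MIN result also lands here; the slow path simply
        // recomputes the same value.
        result = SlowTruncateViaStack(input);   // OutOfLineTruncateDoubleToI
      }
      return result;
    }
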
+
+
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                          \
+  do {                                                                  \
+    auto result = i.OutputDoubleRegister();                             \
+    auto offset = i.InputRegister(0);                                   \
+    if (instr->InputAt(1)->IsRegister()) {                              \
+      __ cmp(offset, i.InputRegister(1));                               \
+    } else {                                                            \
+      __ cmp(offset, i.InputImmediate(1));                              \
+    }                                                                   \
+    OutOfLineCode* ool = new (zone()) OutOfLineLoadFloat(this, result); \
+    __ j(above_equal, ool->entry());                                    \
+    __ asm_instr(result, i.MemoryOperand(2));                           \
+    __ bind(ool->exit());                                               \
+  } while (false)
+
+
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                          \
+  do {                                                                    \
+    auto result = i.OutputRegister();                                     \
+    auto offset = i.InputRegister(0);                                     \
+    if (instr->InputAt(1)->IsRegister()) {                                \
+      __ cmp(offset, i.InputRegister(1));                                 \
+    } else {                                                              \
+      __ cmp(offset, i.InputImmediate(1));                                \
+    }                                                                     \
+    OutOfLineCode* ool = new (zone()) OutOfLineLoadInteger(this, result); \
+    __ j(above_equal, ool->entry());                                      \
+    __ asm_instr(result, i.MemoryOperand(2));                             \
+    __ bind(ool->exit());                                                 \
+  } while (false)
+
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)                 \
+  do {                                                          \
+    auto offset = i.InputRegister(0);                           \
+    if (instr->InputAt(1)->IsRegister()) {                      \
+      __ cmp(offset, i.InputRegister(1));                       \
+    } else {                                                    \
+      __ cmp(offset, i.InputImmediate(1));                      \
+    }                                                           \
+    Label done;                                                 \
+    __ j(above_equal, &done, Label::kNear);                     \
+    __ asm_instr(i.MemoryOperand(3), i.InputDoubleRegister(2)); \
+    __ bind(&done);                                             \
+  } while (false)
+
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)            \
+  do {                                                       \
+    auto offset = i.InputRegister(0);                        \
+    if (instr->InputAt(1)->IsRegister()) {                   \
+      __ cmp(offset, i.InputRegister(1));                    \
+    } else {                                                 \
+      __ cmp(offset, i.InputImmediate(1));                   \
+    }                                                        \
+    Label done;                                              \
+    __ j(above_equal, &done, Label::kNear);                  \
+    if (instr->InputAt(2)->IsRegister()) {                   \
+      __ asm_instr(i.MemoryOperand(3), i.InputRegister(2));  \
+    } else {                                                 \
+      __ asm_instr(i.MemoryOperand(3), i.InputImmediate(2)); \
+    }                                                        \
+    __ bind(&done);                                          \
+  } while (false)
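
All four ASSEMBLE_CHECKED_* macros rely on the same trick: one unsigned
compare of offset against length rejects both negative offsets and offsets
past the end, because a negative int reinterpreted as unsigned is huge.
Out-of-bounds loads produce a neutral value (zero via xor for integers, an
all-ones NaN pattern via pcmpeqd for floats); out-of-bounds stores are
skipped. Roughly, for an integer load (a sketch, not the emitted code):

    uint32_t CheckedLoad(const uint32_t* mem, int32_t offset, uint32_t length) {
      if (static_cast<uint32_t>(offset) >= length) {
        return 0;           // out-of-line path: xor result, result
      }
      return mem[offset];   // in-line path: the actual asm_instr
    }
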
+
+
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   IA32OperandConverter i(this, instr);
@@ -136,7 +309,7 @@
       break;
     }
     case kArchJmp:
-      __ jmp(code()->GetLabel(i.InputBlock(0)));
+      AssembleArchJump(i.InputRpo(0));
       break;
     case kArchNop:
       // don't emit code for nops.
@@ -144,9 +317,19 @@
     case kArchRet:
       AssembleReturn();
       break;
-    case kArchTruncateDoubleToI:
-      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+    case kArchStackPointer:
+      __ mov(i.OutputRegister(), esp);
       break;
+    case kArchTruncateDoubleToI: {
+      auto result = i.OutputRegister();
+      auto input = i.InputDoubleRegister(0);
+      auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
+      __ cvttsd2si(result, Operand(input));
+      __ cmp(result, 1);
+      __ j(overflow, ool->entry());
+      __ bind(ool->exit());
+      break;
+    }
     case kIA32Add:
       if (HasImmediateInput(instr, 1)) {
         __ add(i.InputOperand(0), i.InputImmediate(1));
@@ -182,12 +365,18 @@
         __ imul(i.OutputRegister(), i.InputOperand(1));
       }
       break;
+    case kIA32ImulHigh:
+      __ imul(i.InputRegister(1));
+      break;
+    case kIA32UmulHigh:
+      __ mul(i.InputRegister(1));
+      break;
     case kIA32Idiv:
       __ cdq();
       __ idiv(i.InputOperand(1));
       break;
     case kIA32Udiv:
-      __ xor_(edx, edx);
+      __ Move(edx, Immediate(0));
       __ div(i.InputOperand(1));
       break;
     case kIA32Not:
@@ -219,46 +408,46 @@
       break;
     case kIA32Shl:
       if (HasImmediateInput(instr, 1)) {
-        __ shl(i.OutputRegister(), i.InputInt5(1));
+        __ shl(i.OutputOperand(), i.InputInt5(1));
       } else {
-        __ shl_cl(i.OutputRegister());
+        __ shl_cl(i.OutputOperand());
       }
       break;
     case kIA32Shr:
       if (HasImmediateInput(instr, 1)) {
-        __ shr(i.OutputRegister(), i.InputInt5(1));
+        __ shr(i.OutputOperand(), i.InputInt5(1));
       } else {
-        __ shr_cl(i.OutputRegister());
+        __ shr_cl(i.OutputOperand());
       }
       break;
     case kIA32Sar:
       if (HasImmediateInput(instr, 1)) {
-        __ sar(i.OutputRegister(), i.InputInt5(1));
+        __ sar(i.OutputOperand(), i.InputInt5(1));
       } else {
-        __ sar_cl(i.OutputRegister());
+        __ sar_cl(i.OutputOperand());
       }
       break;
     case kIA32Ror:
       if (HasImmediateInput(instr, 1)) {
-        __ ror(i.OutputRegister(), i.InputInt5(1));
+        __ ror(i.OutputOperand(), i.InputInt5(1));
       } else {
-        __ ror_cl(i.OutputRegister());
+        __ ror_cl(i.OutputOperand());
       }
       break;
     case kSSEFloat64Cmp:
       __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
       break;
     case kSSEFloat64Add:
-      __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      __ addsd(i.InputDoubleRegister(0), i.InputOperand(1));
       break;
     case kSSEFloat64Sub:
-      __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      __ subsd(i.InputDoubleRegister(0), i.InputOperand(1));
       break;
     case kSSEFloat64Mul:
-      __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      __ mulsd(i.InputDoubleRegister(0), i.InputOperand(1));
       break;
     case kSSEFloat64Div:
-      __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      __ divsd(i.InputDoubleRegister(0), i.InputOperand(1));
       break;
     case kSSEFloat64Mod: {
       // TODO(dcarney): alignment is wrong.
@@ -288,6 +477,30 @@
     case kSSEFloat64Sqrt:
       __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
       break;
+    case kSSEFloat64Floor: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                 v8::internal::Assembler::kRoundDown);
+      break;
+    }
+    case kSSEFloat64Ceil: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                 v8::internal::Assembler::kRoundUp);
+      break;
+    }
+    case kSSEFloat64RoundTruncate: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                 v8::internal::Assembler::kRoundToZero);
+      break;
+    }
+    case kSSECvtss2sd:
+      __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
+      break;
+    case kSSECvtsd2ss:
+      __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
+      break;
     case kSSEFloat64ToInt32:
       __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
       break;
@@ -303,9 +516,32 @@
       __ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
       break;
     case kSSEUint32ToFloat64:
-      // TODO(turbofan): IA32 SSE LoadUint32() should take an operand.
-      __ LoadUint32(i.OutputDoubleRegister(), i.InputRegister(0));
+      __ LoadUint32(i.OutputDoubleRegister(), i.InputOperand(0));
       break;
+    case kAVXFloat64Add: {
+      CpuFeatureScope avx_scope(masm(), AVX);
+      __ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputOperand(1));
+      break;
+    }
+    case kAVXFloat64Sub: {
+      CpuFeatureScope avx_scope(masm(), AVX);
+      __ vsubsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputOperand(1));
+      break;
+    }
+    case kAVXFloat64Mul: {
+      CpuFeatureScope avx_scope(masm(), AVX);
+      __ vmulsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputOperand(1));
+      break;
+    }
+    case kAVXFloat64Div: {
+      CpuFeatureScope avx_scope(masm(), AVX);
+      __ vdivsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputOperand(1));
+      break;
+    }
     case kIA32Movsxbl:
       __ movsx_b(i.OutputRegister(), i.MemoryOperand());
       break;
@@ -363,14 +599,47 @@
     case kIA32Movss:
       if (instr->HasOutput()) {
         __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
-        __ cvtss2sd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
       } else {
         int index = 0;
         Operand operand = i.MemoryOperand(&index);
-        __ cvtsd2ss(xmm0, i.InputDoubleRegister(index));
-        __ movss(operand, xmm0);
+        __ movss(operand, i.InputDoubleRegister(index));
       }
       break;
+    case kIA32Lea: {
+      AddressingMode mode = AddressingModeField::decode(instr->opcode());
+      // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
+      // and addressing mode just happens to work out. The "addl"/"subl" forms
+      // in these cases are faster based on measurements.
+      if (mode == kMode_MI) {
+        __ Move(i.OutputRegister(), Immediate(i.InputInt32(0)));
+      } else if (i.InputRegister(0).is(i.OutputRegister())) {
+        if (mode == kMode_MRI) {
+          int32_t constant_summand = i.InputInt32(1);
+          if (constant_summand > 0) {
+            __ add(i.OutputRegister(), Immediate(constant_summand));
+          } else if (constant_summand < 0) {
+            __ sub(i.OutputRegister(), Immediate(-constant_summand));
+          }
+        } else if (mode == kMode_MR1) {
+          if (i.InputRegister(1).is(i.OutputRegister())) {
+            __ shl(i.OutputRegister(), 1);
+          } else {
+            __ lea(i.OutputRegister(), i.MemoryOperand());
+          }
+        } else if (mode == kMode_M2) {
+          __ shl(i.OutputRegister(), 1);
+        } else if (mode == kMode_M4) {
+          __ shl(i.OutputRegister(), 2);
+        } else if (mode == kMode_M8) {
+          __ shl(i.OutputRegister(), 3);
+        } else {
+          __ lea(i.OutputRegister(), i.MemoryOperand());
+        }
+      } else {
+        __ lea(i.OutputRegister(), i.MemoryOperand());
+      }
+      break;
+    }
     case kIA32Push:
       if (HasImmediateInput(instr, 0)) {
         __ push(i.InputImmediate(0));
@@ -384,31 +653,59 @@
       Register value = i.InputRegister(2);
       __ mov(Operand(object, index, times_1, 0), value);
       __ lea(index, Operand(object, index, times_1, 0));
-      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
-                                ? kSaveFPRegs
-                                : kDontSaveFPRegs;
+      SaveFPRegsMode mode =
+          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
       __ RecordWrite(object, index, value, mode);
       break;
     }
+    case kCheckedLoadInt8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
+      break;
+    case kCheckedLoadUint8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_b);
+      break;
+    case kCheckedLoadInt16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_w);
+      break;
+    case kCheckedLoadUint16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_w);
+      break;
+    case kCheckedLoadWord32:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
+      break;
+    case kCheckedLoadFloat32:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
+      break;
+    case kCheckedLoadFloat64:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
+      break;
+    case kCheckedStoreWord8:
+      ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
+      break;
+    case kCheckedStoreWord16:
+      ASSEMBLE_CHECKED_STORE_INTEGER(mov_w);
+      break;
+    case kCheckedStoreWord32:
+      ASSEMBLE_CHECKED_STORE_INTEGER(mov);
+      break;
+    case kCheckedStoreFloat32:
+      ASSEMBLE_CHECKED_STORE_FLOAT(movss);
+      break;
+    case kCheckedStoreFloat64:
+      ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
+      break;
   }
 }
 
 
-// Assembles branches after an instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr,
-                                       FlagsCondition condition) {
+// Assembles a branch after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
   IA32OperandConverter i(this, instr);
-  Label done;
-
-  // Emit a branch. The true and false targets are always the last two inputs
-  // to the instruction.
-  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
-  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
-  bool fallthru = IsNextInAssemblyOrder(fblock);
-  Label* tlabel = code()->GetLabel(tblock);
-  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
-  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
-  switch (condition) {
+  Label::Distance flabel_distance =
+      branch->fallthru ? Label::kNear : Label::kFar;
+  Label* tlabel = branch->true_label;
+  Label* flabel = branch->false_label;
+  switch (branch->condition) {
     case kUnorderedEqual:
       __ j(parity_even, flabel, flabel_distance);
     // Fall through.
@@ -464,8 +761,13 @@
       __ j(no_overflow, tlabel);
       break;
   }
-  if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
-  __ bind(&done);
+  // Add a jump if not falling through to the next block.
+  if (!branch->fallthru) __ jmp(flabel);
+}
+
+
+void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+  if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
 }
 
 
@@ -484,7 +786,7 @@
   switch (condition) {
     case kUnorderedEqual:
       __ j(parity_odd, &check, Label::kNear);
-      __ mov(reg, Immediate(0));
+      __ Move(reg, Immediate(0));
       __ jmp(&done, Label::kNear);
     // Fall through.
     case kEqual:
@@ -512,7 +814,7 @@
       break;
     case kUnorderedLessThan:
       __ j(parity_odd, &check, Label::kNear);
-      __ mov(reg, Immediate(0));
+      __ Move(reg, Immediate(0));
       __ jmp(&done, Label::kNear);
     // Fall through.
     case kUnsignedLessThan:
@@ -528,7 +830,7 @@
       break;
     case kUnorderedLessThanOrEqual:
       __ j(parity_odd, &check, Label::kNear);
-      __ mov(reg, Immediate(0));
+      __ Move(reg, Immediate(0));
       __ jmp(&done, Label::kNear);
     // Fall through.
     case kUnsignedLessThanOrEqual:
@@ -558,7 +860,7 @@
     // Emit a branch to set a register to either 1 or 0.
     Label set;
     __ j(cc, &set, Label::kNear);
-    __ mov(reg, Immediate(0));
+    __ Move(reg, Immediate(0));
     __ jmp(&done, Label::kNear);
     __ bind(&set);
     __ mov(reg, Immediate(1));
@@ -704,7 +1006,7 @@
 
 void CodeGenerator::AssemblePrologue() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-  Frame* frame = code_->frame();
+  Frame* frame = this->frame();
   int stack_slots = frame->GetSpillSlotCount();
   if (descriptor->kind() == CallDescriptor::kCallAddress) {
     // Assemble a prologue similar the to cdecl calling convention.
@@ -721,28 +1023,10 @@
       frame->SetRegisterSaveAreaSize(register_save_area_size);
     }
   } else if (descriptor->IsJSFunctionCall()) {
-    CompilationInfo* info = linkage()->info();
+    CompilationInfo* info = this->info();
     __ Prologue(info->IsCodePreAgingActive());
     frame->SetRegisterSaveAreaSize(
         StandardFrameConstants::kFixedFrameSizeFromFp);
-
-    // Sloppy mode functions and builtins need to replace the receiver with the
-    // global proxy when called as functions (without an explicit receiver
-    // object).
-    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
-    if (info->strict_mode() == SLOPPY && !info->is_native()) {
-      Label ok;
-      // +2 for return address and saved frame pointer.
-      int receiver_slot = info->scope()->num_parameters() + 2;
-      __ mov(ecx, Operand(ebp, receiver_slot * kPointerSize));
-      __ cmp(ecx, isolate()->factory()->undefined_value());
-      __ j(not_equal, &ok, Label::kNear);
-      __ mov(ecx, GlobalObjectOperand());
-      __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
-      __ mov(Operand(ebp, receiver_slot * kPointerSize), ecx);
-      __ bind(&ok);
-    }
-
   } else {
     __ StubPrologue();
     frame->SetRegisterSaveAreaSize(
@@ -831,24 +1115,35 @@
       }
     } else if (destination->IsRegister()) {
       Register dst = g.ToRegister(destination);
-      __ mov(dst, g.ToImmediate(source));
+      __ Move(dst, g.ToImmediate(source));
     } else if (destination->IsStackSlot()) {
       Operand dst = g.ToOperand(destination);
-      __ mov(dst, g.ToImmediate(source));
-    } else {
-      double v = g.ToDouble(source);
-      uint64_t int_val = bit_cast<uint64_t, double>(v);
-      int32_t lower = static_cast<int32_t>(int_val);
-      int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
+      __ Move(dst, g.ToImmediate(source));
+    } else if (src_constant.type() == Constant::kFloat32) {
+      // TODO(turbofan): Can we do better here?
+      uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
       if (destination->IsDoubleRegister()) {
         XMMRegister dst = g.ToDoubleRegister(destination);
-        __ Move(dst, v);
+        __ Move(dst, src);
+      } else {
+        DCHECK(destination->IsDoubleStackSlot());
+        Operand dst = g.ToOperand(destination);
+        __ Move(dst, Immediate(src));
+      }
+    } else {
+      DCHECK_EQ(Constant::kFloat64, src_constant.type());
+      uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
+      uint32_t lower = static_cast<uint32_t>(src);
+      uint32_t upper = static_cast<uint32_t>(src >> 32);
+      if (destination->IsDoubleRegister()) {
+        XMMRegister dst = g.ToDoubleRegister(destination);
+        __ Move(dst, src);
       } else {
         DCHECK(destination->IsDoubleStackSlot());
         Operand dst0 = g.ToOperand(destination);
         Operand dst1 = g.HighOperand(destination);
-        __ mov(dst0, Immediate(lower));
-        __ mov(dst1, Immediate(upper));
+        __ Move(dst0, Immediate(lower));
+        __ Move(dst1, Immediate(upper));
       }
     }
   } else if (source->IsDoubleRegister()) {
@@ -908,7 +1203,7 @@
     __ movaps(xmm0, src);
     __ movaps(src, dst);
     __ movaps(dst, xmm0);
-  } else if (source->IsDoubleRegister() && source->IsDoubleStackSlot()) {
+  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
     // XMM register-memory swap.  We rely on having xmm0
     // available as a fixed scratch register.
     XMMRegister reg = g.ToDoubleRegister(source);
@@ -940,7 +1235,7 @@
 
 void CodeGenerator::EnsureSpaceForLazyDeopt() {
   int space_needed = Deoptimizer::patch_size();
-  if (!linkage()->info()->IsStub()) {
+  if (!info()->IsStub()) {
     // Ensure that we have enough space after the previous lazy-bailout
     // instruction for patching the code here.
     int current_pc = masm()->pc_offset();
diff --git a/src/compiler/ia32/instruction-codes-ia32.h b/src/compiler/ia32/instruction-codes-ia32.h
index 0f46088..ec9fd18 100644
--- a/src/compiler/ia32/instruction-codes-ia32.h
+++ b/src/compiler/ia32/instruction-codes-ia32.h
@@ -20,6 +20,8 @@
   V(IA32Xor)                       \
   V(IA32Sub)                       \
   V(IA32Imul)                      \
+  V(IA32ImulHigh)                  \
+  V(IA32UmulHigh)                  \
   V(IA32Idiv)                      \
   V(IA32Udiv)                      \
   V(IA32Not)                       \
@@ -35,10 +37,19 @@
   V(SSEFloat64Div)                 \
   V(SSEFloat64Mod)                 \
   V(SSEFloat64Sqrt)                \
+  V(SSEFloat64Floor)               \
+  V(SSEFloat64Ceil)                \
+  V(SSEFloat64RoundTruncate)       \
+  V(SSECvtss2sd)                   \
+  V(SSECvtsd2ss)                   \
   V(SSEFloat64ToInt32)             \
   V(SSEFloat64ToUint32)            \
   V(SSEInt32ToFloat64)             \
   V(SSEUint32ToFloat64)            \
+  V(AVXFloat64Add)                 \
+  V(AVXFloat64Sub)                 \
+  V(AVXFloat64Mul)                 \
+  V(AVXFloat64Div)                 \
   V(IA32Movsxbl)                   \
   V(IA32Movzxbl)                   \
   V(IA32Movb)                      \
@@ -48,6 +59,7 @@
   V(IA32Movl)                      \
   V(IA32Movss)                     \
   V(IA32Movsd)                     \
+  V(IA32Lea)                       \
   V(IA32Push)                      \
   V(IA32StoreWriteBarrier)
 
@@ -59,23 +71,31 @@
 //
 // We use the following local notation for addressing modes:
 //
-// R = register
-// O = register or stack slot
-// D = double register
-// I = immediate (handle, external, int32)
-// MR = [register]
-// MI = [immediate]
-// MRN = [register + register * N in {1, 2, 4, 8}]
-// MRI = [register + immediate]
-// MRNI = [register + register * N in {1, 2, 4, 8} + immediate]
+// M = memory operand
+// R = base register
+// N = index register * N for N in {1, 2, 4, 8}
+// I = immediate displacement (int32_t)
+
 #define TARGET_ADDRESSING_MODE_LIST(V) \
-  V(MI)   /* [K] */                    \
-  V(MR)   /* [%r0] */                  \
-  V(MRI)  /* [%r0 + K] */              \
-  V(MR1I) /* [%r0 + %r1 * 1 + K] */    \
-  V(MR2I) /* [%r0 + %r1 * 2 + K] */    \
-  V(MR4I) /* [%r0 + %r1 * 4 + K] */    \
-  V(MR8I) /* [%r0 + %r1 * 8 + K] */
+  V(MR)   /* [%r1            ] */      \
+  V(MRI)  /* [%r1         + K] */      \
+  V(MR1)  /* [%r1 + %r2*1    ] */      \
+  V(MR2)  /* [%r1 + %r2*2    ] */      \
+  V(MR4)  /* [%r1 + %r2*4    ] */      \
+  V(MR8)  /* [%r1 + %r2*8    ] */      \
+  V(MR1I) /* [%r1 + %r2*1 + K] */      \
+  V(MR2I) /* [%r1 + %r2*2 + K] */      \
+  V(MR4I) /* [%r1 + %r2*4 + K] */      \
+  V(MR8I) /* [%r1 + %r2*8 + K] */      \
+  V(M1)   /* [      %r2*1    ] */      \
+  V(M2)   /* [      %r2*2    ] */      \
+  V(M4)   /* [      %r2*4    ] */      \
+  V(M8)   /* [      %r2*8    ] */      \
+  V(M1I)  /* [      %r2*1 + K] */      \
+  V(M2I)  /* [      %r2*2 + K] */      \
+  V(M4I)  /* [      %r2*4 + K] */      \
+  V(M8I)  /* [      %r2*8 + K] */      \
+  V(MI)   /* [              K] */
 
 }  // namespace compiler
 }  // namespace internal
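
The mode name spells out which instruction inputs the memory operand
consumes, in order: base register (R), index register with scale factor
(1/2/4/8), and immediate displacement (I). For example, as decoded by
MemoryOperand in the code generator:

    // kMode_MR    [%r1]               inputs: base
    // kMode_MRI   [%r1 + K]           inputs: base, disp
    // kMode_MR4I  [%r1 + %r2*4 + K]   inputs: base, index, disp
    // kMode_M8    [%r2*8]             inputs: index
    // kMode_MI    [K]                 inputs: disp
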
diff --git a/src/compiler/ia32/instruction-selector-ia32-unittest.cc b/src/compiler/ia32/instruction-selector-ia32-unittest.cc
deleted file mode 100644
index 60708c1..0000000
--- a/src/compiler/ia32/instruction-selector-ia32-unittest.cc
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/instruction-selector-unittest.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-namespace {
-
-// Immediates (random subset).
-static const int32_t kImmediates[] = {
-    kMinInt, -42, -1, 0,  1,  2,    3,      4,          5,
-    6,       7,   8,  16, 42, 0xff, 0xffff, 0x0f0f0f0f, kMaxInt};
-
-}  // namespace
-
-
-TEST_F(InstructionSelectorTest, Int32AddWithParameter) {
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  m.Return(m.Int32Add(m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
-}
-
-
-TEST_F(InstructionSelectorTest, Int32AddWithImmediate) {
-  TRACED_FOREACH(int32_t, imm, kImmediates) {
-    {
-      StreamBuilder m(this, kMachInt32, kMachInt32);
-      m.Return(m.Int32Add(m.Parameter(0), m.Int32Constant(imm)));
-      Stream s = m.Build();
-      ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
-      ASSERT_EQ(2U, s[0]->InputCount());
-      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    }
-    {
-      StreamBuilder m(this, kMachInt32, kMachInt32);
-      m.Return(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)));
-      Stream s = m.Build();
-      ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
-      ASSERT_EQ(2U, s[0]->InputCount());
-      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-    }
-  }
-}
-
-
-TEST_F(InstructionSelectorTest, Int32SubWithParameter) {
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  m.Return(m.Int32Sub(m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(kIA32Sub, s[0]->arch_opcode());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-}
-
-
-TEST_F(InstructionSelectorTest, Int32SubWithImmediate) {
-  TRACED_FOREACH(int32_t, imm, kImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    m.Return(m.Int32Sub(m.Parameter(0), m.Int32Constant(imm)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kIA32Sub, s[0]->arch_opcode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// Loads and stores
-
-namespace {
-
-struct MemoryAccess {
-  MachineType type;
-  ArchOpcode load_opcode;
-  ArchOpcode store_opcode;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
-  OStringStream ost;
-  ost << memacc.type;
-  return os << ost.c_str();
-}
-
-
-static const MemoryAccess kMemoryAccesses[] = {
-    {kMachInt8, kIA32Movsxbl, kIA32Movb},
-    {kMachUint8, kIA32Movzxbl, kIA32Movb},
-    {kMachInt16, kIA32Movsxwl, kIA32Movw},
-    {kMachUint16, kIA32Movzxwl, kIA32Movw},
-    {kMachInt32, kIA32Movl, kIA32Movl},
-    {kMachUint32, kIA32Movl, kIA32Movl},
-    {kMachFloat32, kIA32Movss, kIA32Movss},
-    {kMachFloat64, kIA32Movsd, kIA32Movsd}};
-
-}  // namespace
-
-
-typedef InstructionSelectorTestWithParam<MemoryAccess>
-    InstructionSelectorMemoryAccessTest;
-
-
-TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
-  const MemoryAccess memacc = GetParam();
-  StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
-  m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-}
-
-
-TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateBase) {
-  const MemoryAccess memacc = GetParam();
-  TRACED_FOREACH(int32_t, base, kImmediates) {
-    StreamBuilder m(this, memacc.type, kMachPtr);
-    m.Return(m.Load(memacc.type, m.Int32Constant(base), m.Parameter(0)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
-    EXPECT_EQ(base, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-  }
-}
-
-
-TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
-  const MemoryAccess memacc = GetParam();
-  TRACED_FOREACH(int32_t, index, kImmediates) {
-    StreamBuilder m(this, memacc.type, kMachPtr);
-    m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
-    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(1U, s[0]->OutputCount());
-  }
-}
-
-
-TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
-  const MemoryAccess memacc = GetParam();
-  StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
-  m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
-  m.Return(m.Int32Constant(0));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(3U, s[0]->InputCount());
-  EXPECT_EQ(0U, s[0]->OutputCount());
-}
-
-
-TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateBase) {
-  const MemoryAccess memacc = GetParam();
-  TRACED_FOREACH(int32_t, base, kImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachInt32, memacc.type);
-    m.Store(memacc.type, m.Int32Constant(base), m.Parameter(0), m.Parameter(1));
-    m.Return(m.Int32Constant(0));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
-    ASSERT_EQ(3U, s[0]->InputCount());
-    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
-    EXPECT_EQ(base, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(0U, s[0]->OutputCount());
-  }
-}
-
-
-TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
-  const MemoryAccess memacc = GetParam();
-  TRACED_FOREACH(int32_t, index, kImmediates) {
-    StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
-    m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
-            m.Parameter(1));
-    m.Return(m.Int32Constant(0));
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
-    ASSERT_EQ(3U, s[0]->InputCount());
-    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
-    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
-    EXPECT_EQ(0U, s[0]->OutputCount());
-  }
-}
-
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
-                        InstructionSelectorMemoryAccessTest,
-                        ::testing::ValuesIn(kMemoryAccesses));
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
index 24ebc38..16063ab 100644
--- a/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/src/compiler/ia32/instruction-selector-ia32.cc
@@ -37,15 +37,98 @@
         return false;
     }
   }
+
+  AddressingMode GenerateMemoryOperandInputs(Node* index, int scale, Node* base,
+                                             Node* displacement_node,
+                                             InstructionOperand* inputs[],
+                                             size_t* input_count) {
+    AddressingMode mode = kMode_MRI;
+    int32_t displacement = (displacement_node == NULL)
+                               ? 0
+                               : OpParameter<int32_t>(displacement_node);
+    if (base != NULL) {
+      if (base->opcode() == IrOpcode::kInt32Constant) {
+        displacement += OpParameter<int32_t>(base);
+        base = NULL;
+      }
+    }
+    if (base != NULL) {
+      inputs[(*input_count)++] = UseRegister(base);
+      if (index != NULL) {
+        DCHECK(scale >= 0 && scale <= 3);
+        inputs[(*input_count)++] = UseRegister(index);
+        if (displacement != 0) {
+          inputs[(*input_count)++] = TempImmediate(displacement);
+          static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
+                                                       kMode_MR4I, kMode_MR8I};
+          mode = kMRnI_modes[scale];
+        } else {
+          static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
+                                                      kMode_MR4, kMode_MR8};
+          mode = kMRn_modes[scale];
+        }
+      } else {
+        if (displacement == 0) {
+          mode = kMode_MR;
+        } else {
+          inputs[(*input_count)++] = TempImmediate(displacement);
+          mode = kMode_MRI;
+        }
+      }
+    } else {
+      DCHECK(scale >= 0 && scale <= 3);
+      if (index != NULL) {
+        inputs[(*input_count)++] = UseRegister(index);
+        if (displacement != 0) {
+          inputs[(*input_count)++] = TempImmediate(displacement);
+          static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
+                                                      kMode_M4I, kMode_M8I};
+          mode = kMnI_modes[scale];
+        } else {
+          static const AddressingMode kMn_modes[] = {kMode_MR, kMode_M2,
+                                                     kMode_M4, kMode_M8};
+          mode = kMn_modes[scale];
+        }
+      } else {
+        inputs[(*input_count)++] = TempImmediate(displacement);
+        return kMode_MI;
+      }
+    }
+    return mode;
+  }
+
+  AddressingMode GetEffectiveAddressMemoryOperand(Node* node,
+                                                  InstructionOperand* inputs[],
+                                                  size_t* input_count) {
+    BaseWithIndexAndDisplacement32Matcher m(node, true);
+    DCHECK(m.matches());
+    if (m.displacement() == NULL || CanBeImmediate(m.displacement())) {
+      return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
+                                         m.displacement(), inputs, input_count);
+    } else {
+      inputs[(*input_count)++] = UseRegister(node->InputAt(0));
+      inputs[(*input_count)++] = UseRegister(node->InputAt(1));
+      return kMode_MR1;
+    }
+  }
+
+  bool CanBeBetterLeftOperand(Node* node) const {
+    return !selector()->IsLive(node);
+  }
 };
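
GenerateMemoryOperandInputs folds a constant base into the displacement and
then picks the densest mode that covers the remaining parts; an index with
scale 1 and no base degenerates to a plain base register, which is why
kMn_modes[0] and kMnI_modes[0] reuse the MR forms. Two worked examples
(a sketch; scale is the log2 of the factor):

    //   index=x, scale=2, base=NULL, disp=8
    //     -> mode kMode_M4I, inputs {x, Imm(8)}      ; [%x*4 + 8]
    //   index=x, scale=0, base=Int32Constant(16), disp=NULL
    //     -> the constant base folds into the displacement:
    //        mode kMode_MRI, inputs {x, Imm(16)}     ; [%x + 16]
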
 
 
+static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+                           Node* node) {
+  IA32OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitLoad(Node* node) {
   MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
   MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
-  IA32OperandGenerator g(this);
-  Node* base = node->InputAt(0);
-  Node* index = node->InputAt(1);
 
   ArchOpcode opcode;
   // TODO(titzer): signed/unsigned small loads
@@ -71,23 +154,16 @@
       UNREACHABLE();
       return;
   }
-  if (g.CanBeImmediate(base)) {
-    if (Int32Matcher(index).Is(0)) {  // load [#base + #0]
-      Emit(opcode | AddressingModeField::encode(kMode_MI),
-           g.DefineAsRegister(node), g.UseImmediate(base));
-    } else {  // load [#base + %index]
-      Emit(opcode | AddressingModeField::encode(kMode_MRI),
-           g.DefineAsRegister(node), g.UseRegister(index),
-           g.UseImmediate(base));
-    }
-  } else if (g.CanBeImmediate(index)) {  // load [%base + #index]
-    Emit(opcode | AddressingModeField::encode(kMode_MRI),
-         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
-  } else {  // load [%base + %index + K]
-    Emit(opcode | AddressingModeField::encode(kMode_MR1I),
-         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
-  }
-  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+
+  IA32OperandGenerator g(this);
+  InstructionOperand* outputs[1];
+  outputs[0] = g.DefineAsRegister(node);
+  InstructionOperand* inputs[3];
+  size_t input_count = 0;
+  AddressingMode mode =
+      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+  InstructionCode code = opcode | AddressingModeField::encode(mode);
+  Emit(code, 1, outputs, input_count, inputs);
 }
 
 
@@ -111,14 +187,7 @@
     return;
   }
   DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
-  InstructionOperand* val;
-  if (g.CanBeImmediate(value)) {
-    val = g.UseImmediate(value);
-  } else if (rep == kRepWord8 || rep == kRepBit) {
-    val = g.UseByteRegister(value);
-  } else {
-    val = g.UseRegister(value);
-  }
+
   ArchOpcode opcode;
   switch (rep) {
     case kRepFloat32:
@@ -142,22 +211,114 @@
       UNREACHABLE();
       return;
   }
-  if (g.CanBeImmediate(base)) {
-    if (Int32Matcher(index).Is(0)) {  // store [#base], %|#value
-      Emit(opcode | AddressingModeField::encode(kMode_MI), NULL,
-           g.UseImmediate(base), val);
-    } else {  // store [#base + %index], %|#value
-      Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
-           g.UseRegister(index), g.UseImmediate(base), val);
-    }
-  } else if (g.CanBeImmediate(index)) {  // store [%base + #index], %|#value
-    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
-         g.UseRegister(base), g.UseImmediate(index), val);
-  } else {  // store [%base + %index], %|#value
-    Emit(opcode | AddressingModeField::encode(kMode_MR1I), NULL,
-         g.UseRegister(base), g.UseRegister(index), val);
+
+  InstructionOperand* val;
+  if (g.CanBeImmediate(value)) {
+    val = g.UseImmediate(value);
+  } else if (rep == kRepWord8 || rep == kRepBit) {
+    val = g.UseByteRegister(value);
+  } else {
+    val = g.UseRegister(value);
   }
-  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+
+  InstructionOperand* inputs[4];
+  size_t input_count = 0;
+  AddressingMode mode =
+      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+  InstructionCode code = opcode | AddressingModeField::encode(mode);
+  inputs[input_count++] = val;
+  Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs);
+}
+
+
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  MachineType typ = TypeOf(OpParameter<MachineType>(node));
+  IA32OperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedLoadWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedLoadFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedLoadFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  InstructionOperand* offset_operand = g.UseRegister(offset);
+  InstructionOperand* length_operand =
+      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
+  if (g.CanBeImmediate(buffer)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), offset_operand, length_operand,
+         offset_operand, g.UseImmediate(buffer));
+  } else {
+    Emit(opcode | AddressingModeField::encode(kMode_MR1),
+         g.DefineAsRegister(node), offset_operand, length_operand,
+         g.UseRegister(buffer), offset_operand);
+  }
+}
+
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  IA32OperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  Node* const value = node->InputAt(3);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = kCheckedStoreWord8;
+      break;
+    case kRepWord16:
+      opcode = kCheckedStoreWord16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedStoreWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedStoreFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedStoreFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  InstructionOperand* value_operand =
+      g.CanBeImmediate(value)
+          ? g.UseImmediate(value)
+          : ((rep == kRepWord8 || rep == kRepBit) ? g.UseByteRegister(value)
+                                                  : g.UseRegister(value));
+  InstructionOperand* offset_operand = g.UseRegister(offset);
+  InstructionOperand* length_operand =
+      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
+  if (g.CanBeImmediate(buffer)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
+         offset_operand, length_operand, value_operand, offset_operand,
+         g.UseImmediate(buffer));
+  } else {
+    Emit(opcode | AddressingModeField::encode(kMode_MR1), nullptr,
+         offset_operand, length_operand, value_operand, g.UseRegister(buffer),
+         offset_operand);
+  }
 }
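
The operand layout here must match what the ASSEMBLE_CHECKED_* macros in the
code generator expect; in particular, the offset register is deliberately
passed twice, once for the bounds check and once as part of the memory
operand. Summarized:

    // checked load : input 0 = offset, input 1 = length,
    //                inputs 2.. = memory operand  -> i.MemoryOperand(2)
    // checked store: input 0 = offset, input 1 = length, input 2 = value,
    //                inputs 3.. = memory operand  -> i.MemoryOperand(3)
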
 
 
@@ -166,20 +327,35 @@
                        InstructionCode opcode, FlagsContinuation* cont) {
   IA32OperandGenerator g(selector);
   Int32BinopMatcher m(node);
+  Node* left = m.left().node();
+  Node* right = m.right().node();
   InstructionOperand* inputs[4];
   size_t input_count = 0;
   InstructionOperand* outputs[2];
   size_t output_count = 0;
 
   // TODO(turbofan): match complex addressing modes.
-  // TODO(turbofan): if commutative, pick the non-live-in operand as the left as
-  // this might be the last use and therefore its register can be reused.
-  if (g.CanBeImmediate(m.right().node())) {
-    inputs[input_count++] = g.Use(m.left().node());
-    inputs[input_count++] = g.UseImmediate(m.right().node());
+  if (left == right) {
+    // If both inputs refer to the same operand, enforce allocating a register
+    // for both of them to ensure that we don't end up generating code like
+    // this:
+    //
+    //   mov eax, [ebp-0x10]
+    //   add eax, [ebp-0x10]
+    //   jo label
+    InstructionOperand* const input = g.UseRegister(left);
+    inputs[input_count++] = input;
+    inputs[input_count++] = input;
+  } else if (g.CanBeImmediate(right)) {
+    inputs[input_count++] = g.UseRegister(left);
+    inputs[input_count++] = g.UseImmediate(right);
   } else {
-    inputs[input_count++] = g.UseRegister(m.left().node());
-    inputs[input_count++] = g.Use(m.right().node());
+    if (node->op()->HasProperty(Operator::kCommutative) &&
+        g.CanBeBetterLeftOperand(right)) {
+      std::swap(left, right);
+    }
+    inputs[input_count++] = g.UseRegister(left);
+    inputs[input_count++] = g.Use(right);
   }
 
   if (cont->IsBranch()) {
@@ -226,7 +402,7 @@
   IA32OperandGenerator g(this);
   Int32BinopMatcher m(node);
   if (m.right().Is(-1)) {
-    Emit(kIA32Not, g.DefineSameAsFirst(node), g.Use(m.left().node()));
+    Emit(kIA32Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
   } else {
     VisitBinop(this, node, kIA32Xor);
   }
@@ -240,25 +416,73 @@
   Node* left = node->InputAt(0);
   Node* right = node->InputAt(1);
 
-  // TODO(turbofan): assembler only supports some addressing modes for shifts.
   if (g.CanBeImmediate(right)) {
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseImmediate(right));
   } else {
-    Int32BinopMatcher m(node);
-    if (m.right().IsWord32And()) {
-      Int32BinopMatcher mright(right);
-      if (mright.right().Is(0x1F)) {
-        right = mright.left().node();
-      }
-    }
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseFixed(right, ecx));
   }
 }
 
 
+namespace {
+
+void VisitMulHigh(InstructionSelector* selector, Node* node,
+                  ArchOpcode opcode) {
+  IA32OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsFixed(node, edx),
+                 g.UseFixed(node->InputAt(0), eax),
+                 g.UseUniqueRegister(node->InputAt(1)));
+}
+
+
+void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
+  IA32OperandGenerator g(selector);
+  InstructionOperand* temps[] = {g.TempRegister(edx)};
+  selector->Emit(opcode, g.DefineAsFixed(node, eax),
+                 g.UseFixed(node->InputAt(0), eax),
+                 g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
+}
+
+
+void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
+  IA32OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsFixed(node, edx),
+                 g.UseFixed(node->InputAt(0), eax),
+                 g.UseUnique(node->InputAt(1)));
+}
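
The fixed-register constraints mirror the ia32 idiv/div instructions, which
consume the dividend in edx:eax and leave the quotient in eax and the
remainder in edx; UseUnique keeps the divisor from being allocated into
either of those. Summarized:

    // VisitDiv: result fixed to eax (quotient), edx clobbered via TempRegister
    // VisitMod: result fixed to edx (remainder), eax taken by the dividend
    // divisor : UseUnique(...), so it cannot alias eax or edx
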
+
+void EmitLea(InstructionSelector* selector, Node* result, Node* index,
+             int scale, Node* base, Node* displacement) {
+  IA32OperandGenerator g(selector);
+  InstructionOperand* inputs[4];
+  size_t input_count = 0;
+  AddressingMode mode = g.GenerateMemoryOperandInputs(
+      index, scale, base, displacement, inputs, &input_count);
+
+  DCHECK_NE(0, static_cast<int>(input_count));
+  DCHECK_GE(arraysize(inputs), input_count);
+
+  InstructionOperand* outputs[1];
+  outputs[0] = g.DefineAsRegister(result);
+
+  InstructionCode opcode = AddressingModeField::encode(mode) | kIA32Lea;
+
+  selector->Emit(opcode, 1, outputs, input_count, inputs);
+}
+
+}  // namespace
+
+
 void InstructionSelector::VisitWord32Shl(Node* node) {
+  Int32ScaleMatcher m(node, true);
+  if (m.matches()) {
+    Node* index = node->InputAt(0);
+    Node* base = m.power_of_two_plus_one() ? index : NULL;
+    EmitLea(this, node, index, m.scale(), base, NULL);
+    return;
+  }
   VisitShift(this, node, kIA32Shl);
 }
 
@@ -279,6 +503,29 @@
 
 
 void InstructionSelector::VisitInt32Add(Node* node) {
+  IA32OperandGenerator g(this);
+
+  // Try to match the Add to a lea pattern.
+  BaseWithIndexAndDisplacement32Matcher m(node);
+  if (m.matches() &&
+      (m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
+    InstructionOperand* inputs[4];
+    size_t input_count = 0;
+    AddressingMode mode = g.GenerateMemoryOperandInputs(
+        m.index(), m.scale(), m.base(), m.displacement(), inputs, &input_count);
+
+    DCHECK_NE(0, static_cast<int>(input_count));
+    DCHECK_GE(arraysize(inputs), input_count);
+
+    InstructionOperand* outputs[1];
+    outputs[0] = g.DefineAsRegister(node);
+
+    InstructionCode opcode = AddressingModeField::encode(mode) | kIA32Lea;
+    Emit(opcode, 1, outputs, input_count, inputs);
+    return;
+  }
+
+  // No lea pattern matched; fall back to a regular add.
   VisitBinop(this, node, kIA32Add);
 }
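
Selecting lea for Int32Add folds a base, a scaled index, and a constant into
one instruction that writes a fresh output register, instead of a two-operand
add that overwrites an input. Examples of patterns the matcher recognizes
(sketch):

    //   Int32Add(p, Int32Mul(i, Int32Constant(4)))  ->  lea out, [p + i*4]
    //   Int32Add(Int32Add(p, i), Int32Constant(8))  ->  lea out, [p + i + 8]
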
 
@@ -295,31 +542,36 @@
 
 
 void InstructionSelector::VisitInt32Mul(Node* node) {
+  Int32ScaleMatcher m(node, true);
+  if (m.matches()) {
+    Node* index = node->InputAt(0);
+    Node* base = m.power_of_two_plus_one() ? index : NULL;
+    EmitLea(this, node, index, m.scale(), base, NULL);
+    return;
+  }
   IA32OperandGenerator g(this);
   Node* left = node->InputAt(0);
   Node* right = node->InputAt(1);
   if (g.CanBeImmediate(right)) {
     Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(left),
          g.UseImmediate(right));
-  } else if (g.CanBeImmediate(left)) {
-    Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(right),
-         g.UseImmediate(left));
   } else {
-    // TODO(turbofan): select better left operand.
+    if (g.CanBeBetterLeftOperand(right)) {
+      std::swap(left, right);
+    }
     Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(left),
          g.Use(right));
   }
 }
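
Int32ScaleMatcher with its second argument set to true (presumably enabling
the 2^n + 1 forms) recognizes multiplications by 2, 4 and 8 directly, and by
3, 5 and 9 by reusing the same register as both base and index, so these all
become a single lea as well. For example:

    //   Int32Mul(x, Int32Constant(8))  ->  lea out, [x*8]
    //   Int32Mul(x, Int32Constant(5))  ->  lea out, [x + x*4]
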
 
 
-static inline void VisitDiv(InstructionSelector* selector, Node* node,
-                            ArchOpcode opcode) {
-  IA32OperandGenerator g(selector);
-  InstructionOperand* temps[] = {g.TempRegister(edx)};
-  size_t temp_count = arraysize(temps);
-  selector->Emit(opcode, g.DefineAsFixed(node, eax),
-                 g.UseFixed(node->InputAt(0), eax),
-                 g.UseUnique(node->InputAt(1)), temp_count, temps);
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+  VisitMulHigh(this, node, kIA32ImulHigh);
+}
+
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+  VisitMulHigh(this, node, kIA32UmulHigh);
 }
 
 
@@ -328,32 +580,27 @@
 }
 
 
-void InstructionSelector::VisitInt32UDiv(Node* node) {
+void InstructionSelector::VisitUint32Div(Node* node) {
   VisitDiv(this, node, kIA32Udiv);
 }
 
 
-static inline void VisitMod(InstructionSelector* selector, Node* node,
-                            ArchOpcode opcode) {
-  IA32OperandGenerator g(selector);
-  InstructionOperand* temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
-  size_t temp_count = arraysize(temps);
-  selector->Emit(opcode, g.DefineAsFixed(node, edx),
-                 g.UseFixed(node->InputAt(0), eax),
-                 g.UseUnique(node->InputAt(1)), temp_count, temps);
-}
-
-
 void InstructionSelector::VisitInt32Mod(Node* node) {
   VisitMod(this, node, kIA32Idiv);
 }
 
 
-void InstructionSelector::VisitInt32UMod(Node* node) {
+void InstructionSelector::VisitUint32Mod(Node* node) {
   VisitMod(this, node, kIA32Udiv);
 }
 
 
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
   IA32OperandGenerator g(this);
   Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -362,9 +609,7 @@
 
 void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
   IA32OperandGenerator g(this);
-  // TODO(turbofan): IA32 SSE LoadUint32() should take an operand.
-  Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)));
+  Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
 }
 
 
@@ -380,31 +625,57 @@
 }
 
 
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitFloat64Add(Node* node) {
   IA32OperandGenerator g(this);
-  Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Add, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
 }
 
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   IA32OperandGenerator g(this);
-  Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Sub, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
 }
 
 
 void InstructionSelector::VisitFloat64Mul(Node* node) {
   IA32OperandGenerator g(this);
-  Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Mul, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
 }
 
 
 void InstructionSelector::VisitFloat64Div(Node* node) {
   IA32OperandGenerator g(this);
-  Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Div, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
 }
 
 
@@ -423,104 +694,44 @@
 }
 
 
-void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
-                                                    FlagsContinuation* cont) {
-  VisitBinop(this, node, kIA32Add, cont);
+void InstructionSelector::VisitFloat64Floor(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(SSE4_1));
+  VisitRRFloat64(this, kSSEFloat64Floor, node);
 }
 
 
-void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
-                                                    FlagsContinuation* cont) {
-  VisitBinop(this, node, kIA32Sub, cont);
+void InstructionSelector::VisitFloat64Ceil(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(SSE4_1));
+  VisitRRFloat64(this, kSSEFloat64Ceil, node);
 }
 
 
-// Shared routine for multiple compare operations.
-static inline void VisitCompare(InstructionSelector* selector,
-                                InstructionCode opcode,
-                                InstructionOperand* left,
-                                InstructionOperand* right,
-                                FlagsContinuation* cont) {
-  IA32OperandGenerator g(selector);
-  if (cont->IsBranch()) {
-    selector->Emit(cont->Encode(opcode), NULL, left, right,
-                   g.Label(cont->true_block()),
-                   g.Label(cont->false_block()))->MarkAsControl();
-  } else {
-    DCHECK(cont->IsSet());
-    // TODO(titzer): Needs byte register.
-    selector->Emit(cont->Encode(opcode), g.DefineAsRegister(cont->result()),
-                   left, right);
-  }
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(SSE4_1));
+  VisitRRFloat64(this, kSSEFloat64RoundTruncate, node);
 }
 
 
-// Shared routine for multiple word compare operations.
-static inline void VisitWordCompare(InstructionSelector* selector, Node* node,
-                                    InstructionCode opcode,
-                                    FlagsContinuation* cont, bool commutative) {
-  IA32OperandGenerator g(selector);
-  Node* left = node->InputAt(0);
-  Node* right = node->InputAt(1);
-
-  // Match immediates on left or right side of comparison.
-  if (g.CanBeImmediate(right)) {
-    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
-  } else if (g.CanBeImmediate(left)) {
-    if (!commutative) cont->Commute();
-    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
-  } else {
-    VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
-  }
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+  UNREACHABLE();
 }
 
 
-void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
-  switch (node->opcode()) {
-    case IrOpcode::kInt32Sub:
-      return VisitWordCompare(this, node, kIA32Cmp, cont, false);
-    case IrOpcode::kWord32And:
-      return VisitWordCompare(this, node, kIA32Test, cont, true);
-    default:
-      break;
-  }
-
+void InstructionSelector::VisitCall(Node* node) {
   IA32OperandGenerator g(this);
-  VisitCompare(this, kIA32Test, g.Use(node), g.TempImmediate(-1), cont);
-}
-
-
-void InstructionSelector::VisitWord32Compare(Node* node,
-                                             FlagsContinuation* cont) {
-  VisitWordCompare(this, node, kIA32Cmp, cont, false);
-}
-
-
-void InstructionSelector::VisitFloat64Compare(Node* node,
-                                              FlagsContinuation* cont) {
-  IA32OperandGenerator g(this);
-  Node* left = node->InputAt(0);
-  Node* right = node->InputAt(1);
-  VisitCompare(this, kSSEFloat64Cmp, g.UseRegister(left), g.Use(right), cont);
-}
-
-
-void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
-                                    BasicBlock* deoptimization) {
-  IA32OperandGenerator g(this);
-  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+  const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
 
   FrameStateDescriptor* frame_state_descriptor = NULL;
 
   if (descriptor->NeedsFrameState()) {
     frame_state_descriptor =
-        GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
+        GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
   }
 
   CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
 
   // Compute InstructionOperands for inputs and outputs.
-  InitializeCallBuffer(call, &buffer, true, true);
+  InitializeCallBuffer(node, &buffer, true, true);
 
   // Push any stack arguments.
   for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
@@ -547,17 +758,254 @@
   opcode |= MiscField::encode(descriptor->flags());
 
   // Emit the call instruction.
+  InstructionOperand** first_output =
+      buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
   Instruction* call_instr =
-      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+      Emit(opcode, buffer.outputs.size(), first_output,
            buffer.instruction_args.size(), &buffer.instruction_args.front());
-
   call_instr->MarkAsCall();
-  if (deoptimization != NULL) {
-    DCHECK(continuation != NULL);
-    call_instr->MarkAsControl();
+}
+
+
+namespace {
+
+// Shared routine for multiple compare operations.
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+                  InstructionOperand* left, InstructionOperand* right,
+                  FlagsContinuation* cont) {
+  IA32OperandGenerator g(selector);
+  if (cont->IsBranch()) {
+    selector->Emit(cont->Encode(opcode), NULL, left, right,
+                   g.Label(cont->true_block()),
+                   g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    DCHECK(cont->IsSet());
+    // TODO(titzer): Needs byte register.
+    selector->Emit(cont->Encode(opcode), g.DefineAsRegister(cont->result()),
+                   left, right);
   }
 }
 
+
+// Shared routine for multiple compare operations.
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+                  Node* left, Node* right, FlagsContinuation* cont,
+                  bool commutative) {
+  IA32OperandGenerator g(selector);
+  if (commutative && g.CanBeBetterLeftOperand(right)) {
+    std::swap(left, right);
+  }
+  VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
+}
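+
+For a commutative compare the operands are swapped whenever the right operand looks like the better candidate for the register slot, leaving the other side free to be an immediate or memory operand via g.Use. A hedged illustration (the exact heuristic lives in CanBeBetterLeftOperand, which is not shown in this patch):

    // Word32Equal(load, reg) with the swap:  cmp reg, [mem]  ; load folded
    // Without the swap, the load is first materialized into a register.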
+
+
+// Shared routine for multiple float compare operations.
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+                         FlagsContinuation* cont) {
+  VisitCompare(selector, kSSEFloat64Cmp, node->InputAt(0), node->InputAt(1),
+               cont, node->op()->HasProperty(Operator::kCommutative));
+}
+
+
+// Shared routine for multiple word compare operations.
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+                      InstructionCode opcode, FlagsContinuation* cont) {
+  IA32OperandGenerator g(selector);
+  Node* const left = node->InputAt(0);
+  Node* const right = node->InputAt(1);
+
+  // Match immediates on left or right side of comparison.
+  if (g.CanBeImmediate(right)) {
+    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
+  } else if (g.CanBeImmediate(left)) {
+    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
+  } else {
+    VisitCompare(selector, opcode, left, right, cont,
+                 node->op()->HasProperty(Operator::kCommutative));
+  }
+}
+
+
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+                      FlagsContinuation* cont) {
+  VisitWordCompare(selector, node, kIA32Cmp, cont);
+}
+
+
+// Shared routine for word comparison with zero.
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+                          Node* value, FlagsContinuation* cont) {
+  // Try to combine the branch with a comparison.
+  while (selector->CanCover(user, value)) {
+    switch (value->opcode()) {
+      case IrOpcode::kWord32Equal: {
+        // Try to combine with comparisons against 0 by simply inverting the
+        // continuation.
+        Int32BinopMatcher m(value);
+        if (m.right().Is(0)) {
+          user = value;
+          value = m.left().node();
+          cont->Negate();
+          continue;
+        }
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitWordCompare(selector, value, cont);
+      }
+      case IrOpcode::kInt32LessThan:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWordCompare(selector, value, cont);
+      case IrOpcode::kInt32LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWordCompare(selector, value, cont);
+      case IrOpcode::kUint32LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWordCompare(selector, value, cont);
+      case IrOpcode::kUint32LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWordCompare(selector, value, cont);
+      case IrOpcode::kFloat64Equal:
+        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kFloat64LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnorderedLessThan);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kFloat64LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kProjection:
+        // Check if this is the overflow output projection of an
+        // <Operation>WithOverflow node.
+        if (OpParameter<size_t>(value) == 1u) {
+          // We cannot combine the <Operation>WithOverflow with this branch
+          // unless the 0th projection (the use of the actual value of the
+          // <Operation>) is either NULL, which means there's no use of the
+          // actual value, or was already defined, which means it is
+          // scheduled *AFTER* this branch.
+          Node* node = value->InputAt(0);
+          Node* result = node->FindProjection(0);
+          if (result == NULL || selector->IsDefined(result)) {
+            switch (node->opcode()) {
+              case IrOpcode::kInt32AddWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kIA32Add, cont);
+              case IrOpcode::kInt32SubWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kIA32Sub, cont);
+              default:
+                break;
+            }
+          }
+        }
+        break;
+      case IrOpcode::kInt32Sub:
+        return VisitWordCompare(selector, value, cont);
+      case IrOpcode::kWord32And:
+        return VisitWordCompare(selector, value, kIA32Test, cont);
+      default:
+        break;
+    }
+    break;
+  }
+
+  // Continuation could not be combined with a compare; emit a compare
+  // against 0.
+  IA32OperandGenerator g(selector);
+  VisitCompare(selector, kIA32Cmp, g.Use(value), g.TempImmediate(0), cont);
+}
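+
+VisitWordCompareZero is the fusion driver for branches: it walks through Word32Equal-with-zero nodes (negating the continuation at each step), then encodes a reachable compare or overflow projection directly in the flags, so no boolean value is ever materialized. A hedged sketch of one recognized shape (pseudocode for the IR, not V8 API):

    // Branch(Int32LessThan(a, b), t, f)
    //   -> kSignedLessThan is installed on the continuation and a single
    //      cmp a, b plus jcc pair is emitted; comparisons wrapped in
    //      Word32Equal(..., 0) are unwrapped first by negating the
    //      continuation.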
+
+}  // namespace
+
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+                                      BasicBlock* fbranch) {
+  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
+}
+
+
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+  FlagsContinuation cont(kEqual, node);
+  Int32BinopMatcher m(node);
+  if (m.right().Is(0)) {
+    return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
+  }
+  VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+  FlagsContinuation cont(kSignedLessThan, node);
+  VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThan, node);
+  VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+  if (Node* ovf = node->FindProjection(1)) {
+    FlagsContinuation cont(kOverflow, ovf);
+    return VisitBinop(this, node, kIA32Add, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop(this, node, kIA32Add, &cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+  if (Node* ovf = node->FindProjection(1)) {
+    FlagsContinuation cont(kOverflow, ovf);
+    return VisitBinop(this, node, kIA32Sub, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop(this, node, kIA32Sub, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+  FlagsContinuation cont(kUnorderedEqual, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+  FlagsContinuation cont(kUnorderedLessThan, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+  if (CpuFeatures::IsSupported(SSE4_1)) {
+    return MachineOperatorBuilder::kFloat64Floor |
+           MachineOperatorBuilder::kFloat64Ceil |
+           MachineOperatorBuilder::kFloat64RoundTruncate |
+           MachineOperatorBuilder::kWord32ShiftIsSafe;
+  }
+  return MachineOperatorBuilder::Flag::kNoFlags;
+}
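+
+Floor, ceil, and truncate are only advertised when SSE4.1 is present, presumably because they lower to the roundsd instruction, whose immediate operand selects the rounding mode; without SSE4.1 the generic graph lowering stays in place. Standard roundsd mode bits for reference (from the SSE4.1 ISA, not from this patch):

    // roundsd dst, src, imm   -- imm bits 1:0 select the mode:
    //   01 -> round toward -inf   (Float64Floor)
    //   10 -> round toward +inf   (Float64Ceil)
    //   11 -> round toward zero   (Float64RoundTruncate)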
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/ia32/linkage-ia32.cc b/src/compiler/ia32/linkage-ia32.cc
index f2c5fab..12cc34f 100644
--- a/src/compiler/ia32/linkage-ia32.cc
+++ b/src/compiler/ia32/linkage-ia32.cc
@@ -30,8 +30,9 @@
 
 typedef LinkageHelper<IA32LinkageHelperTraits> LH;
 
-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
-  return LH::GetJSCallDescriptor(zone, parameter_count);
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
+                                             CallDescriptor::Flags flags) {
+  return LH::GetJSCallDescriptor(zone, parameter_count, flags);
 }
 
 
@@ -44,10 +45,10 @@
 
 
 CallDescriptor* Linkage::GetStubCallDescriptor(
-    CallInterfaceDescriptor descriptor, int stack_parameter_count,
-    CallDescriptor::Flags flags, Zone* zone) {
+    const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
+    CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
   return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
-                                   flags);
+                                   flags, properties);
 }
 
 
diff --git a/src/compiler/instruction-codes.h b/src/compiler/instruction-codes.h
index 2d921bd..ea17854 100644
--- a/src/compiler/instruction-codes.h
+++ b/src/compiler/instruction-codes.h
@@ -5,12 +5,18 @@
 #ifndef V8_COMPILER_INSTRUCTION_CODES_H_
 #define V8_COMPILER_INSTRUCTION_CODES_H_
 
+#include <iosfwd>
+
 #if V8_TARGET_ARCH_ARM
 #include "src/compiler/arm/instruction-codes-arm.h"
 #elif V8_TARGET_ARCH_ARM64
 #include "src/compiler/arm64/instruction-codes-arm64.h"
 #elif V8_TARGET_ARCH_IA32
 #include "src/compiler/ia32/instruction-codes-ia32.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/compiler/mips/instruction-codes-mips.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/compiler/mips64/instruction-codes-mips64.h"
 #elif V8_TARGET_ARCH_X64
 #include "src/compiler/x64/instruction-codes-x64.h"
 #else
@@ -21,9 +27,6 @@
 
 namespace v8 {
 namespace internal {
-
-class OStream;
-
 namespace compiler {
 
 // Target-specific opcodes that specify which assembly sequence to emit.
@@ -34,7 +37,20 @@
   V(ArchJmp)                \
   V(ArchNop)                \
   V(ArchRet)                \
+  V(ArchStackPointer)       \
   V(ArchTruncateDoubleToI)  \
+  V(CheckedLoadInt8)        \
+  V(CheckedLoadUint8)       \
+  V(CheckedLoadInt16)       \
+  V(CheckedLoadUint16)      \
+  V(CheckedLoadWord32)      \
+  V(CheckedLoadFloat32)     \
+  V(CheckedLoadFloat64)     \
+  V(CheckedStoreWord8)      \
+  V(CheckedStoreWord16)     \
+  V(CheckedStoreWord32)     \
+  V(CheckedStoreFloat32)    \
+  V(CheckedStoreFloat64)    \
   TARGET_ARCH_OPCODE_LIST(V)
 
 enum ArchOpcode {
@@ -46,7 +62,7 @@
 #undef COUNT_ARCH_OPCODE
 };
 
-OStream& operator<<(OStream& os, const ArchOpcode& ao);
+std::ostream& operator<<(std::ostream& os, const ArchOpcode& ao);
 
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
@@ -65,12 +81,12 @@
 #undef COUNT_ADDRESSING_MODE
 };
 
-OStream& operator<<(OStream& os, const AddressingMode& am);
+std::ostream& operator<<(std::ostream& os, const AddressingMode& am);
 
 // The mode of the flags continuation (see below).
 enum FlagsMode { kFlags_none = 0, kFlags_branch = 1, kFlags_set = 2 };
 
-OStream& operator<<(OStream& os, const FlagsMode& fm);
+std::ostream& operator<<(std::ostream& os, const FlagsMode& fm);
 
 // The condition of flags continuation (see below).
 enum FlagsCondition {
@@ -94,7 +110,11 @@
   kNotOverflow
 };
 
-OStream& operator<<(OStream& os, const FlagsCondition& fc);
+inline FlagsCondition NegateFlagsCondition(FlagsCondition condition) {
+  return static_cast<FlagsCondition>(condition ^ 1);
+}
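+
+NegateFlagsCondition only works if the FlagsCondition enumerators are declared in negation pairs, so that flipping the low bit maps each condition onto its inverse; the XOR in the helper above relies on that layout. A self-contained sketch of the invariant (hypothetical enum subset, not the real declaration):

    #include <cassert>

    enum Cond {  // paired layout: 2k and 2k+1 negate each other
      kEq = 0, kNe = 1,
      kLt = 2, kGe = 3,
    };

    Cond Negate(Cond c) { return static_cast<Cond>(c ^ 1); }

    int main() {
      assert(Negate(kEq) == kNe);
      assert(Negate(kGe) == kLt);
      return 0;
    }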
+
+std::ostream& operator<<(std::ostream& os, const FlagsCondition& fc);
 
 // The InstructionCode is an opaque, target-specific integer that encodes
 // what code to emit for an instruction in the code generator. It is not
@@ -107,10 +127,10 @@
 // continuation into a single InstructionCode which is stored as part of
 // the instruction.
 typedef BitField<ArchOpcode, 0, 7> ArchOpcodeField;
-typedef BitField<AddressingMode, 7, 4> AddressingModeField;
-typedef BitField<FlagsMode, 11, 2> FlagsModeField;
-typedef BitField<FlagsCondition, 13, 5> FlagsConditionField;
-typedef BitField<int, 13, 19> MiscField;
+typedef BitField<AddressingMode, 7, 5> AddressingModeField;
+typedef BitField<FlagsMode, 12, 2> FlagsModeField;
+typedef BitField<FlagsCondition, 14, 5> FlagsConditionField;
+typedef BitField<int, 14, 18> MiscField;
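+
+Widening AddressingModeField to five bits pushes the later fields up; note that FlagsConditionField and MiscField still share bits from 14 upward, evidently relying on only one of them being meaningful at a time (a condition when a flags continuation is encoded, the misc payload otherwise), exactly as in the old 13-based layout. The new packing, read off the typedefs above:

    // bit:  0..6        7..11           12..13     14..18 / 14..31
    //       ArchOpcode  AddressingMode  FlagsMode  FlagsCondition / Misc
    //       (7 bits)    (5 bits)        (2 bits)   (overlapping payload)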
 
 }  // namespace compiler
 }  // namespace internal
diff --git a/src/compiler/instruction-selector-impl.h b/src/compiler/instruction-selector-impl.h
index d00109e..bdcd952 100644
--- a/src/compiler/instruction-selector-impl.h
+++ b/src/compiler/instruction-selector-impl.h
@@ -8,6 +8,7 @@
 #include "src/compiler/instruction.h"
 #include "src/compiler/instruction-selector.h"
 #include "src/compiler/linkage.h"
+#include "src/macro-assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -44,8 +45,9 @@
 
   InstructionOperand* DefineAsConstant(Node* node) {
     selector()->MarkAsDefined(node);
-    sequence()->AddConstant(node->id(), ToConstant(node));
-    return ConstantOperand::Create(node->id(), zone());
+    int virtual_register = selector_->GetVirtualRegister(node);
+    sequence()->AddConstant(virtual_register, ToConstant(node));
+    return ConstantOperand::Create(virtual_register, zone());
   }
 
   InstructionOperand* DefineAsLocation(Node* node, LinkageLocation location,
@@ -54,9 +56,9 @@
   }
 
   InstructionOperand* Use(Node* node) {
-    return Use(node,
-               new (zone()) UnallocatedOperand(
-                   UnallocatedOperand::ANY, UnallocatedOperand::USED_AT_START));
+    return Use(
+        node, new (zone()) UnallocatedOperand(
+                  UnallocatedOperand::NONE, UnallocatedOperand::USED_AT_START));
   }
 
   InstructionOperand* UseRegister(Node* node) {
@@ -68,7 +70,7 @@
   // Use register or operand for the node. If a register is chosen, it won't
   // alias any temporary or output registers.
   InstructionOperand* UseUnique(Node* node) {
-    return Use(node, new (zone()) UnallocatedOperand(UnallocatedOperand::ANY));
+    return Use(node, new (zone()) UnallocatedOperand(UnallocatedOperand::NONE));
   }
 
   // Use a unique register for the node that does not alias any temporary or
@@ -127,13 +129,18 @@
     return ImmediateOperand::Create(index, zone());
   }
 
+  InstructionOperand* TempLocation(LinkageLocation location, MachineType type) {
+    UnallocatedOperand* op = ToUnallocatedOperand(location, type);
+    op->set_virtual_register(sequence()->NextVirtualRegister());
+    return op;
+  }
+
   InstructionOperand* Label(BasicBlock* block) {
-    // TODO(bmeurer): We misuse ImmediateOperand here.
-    return TempImmediate(block->id());
+    int index = sequence()->AddImmediate(Constant(block->GetRpoNumber()));
+    return ImmediateOperand::Create(index, zone());
   }
 
  protected:
-  Graph* graph() const { return selector()->graph(); }
   InstructionSelector* selector() const { return selector_; }
   InstructionSequence* sequence() const { return selector()->sequence(); }
   Isolate* isolate() const { return zone()->isolate(); }
@@ -146,8 +153,10 @@
         return Constant(OpParameter<int32_t>(node));
       case IrOpcode::kInt64Constant:
         return Constant(OpParameter<int64_t>(node));
-      case IrOpcode::kNumberConstant:
+      case IrOpcode::kFloat32Constant:
+        return Constant(OpParameter<float>(node));
       case IrOpcode::kFloat64Constant:
+      case IrOpcode::kNumberConstant:
         return Constant(OpParameter<double>(node));
       case IrOpcode::kExternalConstant:
         return Constant(OpParameter<ExternalReference>(node));
@@ -163,7 +172,7 @@
   UnallocatedOperand* Define(Node* node, UnallocatedOperand* operand) {
     DCHECK_NOT_NULL(node);
     DCHECK_NOT_NULL(operand);
-    operand->set_virtual_register(node->id());
+    operand->set_virtual_register(selector_->GetVirtualRegister(node));
     selector()->MarkAsDefined(node);
     return operand;
   }
@@ -171,7 +180,7 @@
   UnallocatedOperand* Use(Node* node, UnallocatedOperand* operand) {
     DCHECK_NOT_NULL(node);
     DCHECK_NOT_NULL(operand);
-    operand->set_virtual_register(node->id());
+    operand->set_virtual_register(selector_->GetVirtualRegister(node));
     selector()->MarkAsUsed(node);
     return operand;
   }
@@ -247,7 +256,7 @@
 
   void Negate() {
     DCHECK(!IsNone());
-    condition_ = static_cast<FlagsCondition>(condition_ ^ 1);
+    condition_ = NegateFlagsCondition(condition_);
   }
 
   void Commute() {
@@ -307,8 +316,6 @@
     if (negate) Negate();
   }
 
-  void SwapBlocks() { std::swap(true_block_, false_block_); }
-
   // Encodes this flags continuation into the given opcode.
   InstructionCode Encode(InstructionCode opcode) {
     opcode |= FlagsModeField::encode(mode_);
@@ -331,10 +338,10 @@
 // TODO(bmeurer): Get rid of the CallBuffer business and make
 // InstructionSelector::VisitCall platform independent instead.
 struct CallBuffer {
-  CallBuffer(Zone* zone, CallDescriptor* descriptor,
+  CallBuffer(Zone* zone, const CallDescriptor* descriptor,
              FrameStateDescriptor* frame_state);
 
-  CallDescriptor* descriptor;
+  const CallDescriptor* descriptor;
   FrameStateDescriptor* frame_state_descriptor;
   NodeVector output_nodes;
   InstructionOperandVector outputs;
diff --git a/src/compiler/instruction-selector-unittest.cc b/src/compiler/instruction-selector-unittest.cc
deleted file mode 100644
index aa70735..0000000
--- a/src/compiler/instruction-selector-unittest.cc
+++ /dev/null
@@ -1,496 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/instruction-selector-unittest.h"
-
-#include "src/compiler/compiler-test-utils.h"
-#include "src/flags.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-namespace {
-
-typedef RawMachineAssembler::Label MLabel;
-
-}  // namespace
-
-
-InstructionSelectorTest::InstructionSelectorTest() : rng_(FLAG_random_seed) {}
-
-
-InstructionSelectorTest::~InstructionSelectorTest() {}
-
-
-InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
-    InstructionSelector::Features features,
-    InstructionSelectorTest::StreamBuilderMode mode) {
-  Schedule* schedule = Export();
-  if (FLAG_trace_turbo) {
-    OFStream out(stdout);
-    out << "=== Schedule before instruction selection ===" << endl << *schedule;
-  }
-  EXPECT_NE(0, graph()->NodeCount());
-  CompilationInfo info(test_->isolate(), test_->zone());
-  Linkage linkage(&info, call_descriptor());
-  InstructionSequence sequence(&linkage, graph(), schedule);
-  SourcePositionTable source_position_table(graph());
-  InstructionSelector selector(&sequence, &source_position_table, features);
-  selector.SelectInstructions();
-  if (FLAG_trace_turbo) {
-    OFStream out(stdout);
-    out << "=== Code sequence after instruction selection ===" << endl
-        << sequence;
-  }
-  Stream s;
-  std::set<int> virtual_registers;
-  for (InstructionSequence::const_iterator i = sequence.begin();
-       i != sequence.end(); ++i) {
-    Instruction* instr = *i;
-    if (instr->opcode() < 0) continue;
-    if (mode == kTargetInstructions) {
-      switch (instr->arch_opcode()) {
-#define CASE(Name) \
-  case k##Name:    \
-    break;
-        TARGET_ARCH_OPCODE_LIST(CASE)
-#undef CASE
-        default:
-          continue;
-      }
-    }
-    if (mode == kAllExceptNopInstructions && instr->arch_opcode() == kArchNop) {
-      continue;
-    }
-    for (size_t i = 0; i < instr->OutputCount(); ++i) {
-      InstructionOperand* output = instr->OutputAt(i);
-      EXPECT_NE(InstructionOperand::IMMEDIATE, output->kind());
-      if (output->IsConstant()) {
-        s.constants_.insert(std::make_pair(
-            output->index(), sequence.GetConstant(output->index())));
-        virtual_registers.insert(output->index());
-      } else if (output->IsUnallocated()) {
-        virtual_registers.insert(
-            UnallocatedOperand::cast(output)->virtual_register());
-      }
-    }
-    for (size_t i = 0; i < instr->InputCount(); ++i) {
-      InstructionOperand* input = instr->InputAt(i);
-      EXPECT_NE(InstructionOperand::CONSTANT, input->kind());
-      if (input->IsImmediate()) {
-        s.immediates_.insert(std::make_pair(
-            input->index(), sequence.GetImmediate(input->index())));
-      } else if (input->IsUnallocated()) {
-        virtual_registers.insert(
-            UnallocatedOperand::cast(input)->virtual_register());
-      }
-    }
-    s.instructions_.push_back(instr);
-  }
-  for (std::set<int>::const_iterator i = virtual_registers.begin();
-       i != virtual_registers.end(); ++i) {
-    int virtual_register = *i;
-    if (sequence.IsDouble(virtual_register)) {
-      EXPECT_FALSE(sequence.IsReference(virtual_register));
-      s.doubles_.insert(virtual_register);
-    }
-    if (sequence.IsReference(virtual_register)) {
-      EXPECT_FALSE(sequence.IsDouble(virtual_register));
-      s.references_.insert(virtual_register);
-    }
-  }
-  for (int i = 0; i < sequence.GetFrameStateDescriptorCount(); i++) {
-    s.deoptimization_entries_.push_back(sequence.GetFrameStateDescriptor(
-        InstructionSequence::StateId::FromInt(i)));
-  }
-  return s;
-}
-
-
-// -----------------------------------------------------------------------------
-// Return.
-
-
-TARGET_TEST_F(InstructionSelectorTest, ReturnParameter) {
-  StreamBuilder m(this, kMachInt32, kMachInt32);
-  m.Return(m.Parameter(0));
-  Stream s = m.Build(kAllInstructions);
-  ASSERT_EQ(2U, s.size());
-  EXPECT_EQ(kArchNop, s[0]->arch_opcode());
-  ASSERT_EQ(1U, s[0]->OutputCount());
-  EXPECT_EQ(kArchRet, s[1]->arch_opcode());
-  EXPECT_EQ(1U, s[1]->InputCount());
-}
-
-
-TARGET_TEST_F(InstructionSelectorTest, ReturnZero) {
-  StreamBuilder m(this, kMachInt32);
-  m.Return(m.Int32Constant(0));
-  Stream s = m.Build(kAllInstructions);
-  ASSERT_EQ(2U, s.size());
-  EXPECT_EQ(kArchNop, s[0]->arch_opcode());
-  ASSERT_EQ(1U, s[0]->OutputCount());
-  EXPECT_EQ(InstructionOperand::CONSTANT, s[0]->OutputAt(0)->kind());
-  EXPECT_EQ(0, s.ToInt32(s[0]->OutputAt(0)));
-  EXPECT_EQ(kArchRet, s[1]->arch_opcode());
-  EXPECT_EQ(1U, s[1]->InputCount());
-}
-
-
-// -----------------------------------------------------------------------------
-// Conversions.
-
-
-TARGET_TEST_F(InstructionSelectorTest, TruncateFloat64ToInt32WithParameter) {
-  StreamBuilder m(this, kMachInt32, kMachFloat64);
-  m.Return(m.TruncateFloat64ToInt32(m.Parameter(0)));
-  Stream s = m.Build(kAllInstructions);
-  ASSERT_EQ(3U, s.size());
-  EXPECT_EQ(kArchNop, s[0]->arch_opcode());
-  EXPECT_EQ(kArchTruncateDoubleToI, s[1]->arch_opcode());
-  EXPECT_EQ(1U, s[1]->InputCount());
-  EXPECT_EQ(1U, s[1]->OutputCount());
-  EXPECT_EQ(kArchRet, s[2]->arch_opcode());
-}
-
-
-// -----------------------------------------------------------------------------
-// Parameters.
-
-
-TARGET_TEST_F(InstructionSelectorTest, DoubleParameter) {
-  StreamBuilder m(this, kMachFloat64, kMachFloat64);
-  Node* param = m.Parameter(0);
-  m.Return(param);
-  Stream s = m.Build(kAllInstructions);
-  EXPECT_TRUE(s.IsDouble(param->id()));
-}
-
-
-TARGET_TEST_F(InstructionSelectorTest, ReferenceParameter) {
-  StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged);
-  Node* param = m.Parameter(0);
-  m.Return(param);
-  Stream s = m.Build(kAllInstructions);
-  EXPECT_TRUE(s.IsReference(param->id()));
-}
-
-
-// -----------------------------------------------------------------------------
-// Finish.
-
-
-TARGET_TEST_F(InstructionSelectorTest, Finish) {
-  StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged);
-  Node* param = m.Parameter(0);
-  Node* finish = m.NewNode(m.common()->Finish(1), param, m.graph()->start());
-  m.Return(finish);
-  Stream s = m.Build(kAllInstructions);
-  ASSERT_EQ(3U, s.size());
-  EXPECT_EQ(kArchNop, s[0]->arch_opcode());
-  ASSERT_EQ(1U, s[0]->OutputCount());
-  ASSERT_TRUE(s[0]->Output()->IsUnallocated());
-  EXPECT_EQ(param->id(), s.ToVreg(s[0]->Output()));
-  EXPECT_EQ(kArchNop, s[1]->arch_opcode());
-  ASSERT_EQ(1U, s[1]->InputCount());
-  ASSERT_TRUE(s[1]->InputAt(0)->IsUnallocated());
-  EXPECT_EQ(param->id(), s.ToVreg(s[1]->InputAt(0)));
-  ASSERT_EQ(1U, s[1]->OutputCount());
-  ASSERT_TRUE(s[1]->Output()->IsUnallocated());
-  EXPECT_TRUE(UnallocatedOperand::cast(s[1]->Output())->HasSameAsInputPolicy());
-  EXPECT_EQ(finish->id(), s.ToVreg(s[1]->Output()));
-  EXPECT_TRUE(s.IsReference(finish->id()));
-}
-
-
-// -----------------------------------------------------------------------------
-// Phi.
-
-
-typedef InstructionSelectorTestWithParam<MachineType>
-    InstructionSelectorPhiTest;
-
-
-TARGET_TEST_P(InstructionSelectorPhiTest, Doubleness) {
-  const MachineType type = GetParam();
-  StreamBuilder m(this, type, type, type);
-  Node* param0 = m.Parameter(0);
-  Node* param1 = m.Parameter(1);
-  MLabel a, b, c;
-  m.Branch(m.Int32Constant(0), &a, &b);
-  m.Bind(&a);
-  m.Goto(&c);
-  m.Bind(&b);
-  m.Goto(&c);
-  m.Bind(&c);
-  Node* phi = m.Phi(type, param0, param1);
-  m.Return(phi);
-  Stream s = m.Build(kAllInstructions);
-  EXPECT_EQ(s.IsDouble(phi->id()), s.IsDouble(param0->id()));
-  EXPECT_EQ(s.IsDouble(phi->id()), s.IsDouble(param1->id()));
-}
-
-
-TARGET_TEST_P(InstructionSelectorPhiTest, Referenceness) {
-  const MachineType type = GetParam();
-  StreamBuilder m(this, type, type, type);
-  Node* param0 = m.Parameter(0);
-  Node* param1 = m.Parameter(1);
-  MLabel a, b, c;
-  m.Branch(m.Int32Constant(1), &a, &b);
-  m.Bind(&a);
-  m.Goto(&c);
-  m.Bind(&b);
-  m.Goto(&c);
-  m.Bind(&c);
-  Node* phi = m.Phi(type, param0, param1);
-  m.Return(phi);
-  Stream s = m.Build(kAllInstructions);
-  EXPECT_EQ(s.IsReference(phi->id()), s.IsReference(param0->id()));
-  EXPECT_EQ(s.IsReference(phi->id()), s.IsReference(param1->id()));
-}
-
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorPhiTest,
-                        ::testing::Values(kMachFloat64, kMachInt8, kMachUint8,
-                                          kMachInt16, kMachUint16, kMachInt32,
-                                          kMachUint32, kMachInt64, kMachUint64,
-                                          kMachPtr, kMachAnyTagged));
-
-
-// -----------------------------------------------------------------------------
-// ValueEffect.
-
-
-TARGET_TEST_F(InstructionSelectorTest, ValueEffect) {
-  StreamBuilder m1(this, kMachInt32, kMachPtr);
-  Node* p1 = m1.Parameter(0);
-  m1.Return(m1.Load(kMachInt32, p1, m1.Int32Constant(0)));
-  Stream s1 = m1.Build(kAllInstructions);
-  StreamBuilder m2(this, kMachInt32, kMachPtr);
-  Node* p2 = m2.Parameter(0);
-  m2.Return(m2.NewNode(m2.machine()->Load(kMachInt32), p2, m2.Int32Constant(0),
-                       m2.NewNode(m2.common()->ValueEffect(1), p2)));
-  Stream s2 = m2.Build(kAllInstructions);
-  EXPECT_LE(3U, s1.size());
-  ASSERT_EQ(s1.size(), s2.size());
-  TRACED_FORRANGE(size_t, i, 0, s1.size() - 1) {
-    const Instruction* i1 = s1[i];
-    const Instruction* i2 = s2[i];
-    EXPECT_EQ(i1->arch_opcode(), i2->arch_opcode());
-    EXPECT_EQ(i1->InputCount(), i2->InputCount());
-    EXPECT_EQ(i1->OutputCount(), i2->OutputCount());
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// Calls with deoptimization.
-TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
-  StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
-                  kMachAnyTagged);
-
-  BailoutId bailout_id(42);
-
-  Node* function_node = m.Parameter(0);
-  Node* receiver = m.Parameter(1);
-  Node* context = m.Parameter(2);
-
-  Node* parameters = m.NewNode(m.common()->StateValues(1), m.Int32Constant(1));
-  Node* locals = m.NewNode(m.common()->StateValues(0));
-  Node* stack = m.NewNode(m.common()->StateValues(0));
-  Node* context_dummy = m.Int32Constant(0);
-
-  Node* state_node = m.NewNode(
-      m.common()->FrameState(JS_FRAME, bailout_id, kPushOutput), parameters,
-      locals, stack, context_dummy, m.UndefinedConstant());
-  Node* call = m.CallJS0(function_node, receiver, context, state_node);
-  m.Return(call);
-
-  Stream s = m.Build(kAllExceptNopInstructions);
-
-  // Skip until kArchCallJSFunction.
-  size_t index = 0;
-  for (; index < s.size() && s[index]->arch_opcode() != kArchCallJSFunction;
-       index++) {
-  }
-  // Now we should have two instructions: call and return.
-  ASSERT_EQ(index + 2, s.size());
-
-  EXPECT_EQ(kArchCallJSFunction, s[index++]->arch_opcode());
-  EXPECT_EQ(kArchRet, s[index++]->arch_opcode());
-
-  // TODO(jarin) Check deoptimization table.
-}
-
-
-TARGET_TEST_F(InstructionSelectorTest, CallFunctionStubWithDeopt) {
-  StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
-                  kMachAnyTagged);
-
-  BailoutId bailout_id_before(42);
-
-  // Some arguments for the call node.
-  Node* function_node = m.Parameter(0);
-  Node* receiver = m.Parameter(1);
-  Node* context = m.Int32Constant(1);  // Context is ignored.
-
-  // Build frame state for the state before the call.
-  Node* parameters = m.NewNode(m.common()->StateValues(1), m.Int32Constant(43));
-  Node* locals = m.NewNode(m.common()->StateValues(1), m.Int32Constant(44));
-  Node* stack = m.NewNode(m.common()->StateValues(1), m.Int32Constant(45));
-
-  Node* context_sentinel = m.Int32Constant(0);
-  Node* frame_state_before = m.NewNode(
-      m.common()->FrameState(JS_FRAME, bailout_id_before, kPushOutput),
-      parameters, locals, stack, context_sentinel, m.UndefinedConstant());
-
-  // Build the call.
-  Node* call = m.CallFunctionStub0(function_node, receiver, context,
-                                   frame_state_before, CALL_AS_METHOD);
-
-  m.Return(call);
-
-  Stream s = m.Build(kAllExceptNopInstructions);
-
-  // Skip until kArchCallCodeObject.
-  size_t index = 0;
-  for (; index < s.size() && s[index]->arch_opcode() != kArchCallCodeObject;
-       index++) {
-  }
-  // Now we should have two instructions: call and return.
-  ASSERT_EQ(index + 2, s.size());
-
-  // Check the call instruction
-  const Instruction* call_instr = s[index++];
-  EXPECT_EQ(kArchCallCodeObject, call_instr->arch_opcode());
-  size_t num_operands =
-      1 +  // Code object.
-      1 +
-      4 +  // Frame state deopt id + one input for each value in frame state.
-      1 +  // Function.
-      1;   // Context.
-  ASSERT_EQ(num_operands, call_instr->InputCount());
-
-  // Code object.
-  EXPECT_TRUE(call_instr->InputAt(0)->IsImmediate());
-
-  // Deoptimization id.
-  int32_t deopt_id_before = s.ToInt32(call_instr->InputAt(1));
-  FrameStateDescriptor* desc_before =
-      s.GetFrameStateDescriptor(deopt_id_before);
-  EXPECT_EQ(bailout_id_before, desc_before->bailout_id());
-  EXPECT_EQ(kPushOutput, desc_before->state_combine());
-  EXPECT_EQ(1u, desc_before->parameters_count());
-  EXPECT_EQ(1u, desc_before->locals_count());
-  EXPECT_EQ(1u, desc_before->stack_count());
-  EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(2)));
-  EXPECT_EQ(0, s.ToInt32(call_instr->InputAt(3)));
-  EXPECT_EQ(44, s.ToInt32(call_instr->InputAt(4)));
-  EXPECT_EQ(45, s.ToInt32(call_instr->InputAt(5)));
-
-  // Function.
-  EXPECT_EQ(function_node->id(), s.ToVreg(call_instr->InputAt(6)));
-  // Context.
-  EXPECT_EQ(context->id(), s.ToVreg(call_instr->InputAt(7)));
-
-  EXPECT_EQ(kArchRet, s[index++]->arch_opcode());
-
-  EXPECT_EQ(index, s.size());
-}
-
-
-TARGET_TEST_F(InstructionSelectorTest,
-              CallFunctionStubDeoptRecursiveFrameState) {
-  StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
-                  kMachAnyTagged);
-
-  BailoutId bailout_id_before(42);
-  BailoutId bailout_id_parent(62);
-
-  // Some arguments for the call node.
-  Node* function_node = m.Parameter(0);
-  Node* receiver = m.Parameter(1);
-  Node* context = m.Int32Constant(66);
-
-  // Build frame state for the state before the call.
-  Node* parameters = m.NewNode(m.common()->StateValues(1), m.Int32Constant(63));
-  Node* locals = m.NewNode(m.common()->StateValues(1), m.Int32Constant(64));
-  Node* stack = m.NewNode(m.common()->StateValues(1), m.Int32Constant(65));
-  Node* frame_state_parent = m.NewNode(
-      m.common()->FrameState(JS_FRAME, bailout_id_parent, kIgnoreOutput),
-      parameters, locals, stack, context, m.UndefinedConstant());
-
-  Node* context2 = m.Int32Constant(46);
-  Node* parameters2 =
-      m.NewNode(m.common()->StateValues(1), m.Int32Constant(43));
-  Node* locals2 = m.NewNode(m.common()->StateValues(1), m.Int32Constant(44));
-  Node* stack2 = m.NewNode(m.common()->StateValues(1), m.Int32Constant(45));
-  Node* frame_state_before = m.NewNode(
-      m.common()->FrameState(JS_FRAME, bailout_id_before, kPushOutput),
-      parameters2, locals2, stack2, context2, frame_state_parent);
-
-  // Build the call.
-  Node* call = m.CallFunctionStub0(function_node, receiver, context2,
-                                   frame_state_before, CALL_AS_METHOD);
-
-  m.Return(call);
-
-  Stream s = m.Build(kAllExceptNopInstructions);
-
-  // Skip until kArchCallCodeObject.
-  size_t index = 0;
-  for (; index < s.size() && s[index]->arch_opcode() != kArchCallCodeObject;
-       index++) {
-  }
-  // Now we should have two instructions: call and return.
-  EXPECT_EQ(index + 2, s.size());
-
-  // Check the call instruction
-  const Instruction* call_instr = s[index++];
-  EXPECT_EQ(kArchCallCodeObject, call_instr->arch_opcode());
-  size_t num_operands =
-      1 +  // Code object.
-      1 +  // Frame state deopt id
-      4 +  // One input for each value in frame state + context.
-      4 +  // One input for each value in the parent frame state + context.
-      1 +  // Function.
-      1;   // Context.
-  EXPECT_EQ(num_operands, call_instr->InputCount());
-  // Code object.
-  EXPECT_TRUE(call_instr->InputAt(0)->IsImmediate());
-
-  // Deoptimization id.
-  int32_t deopt_id_before = s.ToInt32(call_instr->InputAt(1));
-  FrameStateDescriptor* desc_before =
-      s.GetFrameStateDescriptor(deopt_id_before);
-  EXPECT_EQ(bailout_id_before, desc_before->bailout_id());
-  EXPECT_EQ(1u, desc_before->parameters_count());
-  EXPECT_EQ(1u, desc_before->locals_count());
-  EXPECT_EQ(1u, desc_before->stack_count());
-  EXPECT_EQ(63, s.ToInt32(call_instr->InputAt(2)));
-  // Context:
-  EXPECT_EQ(66, s.ToInt32(call_instr->InputAt(3)));
-  EXPECT_EQ(64, s.ToInt32(call_instr->InputAt(4)));
-  EXPECT_EQ(65, s.ToInt32(call_instr->InputAt(5)));
-  // Values from parent environment should follow.
-  EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(6)));
-  EXPECT_EQ(46, s.ToInt32(call_instr->InputAt(7)));
-  EXPECT_EQ(44, s.ToInt32(call_instr->InputAt(8)));
-  EXPECT_EQ(45, s.ToInt32(call_instr->InputAt(9)));
-
-  // Function.
-  EXPECT_EQ(function_node->id(), s.ToVreg(call_instr->InputAt(10)));
-  // Context.
-  EXPECT_EQ(context2->id(), s.ToVreg(call_instr->InputAt(11)));
-  // Continuation.
-
-  EXPECT_EQ(kArchRet, s[index++]->arch_opcode());
-  EXPECT_EQ(index, s.size());
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/instruction-selector-unittest.h b/src/compiler/instruction-selector-unittest.h
deleted file mode 100644
index 4e12dab..0000000
--- a/src/compiler/instruction-selector-unittest.h
+++ /dev/null
@@ -1,209 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_UNITTEST_H_
-#define V8_COMPILER_INSTRUCTION_SELECTOR_UNITTEST_H_
-
-#include <deque>
-#include <set>
-
-#include "src/base/utils/random-number-generator.h"
-#include "src/compiler/instruction-selector.h"
-#include "src/compiler/raw-machine-assembler.h"
-#include "src/test/test-utils.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class InstructionSelectorTest : public TestWithContext, public TestWithZone {
- public:
-  InstructionSelectorTest();
-  virtual ~InstructionSelectorTest();
-
-  base::RandomNumberGenerator* rng() { return &rng_; }
-
-  class Stream;
-
-  enum StreamBuilderMode {
-    kAllInstructions,
-    kTargetInstructions,
-    kAllExceptNopInstructions
-  };
-
-  class StreamBuilder FINAL : public RawMachineAssembler {
-   public:
-    StreamBuilder(InstructionSelectorTest* test, MachineType return_type)
-        : RawMachineAssembler(new (test->zone()) Graph(test->zone()),
-                              MakeMachineSignature(test->zone(), return_type)),
-          test_(test) {}
-    StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
-                  MachineType parameter0_type)
-        : RawMachineAssembler(
-              new (test->zone()) Graph(test->zone()),
-              MakeMachineSignature(test->zone(), return_type, parameter0_type)),
-          test_(test) {}
-    StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
-                  MachineType parameter0_type, MachineType parameter1_type)
-        : RawMachineAssembler(
-              new (test->zone()) Graph(test->zone()),
-              MakeMachineSignature(test->zone(), return_type, parameter0_type,
-                                   parameter1_type)),
-          test_(test) {}
-    StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
-                  MachineType parameter0_type, MachineType parameter1_type,
-                  MachineType parameter2_type)
-        : RawMachineAssembler(
-              new (test->zone()) Graph(test->zone()),
-              MakeMachineSignature(test->zone(), return_type, parameter0_type,
-                                   parameter1_type, parameter2_type)),
-          test_(test) {}
-
-    Stream Build(CpuFeature feature) {
-      return Build(InstructionSelector::Features(feature));
-    }
-    Stream Build(CpuFeature feature1, CpuFeature feature2) {
-      return Build(InstructionSelector::Features(feature1, feature2));
-    }
-    Stream Build(StreamBuilderMode mode = kTargetInstructions) {
-      return Build(InstructionSelector::Features(), mode);
-    }
-    Stream Build(InstructionSelector::Features features,
-                 StreamBuilderMode mode = kTargetInstructions);
-
-   private:
-    MachineSignature* MakeMachineSignature(Zone* zone,
-                                           MachineType return_type) {
-      MachineSignature::Builder builder(zone, 1, 0);
-      builder.AddReturn(return_type);
-      return builder.Build();
-    }
-
-    MachineSignature* MakeMachineSignature(Zone* zone, MachineType return_type,
-                                           MachineType parameter0_type) {
-      MachineSignature::Builder builder(zone, 1, 1);
-      builder.AddReturn(return_type);
-      builder.AddParam(parameter0_type);
-      return builder.Build();
-    }
-
-    MachineSignature* MakeMachineSignature(Zone* zone, MachineType return_type,
-                                           MachineType parameter0_type,
-                                           MachineType parameter1_type) {
-      MachineSignature::Builder builder(zone, 1, 2);
-      builder.AddReturn(return_type);
-      builder.AddParam(parameter0_type);
-      builder.AddParam(parameter1_type);
-      return builder.Build();
-    }
-
-    MachineSignature* MakeMachineSignature(Zone* zone, MachineType return_type,
-                                           MachineType parameter0_type,
-                                           MachineType parameter1_type,
-                                           MachineType parameter2_type) {
-      MachineSignature::Builder builder(zone, 1, 3);
-      builder.AddReturn(return_type);
-      builder.AddParam(parameter0_type);
-      builder.AddParam(parameter1_type);
-      builder.AddParam(parameter2_type);
-      return builder.Build();
-    }
-
-   private:
-    InstructionSelectorTest* test_;
-  };
-
-  class Stream FINAL {
-   public:
-    size_t size() const { return instructions_.size(); }
-    const Instruction* operator[](size_t index) const {
-      EXPECT_LT(index, size());
-      return instructions_[index];
-    }
-
-    bool IsDouble(const InstructionOperand* operand) const {
-      return IsDouble(ToVreg(operand));
-    }
-    bool IsDouble(int virtual_register) const {
-      return doubles_.find(virtual_register) != doubles_.end();
-    }
-
-    bool IsInteger(const InstructionOperand* operand) const {
-      return IsInteger(ToVreg(operand));
-    }
-    bool IsInteger(int virtual_register) const {
-      return !IsDouble(virtual_register) && !IsReference(virtual_register);
-    }
-
-    bool IsReference(const InstructionOperand* operand) const {
-      return IsReference(ToVreg(operand));
-    }
-    bool IsReference(int virtual_register) const {
-      return references_.find(virtual_register) != references_.end();
-    }
-
-    int32_t ToInt32(const InstructionOperand* operand) const {
-      return ToConstant(operand).ToInt32();
-    }
-
-    int64_t ToInt64(const InstructionOperand* operand) const {
-      return ToConstant(operand).ToInt64();
-    }
-
-    int ToVreg(const InstructionOperand* operand) const {
-      if (operand->IsConstant()) return operand->index();
-      EXPECT_EQ(InstructionOperand::UNALLOCATED, operand->kind());
-      return UnallocatedOperand::cast(operand)->virtual_register();
-    }
-
-    FrameStateDescriptor* GetFrameStateDescriptor(int deoptimization_id) {
-      EXPECT_LT(deoptimization_id, GetFrameStateDescriptorCount());
-      return deoptimization_entries_[deoptimization_id];
-    }
-
-    int GetFrameStateDescriptorCount() {
-      return static_cast<int>(deoptimization_entries_.size());
-    }
-
-   private:
-    Constant ToConstant(const InstructionOperand* operand) const {
-      ConstantMap::const_iterator i;
-      if (operand->IsConstant()) {
-        i = constants_.find(operand->index());
-        EXPECT_FALSE(constants_.end() == i);
-      } else {
-        EXPECT_EQ(InstructionOperand::IMMEDIATE, operand->kind());
-        i = immediates_.find(operand->index());
-        EXPECT_FALSE(immediates_.end() == i);
-      }
-      EXPECT_EQ(operand->index(), i->first);
-      return i->second;
-    }
-
-    friend class StreamBuilder;
-
-    typedef std::map<int, Constant> ConstantMap;
-
-    ConstantMap constants_;
-    ConstantMap immediates_;
-    std::deque<Instruction*> instructions_;
-    std::set<int> doubles_;
-    std::set<int> references_;
-    std::deque<FrameStateDescriptor*> deoptimization_entries_;
-  };
-
-  base::RandomNumberGenerator rng_;
-};
-
-
-template <typename T>
-class InstructionSelectorTestWithParam
-    : public InstructionSelectorTest,
-      public ::testing::WithParamInterface<T> {};
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_COMPILER_INSTRUCTION_SELECTOR_UNITTEST_H_
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
index 3c32b64..ffb8f9f 100644
--- a/src/compiler/instruction-selector.cc
+++ b/src/compiler/instruction-selector.cc
@@ -4,6 +4,7 @@
 
 #include "src/compiler/instruction-selector.h"
 
+#include "src/compiler/graph.h"
 #include "src/compiler/instruction-selector-impl.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties-inl.h"
@@ -13,17 +14,23 @@
 namespace internal {
 namespace compiler {
 
-InstructionSelector::InstructionSelector(InstructionSequence* sequence,
+InstructionSelector::InstructionSelector(Zone* local_zone, Graph* graph,
+                                         Linkage* linkage,
+                                         InstructionSequence* sequence,
+                                         Schedule* schedule,
                                          SourcePositionTable* source_positions,
                                          Features features)
-    : zone_(sequence->isolate()),
+    : zone_(local_zone),
+      linkage_(linkage),
       sequence_(sequence),
       source_positions_(source_positions),
       features_(features),
+      schedule_(schedule),
+      node_map_(graph->NodeCount(), kNodeUnmapped, zone()),
       current_block_(NULL),
       instructions_(zone()),
-      defined_(graph()->NodeCount(), false, zone()),
-      used_(graph()->NodeCount(), false, zone()) {}
+      defined_(graph->NodeCount(), false, zone()),
+      used_(graph->NodeCount(), false, zone()) {}
 
 
 void InstructionSelector::SelectInstructions() {
@@ -32,17 +39,16 @@
   for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
     BasicBlock* block = *i;
     if (!block->IsLoopHeader()) continue;
-    DCHECK_NE(0, block->PredecessorCount());
-    DCHECK_NE(1, block->PredecessorCount());
+    DCHECK_NE(0, static_cast<int>(block->PredecessorCount()));
+    DCHECK_NE(1, static_cast<int>(block->PredecessorCount()));
     for (BasicBlock::const_iterator j = block->begin(); j != block->end();
          ++j) {
       Node* phi = *j;
       if (phi->opcode() != IrOpcode::kPhi) continue;
 
       // Mark all inputs as used.
-      Node::Inputs inputs = phi->inputs();
-      for (InputIter k = inputs.begin(); k != inputs.end(); ++k) {
-        MarkAsUsed(*k);
+      for (Node* const k : phi->inputs()) {
+        MarkAsUsed(k);
       }
     }
   }
@@ -55,13 +61,15 @@
   // Schedule the selected instructions.
   for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
     BasicBlock* block = *i;
-    size_t end = block->code_end_;
-    size_t start = block->code_start_;
-    sequence()->StartBlock(block);
+    InstructionBlock* instruction_block =
+        sequence()->InstructionBlockAt(block->GetRpoNumber());
+    size_t end = instruction_block->code_end();
+    size_t start = instruction_block->code_start();
+    sequence()->StartBlock(block->GetRpoNumber());
     while (start-- > end) {
-      sequence()->AddInstruction(instructions_[start], block);
+      sequence()->AddInstruction(instructions_[start]);
     }
-    sequence()->EndBlock(block);
+    sequence()->EndBlock(block->GetRpoNumber());
   }
 }
 
@@ -124,6 +132,31 @@
 
 
 Instruction* InstructionSelector::Emit(
+    InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
+    InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
+    InstructionOperand* e, size_t temp_count, InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  InstructionOperand* inputs[] = {a, b, c, d, e};
+  size_t input_count = arraysize(inputs);
+  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+              temps);
+}
+
+
+Instruction* InstructionSelector::Emit(
+    InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
+    InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
+    InstructionOperand* e, InstructionOperand* f, size_t temp_count,
+    InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  InstructionOperand* inputs[] = {a, b, c, d, e, f};
+  size_t input_count = arraysize(inputs);
+  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+              temps);
+}
+
+
+Instruction* InstructionSelector::Emit(
     InstructionCode opcode, size_t output_count, InstructionOperand** outputs,
     size_t input_count, InstructionOperand** inputs, size_t temp_count,
     InstructionOperand** temps) {
@@ -140,18 +173,25 @@
 }
 
 
-bool InstructionSelector::IsNextInAssemblyOrder(const BasicBlock* block) const {
-  return block->rpo_number_ == (current_block_->rpo_number_ + 1) &&
-         block->deferred_ == current_block_->deferred_;
-}
-
-
 bool InstructionSelector::CanCover(Node* user, Node* node) const {
   return node->OwnedBy(user) &&
          schedule()->block(node) == schedule()->block(user);
 }
 
 
+int InstructionSelector::GetVirtualRegister(const Node* node) {
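+  // Assign a virtual register the first time one is requested for {node}.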
+  if (node_map_[node->id()] == kNodeUnmapped) {
+    node_map_[node->id()] = sequence()->NextVirtualRegister();
+  }
+  return node_map_[node->id()];
+}
+
+
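+// Returns the existing mapping for {node}, or kNodeUnmapped if no virtual
+// register has been assigned yet.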
+int InstructionSelector::GetMappedVirtualRegister(const Node* node) const {
+  return node_map_[node->id()];
+}
+
+
 bool InstructionSelector::IsDefined(Node* node) const {
   DCHECK_NOT_NULL(node);
   NodeId id = node->id();
@@ -190,27 +230,48 @@
 
 bool InstructionSelector::IsDouble(const Node* node) const {
   DCHECK_NOT_NULL(node);
-  return sequence()->IsDouble(node->id());
+  int virtual_register = GetMappedVirtualRegister(node);
+  if (virtual_register == kNodeUnmapped) return false;
+  return sequence()->IsDouble(virtual_register);
 }
 
 
 void InstructionSelector::MarkAsDouble(Node* node) {
   DCHECK_NOT_NULL(node);
   DCHECK(!IsReference(node));
-  sequence()->MarkAsDouble(node->id());
+  sequence()->MarkAsDouble(GetVirtualRegister(node));
 }
 
 
 bool InstructionSelector::IsReference(const Node* node) const {
   DCHECK_NOT_NULL(node);
-  return sequence()->IsReference(node->id());
+  int virtual_register = GetMappedVirtualRegister(node);
+  if (virtual_register == kNodeUnmapped) return false;
+  return sequence()->IsReference(virtual_register);
 }
 
 
 void InstructionSelector::MarkAsReference(Node* node) {
   DCHECK_NOT_NULL(node);
   DCHECK(!IsDouble(node));
-  sequence()->MarkAsReference(node->id());
+  sequence()->MarkAsReference(GetVirtualRegister(node));
+}
+
+
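+// Records whether the operand's virtual register holds a double or a tagged
+// reference, based on the machine representation {rep}.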
+void InstructionSelector::MarkAsRepresentation(MachineType rep,
+                                               InstructionOperand* op) {
+  UnallocatedOperand* unalloc = UnallocatedOperand::cast(op);
+  switch (RepresentationOf(rep)) {
+    case kRepFloat32:
+    case kRepFloat64:
+      sequence()->MarkAsDouble(unalloc->virtual_register());
+      break;
+    case kRepTagged:
+      sequence()->MarkAsReference(unalloc->virtual_register());
+      break;
+    default:
+      break;
+  }
 }
 
 
@@ -232,7 +293,7 @@
 
 // TODO(bmeurer): Get rid of the CallBuffer business and make
 // InstructionSelector::VisitCall platform independent instead.
-CallBuffer::CallBuffer(Zone* zone, CallDescriptor* d,
+CallBuffer::CallBuffer(Zone* zone, const CallDescriptor* d,
                        FrameStateDescriptor* frame_desc)
     : descriptor(d),
       frame_state_descriptor(frame_desc),
@@ -253,9 +314,11 @@
                                                bool call_code_immediate,
                                                bool call_address_immediate) {
   OperandGenerator g(this);
-  DCHECK_EQ(call->op()->OutputCount(), buffer->descriptor->ReturnCount());
-  DCHECK_EQ(OperatorProperties::GetValueInputCount(call->op()),
-            buffer->input_count() + buffer->frame_state_count());
+  DCHECK_EQ(call->op()->ValueOutputCount(),
+            static_cast<int>(buffer->descriptor->ReturnCount()));
+  DCHECK_EQ(
+      call->op()->ValueInputCount(),
+      static_cast<int>(buffer->input_count() + buffer->frame_state_count()));
 
   if (buffer->descriptor->ReturnCount() > 0) {
     // Collect the projections that represent multiple outputs from this call.
@@ -267,15 +330,27 @@
     }
 
     // Filter out the outputs that aren't live because no projection uses them.
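+    // Outputs consumed by the frame state must be kept live even when no
+    // projection uses them.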
+    size_t outputs_needed_by_framestate =
+        buffer->frame_state_descriptor == NULL
+            ? 0
+            : buffer->frame_state_descriptor->state_combine()
+                  .ConsumedOutputCount();
     for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
-      if (buffer->output_nodes[i] != NULL) {
-        Node* output = buffer->output_nodes[i];
+      bool output_is_live =
+          buffer->output_nodes[i] != NULL || i < outputs_needed_by_framestate;
+      if (output_is_live) {
         MachineType type =
             buffer->descriptor->GetReturnType(static_cast<int>(i));
         LinkageLocation location =
             buffer->descriptor->GetReturnLocation(static_cast<int>(i));
-        MarkAsRepresentation(type, output);
-        buffer->outputs.push_back(g.DefineAsLocation(output, location, type));
+
+        Node* output = buffer->output_nodes[i];
+        InstructionOperand* op =
+            output == NULL ? g.TempLocation(location, type)
+                           : g.DefineAsLocation(output, location, type);
+        MarkAsRepresentation(type, op);
+
+        buffer->outputs.push_back(op);
       }
     }
   }
@@ -303,7 +378,7 @@
                         buffer->descriptor->GetInputType(0)));
       break;
   }
-  DCHECK_EQ(1, buffer->instruction_args.size());
+  DCHECK_EQ(1, static_cast<int>(buffer->instruction_args.size()));
 
   // If the call needs a frame state, we insert the state information as
   // follows (n is the number of value inputs to the frame state):
@@ -328,11 +403,10 @@
   // arguments require an explicit push instruction before the call and do
   // not appear as arguments to the call. Everything else ends up
   // as an InstructionOperand argument to the call.
-  InputIter iter(call->inputs().begin());
+  auto iter(call->inputs().begin());
   int pushed_count = 0;
   for (size_t index = 0; index < input_count; ++iter, ++index) {
     DCHECK(iter != call->inputs().end());
-    DCHECK(index == static_cast<size_t>(iter.index()));
     DCHECK((*iter)->op()->opcode() != IrOpcode::kFrameState);
     if (index == 0) continue;  // The first argument (callee) is already done.
     InstructionOperand* op =
@@ -382,9 +456,10 @@
   }
 
   // We're done with the block.
-  // TODO(bmeurer): We should not mutate the schedule.
-  block->code_end_ = current_block_end;
-  block->code_start_ = static_cast<int>(instructions_.size());
+  InstructionBlock* instruction_block =
+      sequence()->InstructionBlockAt(block->GetRpoNumber());
+  instruction_block->set_code_start(static_cast<int>(instructions_.size()));
+  instruction_block->set_code_end(current_block_end);
 
   current_block_ = NULL;
 }
@@ -402,11 +477,11 @@
 
 
 void InstructionSelector::VisitControl(BasicBlock* block) {
-  Node* input = block->control_input_;
-  switch (block->control_) {
-    case BasicBlockData::kGoto:
+  Node* input = block->control_input();
+  switch (block->control()) {
+    case BasicBlock::kGoto:
       return VisitGoto(block->SuccessorAt(0));
-    case BasicBlockData::kBranch: {
+    case BasicBlock::kBranch: {
       DCHECK_EQ(IrOpcode::kBranch, input->opcode());
       BasicBlock* tbranch = block->SuccessorAt(0);
       BasicBlock* fbranch = block->SuccessorAt(1);
@@ -417,16 +492,16 @@
       if (tbranch == fbranch) return VisitGoto(tbranch);
       return VisitBranch(input, tbranch, fbranch);
     }
-    case BasicBlockData::kReturn: {
+    case BasicBlock::kReturn: {
       // If the result itself is a return, return its input.
       Node* value = (input != NULL && input->opcode() == IrOpcode::kReturn)
                         ? input->InputAt(0)
                         : input;
       return VisitReturn(value);
     }
-    case BasicBlockData::kThrow:
+    case BasicBlock::kThrow:
       return VisitThrow(input);
-    case BasicBlockData::kNone: {
+    case BasicBlock::kNone: {
       // TODO(titzer): exit block doesn't have control.
       DCHECK(input == NULL);
       break;
@@ -438,6 +513,135 @@
 }
 
 
+MachineType InstructionSelector::GetMachineType(Node* node) {
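+  // Infer the machine type produced by {node} from its opcode. The result is
+  // used to type frame state inputs.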
+  DCHECK_NOT_NULL(schedule()->block(node));  // should only use scheduled nodes.
+  switch (node->opcode()) {
+    case IrOpcode::kStart:
+    case IrOpcode::kLoop:
+    case IrOpcode::kEnd:
+    case IrOpcode::kBranch:
+    case IrOpcode::kIfTrue:
+    case IrOpcode::kIfFalse:
+    case IrOpcode::kEffectPhi:
+    case IrOpcode::kMerge:
+    case IrOpcode::kTerminate:
+      // No code needed for these graph artifacts.
+      return kMachNone;
+    case IrOpcode::kFinish:
+      return kMachAnyTagged;
+    case IrOpcode::kParameter:
+      return linkage()->GetParameterType(OpParameter<int>(node));
+    case IrOpcode::kPhi:
+      return OpParameter<MachineType>(node);
+    case IrOpcode::kProjection:
+      // TODO(jarin): Really project from outputs.
+      return kMachAnyTagged;
+    case IrOpcode::kInt32Constant:
+      return kMachInt32;
+    case IrOpcode::kInt64Constant:
+      return kMachInt64;
+    case IrOpcode::kExternalConstant:
+      return kMachPtr;
+    case IrOpcode::kFloat64Constant:
+      return kMachFloat64;
+    case IrOpcode::kHeapConstant:
+    case IrOpcode::kNumberConstant:
+      return kMachAnyTagged;
+    case IrOpcode::kCall:
+      return kMachAnyTagged;
+    case IrOpcode::kFrameState:
+    case IrOpcode::kStateValues:
+      return kMachNone;
+    case IrOpcode::kLoad:
+      return OpParameter<LoadRepresentation>(node);
+    case IrOpcode::kStore:
+      return kMachNone;
+    case IrOpcode::kCheckedLoad:
+      return OpParameter<MachineType>(node);
+    case IrOpcode::kCheckedStore:
+      return kMachNone;
+    case IrOpcode::kWord32And:
+    case IrOpcode::kWord32Or:
+    case IrOpcode::kWord32Xor:
+    case IrOpcode::kWord32Shl:
+    case IrOpcode::kWord32Shr:
+    case IrOpcode::kWord32Sar:
+    case IrOpcode::kWord32Ror:
+      return kMachInt32;
+    case IrOpcode::kWord32Equal:
+      return kMachBool;
+    case IrOpcode::kWord64And:
+    case IrOpcode::kWord64Or:
+    case IrOpcode::kWord64Xor:
+    case IrOpcode::kWord64Shl:
+    case IrOpcode::kWord64Shr:
+    case IrOpcode::kWord64Sar:
+    case IrOpcode::kWord64Ror:
+      return kMachInt64;
+    case IrOpcode::kWord64Equal:
+      return kMachBool;
+    case IrOpcode::kInt32Add:
+    case IrOpcode::kInt32AddWithOverflow:
+    case IrOpcode::kInt32Sub:
+    case IrOpcode::kInt32SubWithOverflow:
+    case IrOpcode::kInt32Mul:
+    case IrOpcode::kInt32Div:
+    case IrOpcode::kInt32Mod:
+      return kMachInt32;
+    case IrOpcode::kInt32LessThan:
+    case IrOpcode::kInt32LessThanOrEqual:
+    case IrOpcode::kUint32LessThan:
+    case IrOpcode::kUint32LessThanOrEqual:
+      return kMachBool;
+    case IrOpcode::kInt64Add:
+    case IrOpcode::kInt64Sub:
+    case IrOpcode::kInt64Mul:
+    case IrOpcode::kInt64Div:
+    case IrOpcode::kInt64Mod:
+      return kMachInt64;
+    case IrOpcode::kInt64LessThan:
+    case IrOpcode::kInt64LessThanOrEqual:
+      return kMachBool;
+    case IrOpcode::kChangeFloat32ToFloat64:
+    case IrOpcode::kChangeInt32ToFloat64:
+    case IrOpcode::kChangeUint32ToFloat64:
+      return kMachFloat64;
+    case IrOpcode::kChangeFloat64ToInt32:
+      return kMachInt32;
+    case IrOpcode::kChangeFloat64ToUint32:
+      return kMachUint32;
+    case IrOpcode::kChangeInt32ToInt64:
+      return kMachInt64;
+    case IrOpcode::kChangeUint32ToUint64:
+      return kMachUint64;
+    case IrOpcode::kTruncateFloat64ToFloat32:
+      return kMachFloat32;
+    case IrOpcode::kTruncateFloat64ToInt32:
+    case IrOpcode::kTruncateInt64ToInt32:
+      return kMachInt32;
+    case IrOpcode::kFloat64Add:
+    case IrOpcode::kFloat64Sub:
+    case IrOpcode::kFloat64Mul:
+    case IrOpcode::kFloat64Div:
+    case IrOpcode::kFloat64Mod:
+    case IrOpcode::kFloat64Sqrt:
+    case IrOpcode::kFloat64Floor:
+    case IrOpcode::kFloat64Ceil:
+    case IrOpcode::kFloat64RoundTruncate:
+    case IrOpcode::kFloat64RoundTiesAway:
+      return kMachFloat64;
+    case IrOpcode::kFloat64Equal:
+    case IrOpcode::kFloat64LessThan:
+    case IrOpcode::kFloat64LessThanOrEqual:
+      return kMachBool;
+    default:
+      V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
+               node->opcode(), node->op()->mnemonic(), node->id());
+  }
+  return kMachNone;
+}
+
+
 void InstructionSelector::VisitNode(Node* node) {
   DCHECK_NOT_NULL(schedule()->block(node));  // should only use scheduled nodes.
   SourcePosition source_position = source_positions_->GetSourcePosition(node);
@@ -476,6 +680,8 @@
     case IrOpcode::kInt64Constant:
     case IrOpcode::kExternalConstant:
       return VisitConstant(node);
+    case IrOpcode::kFloat32Constant:
+      return MarkAsDouble(node), VisitConstant(node);
     case IrOpcode::kFloat64Constant:
       return MarkAsDouble(node), VisitConstant(node);
     case IrOpcode::kHeapConstant:
@@ -483,7 +689,7 @@
       // TODO(turbofan): only mark non-smis as references.
       return MarkAsReference(node), VisitConstant(node);
     case IrOpcode::kCall:
-      return VisitCall(node, NULL, NULL);
+      return VisitCall(node);
     case IrOpcode::kFrameState:
     case IrOpcode::kStateValues:
       return;
@@ -536,22 +742,26 @@
       return VisitInt32SubWithOverflow(node);
     case IrOpcode::kInt32Mul:
       return VisitInt32Mul(node);
+    case IrOpcode::kInt32MulHigh:
+      return VisitInt32MulHigh(node);
     case IrOpcode::kInt32Div:
       return VisitInt32Div(node);
-    case IrOpcode::kInt32UDiv:
-      return VisitInt32UDiv(node);
     case IrOpcode::kInt32Mod:
       return VisitInt32Mod(node);
-    case IrOpcode::kInt32UMod:
-      return VisitInt32UMod(node);
     case IrOpcode::kInt32LessThan:
       return VisitInt32LessThan(node);
     case IrOpcode::kInt32LessThanOrEqual:
       return VisitInt32LessThanOrEqual(node);
+    case IrOpcode::kUint32Div:
+      return VisitUint32Div(node);
     case IrOpcode::kUint32LessThan:
       return VisitUint32LessThan(node);
     case IrOpcode::kUint32LessThanOrEqual:
       return VisitUint32LessThanOrEqual(node);
+    case IrOpcode::kUint32Mod:
+      return VisitUint32Mod(node);
+    case IrOpcode::kUint32MulHigh:
+      return VisitUint32MulHigh(node);
     case IrOpcode::kInt64Add:
       return VisitInt64Add(node);
     case IrOpcode::kInt64Sub:
@@ -560,16 +770,20 @@
       return VisitInt64Mul(node);
     case IrOpcode::kInt64Div:
       return VisitInt64Div(node);
-    case IrOpcode::kInt64UDiv:
-      return VisitInt64UDiv(node);
     case IrOpcode::kInt64Mod:
       return VisitInt64Mod(node);
-    case IrOpcode::kInt64UMod:
-      return VisitInt64UMod(node);
     case IrOpcode::kInt64LessThan:
       return VisitInt64LessThan(node);
     case IrOpcode::kInt64LessThanOrEqual:
       return VisitInt64LessThanOrEqual(node);
+    case IrOpcode::kUint64Div:
+      return VisitUint64Div(node);
+    case IrOpcode::kUint64LessThan:
+      return VisitUint64LessThan(node);
+    case IrOpcode::kUint64Mod:
+      return VisitUint64Mod(node);
+    case IrOpcode::kChangeFloat32ToFloat64:
+      return MarkAsDouble(node), VisitChangeFloat32ToFloat64(node);
     case IrOpcode::kChangeInt32ToFloat64:
       return MarkAsDouble(node), VisitChangeInt32ToFloat64(node);
     case IrOpcode::kChangeUint32ToFloat64:
@@ -582,6 +796,8 @@
       return VisitChangeInt32ToInt64(node);
     case IrOpcode::kChangeUint32ToUint64:
       return VisitChangeUint32ToUint64(node);
+    case IrOpcode::kTruncateFloat64ToFloat32:
+      return MarkAsDouble(node), VisitTruncateFloat64ToFloat32(node);
     case IrOpcode::kTruncateFloat64ToInt32:
       return VisitTruncateFloat64ToInt32(node);
     case IrOpcode::kTruncateInt64ToInt32:
@@ -604,91 +820,33 @@
       return VisitFloat64LessThan(node);
     case IrOpcode::kFloat64LessThanOrEqual:
       return VisitFloat64LessThanOrEqual(node);
+    case IrOpcode::kFloat64Floor:
+      return MarkAsDouble(node), VisitFloat64Floor(node);
+    case IrOpcode::kFloat64Ceil:
+      return MarkAsDouble(node), VisitFloat64Ceil(node);
+    case IrOpcode::kFloat64RoundTruncate:
+      return MarkAsDouble(node), VisitFloat64RoundTruncate(node);
+    case IrOpcode::kFloat64RoundTiesAway:
+      return MarkAsDouble(node), VisitFloat64RoundTiesAway(node);
+    case IrOpcode::kLoadStackPointer:
+      return VisitLoadStackPointer(node);
+    case IrOpcode::kCheckedLoad: {
+      MachineType rep = OpParameter<MachineType>(node);
+      MarkAsRepresentation(rep, node);
+      return VisitCheckedLoad(node);
+    }
+    case IrOpcode::kCheckedStore:
+      return VisitCheckedStore(node);
     default:
       V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
                node->opcode(), node->op()->mnemonic(), node->id());
+      break;
   }
 }
 
 
 #if V8_TURBOFAN_BACKEND
 
-void InstructionSelector::VisitWord32Equal(Node* node) {
-  FlagsContinuation cont(kEqual, node);
-  Int32BinopMatcher m(node);
-  if (m.right().Is(0)) {
-    return VisitWord32Test(m.left().node(), &cont);
-  }
-  VisitWord32Compare(node, &cont);
-}
-
-
-void InstructionSelector::VisitInt32LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
-  VisitWord32Compare(node, &cont);
-}
-
-
-void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
-  VisitWord32Compare(node, &cont);
-}
-
-
-void InstructionSelector::VisitUint32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
-  VisitWord32Compare(node, &cont);
-}
-
-
-void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
-  VisitWord32Compare(node, &cont);
-}
-
-
-void InstructionSelector::VisitWord64Equal(Node* node) {
-  FlagsContinuation cont(kEqual, node);
-  Int64BinopMatcher m(node);
-  if (m.right().Is(0)) {
-    return VisitWord64Test(m.left().node(), &cont);
-  }
-  VisitWord64Compare(node, &cont);
-}
-
-
-void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
-  if (Node* ovf = node->FindProjection(1)) {
-    FlagsContinuation cont(kOverflow, ovf);
-    return VisitInt32AddWithOverflow(node, &cont);
-  }
-  FlagsContinuation cont;
-  VisitInt32AddWithOverflow(node, &cont);
-}
-
-
-void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
-  if (Node* ovf = node->FindProjection(1)) {
-    FlagsContinuation cont(kOverflow, ovf);
-    return VisitInt32SubWithOverflow(node, &cont);
-  }
-  FlagsContinuation cont;
-  VisitInt32SubWithOverflow(node, &cont);
-}
-
-
-void InstructionSelector::VisitInt64LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
-  VisitWord64Compare(node, &cont);
-}
-
-
-void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
-  VisitWord64Compare(node, &cont);
-}
-
-
 void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
   OperandGenerator g(this);
   Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
@@ -696,27 +854,15 @@
 }
 
 
-void InstructionSelector::VisitFloat64Equal(Node* node) {
-  FlagsContinuation cont(kUnorderedEqual, node);
-  VisitFloat64Compare(node, &cont);
-}
-
-
-void InstructionSelector::VisitFloat64LessThan(Node* node) {
-  FlagsContinuation cont(kUnorderedLessThan, node);
-  VisitFloat64Compare(node, &cont);
-}
-
-
-void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
-  VisitFloat64Compare(node, &cont);
+void InstructionSelector::VisitLoadStackPointer(Node* node) {
+  OperandGenerator g(this);
+  Emit(kArchStackPointer, g.DefineAsRegister(node));
 }
 
 #endif  // V8_TURBOFAN_BACKEND
 
 // 32 bit targets do not implement the following instructions.
-#if V8_TARGET_ARCH_32_BIT && V8_TURBOFAN_BACKEND
+#if V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_X64 && V8_TURBOFAN_BACKEND
 
 void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
 
@@ -739,6 +885,9 @@
 void InstructionSelector::VisitWord64Ror(Node* node) { UNIMPLEMENTED(); }
 
 
+void InstructionSelector::VisitWord64Equal(Node* node) { UNIMPLEMENTED(); }
+
+
 void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
 
 
@@ -751,13 +900,24 @@
 void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); }
 
 
-void InstructionSelector::VisitInt64UDiv(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitInt64LessThan(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitUint64Div(Node* node) { UNIMPLEMENTED(); }
 
 
 void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }
 
 
-void InstructionSelector::VisitInt64UMod(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitUint64LessThan(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitUint64Mod(Node* node) { UNIMPLEMENTED(); }
 
 
 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
@@ -774,24 +934,7 @@
   UNIMPLEMENTED();
 }
 
-#endif  // V8_TARGET_ARCH_32_BIT && V8_TURBOFAN_BACKEND
-
-
-// 32-bit targets and unsupported architectures need dummy implementations of
-// selected 64-bit ops.
-#if V8_TARGET_ARCH_32_BIT || !V8_TURBOFAN_BACKEND
-
-void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
-  UNIMPLEMENTED();
-}
-
-
-void InstructionSelector::VisitWord64Compare(Node* node,
-                                             FlagsContinuation* cont) {
-  UNIMPLEMENTED();
-}
-
-#endif  // V8_TARGET_ARCH_32_BIT || !V8_TURBOFAN_BACKEND
+#endif  // V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_X64 && V8_TURBOFAN_BACKEND
 
 
 void InstructionSelector::VisitFinish(Node* node) {
@@ -811,9 +954,15 @@
 
 
 void InstructionSelector::VisitPhi(Node* node) {
-  // TODO(bmeurer): Emit a PhiInstruction here.
-  for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
-    MarkAsUsed(*i);
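+  // Emit a PhiInstruction for this node, wiring each input to its virtual
+  // register and marking it as used.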
+  const int input_count = node->op()->ValueInputCount();
+  PhiInstruction* phi = new (instruction_zone())
+      PhiInstruction(instruction_zone(), GetVirtualRegister(node),
+                     static_cast<size_t>(input_count));
+  sequence()->InstructionBlockAt(current_block_->GetRpoNumber())->AddPhi(phi);
+  for (int i = 0; i < input_count; ++i) {
+    Node* const input = node->InputAt(i);
+    MarkAsUsed(input);
+    phi->Extend(instruction_zone(), GetVirtualRegister(input));
   }
 }
 
@@ -846,124 +995,9 @@
 
 
 void InstructionSelector::VisitGoto(BasicBlock* target) {
-  if (IsNextInAssemblyOrder(target)) {
-    // fall through to the next block.
-    Emit(kArchNop, NULL)->MarkAsControl();
-  } else {
-    // jump to the next block.
-    OperandGenerator g(this);
-    Emit(kArchJmp, NULL, g.Label(target))->MarkAsControl();
-  }
-}
-
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
-                                      BasicBlock* fbranch) {
+  // Emit an unconditional jump to the target block.
   OperandGenerator g(this);
-  Node* user = branch;
-  Node* value = branch->InputAt(0);
-
-  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
-
-  // If we can fall through to the true block, invert the branch.
-  if (IsNextInAssemblyOrder(tbranch)) {
-    cont.Negate();
-    cont.SwapBlocks();
-  }
-
-  // Try to combine with comparisons against 0 by simply inverting the branch.
-  while (CanCover(user, value)) {
-    if (value->opcode() == IrOpcode::kWord32Equal) {
-      Int32BinopMatcher m(value);
-      if (m.right().Is(0)) {
-        user = value;
-        value = m.left().node();
-        cont.Negate();
-      } else {
-        break;
-      }
-    } else if (value->opcode() == IrOpcode::kWord64Equal) {
-      Int64BinopMatcher m(value);
-      if (m.right().Is(0)) {
-        user = value;
-        value = m.left().node();
-        cont.Negate();
-      } else {
-        break;
-      }
-    } else {
-      break;
-    }
-  }
-
-  // Try to combine the branch with a comparison.
-  if (CanCover(user, value)) {
-    switch (value->opcode()) {
-      case IrOpcode::kWord32Equal:
-        cont.OverwriteAndNegateIfEqual(kEqual);
-        return VisitWord32Compare(value, &cont);
-      case IrOpcode::kInt32LessThan:
-        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
-        return VisitWord32Compare(value, &cont);
-      case IrOpcode::kInt32LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
-        return VisitWord32Compare(value, &cont);
-      case IrOpcode::kUint32LessThan:
-        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
-        return VisitWord32Compare(value, &cont);
-      case IrOpcode::kUint32LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
-        return VisitWord32Compare(value, &cont);
-      case IrOpcode::kWord64Equal:
-        cont.OverwriteAndNegateIfEqual(kEqual);
-        return VisitWord64Compare(value, &cont);
-      case IrOpcode::kInt64LessThan:
-        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
-        return VisitWord64Compare(value, &cont);
-      case IrOpcode::kInt64LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
-        return VisitWord64Compare(value, &cont);
-      case IrOpcode::kFloat64Equal:
-        cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
-        return VisitFloat64Compare(value, &cont);
-      case IrOpcode::kFloat64LessThan:
-        cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
-        return VisitFloat64Compare(value, &cont);
-      case IrOpcode::kFloat64LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
-        return VisitFloat64Compare(value, &cont);
-      case IrOpcode::kProjection:
-        // Check if this is the overflow output projection of an
-        // <Operation>WithOverflow node.
-        if (OpParameter<size_t>(value) == 1u) {
-          // We cannot combine the <Operation>WithOverflow with this branch
-          // unless the 0th projection (the use of the actual value of the
-          // <Operation> is either NULL, which means there's no use of the
-          // actual value, or was already defined, which means it is scheduled
-          // *AFTER* this branch).
-          Node* node = value->InputAt(0);
-          Node* result = node->FindProjection(0);
-          if (result == NULL || IsDefined(result)) {
-            switch (node->opcode()) {
-              case IrOpcode::kInt32AddWithOverflow:
-                cont.OverwriteAndNegateIfEqual(kOverflow);
-                return VisitInt32AddWithOverflow(node, &cont);
-              case IrOpcode::kInt32SubWithOverflow:
-                cont.OverwriteAndNegateIfEqual(kOverflow);
-                return VisitInt32SubWithOverflow(node, &cont);
-              default:
-                break;
-            }
-          }
-        }
-        break;
-      default:
-        break;
-    }
-  }
-
-  // Branch could not be combined with a compare, emit compare against 0.
-  VisitWord32Test(value, &cont);
+  Emit(kArchJmp, NULL, g.Label(target))->MarkAsControl();
 }
 
 
@@ -983,14 +1017,29 @@
 }
 
 
+void InstructionSelector::FillTypeVectorFromStateValues(
+    ZoneVector<MachineType>* types, Node* state_values) {
+  DCHECK(state_values->opcode() == IrOpcode::kStateValues);
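+  // Record the machine type of every input of the kStateValues node.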
+  int count = state_values->InputCount();
+  types->reserve(static_cast<size_t>(count));
+  for (int i = 0; i < count; i++) {
+    types->push_back(GetMachineType(state_values->InputAt(i)));
+  }
+}
+
+
 FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
     Node* state) {
   DCHECK(state->opcode() == IrOpcode::kFrameState);
   DCHECK_EQ(5, state->InputCount());
+  DCHECK_EQ(IrOpcode::kStateValues, state->InputAt(0)->opcode());
+  DCHECK_EQ(IrOpcode::kStateValues, state->InputAt(1)->opcode());
+  DCHECK_EQ(IrOpcode::kStateValues, state->InputAt(2)->opcode());
   FrameStateCallInfo state_info = OpParameter<FrameStateCallInfo>(state);
-  int parameters = OpParameter<int>(state->InputAt(0));
-  int locals = OpParameter<int>(state->InputAt(1));
-  int stack = OpParameter<int>(state->InputAt(2));
+
+  int parameters = state->InputAt(0)->InputCount();
+  int locals = state->InputAt(1)->InputCount();
+  int stack = state->InputAt(2)->InputCount();
 
   FrameStateDescriptor* outer_state = NULL;
   Node* outer_node = state->InputAt(4);
@@ -998,8 +1047,8 @@
     outer_state = GetFrameStateDescriptor(outer_node);
   }
 
-  return new (instruction_zone())
-      FrameStateDescriptor(state_info, parameters, locals, stack, outer_state);
+  return new (instruction_zone()) FrameStateDescriptor(
+      instruction_zone(), state_info, parameters, locals, stack, outer_state);
 }
 
 
@@ -1034,23 +1083,36 @@
   DCHECK_EQ(IrOpcode::kStateValues, locals->op()->opcode());
   DCHECK_EQ(IrOpcode::kStateValues, stack->op()->opcode());
 
-  DCHECK_EQ(descriptor->parameters_count(), parameters->InputCount());
-  DCHECK_EQ(descriptor->locals_count(), locals->InputCount());
-  DCHECK_EQ(descriptor->stack_count(), stack->InputCount());
+  DCHECK_EQ(static_cast<int>(descriptor->parameters_count()),
+            parameters->InputCount());
+  DCHECK_EQ(static_cast<int>(descriptor->locals_count()), locals->InputCount());
+  DCHECK_EQ(static_cast<int>(descriptor->stack_count()), stack->InputCount());
+
+  ZoneVector<MachineType> types(instruction_zone());
+  types.reserve(descriptor->GetSize());
 
   OperandGenerator g(this);
+  size_t value_index = 0;
   for (int i = 0; i < static_cast<int>(descriptor->parameters_count()); i++) {
-    inputs->push_back(UseOrImmediate(&g, parameters->InputAt(i)));
+    Node* input_node = parameters->InputAt(i);
+    inputs->push_back(UseOrImmediate(&g, input_node));
+    descriptor->SetType(value_index++, GetMachineType(input_node));
   }
   if (descriptor->HasContext()) {
     inputs->push_back(UseOrImmediate(&g, context));
+    descriptor->SetType(value_index++, kMachAnyTagged);
   }
   for (int i = 0; i < static_cast<int>(descriptor->locals_count()); i++) {
-    inputs->push_back(UseOrImmediate(&g, locals->InputAt(i)));
+    Node* input_node = locals->InputAt(i);
+    inputs->push_back(UseOrImmediate(&g, input_node));
+    descriptor->SetType(value_index++, GetMachineType(input_node));
   }
   for (int i = 0; i < static_cast<int>(descriptor->stack_count()); i++) {
-    inputs->push_back(UseOrImmediate(&g, stack->InputAt(i)));
+    Node* input_node = stack->InputAt(i);
+    inputs->push_back(UseOrImmediate(&g, input_node));
+    descriptor->SetType(value_index++, GetMachineType(input_node));
   }
+  DCHECK(value_index == descriptor->GetSize());
 }
 
 
@@ -1062,38 +1124,21 @@
 #undef DECLARE_UNIMPLEMENTED_SELECTOR
 
 
-void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
-                                                    FlagsContinuation* cont) {
+void InstructionSelector::VisitCall(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+                                      BasicBlock* fbranch) {
   UNIMPLEMENTED();
 }
 
 
-void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
-                                                    FlagsContinuation* cont) {
-  UNIMPLEMENTED();
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+  return MachineOperatorBuilder::Flag::kNoFlags;
 }
 
-
-void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
-  UNIMPLEMENTED();
-}
-
-
-void InstructionSelector::VisitWord32Compare(Node* node,
-                                             FlagsContinuation* cont) {
-  UNIMPLEMENTED();
-}
-
-
-void InstructionSelector::VisitFloat64Compare(Node* node,
-                                              FlagsContinuation* cont) {
-  UNIMPLEMENTED();
-}
-
-
-void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
-                                    BasicBlock* deoptimization) {}
-
 #endif  // !V8_TURBOFAN_BACKEND
 
 }  // namespace compiler
diff --git a/src/compiler/instruction-selector.h b/src/compiler/instruction-selector.h
index a86e156..5e3c52f 100644
--- a/src/compiler/instruction-selector.h
+++ b/src/compiler/instruction-selector.h
@@ -19,13 +19,20 @@
 // Forward declarations.
 struct CallBuffer;  // TODO(bmeurer): Remove this.
 class FlagsContinuation;
+class Linkage;
+
+typedef IntVector NodeToVregMap;
 
 class InstructionSelector FINAL {
  public:
+  static const int kNodeUnmapped = -1;
+
   // Forward declarations.
   class Features;
 
-  InstructionSelector(InstructionSequence* sequence,
+  // TODO(dcarney): pass in vreg mapping instead of graph.
+  InstructionSelector(Zone* local_zone, Graph* graph, Linkage* linkage,
+                      InstructionSequence* sequence, Schedule* schedule,
                       SourcePositionTable* source_positions,
                       Features features = SupportedFeatures());
 
@@ -52,6 +59,16 @@
                     InstructionOperand* a, InstructionOperand* b,
                     InstructionOperand* c, InstructionOperand* d,
                     size_t temp_count = 0, InstructionOperand* *temps = NULL);
+  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+                    InstructionOperand* a, InstructionOperand* b,
+                    InstructionOperand* c, InstructionOperand* d,
+                    InstructionOperand* e, size_t temp_count = 0,
+                    InstructionOperand* *temps = NULL);
+  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+                    InstructionOperand* a, InstructionOperand* b,
+                    InstructionOperand* c, InstructionOperand* d,
+                    InstructionOperand* e, InstructionOperand* f,
+                    size_t temp_count = 0, InstructionOperand* *temps = NULL);
   Instruction* Emit(InstructionCode opcode, size_t output_count,
                     InstructionOperand** outputs, size_t input_count,
                     InstructionOperand** inputs, size_t temp_count = 0,
@@ -84,17 +101,13 @@
     return Features(CpuFeatures::SupportedFeatures());
   }
 
- private:
-  friend class OperandGenerator;
+  // TODO(sigurds): This should take a CpuFeatures argument.
+  static MachineOperatorBuilder::Flags SupportedMachineOperatorFlags();
 
   // ===========================================================================
   // ============ Architecture-independent graph covering methods. =============
   // ===========================================================================
 
-  // Checks if {block} will appear directly after {current_block_} when
-  // assembling code, in which case, a fall-through can be used.
-  bool IsNextInAssemblyOrder(const BasicBlock* block) const;
-
   // Used in pattern matching during code generation.
   // Check if {node} can be covered while generating code for the current
   // instruction. A node can be covered if the {user} of the node has the only
@@ -105,13 +118,24 @@
   // generated for it.
   bool IsDefined(Node* node) const;
 
-  // Inform the instruction selection that {node} was just defined.
-  void MarkAsDefined(Node* node);
-
   // Checks if {node} has any uses, and therefore code has to be generated for
   // it.
   bool IsUsed(Node* node) const;
 
+  // Checks if {node} is currently live.
+  bool IsLive(Node* node) const { return !IsDefined(node) && IsUsed(node); }
+
+  int GetVirtualRegister(const Node* node);
+  // Gets the current mapping if it exists, kNodeUnmapped otherwise.
+  int GetMappedVirtualRegister(const Node* node) const;
+  const NodeToVregMap& GetNodeMapForTesting() const { return node_map_; }
+
+ private:
+  friend class OperandGenerator;
+
+  // Inform the instruction selection that {node} was just defined.
+  void MarkAsDefined(Node* node);
+
   // Inform the instruction selection that {node} has at least one use and we
   // will need to generate code for it.
   void MarkAsUsed(Node* node);
@@ -132,6 +156,10 @@
   // by {node}.
   void MarkAsRepresentation(MachineType rep, Node* node);
 
+  // Inform the register allocation of the representation of the unallocated
+  // operand {op}.
+  void MarkAsRepresentation(MachineType rep, InstructionOperand* op);
+
   // Initialize the call buffer with the InstructionOperands, nodes, etc.,
   // corresponding to the inputs and outputs of the call.
@@ -142,8 +170,11 @@
                             bool call_address_immediate);
 
   FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
+  void FillTypeVectorFromStateValues(ZoneVector<MachineType>* parameters,
+                                     Node* state_values);
   void AddFrameStateInputs(Node* state, InstructionOperandVector* inputs,
                            FrameStateDescriptor* descriptor);
+  MachineType GetMachineType(Node* node);
 
   // ===========================================================================
   // ============= Architecture-specific graph covering methods. ===============
@@ -163,22 +194,12 @@
   MACHINE_OP_LIST(DECLARE_GENERATOR)
 #undef DECLARE_GENERATOR
 
-  void VisitInt32AddWithOverflow(Node* node, FlagsContinuation* cont);
-  void VisitInt32SubWithOverflow(Node* node, FlagsContinuation* cont);
-
-  void VisitWord32Test(Node* node, FlagsContinuation* cont);
-  void VisitWord64Test(Node* node, FlagsContinuation* cont);
-  void VisitWord32Compare(Node* node, FlagsContinuation* cont);
-  void VisitWord64Compare(Node* node, FlagsContinuation* cont);
-  void VisitFloat64Compare(Node* node, FlagsContinuation* cont);
-
   void VisitFinish(Node* node);
   void VisitParameter(Node* node);
   void VisitPhi(Node* node);
   void VisitProjection(Node* node);
   void VisitConstant(Node* node);
-  void VisitCall(Node* call, BasicBlock* continuation,
-                 BasicBlock* deoptimization);
+  void VisitCall(Node* call);
   void VisitGoto(BasicBlock* target);
   void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
   void VisitReturn(Node* value);
@@ -187,19 +208,21 @@
 
   // ===========================================================================
 
-  Graph* graph() const { return sequence()->graph(); }
-  Linkage* linkage() const { return sequence()->linkage(); }
-  Schedule* schedule() const { return sequence()->schedule(); }
+  Schedule* schedule() const { return schedule_; }
+  Linkage* linkage() const { return linkage_; }
   InstructionSequence* sequence() const { return sequence_; }
   Zone* instruction_zone() const { return sequence()->zone(); }
-  Zone* zone() { return &zone_; }
+  Zone* zone() const { return zone_; }
 
   // ===========================================================================
 
-  Zone zone_;
-  InstructionSequence* sequence_;
-  SourcePositionTable* source_positions_;
+  Zone* const zone_;
+  Linkage* const linkage_;
+  InstructionSequence* const sequence_;
+  SourcePositionTable* const source_positions_;
   Features features_;
+  Schedule* const schedule_;
+  NodeToVregMap node_map_;
   BasicBlock* current_block_;
   ZoneDeque<Instruction*> instructions_;
   BoolVector defined_;
diff --git a/src/compiler/instruction.cc b/src/compiler/instruction.cc
index 9ab81b6..f83cdeb 100644
--- a/src/compiler/instruction.cc
+++ b/src/compiler/instruction.cc
@@ -2,18 +2,19 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/compiler/instruction.h"
-
 #include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/instruction.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-OStream& operator<<(OStream& os, const InstructionOperand& op) {
+std::ostream& operator<<(std::ostream& os,
+                         const PrintableInstructionOperand& printable) {
+  const InstructionOperand& op = *printable.op_;
+  const RegisterConfiguration* conf = printable.register_configuration_;
   switch (op.kind()) {
-    case InstructionOperand::INVALID:
-      return os << "(0)";
     case InstructionOperand::UNALLOCATED: {
       const UnallocatedOperand* unalloc = UnallocatedOperand::cast(&op);
       os << "v" << unalloc->virtual_register();
@@ -24,10 +25,10 @@
         case UnallocatedOperand::NONE:
           return os;
         case UnallocatedOperand::FIXED_REGISTER:
-          return os << "(=" << Register::AllocationIndexToString(
+          return os << "(=" << conf->general_register_name(
                                    unalloc->fixed_register_index()) << ")";
         case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
-          return os << "(=" << DoubleRegister::AllocationIndexToString(
+          return os << "(=" << conf->double_register_name(
                                    unalloc->fixed_register_index()) << ")";
         case UnallocatedOperand::MUST_HAVE_REGISTER:
           return os << "(R)";
@@ -46,11 +47,9 @@
     case InstructionOperand::DOUBLE_STACK_SLOT:
       return os << "[double_stack:" << op.index() << "]";
     case InstructionOperand::REGISTER:
-      return os << "[" << Register::AllocationIndexToString(op.index())
-                << "|R]";
+      return os << "[" << conf->general_register_name(op.index()) << "|R]";
     case InstructionOperand::DOUBLE_REGISTER:
-      return os << "[" << DoubleRegister::AllocationIndexToString(op.index())
-                << "|R]";
+      return os << "[" << conf->double_register_name(op.index()) << "|R]";
   }
   UNREACHABLE();
   return os;
@@ -95,9 +94,17 @@
 }
 
 
-OStream& operator<<(OStream& os, const MoveOperands& mo) {
-  os << *mo.destination();
-  if (!mo.source()->Equals(mo.destination())) os << " = " << *mo.source();
+std::ostream& operator<<(std::ostream& os,
+                         const PrintableMoveOperands& printable) {
+  const MoveOperands& mo = *printable.move_operands_;
+  PrintableInstructionOperand printable_op = {printable.register_configuration_,
+                                              mo.destination()};
+
+  os << printable_op;
+  if (!mo.source()->Equals(mo.destination())) {
+    printable_op.op_ = mo.source();
+    os << " = " << printable_op;
+  }
   return os << ";";
 }
 
@@ -110,14 +117,27 @@
 }
 
 
-OStream& operator<<(OStream& os, const ParallelMove& pm) {
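+// A gap instruction is redundant when none of its parallel moves does any
+// work.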
+bool GapInstruction::IsRedundant() const {
+  for (int i = GapInstruction::FIRST_INNER_POSITION;
+       i <= GapInstruction::LAST_INNER_POSITION; i++) {
+    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant())
+      return false;
+  }
+  return true;
+}
+
+
+std::ostream& operator<<(std::ostream& os,
+                         const PrintableParallelMove& printable) {
+  const ParallelMove& pm = *printable.parallel_move_;
   bool first = true;
   for (ZoneList<MoveOperands>::iterator move = pm.move_operands()->begin();
        move != pm.move_operands()->end(); ++move) {
     if (move->IsEliminated()) continue;
     if (!first) os << " ";
     first = false;
-    os << *move;
+    PrintableMoveOperands pmo = {printable.register_configuration_, move};
+    os << pmo;
   }
   return os;
 }
@@ -152,7 +172,7 @@
 }
 
 
-OStream& operator<<(OStream& os, const PointerMap& pm) {
+std::ostream& operator<<(std::ostream& os, const PointerMap& pm) {
   os << "{";
   for (ZoneList<InstructionOperand*>::iterator op =
            pm.pointer_operands_.begin();
@@ -164,7 +184,7 @@
 }
 
 
-OStream& operator<<(OStream& os, const ArchOpcode& ao) {
+std::ostream& operator<<(std::ostream& os, const ArchOpcode& ao) {
   switch (ao) {
 #define CASE(Name) \
   case k##Name:    \
@@ -177,7 +197,7 @@
 }
 
 
-OStream& operator<<(OStream& os, const AddressingMode& am) {
+std::ostream& operator<<(std::ostream& os, const AddressingMode& am) {
   switch (am) {
     case kMode_None:
       return os;
@@ -192,7 +212,7 @@
 }
 
 
-OStream& operator<<(OStream& os, const FlagsMode& fm) {
+std::ostream& operator<<(std::ostream& os, const FlagsMode& fm) {
   switch (fm) {
     case kFlags_none:
       return os;
@@ -206,7 +226,7 @@
 }
 
 
-OStream& operator<<(OStream& os, const FlagsCondition& fc) {
+std::ostream& operator<<(std::ostream& os, const FlagsCondition& fc) {
   switch (fc) {
     case kEqual:
       return os << "equal";
@@ -250,11 +270,16 @@
 }
 
 
-OStream& operator<<(OStream& os, const Instruction& instr) {
+std::ostream& operator<<(std::ostream& os,
+                         const PrintableInstruction& printable) {
+  const Instruction& instr = *printable.instr_;
+  PrintableInstructionOperand printable_op = {printable.register_configuration_,
+                                              NULL};
   if (instr.OutputCount() > 1) os << "(";
   for (size_t i = 0; i < instr.OutputCount(); i++) {
     if (i > 0) os << ", ";
-    os << *instr.OutputAt(i);
+    printable_op.op_ = instr.OutputAt(i);
+    os << printable_op;
   }
 
   if (instr.OutputCount() > 1) os << ") = ";
@@ -266,7 +291,11 @@
     for (int i = GapInstruction::FIRST_INNER_POSITION;
          i <= GapInstruction::LAST_INNER_POSITION; i++) {
       os << "(";
-      if (gap->parallel_moves_[i] != NULL) os << *gap->parallel_moves_[i];
+      if (gap->parallel_moves_[i] != NULL) {
+        PrintableParallelMove ppm = {printable.register_configuration_,
+                                     gap->parallel_moves_[i]};
+        os << ppm;
+      }
       os << ") ";
     }
   } else if (instr.IsSourcePosition()) {
@@ -287,57 +316,175 @@
   }
   if (instr.InputCount() > 0) {
     for (size_t i = 0; i < instr.InputCount(); i++) {
-      os << " " << *instr.InputAt(i);
+      printable_op.op_ = instr.InputAt(i);
+      os << " " << printable_op;
     }
   }
-  return os << "\n";
+  return os;
 }
 
 
-OStream& operator<<(OStream& os, const Constant& constant) {
+std::ostream& operator<<(std::ostream& os, const Constant& constant) {
   switch (constant.type()) {
     case Constant::kInt32:
       return os << constant.ToInt32();
     case Constant::kInt64:
       return os << constant.ToInt64() << "l";
+    case Constant::kFloat32:
+      return os << constant.ToFloat32() << "f";
     case Constant::kFloat64:
       return os << constant.ToFloat64();
     case Constant::kExternalReference:
-      return os << constant.ToExternalReference().address();
+      return os << static_cast<const void*>(
+                       constant.ToExternalReference().address());
     case Constant::kHeapObject:
       return os << Brief(*constant.ToHeapObject());
+    case Constant::kRpoNumber:
+      return os << "RPO" << constant.ToRpoNumber().ToInt();
   }
   UNREACHABLE();
   return os;
 }
 
 
-Label* InstructionSequence::GetLabel(BasicBlock* block) {
-  return GetBlockStart(block)->label();
+InstructionBlock::InstructionBlock(Zone* zone, BasicBlock::Id id,
+                                   BasicBlock::RpoNumber rpo_number,
+                                   BasicBlock::RpoNumber loop_header,
+                                   BasicBlock::RpoNumber loop_end,
+                                   bool deferred)
+    : successors_(zone),
+      predecessors_(zone),
+      phis_(zone),
+      id_(id),
+      ao_number_(rpo_number),
+      rpo_number_(rpo_number),
+      loop_header_(loop_header),
+      loop_end_(loop_end),
+      code_start_(-1),
+      code_end_(-1),
+      deferred_(deferred) {}
+
+
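+// Returns the index of {rpo_number} in the predecessor list, or the number
+// of predecessors if it is not found.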
+size_t InstructionBlock::PredecessorIndexOf(
+    BasicBlock::RpoNumber rpo_number) const {
+  size_t j = 0;
+  for (InstructionBlock::Predecessors::const_iterator i = predecessors_.begin();
+       i != predecessors_.end(); ++i, ++j) {
+    if (*i == rpo_number) break;
+  }
+  return j;
 }
 
 
-BlockStartInstruction* InstructionSequence::GetBlockStart(BasicBlock* block) {
-  return BlockStartInstruction::cast(InstructionAt(block->code_start_));
+static BasicBlock::RpoNumber GetRpo(BasicBlock* block) {
+  if (block == NULL) return BasicBlock::RpoNumber::Invalid();
+  return block->GetRpoNumber();
 }
 
 
-void InstructionSequence::StartBlock(BasicBlock* block) {
-  block->code_start_ = static_cast<int>(instructions_.size());
-  BlockStartInstruction* block_start =
-      BlockStartInstruction::New(zone(), block);
-  AddInstruction(block_start, block);
+static BasicBlock::RpoNumber GetLoopEndRpo(const BasicBlock* block) {
+  if (!block->IsLoopHeader()) return BasicBlock::RpoNumber::Invalid();
+  return block->loop_end()->GetRpoNumber();
 }
 
 
-void InstructionSequence::EndBlock(BasicBlock* block) {
+static InstructionBlock* InstructionBlockFor(Zone* zone,
+                                             const BasicBlock* block) {
+  InstructionBlock* instr_block = new (zone) InstructionBlock(
+      zone, block->id(), block->GetRpoNumber(), GetRpo(block->loop_header()),
+      GetLoopEndRpo(block), block->deferred());
+  // Map successors and predecessors to their RPO numbers.
+  instr_block->successors().reserve(block->SuccessorCount());
+  for (auto it = block->successors_begin(); it != block->successors_end();
+       ++it) {
+    instr_block->successors().push_back((*it)->GetRpoNumber());
+  }
+  instr_block->predecessors().reserve(block->PredecessorCount());
+  for (auto it = block->predecessors_begin(); it != block->predecessors_end();
+       ++it) {
+    instr_block->predecessors().push_back((*it)->GetRpoNumber());
+  }
+  return instr_block;
+}
+
+
+InstructionBlocks* InstructionSequence::InstructionBlocksFor(
+    Zone* zone, const Schedule* schedule) {
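+  // Allocate the container in the zone, then construct it in place.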
+  InstructionBlocks* blocks = zone->NewArray<InstructionBlocks>(1);
+  new (blocks) InstructionBlocks(
+      static_cast<int>(schedule->rpo_order()->size()), NULL, zone);
+  size_t rpo_number = 0;
+  for (BasicBlockVector::const_iterator it = schedule->rpo_order()->begin();
+       it != schedule->rpo_order()->end(); ++it, ++rpo_number) {
+    DCHECK_EQ(NULL, (*blocks)[rpo_number]);
+    DCHECK((*it)->GetRpoNumber().ToSize() == rpo_number);
+    (*blocks)[rpo_number] = InstructionBlockFor(zone, *it);
+  }
+  ComputeAssemblyOrder(blocks);
+  return blocks;
+}
+
+
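+// Assigns assembly order numbers: non-deferred blocks come first, in RPO
+// order, followed by all deferred blocks.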
+void InstructionSequence::ComputeAssemblyOrder(InstructionBlocks* blocks) {
+  int ao = 0;
+  for (auto const block : *blocks) {
+    if (!block->IsDeferred()) {
+      block->set_ao_number(BasicBlock::RpoNumber::FromInt(ao++));
+    }
+  }
+  for (auto const block : *blocks) {
+    if (block->IsDeferred()) {
+      block->set_ao_number(BasicBlock::RpoNumber::FromInt(ao++));
+    }
+  }
+}
+
+
+InstructionSequence::InstructionSequence(Zone* instruction_zone,
+                                         InstructionBlocks* instruction_blocks)
+    : zone_(instruction_zone),
+      instruction_blocks_(instruction_blocks),
+      block_starts_(zone()),
+      constants_(ConstantMap::key_compare(),
+                 ConstantMap::allocator_type(zone())),
+      immediates_(zone()),
+      instructions_(zone()),
+      next_virtual_register_(0),
+      pointer_maps_(zone()),
+      doubles_(std::less<int>(), VirtualRegisterSet::allocator_type(zone())),
+      references_(std::less<int>(), VirtualRegisterSet::allocator_type(zone())),
+      deoptimization_entries_(zone()) {
+  block_starts_.reserve(instruction_blocks_->size());
+}
+
+
+BlockStartInstruction* InstructionSequence::GetBlockStart(
+    BasicBlock::RpoNumber rpo) const {
+  const InstructionBlock* block = InstructionBlockAt(rpo);
+  return BlockStartInstruction::cast(InstructionAt(block->code_start()));
+}
+
+
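+// Records where the code for {rpo} begins and emits its
+// BlockStartInstruction.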
+void InstructionSequence::StartBlock(BasicBlock::RpoNumber rpo) {
+  DCHECK(block_starts_.size() == rpo.ToSize());
+  InstructionBlock* block = InstructionBlockAt(rpo);
+  int code_start = static_cast<int>(instructions_.size());
+  block->set_code_start(code_start);
+  block_starts_.push_back(code_start);
+  BlockStartInstruction* block_start = BlockStartInstruction::New(zone());
+  AddInstruction(block_start);
+}
+
+
+void InstructionSequence::EndBlock(BasicBlock::RpoNumber rpo) {
   int end = static_cast<int>(instructions_.size());
-  DCHECK(block->code_start_ >= 0 && block->code_start_ < end);
-  block->code_end_ = end;
+  InstructionBlock* block = InstructionBlockAt(rpo);
+  DCHECK(block->code_start() >= 0 && block->code_start() < end);
+  block->set_code_end(end);
 }
 
 
-int InstructionSequence::AddInstruction(Instruction* instr, BasicBlock* block) {
+int InstructionSequence::AddInstruction(Instruction* instr) {
   // TODO(titzer): the order of these gaps is a holdover from Lithium.
   GapInstruction* gap = GapInstruction::New(zone());
   if (instr->IsControl()) instructions_.push_back(gap);
@@ -355,15 +502,17 @@
 }
 
 
-BasicBlock* InstructionSequence::GetBasicBlock(int instruction_index) {
-  // TODO(turbofan): Optimize this.
-  for (;;) {
-    DCHECK_LE(0, instruction_index);
-    Instruction* instruction = InstructionAt(instruction_index--);
-    if (instruction->IsBlockStart()) {
-      return BlockStartInstruction::cast(instruction)->block();
-    }
-  }
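+// Finds the block covering {instruction_index} with a binary search over the
+// recorded block start offsets.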
+const InstructionBlock* InstructionSequence::GetInstructionBlock(
+    int instruction_index) const {
+  DCHECK(instruction_blocks_->size() == block_starts_.size());
+  auto begin = block_starts_.begin();
+  auto end = std::lower_bound(begin, block_starts_.end(), instruction_index,
+                              std::less_equal<int>());
+  size_t index = std::distance(begin, end) - 1;
+  auto block = instruction_blocks_->at(index);
+  DCHECK(block->code_start() <= instruction_index &&
+         instruction_index < block->code_end());
+  return block;
 }
 
 
@@ -412,7 +561,81 @@
 }
 
 
-OStream& operator<<(OStream& os, const InstructionSequence& code) {
+FrameStateDescriptor::FrameStateDescriptor(
+    Zone* zone, const FrameStateCallInfo& state_info, size_t parameters_count,
+    size_t locals_count, size_t stack_count, FrameStateDescriptor* outer_state)
+    : type_(state_info.type()),
+      bailout_id_(state_info.bailout_id()),
+      frame_state_combine_(state_info.state_combine()),
+      parameters_count_(parameters_count),
+      locals_count_(locals_count),
+      stack_count_(stack_count),
+      types_(zone),
+      outer_state_(outer_state),
+      jsfunction_(state_info.jsfunction()) {
+  types_.resize(GetSize(), kMachNone);
+}
+
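+// The size of this frame's state: parameters, locals, stack and the optional
+// context, plus any values pushed by {combine}.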
+size_t FrameStateDescriptor::GetSize(OutputFrameStateCombine combine) const {
+  size_t size = parameters_count() + locals_count() + stack_count() +
+                (HasContext() ? 1 : 0);
+  switch (combine.kind()) {
+    case OutputFrameStateCombine::kPushOutput:
+      size += combine.GetPushCount();
+      break;
+    case OutputFrameStateCombine::kPokeAt:
+      break;
+  }
+  return size;
+}
+
+
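+// Sums GetSize() over this descriptor and all of its outer frame states.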
+size_t FrameStateDescriptor::GetTotalSize() const {
+  size_t total_size = 0;
+  for (const FrameStateDescriptor* iter = this; iter != NULL;
+       iter = iter->outer_state_) {
+    total_size += iter->GetSize();
+  }
+  return total_size;
+}
+
+
+size_t FrameStateDescriptor::GetFrameCount() const {
+  size_t count = 0;
+  for (const FrameStateDescriptor* iter = this; iter != NULL;
+       iter = iter->outer_state_) {
+    ++count;
+  }
+  return count;
+}
+
+
+size_t FrameStateDescriptor::GetJSFrameCount() const {
+  size_t count = 0;
+  for (const FrameStateDescriptor* iter = this; iter != NULL;
+       iter = iter->outer_state_) {
+    if (iter->type_ == JS_FRAME) {
+      ++count;
+    }
+  }
+  return count;
+}
+
+
+MachineType FrameStateDescriptor::GetType(size_t index) const {
+  return types_[index];
+}
+
+
+void FrameStateDescriptor::SetType(size_t index, MachineType type) {
+  DCHECK(index < GetSize());
+  types_[index] = type;
+}
+
+
+std::ostream& operator<<(std::ostream& os,
+                         const PrintableInstructionSequence& printable) {
+  const InstructionSequence& code = *printable.sequence_;
   for (size_t i = 0; i < code.immediates_.size(); ++i) {
     Constant constant = code.immediates_[i];
     os << "IMM#" << i << ": " << constant << "\n";
@@ -422,57 +645,53 @@
        it != code.constants_.end(); ++i, ++it) {
     os << "CST#" << i << ": v" << it->first << " = " << it->second << "\n";
   }
-  for (int i = 0; i < code.BasicBlockCount(); i++) {
-    BasicBlock* block = code.BlockAt(i);
+  for (int i = 0; i < code.InstructionBlockCount(); i++) {
+    BasicBlock::RpoNumber rpo = BasicBlock::RpoNumber::FromInt(i);
+    const InstructionBlock* block = code.InstructionBlockAt(rpo);
+    CHECK(block->rpo_number() == rpo);
 
-    int bid = block->id();
-    os << "RPO#" << block->rpo_number_ << ": B" << bid;
-    CHECK(block->rpo_number_ == i);
+    os << "RPO#" << block->rpo_number();
+    os << ": AO#" << block->ao_number();
+    os << ": B" << block->id();
+    if (block->IsDeferred()) os << " (deferred)";
     if (block->IsLoopHeader()) {
-      os << " loop blocks: [" << block->rpo_number_ << ", " << block->loop_end_
-         << ")";
+      os << " loop blocks: [" << block->rpo_number() << ", "
+         << block->loop_end() << ")";
     }
-    os << "  instructions: [" << block->code_start_ << ", " << block->code_end_
-       << ")\n  predecessors:";
+    os << "  instructions: [" << block->code_start() << ", "
+       << block->code_end() << ")\n  predecessors:";
 
-    BasicBlock::Predecessors predecessors = block->predecessors();
-    for (BasicBlock::Predecessors::iterator iter = predecessors.begin();
-         iter != predecessors.end(); ++iter) {
-      os << " B" << (*iter)->id();
+    for (auto pred : block->predecessors()) {
+      const InstructionBlock* pred_block = code.InstructionBlockAt(pred);
+      os << " B" << pred_block->id();
     }
     os << "\n";
 
-    for (BasicBlock::const_iterator j = block->begin(); j != block->end();
-         ++j) {
-      Node* phi = *j;
-      if (phi->opcode() != IrOpcode::kPhi) continue;
-      os << "     phi: v" << phi->id() << " =";
-      Node::Inputs inputs = phi->inputs();
-      for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
-           ++iter) {
-        os << " v" << (*iter)->id();
+    for (auto phi : block->phis()) {
+      PrintableInstructionOperand printable_op = {
+          printable.register_configuration_, phi->output()};
+      os << "     phi: " << printable_op << " =";
+      for (auto input : phi->inputs()) {
+        printable_op.op_ = input;
+        os << " " << printable_op;
       }
       os << "\n";
     }
 
     ScopedVector<char> buf(32);
+    PrintableInstruction printable_instr;
+    printable_instr.register_configuration_ = printable.register_configuration_;
     for (int j = block->first_instruction_index();
          j <= block->last_instruction_index(); j++) {
       // TODO(svenpanne) Add some basic formatting to our streams.
       SNPrintF(buf, "%5d", j);
-      os << "   " << buf.start() << ": " << *code.InstructionAt(j);
+      printable_instr.instr_ = code.InstructionAt(j);
+      os << "   " << buf.start() << ": " << printable_instr << "\n";
     }
 
-    os << "  " << block->control_;
-
-    if (block->control_input_ != NULL) {
-      os << " v" << block->control_input_->id();
-    }
-
-    BasicBlock::Successors successors = block->successors();
-    for (BasicBlock::Successors::iterator iter = successors.begin();
-         iter != successors.end(); ++iter) {
-      os << " B" << (*iter)->id();
+    for (auto succ : block->successors()) {
+      const InstructionBlock* succ_block = code.InstructionBlockAt(succ);
+      os << " B" << succ_block->id();
     }
     os << "\n";
   }
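
As a worked example of the frame-state accounting implemented above, assuming
the slot layout from GetSize() (parameters + locals + stack values + an
optional context), with illustrative types rather than the V8 classes:

    #include <cassert>
    #include <cstddef>

    struct Frame {
      size_t parameters, locals, stack;
      bool has_context;    // JS frames carry a context slot.
      const Frame* outer;  // Next frame in the inlining chain.
      size_t Size() const {
        return parameters + locals + stack + (has_context ? 1 : 0);
      }
    };

    // Mirrors GetTotalSize(): sum Size() over the chain of outer states.
    size_t TotalSize(const Frame* f) {
      size_t total = 0;
      for (; f != nullptr; f = f->outer) total += f->Size();
      return total;
    }

    int main() {
      Frame outer = {2, 1, 0, true, nullptr};  // 2 + 1 + 0 + 1 == 4 slots
      Frame inner = {2, 1, 0, true, &outer};   // same shape, inlined
      assert(TotalSize(&inner) == 8);
      return 0;
    }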
diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h
index 6d00784..daa83f2 100644
--- a/src/compiler/instruction.h
+++ b/src/compiler/instruction.h
@@ -6,48 +6,39 @@
 #define V8_COMPILER_INSTRUCTION_H_
 
 #include <deque>
+#include <iosfwd>
 #include <map>
 #include <set>
 
 #include "src/compiler/common-operator.h"
 #include "src/compiler/frame.h"
-#include "src/compiler/graph.h"
 #include "src/compiler/instruction-codes.h"
 #include "src/compiler/opcodes.h"
+#include "src/compiler/register-configuration.h"
 #include "src/compiler/schedule.h"
-// TODO(titzer): don't include the macro-assembler?
-#include "src/macro-assembler.h"
+#include "src/compiler/source-position.h"
 #include "src/zone-allocator.h"
 
 namespace v8 {
 namespace internal {
-
-// Forward declarations.
-class OStream;
-
 namespace compiler {
 
-// Forward declarations.
-class Linkage;
-
 // A couple of reserved opcodes are used for internal use.
 const InstructionCode kGapInstruction = -1;
 const InstructionCode kBlockStartInstruction = -2;
 const InstructionCode kSourcePositionInstruction = -3;
 
-
-#define INSTRUCTION_OPERAND_LIST(V)              \
-  V(Constant, CONSTANT, 128)                     \
-  V(Immediate, IMMEDIATE, 128)                   \
-  V(StackSlot, STACK_SLOT, 128)                  \
-  V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128)     \
-  V(Register, REGISTER, Register::kNumRegisters) \
-  V(DoubleRegister, DOUBLE_REGISTER, DoubleRegister::kMaxNumRegisters)
+#define INSTRUCTION_OPERAND_LIST(V)                                  \
+  V(Constant, CONSTANT, 0)                                           \
+  V(Immediate, IMMEDIATE, 0)                                         \
+  V(StackSlot, STACK_SLOT, 128)                                      \
+  V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128)                         \
+  V(Register, REGISTER, RegisterConfiguration::kMaxGeneralRegisters) \
+  V(DoubleRegister, DOUBLE_REGISTER, RegisterConfiguration::kMaxDoubleRegisters)
 
 class InstructionOperand : public ZoneObject {
  public:
   enum Kind {
-    INVALID,
     UNALLOCATED,
     CONSTANT,
     IMMEDIATE,
@@ -57,7 +48,6 @@
     DOUBLE_REGISTER
   };
 
-  InstructionOperand() : value_(KindField::encode(INVALID)) {}
   InstructionOperand(Kind kind, int index) { ConvertTo(kind, index); }
 
   Kind kind() const { return KindField::decode(value_); }
@@ -66,16 +56,15 @@
   bool Is##name() const { return kind() == type; }
   INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_PREDICATE)
   INSTRUCTION_OPERAND_PREDICATE(Unallocated, UNALLOCATED, 0)
-  INSTRUCTION_OPERAND_PREDICATE(Ignored, INVALID, 0)
 #undef INSTRUCTION_OPERAND_PREDICATE
-  bool Equals(InstructionOperand* other) const {
+  bool Equals(const InstructionOperand* other) const {
     return value_ == other->value_;
   }
 
   void ConvertTo(Kind kind, int index) {
     if (kind == REGISTER || kind == DOUBLE_REGISTER) DCHECK(index >= 0);
     value_ = KindField::encode(kind);
-    value_ |= index << KindField::kSize;
+    value_ |= bit_cast<unsigned>(index << KindField::kSize);
     DCHECK(this->index() == index);
   }
 
@@ -84,14 +73,20 @@
   static void TearDownCaches();
 
  protected:
-  typedef BitField<Kind, 0, 3> KindField;
+  typedef BitField64<Kind, 0, 3> KindField;
 
-  unsigned value_;
+  uint64_t value_;
 };
 
 typedef ZoneVector<InstructionOperand*> InstructionOperandVector;
 
-OStream& operator<<(OStream& os, const InstructionOperand& op);
+struct PrintableInstructionOperand {
+  const RegisterConfiguration* register_configuration_;
+  const InstructionOperand* op_;
+};
+
+std::ostream& operator<<(std::ostream& os,
+                         const PrintableInstructionOperand& op);
 
 class UnallocatedOperand : public InstructionOperand {
  public:
@@ -122,6 +117,7 @@
 
   explicit UnallocatedOperand(ExtendedPolicy policy)
       : InstructionOperand(UNALLOCATED, 0) {
+    value_ |= VirtualRegisterField::encode(kInvalidVirtualRegister);
     value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
     value_ |= ExtendedPolicyField::encode(policy);
     value_ |= LifetimeField::encode(USED_AT_END);
@@ -130,14 +126,16 @@
   UnallocatedOperand(BasicPolicy policy, int index)
       : InstructionOperand(UNALLOCATED, 0) {
     DCHECK(policy == FIXED_SLOT);
+    value_ |= VirtualRegisterField::encode(kInvalidVirtualRegister);
     value_ |= BasicPolicyField::encode(policy);
-    value_ |= index << FixedSlotIndexField::kShift;
+    value_ |= static_cast<int64_t>(index) << FixedSlotIndexField::kShift;
     DCHECK(this->fixed_slot_index() == index);
   }
 
   UnallocatedOperand(ExtendedPolicy policy, int index)
       : InstructionOperand(UNALLOCATED, 0) {
     DCHECK(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER);
+    value_ |= VirtualRegisterField::encode(kInvalidVirtualRegister);
     value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
     value_ |= ExtendedPolicyField::encode(policy);
     value_ |= LifetimeField::encode(USED_AT_END);
@@ -146,6 +144,7 @@
 
   UnallocatedOperand(ExtendedPolicy policy, Lifetime lifetime)
       : InstructionOperand(UNALLOCATED, 0) {
+    value_ |= VirtualRegisterField::encode(kInvalidVirtualRegister);
     value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
     value_ |= ExtendedPolicyField::encode(policy);
     value_ |= LifetimeField::encode(lifetime);
@@ -183,24 +182,25 @@
   //     +------------------------------------------+    P ... Policy
   //
   // The slot index is a signed value which requires us to decode it manually
-  // instead of using the BitField utility class.
+  // instead of using the BitField64 utility class.
 
   // The superclass has a KindField.
   STATIC_ASSERT(KindField::kSize == 3);
 
   // BitFields for all unallocated operands.
-  class BasicPolicyField : public BitField<BasicPolicy, 3, 1> {};
-  class VirtualRegisterField : public BitField<unsigned, 4, 18> {};
+  class BasicPolicyField : public BitField64<BasicPolicy, 3, 1> {};
+  class VirtualRegisterField : public BitField64<unsigned, 4, 30> {};
 
   // BitFields specific to BasicPolicy::FIXED_SLOT.
-  class FixedSlotIndexField : public BitField<int, 22, 10> {};
+  class FixedSlotIndexField : public BitField64<int, 34, 30> {};
 
   // BitFields specific to BasicPolicy::EXTENDED_POLICY.
-  class ExtendedPolicyField : public BitField<ExtendedPolicy, 22, 3> {};
-  class LifetimeField : public BitField<Lifetime, 25, 1> {};
-  class FixedRegisterField : public BitField<int, 26, 6> {};
+  class ExtendedPolicyField : public BitField64<ExtendedPolicy, 34, 3> {};
+  class LifetimeField : public BitField64<Lifetime, 37, 1> {};
+  class FixedRegisterField : public BitField64<int, 38, 6> {};
 
-  static const int kMaxVirtualRegisters = VirtualRegisterField::kMax + 1;
+  static const int kInvalidVirtualRegister = VirtualRegisterField::kMax;
+  static const int kMaxVirtualRegisters = VirtualRegisterField::kMax;
   static const int kFixedSlotIndexWidth = FixedSlotIndexField::kSize;
   static const int kMaxFixedSlotIndex = (1 << (kFixedSlotIndexWidth - 1)) - 1;
   static const int kMinFixedSlotIndex = -(1 << (kFixedSlotIndexWidth - 1));
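
For illustration, the manual signed decode that the comment above calls for,
reduced to a standalone sketch; the shift amount matches the layout above, and
the other bit fields sharing the word are ignored here:

    #include <cassert>
    #include <cstdint>

    const int kShift = 34;  // FixedSlotIndexField::kShift in the layout above.

    uint64_t EncodeSlotIndex(int index) {
      // The signed index lives in the topmost bits of the 64-bit word.
      return static_cast<uint64_t>(static_cast<int64_t>(index)) << kShift;
    }

    int DecodeSlotIndex(uint64_t value) {
      // Arithmetic right shift on the signed view sign-extends for free.
      return static_cast<int>(static_cast<int64_t>(value) >> kShift);
    }

    int main() {
      assert(DecodeSlotIndex(EncodeSlotIndex(-3)) == -3);
      assert(DecodeSlotIndex(EncodeSlotIndex(5)) == 5);
      return 0;
    }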
@@ -244,7 +244,8 @@
   // [fixed_slot_index]: Only for FIXED_SLOT.
   int fixed_slot_index() const {
     DCHECK(HasFixedSlotPolicy());
-    return static_cast<int>(value_) >> FixedSlotIndexField::kShift;
+    return static_cast<int>(bit_cast<int64_t>(value_) >>
+                            FixedSlotIndexField::kShift);
   }
 
   // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_DOUBLE_REGISTER.
@@ -260,7 +261,7 @@
   }
 
   // [lifetime]: Only for non-FIXED_SLOT.
-  bool IsUsedAtStart() {
+  bool IsUsedAtStart() const {
     DCHECK(basic_policy() == EXTENDED_POLICY);
     return LifetimeField::decode(value_) == USED_AT_START;
   }
@@ -288,16 +289,12 @@
   }
 
   // A move is redundant if it's been eliminated, if its source and
-  // destination are the same, or if its destination is unneeded or constant.
+  // destination are the same, or if its destination is constant.
   bool IsRedundant() const {
-    return IsEliminated() || source_->Equals(destination_) || IsIgnored() ||
+    return IsEliminated() || source_->Equals(destination_) ||
            (destination_ != NULL && destination_->IsConstant());
   }
 
-  bool IsIgnored() const {
-    return destination_ != NULL && destination_->IsIgnored();
-  }
-
   // We clear both operands to indicate a move that's been eliminated.
   void Eliminate() { source_ = destination_ = NULL; }
   bool IsEliminated() const {
@@ -310,7 +307,15 @@
   InstructionOperand* destination_;
 };
 
-OStream& operator<<(OStream& os, const MoveOperands& mo);
+
+struct PrintableMoveOperands {
+  const RegisterConfiguration* register_configuration_;
+  const MoveOperands* move_operands_;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const PrintableMoveOperands& mo);
+
 
 template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
 class SubKindOperand FINAL : public InstructionOperand {
@@ -326,13 +331,18 @@
     return reinterpret_cast<SubKindOperand*>(op);
   }
 
+  static const SubKindOperand* cast(const InstructionOperand* op) {
+    DCHECK(op->kind() == kOperandKind);
+    return reinterpret_cast<const SubKindOperand*>(op);
+  }
+
   static void SetUpCache();
   static void TearDownCache();
 
  private:
   static SubKindOperand* cache;
 
-  SubKindOperand() : InstructionOperand() {}
+  SubKindOperand() : InstructionOperand(kOperandKind, 0) {}  // For the caches.
   explicit SubKindOperand(int index)
       : InstructionOperand(kOperandKind, index) {}
 };
@@ -363,7 +373,15 @@
   ZoneList<MoveOperands> move_operands_;
 };
 
-OStream& operator<<(OStream& os, const ParallelMove& pm);
+
+struct PrintableParallelMove {
+  const RegisterConfiguration* register_configuration_;
+  const ParallelMove* parallel_move_;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const PrintableParallelMove& pm);
+
 
 class PointerMap FINAL : public ZoneObject {
  public:
@@ -391,14 +409,14 @@
   void RecordUntagged(InstructionOperand* op, Zone* zone);
 
  private:
-  friend OStream& operator<<(OStream& os, const PointerMap& pm);
+  friend std::ostream& operator<<(std::ostream& os, const PointerMap& pm);
 
   ZoneList<InstructionOperand*> pointer_operands_;
   ZoneList<InstructionOperand*> untagged_operands_;
   int instruction_position_;
 };
 
-OStream& operator<<(OStream& os, const PointerMap& pm);
+std::ostream& operator<<(std::ostream& os, const PointerMap& pm);
 
 // TODO(titzer): s/PointerMap/ReferenceMap/
 class Instruction : public ZoneObject {
@@ -417,6 +435,10 @@
     DCHECK(i < InputCount());
     return operands_[OutputCount() + i];
   }
+  void SetInputAt(size_t i, InstructionOperand* operand) {
+    DCHECK(i < InputCount());
+    operands_[OutputCount() + i] = operand;
+  }
 
   size_t TempCount() const { return TempCountField::decode(bit_field_); }
   InstructionOperand* TempAt(size_t i) const {
@@ -496,6 +518,17 @@
 
   void operator delete(void* pointer, void* location) { UNREACHABLE(); }
 
+  void OverwriteWithNop() {
+    opcode_ = ArchOpcodeField::encode(kArchNop);
+    bit_field_ = 0;
+    pointer_map_ = NULL;
+  }
+
+  bool IsNop() const {
+    return arch_opcode() == kArchNop && InputCount() == 0 &&
+           OutputCount() == 0 && TempCount() == 0;
+  }
+
  protected:
   explicit Instruction(InstructionCode opcode)
       : opcode_(opcode),
@@ -538,7 +571,13 @@
   InstructionOperand* operands_[1];
 };
 
-OStream& operator<<(OStream& os, const Instruction& instr);
+
+struct PrintableInstruction {
+  const RegisterConfiguration* register_configuration_;
+  const Instruction* instr_;
+};
+std::ostream& operator<<(std::ostream& os, const PrintableInstruction& instr);
+
 
 // Represents moves inserted before an instruction due to register allocation.
 // TODO(titzer): squash GapInstruction back into Instruction, since essentially
@@ -565,6 +604,14 @@
     return parallel_moves_[pos];
   }
 
+  const ParallelMove* GetParallelMove(InnerPosition pos) const {
+    return parallel_moves_[pos];
+  }
+
+  bool IsRedundant() const;
+
+  ParallelMove** parallel_moves() { return parallel_moves_; }
+
   static GapInstruction* New(Zone* zone) {
     void* buffer = zone->New(sizeof(GapInstruction));
     return new (buffer) GapInstruction(kGapInstruction);
@@ -589,22 +636,19 @@
   }
 
  private:
-  friend OStream& operator<<(OStream& os, const Instruction& instr);
+  friend std::ostream& operator<<(std::ostream& os,
+                                  const PrintableInstruction& instr);
   ParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
 };
 
 
 // This special kind of gap move instruction represents the beginning of a
 // block of code.
-// TODO(titzer): move code_start and code_end from BasicBlock to here.
 class BlockStartInstruction FINAL : public GapInstruction {
  public:
-  BasicBlock* block() const { return block_; }
-  Label* label() { return &label_; }
-
-  static BlockStartInstruction* New(Zone* zone, BasicBlock* block) {
+  static BlockStartInstruction* New(Zone* zone) {
     void* buffer = zone->New(sizeof(BlockStartInstruction));
-    return new (buffer) BlockStartInstruction(block);
+    return new (buffer) BlockStartInstruction();
   }
 
   static BlockStartInstruction* cast(Instruction* instr) {
@@ -612,12 +656,13 @@
     return static_cast<BlockStartInstruction*>(instr);
   }
 
- private:
-  explicit BlockStartInstruction(BasicBlock* block)
-      : GapInstruction(kBlockStartInstruction), block_(block) {}
+  static const BlockStartInstruction* cast(const Instruction* instr) {
+    DCHECK(instr->IsBlockStart());
+    return static_cast<const BlockStartInstruction*>(instr);
+  }
 
-  BasicBlock* block_;
-  Label label_;
+ private:
+  BlockStartInstruction() : GapInstruction(kBlockStartInstruction) {}
 };
 
 
@@ -654,21 +699,34 @@
 
 class Constant FINAL {
  public:
-  enum Type { kInt32, kInt64, kFloat64, kExternalReference, kHeapObject };
+  enum Type {
+    kInt32,
+    kInt64,
+    kFloat32,
+    kFloat64,
+    kExternalReference,
+    kHeapObject,
+    kRpoNumber
+  };
 
   explicit Constant(int32_t v) : type_(kInt32), value_(v) {}
   explicit Constant(int64_t v) : type_(kInt64), value_(v) {}
+  explicit Constant(float v) : type_(kFloat32), value_(bit_cast<int32_t>(v)) {}
   explicit Constant(double v) : type_(kFloat64), value_(bit_cast<int64_t>(v)) {}
   explicit Constant(ExternalReference ref)
       : type_(kExternalReference), value_(bit_cast<intptr_t>(ref)) {}
   explicit Constant(Handle<HeapObject> obj)
       : type_(kHeapObject), value_(bit_cast<intptr_t>(obj)) {}
+  explicit Constant(BasicBlock::RpoNumber rpo)
+      : type_(kRpoNumber), value_(rpo.ToInt()) {}
 
   Type type() const { return type_; }
 
   int32_t ToInt32() const {
-    DCHECK_EQ(kInt32, type());
-    return static_cast<int32_t>(value_);
+    DCHECK(type() == kInt32 || type() == kInt64);
+    const int32_t value = static_cast<int32_t>(value_);
+    DCHECK_EQ(value_, static_cast<int64_t>(value));
+    return value;
   }
 
   int64_t ToInt64() const {
@@ -677,6 +735,11 @@
     return value_;
   }
 
+  float ToFloat32() const {
+    DCHECK_EQ(kFloat32, type());
+    return bit_cast<float>(static_cast<int32_t>(value_));
+  }
+
   double ToFloat64() const {
     if (type() == kInt32) return ToInt32();
     DCHECK_EQ(kFloat64, type());
@@ -688,6 +751,11 @@
     return bit_cast<ExternalReference>(static_cast<intptr_t>(value_));
   }
 
+  BasicBlock::RpoNumber ToRpoNumber() const {
+    DCHECK_EQ(kRpoNumber, type());
+    return BasicBlock::RpoNumber::FromInt(static_cast<int>(value_));
+  }
+
   Handle<HeapObject> ToHeapObject() const {
     DCHECK_EQ(kHeapObject, type());
     return bit_cast<Handle<HeapObject> >(static_cast<intptr_t>(value_));
@@ -701,18 +769,10 @@
 
 class FrameStateDescriptor : public ZoneObject {
  public:
-  FrameStateDescriptor(const FrameStateCallInfo& state_info,
+  FrameStateDescriptor(Zone* zone, const FrameStateCallInfo& state_info,
                        size_t parameters_count, size_t locals_count,
                        size_t stack_count,
-                       FrameStateDescriptor* outer_state = NULL)
-      : type_(state_info.type()),
-        bailout_id_(state_info.bailout_id()),
-        frame_state_combine_(state_info.state_combine()),
-        parameters_count_(parameters_count),
-        locals_count_(locals_count),
-        stack_count_(stack_count),
-        outer_state_(outer_state),
-        jsfunction_(state_info.jsfunction()) {}
+                       FrameStateDescriptor* outer_state = NULL);
 
   FrameStateType type() const { return type_; }
   BailoutId bailout_id() const { return bailout_id_; }
@@ -722,55 +782,17 @@
   size_t stack_count() const { return stack_count_; }
   FrameStateDescriptor* outer_state() const { return outer_state_; }
   MaybeHandle<JSFunction> jsfunction() const { return jsfunction_; }
-
-  size_t size() const {
-    return parameters_count_ + locals_count_ + stack_count_ +
-           (HasContext() ? 1 : 0);
-  }
-
-  size_t GetTotalSize() const {
-    size_t total_size = 0;
-    for (const FrameStateDescriptor* iter = this; iter != NULL;
-         iter = iter->outer_state_) {
-      total_size += iter->size();
-    }
-    return total_size;
-  }
-
-  size_t GetHeight(OutputFrameStateCombine override) const {
-    size_t height = size() - parameters_count();
-    switch (override) {
-      case kPushOutput:
-        ++height;
-        break;
-      case kIgnoreOutput:
-        break;
-    }
-    return height;
-  }
-
-  size_t GetFrameCount() const {
-    size_t count = 0;
-    for (const FrameStateDescriptor* iter = this; iter != NULL;
-         iter = iter->outer_state_) {
-      ++count;
-    }
-    return count;
-  }
-
-  size_t GetJSFrameCount() const {
-    size_t count = 0;
-    for (const FrameStateDescriptor* iter = this; iter != NULL;
-         iter = iter->outer_state_) {
-      if (iter->type_ == JS_FRAME) {
-        ++count;
-      }
-    }
-    return count;
-  }
-
   bool HasContext() const { return type_ == JS_FRAME; }
 
+  size_t GetSize(OutputFrameStateCombine combine =
+                     OutputFrameStateCombine::Ignore()) const;
+  size_t GetTotalSize() const;
+  size_t GetFrameCount() const;
+  size_t GetJSFrameCount() const;
+
+  MachineType GetType(size_t index) const;
+  void SetType(size_t index, MachineType type);
+
  private:
   FrameStateType type_;
   BailoutId bailout_id_;
@@ -778,11 +800,128 @@
   size_t parameters_count_;
   size_t locals_count_;
   size_t stack_count_;
+  ZoneVector<MachineType> types_;
   FrameStateDescriptor* outer_state_;
   MaybeHandle<JSFunction> jsfunction_;
 };
 
-OStream& operator<<(OStream& os, const Constant& constant);
+std::ostream& operator<<(std::ostream& os, const Constant& constant);
+
+
+class PhiInstruction FINAL : public ZoneObject {
+ public:
+  typedef ZoneVector<InstructionOperand*> Inputs;
+
+  PhiInstruction(Zone* zone, int virtual_register, size_t reserved_input_count)
+      : virtual_register_(virtual_register),
+        operands_(zone),
+        output_(nullptr),
+        inputs_(zone) {
+    UnallocatedOperand* output =
+        new (zone) UnallocatedOperand(UnallocatedOperand::NONE);
+    output->set_virtual_register(virtual_register);
+    output_ = output;
+    inputs_.reserve(reserved_input_count);
+    operands_.reserve(reserved_input_count);
+  }
+
+  int virtual_register() const { return virtual_register_; }
+  const IntVector& operands() const { return operands_; }
+
+  void Extend(Zone* zone, int virtual_register) {
+    UnallocatedOperand* input =
+        new (zone) UnallocatedOperand(UnallocatedOperand::ANY);
+    input->set_virtual_register(virtual_register);
+    operands_.push_back(virtual_register);
+    inputs_.push_back(input);
+  }
+
+  InstructionOperand* output() const { return output_; }
+  const Inputs& inputs() const { return inputs_; }
+  Inputs& inputs() { return inputs_; }
+
+ private:
+  // TODO(dcarney): some of these fields are only for verification, move them to
+  // verifier.
+  const int virtual_register_;
+  IntVector operands_;
+  InstructionOperand* output_;
+  Inputs inputs_;
+};
+
+
+// Analogue of BasicBlock for Instructions instead of Nodes.
+class InstructionBlock FINAL : public ZoneObject {
+ public:
+  InstructionBlock(Zone* zone, BasicBlock::Id id,
+                   BasicBlock::RpoNumber rpo_number,
+                   BasicBlock::RpoNumber loop_header,
+                   BasicBlock::RpoNumber loop_end, bool deferred);
+
+  // Instruction indexes (used by the register allocator).
+  int first_instruction_index() const {
+    DCHECK(code_start_ >= 0);
+    DCHECK(code_end_ > 0);
+    DCHECK(code_end_ >= code_start_);
+    return code_start_;
+  }
+  int last_instruction_index() const {
+    DCHECK(code_start_ >= 0);
+    DCHECK(code_end_ > 0);
+    DCHECK(code_end_ >= code_start_);
+    return code_end_ - 1;
+  }
+
+  int32_t code_start() const { return code_start_; }
+  void set_code_start(int32_t start) { code_start_ = start; }
+
+  int32_t code_end() const { return code_end_; }
+  void set_code_end(int32_t end) { code_end_ = end; }
+
+  bool IsDeferred() const { return deferred_; }
+
+  BasicBlock::Id id() const { return id_; }
+  BasicBlock::RpoNumber ao_number() const { return ao_number_; }
+  BasicBlock::RpoNumber rpo_number() const { return rpo_number_; }
+  BasicBlock::RpoNumber loop_header() const { return loop_header_; }
+  BasicBlock::RpoNumber loop_end() const {
+    DCHECK(IsLoopHeader());
+    return loop_end_;
+  }
+  inline bool IsLoopHeader() const { return loop_end_.IsValid(); }
+
+  typedef ZoneVector<BasicBlock::RpoNumber> Predecessors;
+  Predecessors& predecessors() { return predecessors_; }
+  const Predecessors& predecessors() const { return predecessors_; }
+  size_t PredecessorCount() const { return predecessors_.size(); }
+  size_t PredecessorIndexOf(BasicBlock::RpoNumber rpo_number) const;
+
+  typedef ZoneVector<BasicBlock::RpoNumber> Successors;
+  Successors& successors() { return successors_; }
+  const Successors& successors() const { return successors_; }
+  size_t SuccessorCount() const { return successors_.size(); }
+
+  typedef ZoneVector<PhiInstruction*> PhiInstructions;
+  const PhiInstructions& phis() const { return phis_; }
+  void AddPhi(PhiInstruction* phi) { phis_.push_back(phi); }
+
+  void set_ao_number(BasicBlock::RpoNumber ao_number) {
+    ao_number_ = ao_number;
+  }
+
+ private:
+  Successors successors_;
+  Predecessors predecessors_;
+  PhiInstructions phis_;
+  const BasicBlock::Id id_;
+  BasicBlock::RpoNumber ao_number_;  // Assembly order number.
+  const BasicBlock::RpoNumber rpo_number_;
+  const BasicBlock::RpoNumber loop_header_;
+  const BasicBlock::RpoNumber loop_end_;
+  int32_t code_start_;   // start index of arch-specific code.
+  int32_t code_end_;     // end index of arch-specific code.
+  const bool deferred_;  // Block contains deferred code.
+};
 
 typedef ZoneDeque<Constant> ConstantDeque;
 typedef std::map<int, Constant, std::less<int>,
@@ -791,49 +930,49 @@
 typedef ZoneDeque<Instruction*> InstructionDeque;
 typedef ZoneDeque<PointerMap*> PointerMapDeque;
 typedef ZoneVector<FrameStateDescriptor*> DeoptimizationVector;
+typedef ZoneVector<InstructionBlock*> InstructionBlocks;
+
+struct PrintableInstructionSequence;
+
 
 // Represents architecture-specific generated code before, during, and after
 // register allocation.
 // TODO(titzer): s/IsDouble/IsFloat64/
-class InstructionSequence FINAL {
+class InstructionSequence FINAL : public ZoneObject {
  public:
-  InstructionSequence(Linkage* linkage, Graph* graph, Schedule* schedule)
-      : graph_(graph),
-        linkage_(linkage),
-        schedule_(schedule),
-        constants_(ConstantMap::key_compare(),
-                   ConstantMap::allocator_type(zone())),
-        immediates_(zone()),
-        instructions_(zone()),
-        next_virtual_register_(graph->NodeCount()),
-        pointer_maps_(zone()),
-        doubles_(std::less<int>(), VirtualRegisterSet::allocator_type(zone())),
-        references_(std::less<int>(),
-                    VirtualRegisterSet::allocator_type(zone())),
-        deoptimization_entries_(zone()) {}
+  static InstructionBlocks* InstructionBlocksFor(Zone* zone,
+                                                 const Schedule* schedule);
+  // Puts the deferred blocks last.
+  static void ComputeAssemblyOrder(InstructionBlocks* blocks);
+
+  InstructionSequence(Zone* zone, InstructionBlocks* instruction_blocks);
 
   int NextVirtualRegister() { return next_virtual_register_++; }
   int VirtualRegisterCount() const { return next_virtual_register_; }
 
-  int ValueCount() const { return graph_->NodeCount(); }
-
-  int BasicBlockCount() const {
-    return static_cast<int>(schedule_->rpo_order()->size());
+  const InstructionBlocks& instruction_blocks() const {
+    return *instruction_blocks_;
   }
 
-  BasicBlock* BlockAt(int rpo_number) const {
-    return (*schedule_->rpo_order())[rpo_number];
+  int InstructionBlockCount() const {
+    return static_cast<int>(instruction_blocks_->size());
   }
 
-  BasicBlock* GetContainingLoop(BasicBlock* block) {
-    return block->loop_header_;
+  InstructionBlock* InstructionBlockAt(BasicBlock::RpoNumber rpo_number) {
+    return instruction_blocks_->at(rpo_number.ToSize());
   }
 
-  int GetLoopEnd(BasicBlock* block) const { return block->loop_end_; }
+  int LastLoopInstructionIndex(const InstructionBlock* block) {
+    return instruction_blocks_->at(block->loop_end().ToSize() - 1)
+        ->last_instruction_index();
+  }
 
-  BasicBlock* GetBasicBlock(int instruction_index);
+  const InstructionBlock* InstructionBlockAt(
+      BasicBlock::RpoNumber rpo_number) const {
+    return instruction_blocks_->at(rpo_number.ToSize());
+  }
 
-  int GetVirtualRegister(Node* node) const { return node->id(); }
+  const InstructionBlock* GetInstructionBlock(int instruction_index) const;
 
   bool IsReference(int virtual_register) const;
   bool IsDouble(int virtual_register) const;
@@ -843,12 +982,12 @@
 
   void AddGapMove(int index, InstructionOperand* from, InstructionOperand* to);
 
-  Label* GetLabel(BasicBlock* block);
-  BlockStartInstruction* GetBlockStart(BasicBlock* block);
+  BlockStartInstruction* GetBlockStart(BasicBlock::RpoNumber rpo) const;
 
   typedef InstructionDeque::const_iterator const_iterator;
   const_iterator begin() const { return instructions_.begin(); }
   const_iterator end() const { return instructions_.end(); }
+  const InstructionDeque& instructions() const { return instructions_; }
 
   GapInstruction* GapAt(int index) const {
     return GapInstruction::cast(InstructionAt(index));
@@ -860,22 +999,22 @@
     return instructions_[index];
   }
 
-  Frame* frame() { return &frame_; }
-  Graph* graph() const { return graph_; }
   Isolate* isolate() const { return zone()->isolate(); }
-  Linkage* linkage() const { return linkage_; }
-  Schedule* schedule() const { return schedule_; }
   const PointerMapDeque* pointer_maps() const { return &pointer_maps_; }
-  Zone* zone() const { return graph_->zone(); }
+  Zone* zone() const { return zone_; }
 
-  // Used by the code generator while adding instructions.
-  int AddInstruction(Instruction* instr, BasicBlock* block);
-  void StartBlock(BasicBlock* block);
-  void EndBlock(BasicBlock* block);
+  // Used by the instruction selector while adding instructions.
+  int AddInstruction(Instruction* instr);
+  void StartBlock(BasicBlock::RpoNumber rpo);
+  void EndBlock(BasicBlock::RpoNumber rpo);
 
-  void AddConstant(int virtual_register, Constant constant) {
+  int AddConstant(int virtual_register, Constant constant) {
+    // TODO(titzer): allow RPO numbers as constants?
+    DCHECK(constant.type() != Constant::kRpoNumber);
+    DCHECK(virtual_register >= 0 && virtual_register < next_virtual_register_);
     DCHECK(constants_.find(virtual_register) == constants_.end());
     constants_.insert(std::make_pair(virtual_register, constant));
+    return virtual_register;
   }
   Constant GetConstant(int virtual_register) const {
     ConstantMap::const_iterator it = constants_.find(virtual_register);
@@ -884,8 +1023,8 @@
     return it->second;
   }
 
-  typedef ConstantDeque Immediates;
-  const Immediates& immediates() const { return immediates_; }
+  typedef ZoneVector<Constant> Immediates;
+  Immediates& immediates() { return immediates_; }
 
   int AddImmediate(Constant constant) {
     int index = static_cast<int>(immediates_.size());
@@ -912,26 +1051,43 @@
   FrameStateDescriptor* GetFrameStateDescriptor(StateId deoptimization_id);
   int GetFrameStateDescriptorCount();
 
+  BasicBlock::RpoNumber InputRpo(Instruction* instr, size_t index) {
+    InstructionOperand* operand = instr->InputAt(index);
+    Constant constant = operand->IsImmediate() ? GetImmediate(operand->index())
+                                               : GetConstant(operand->index());
+    return constant.ToRpoNumber();
+  }
+
  private:
-  friend OStream& operator<<(OStream& os, const InstructionSequence& code);
+  friend std::ostream& operator<<(std::ostream& os,
+                                  const PrintableInstructionSequence& code);
 
   typedef std::set<int, std::less<int>, ZoneIntAllocator> VirtualRegisterSet;
 
-  Graph* graph_;
-  Linkage* linkage_;
-  Schedule* schedule_;
+  Zone* const zone_;
+  InstructionBlocks* const instruction_blocks_;
+  IntVector block_starts_;
   ConstantMap constants_;
-  ConstantDeque immediates_;
+  Immediates immediates_;
   InstructionDeque instructions_;
   int next_virtual_register_;
   PointerMapDeque pointer_maps_;
   VirtualRegisterSet doubles_;
   VirtualRegisterSet references_;
-  Frame frame_;
   DeoptimizationVector deoptimization_entries_;
+
+  DISALLOW_COPY_AND_ASSIGN(InstructionSequence);
 };
 
-OStream& operator<<(OStream& os, const InstructionSequence& code);
+
+struct PrintableInstructionSequence {
+  const RegisterConfiguration* register_configuration_;
+  const InstructionSequence* sequence_;
+};
+
+
+std::ostream& operator<<(std::ostream& os,
+                         const PrintableInstructionSequence& code);
 
 }  // namespace compiler
 }  // namespace internal
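
The Printable* structs above all follow one idiom: operator<< cannot take
extra arguments, so each object is bundled with the RegisterConfiguration it
needs for printing. A minimal sketch of the pattern, with illustrative names
rather than the V8 types:

    #include <iostream>

    struct Config { const char* reg_prefix; };
    struct Operand { int index; };

    struct PrintableOperand {
      const Config* config;
      const Operand* op;
    };

    std::ostream& operator<<(std::ostream& os, const PrintableOperand& p) {
      // The wrapper smuggles the configuration into the stream operator.
      return os << p.config->reg_prefix << p.op->index;
    }

    int main() {
      Config config = {"r"};
      Operand op = {7};
      std::cout << PrintableOperand{&config, &op} << "\n";  // prints r7
      return 0;
    }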
diff --git a/src/compiler/js-builtin-reducer-unittest.cc b/src/compiler/js-builtin-reducer-unittest.cc
deleted file mode 100644
index 51561d0..0000000
--- a/src/compiler/js-builtin-reducer-unittest.cc
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/graph-unittest.h"
-#include "src/compiler/js-builtin-reducer.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/node-properties-inl.h"
-#include "src/compiler/typer.h"
-#include "testing/gmock-support.h"
-
-using testing::Capture;
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class JSBuiltinReducerTest : public GraphTest {
- public:
-  JSBuiltinReducerTest() : javascript_(zone()) {}
-
- protected:
-  Reduction Reduce(Node* node) {
-    Typer typer(zone());
-    MachineOperatorBuilder machine;
-    JSGraph jsgraph(graph(), common(), javascript(), &typer, &machine);
-    JSBuiltinReducer reducer(&jsgraph);
-    return reducer.Reduce(node);
-  }
-
-  Node* Parameter(Type* t, int32_t index = 0) {
-    Node* n = graph()->NewNode(common()->Parameter(index), graph()->start());
-    NodeProperties::SetBounds(n, Bounds(Type::None(), t));
-    return n;
-  }
-
-  Node* UndefinedConstant() {
-    return HeapConstant(
-        Unique<HeapObject>::CreateImmovable(factory()->undefined_value()));
-  }
-
-  JSOperatorBuilder* javascript() { return &javascript_; }
-
- private:
-  JSOperatorBuilder javascript_;
-};
-
-
-namespace {
-
-// TODO(mstarzinger): Find a common place and unify with test-js-typed-lowering.
-Type* const kNumberTypes[] = {
-    Type::UnsignedSmall(),   Type::OtherSignedSmall(), Type::OtherUnsigned31(),
-    Type::OtherUnsigned32(), Type::OtherSigned32(),    Type::SignedSmall(),
-    Type::Signed32(),        Type::Unsigned32(),       Type::Integral32(),
-    Type::MinusZero(),       Type::NaN(),              Type::OtherNumber(),
-    Type::OrderedNumber(),   Type::Number()};
-
-}  // namespace
-
-
-// -----------------------------------------------------------------------------
-// Math.sqrt
-
-
-TEST_F(JSBuiltinReducerTest, MathSqrt) {
-  Handle<JSFunction> f(isolate()->context()->math_sqrt_fun());
-
-  TRACED_FOREACH(Type*, t0, kNumberTypes) {
-    Node* p0 = Parameter(t0, 0);
-    Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
-    Node* call = graph()->NewNode(javascript()->Call(3, NO_CALL_FUNCTION_FLAGS),
-                                  fun, UndefinedConstant(), p0);
-    Reduction r = Reduce(call);
-
-    ASSERT_TRUE(r.Changed());
-    EXPECT_THAT(r.replacement(), IsFloat64Sqrt(p0));
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// Math.max
-
-
-TEST_F(JSBuiltinReducerTest, MathMax0) {
-  Handle<JSFunction> f(isolate()->context()->math_max_fun());
-
-  Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
-  Node* call = graph()->NewNode(javascript()->Call(2, NO_CALL_FUNCTION_FLAGS),
-                                fun, UndefinedConstant());
-  Reduction r = Reduce(call);
-
-  ASSERT_TRUE(r.Changed());
-  EXPECT_THAT(r.replacement(), IsNumberConstant(-V8_INFINITY));
-}
-
-
-TEST_F(JSBuiltinReducerTest, MathMax1) {
-  Handle<JSFunction> f(isolate()->context()->math_max_fun());
-
-  TRACED_FOREACH(Type*, t0, kNumberTypes) {
-    Node* p0 = Parameter(t0, 0);
-    Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
-    Node* call = graph()->NewNode(javascript()->Call(3, NO_CALL_FUNCTION_FLAGS),
-                                  fun, UndefinedConstant(), p0);
-    Reduction r = Reduce(call);
-
-    ASSERT_TRUE(r.Changed());
-    EXPECT_THAT(r.replacement(), p0);
-  }
-}
-
-
-TEST_F(JSBuiltinReducerTest, MathMax2) {
-  Handle<JSFunction> f(isolate()->context()->math_max_fun());
-
-  TRACED_FOREACH(Type*, t0, kNumberTypes) {
-    TRACED_FOREACH(Type*, t1, kNumberTypes) {
-      Node* p0 = Parameter(t0, 0);
-      Node* p1 = Parameter(t1, 1);
-      Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
-      Node* call =
-          graph()->NewNode(javascript()->Call(4, NO_CALL_FUNCTION_FLAGS), fun,
-                           UndefinedConstant(), p0, p1);
-      Reduction r = Reduce(call);
-
-      if (t0->Is(Type::Integral32()) && t1->Is(Type::Integral32())) {
-        Capture<Node*> branch;
-        ASSERT_TRUE(r.Changed());
-        EXPECT_THAT(
-            r.replacement(),
-            IsPhi(kMachNone, p1, p0,
-                  IsMerge(IsIfTrue(CaptureEq(&branch)),
-                          IsIfFalse(AllOf(CaptureEq(&branch),
-                                          IsBranch(IsNumberLessThan(p0, p1),
-                                                   graph()->start()))))));
-      } else {
-        ASSERT_FALSE(r.Changed());
-        EXPECT_EQ(IrOpcode::kJSCallFunction, call->opcode());
-      }
-    }
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// Math.imul
-
-
-TEST_F(JSBuiltinReducerTest, MathImul) {
-  Handle<JSFunction> f(isolate()->context()->math_imul_fun());
-
-  TRACED_FOREACH(Type*, t0, kNumberTypes) {
-    TRACED_FOREACH(Type*, t1, kNumberTypes) {
-      Node* p0 = Parameter(t0, 0);
-      Node* p1 = Parameter(t1, 1);
-      Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
-      Node* call =
-          graph()->NewNode(javascript()->Call(4, NO_CALL_FUNCTION_FLAGS), fun,
-                           UndefinedConstant(), p0, p1);
-      Reduction r = Reduce(call);
-
-      if (t0->Is(Type::Integral32()) && t1->Is(Type::Integral32())) {
-        ASSERT_TRUE(r.Changed());
-        EXPECT_THAT(r.replacement(), IsInt32Mul(p0, p1));
-      } else {
-        ASSERT_FALSE(r.Changed());
-        EXPECT_EQ(IrOpcode::kJSCallFunction, call->opcode());
-      }
-    }
-  }
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/js-builtin-reducer.cc b/src/compiler/js-builtin-reducer.cc
index c57ac33..263b0fe 100644
--- a/src/compiler/js-builtin-reducer.cc
+++ b/src/compiler/js-builtin-reducer.cc
@@ -2,8 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/compiler/diamond.h"
 #include "src/compiler/graph-inl.h"
 #include "src/compiler/js-builtin-reducer.h"
+#include "src/compiler/js-graph.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties-inl.h"
 #include "src/types.h"
@@ -80,7 +82,7 @@
   int GetJSCallArity() {
     DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
     // Skip first (i.e. callee) and second (i.e. receiver) operand.
-    return OperatorProperties::GetValueInputCount(node_->op()) - 2;
+    return node_->op()->ValueInputCount() - 2;
   }
 
   Node* GetJSCallInput(int index) {
@@ -95,6 +97,30 @@
 };
 
 
+JSBuiltinReducer::JSBuiltinReducer(JSGraph* jsgraph)
+    : jsgraph_(jsgraph), simplified_(jsgraph->zone()) {}
+
+
+// ECMA-262, section 15.8.2.1.
+Reduction JSBuiltinReducer::ReduceMathAbs(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Unsigned32())) {
+    // Math.abs(a:uint32) -> a
+    return Replace(r.left());
+  }
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.abs(a:number) -> (a > 0 ? a : 0 - a)
+    Node* const value = r.left();
+    Node* const zero = jsgraph()->ZeroConstant();
+    return Replace(graph()->NewNode(
+        common()->Select(kMachNone),
+        graph()->NewNode(simplified()->NumberLessThan(), zero, value), value,
+        graph()->NewNode(simplified()->NumberSubtract(), zero, value)));
+  }
+  return NoChange();
+}
+
+
 // ECMA-262, section 15.8.2.17.
 Reduction JSBuiltinReducer::ReduceMathSqrt(Node* node) {
   JSCallReduction r(node);
@@ -122,16 +148,11 @@
     // Math.max(a:int32, b:int32, ...)
     Node* value = r.GetJSCallInput(0);
     for (int i = 1; i < r.GetJSCallArity(); i++) {
-      Node* p = r.GetJSCallInput(i);
-      Node* control = graph()->start();
-      Node* tag = graph()->NewNode(simplified()->NumberLessThan(), value, p);
-
-      Node* branch = graph()->NewNode(common()->Branch(), tag, control);
-      Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-      Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-
-      value = graph()->NewNode(common()->Phi(kMachNone, 2), p, value, merge);
+      Node* const input = r.GetJSCallInput(i);
+      value = graph()->NewNode(
+          common()->Select(kMachNone),
+          graph()->NewNode(simplified()->NumberLessThan(), input, value), value,
+          input);
     }
     return Replace(value);
   }
@@ -151,24 +172,84 @@
 }
 
 
+// ES6 draft 08-24-14, section 20.2.2.17.
+Reduction JSBuiltinReducer::ReduceMathFround(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.fround(a:number) -> TruncateFloat64ToFloat32(a)
+    Node* value =
+        graph()->NewNode(machine()->TruncateFloat64ToFloat32(), r.left());
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+
+// ES6 draft 10-14-14, section 20.2.2.16.
+Reduction JSBuiltinReducer::ReduceMathFloor(Node* node) {
+  if (!machine()->HasFloat64Floor()) return NoChange();
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.floor(a:number) -> Float64Floor(a)
+    Node* value = graph()->NewNode(machine()->Float64Floor(), r.left());
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+
+// ES6 draft 10-14-14, section 20.2.2.10.
+Reduction JSBuiltinReducer::ReduceMathCeil(Node* node) {
+  if (!machine()->HasFloat64Ceil()) return NoChange();
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.ceil(a:number) -> Float64Ceil(a)
+    Node* value = graph()->NewNode(machine()->Float64Ceil(), r.left());
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+
 Reduction JSBuiltinReducer::Reduce(Node* node) {
   JSCallReduction r(node);
 
   // Dispatch according to the BuiltinFunctionId if present.
   if (!r.HasBuiltinFunctionId()) return NoChange();
   switch (r.GetBuiltinFunctionId()) {
+    case kMathAbs:
+      return ReplaceWithPureReduction(node, ReduceMathAbs(node));
     case kMathSqrt:
       return ReplaceWithPureReduction(node, ReduceMathSqrt(node));
     case kMathMax:
       return ReplaceWithPureReduction(node, ReduceMathMax(node));
     case kMathImul:
       return ReplaceWithPureReduction(node, ReduceMathImul(node));
+    case kMathFround:
+      return ReplaceWithPureReduction(node, ReduceMathFround(node));
+    case kMathFloor:
+      return ReplaceWithPureReduction(node, ReduceMathFloor(node));
+    case kMathCeil:
+      return ReplaceWithPureReduction(node, ReduceMathCeil(node));
     default:
       break;
   }
   return NoChange();
 }
 
+
+Graph* JSBuiltinReducer::graph() const { return jsgraph()->graph(); }
+
+
+CommonOperatorBuilder* JSBuiltinReducer::common() const {
+  return jsgraph()->common();
+}
+
+
+MachineOperatorBuilder* JSBuiltinReducer::machine() const {
+  return jsgraph()->machine();
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
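
For reference, the scalar functions that the Select-based reductions above
compute, written as plain C++; this sketches the semantics only, not the graph
construction, and NumberSubtract(0, a) stands in for negation since the
simplified operator set has no dedicated negate:

    #include <cassert>

    // Math.abs(a:number) -> Select(0 < a, a, 0 - a)
    double AbsLowered(double a) { return (0.0 < a) ? a : (0.0 - a); }

    // One Math.max step -> Select(input < value, value, input)
    double MaxStep(double value, double input) {
      return (input < value) ? value : input;
    }

    int main() {
      assert(AbsLowered(-4.0) == 4.0);
      assert(MaxStep(MaxStep(1.0, 3.0), 2.0) == 3.0);  // Math.max(1, 3, 2)
      return 0;
    }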
diff --git a/src/compiler/js-builtin-reducer.h b/src/compiler/js-builtin-reducer.h
index 13927f6..ac6f266 100644
--- a/src/compiler/js-builtin-reducer.h
+++ b/src/compiler/js-builtin-reducer.h
@@ -6,33 +6,39 @@
 #define V8_COMPILER_JS_BUILTIN_REDUCER_H_
 
 #include "src/compiler/graph-reducer.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/machine-operator.h"
-#include "src/compiler/node.h"
 #include "src/compiler/simplified-operator.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
+// Forward declarations.
+class CommonOperatorBuilder;
+class JSGraph;
+class MachineOperatorBuilder;
+
+
 class JSBuiltinReducer FINAL : public Reducer {
  public:
-  explicit JSBuiltinReducer(JSGraph* jsgraph)
-      : jsgraph_(jsgraph), simplified_(jsgraph->zone()) {}
-  virtual ~JSBuiltinReducer() {}
+  explicit JSBuiltinReducer(JSGraph* jsgraph);
+  ~JSBuiltinReducer() FINAL {}
 
-  virtual Reduction Reduce(Node* node) OVERRIDE;
+  Reduction Reduce(Node* node) FINAL;
 
  private:
-  JSGraph* jsgraph() const { return jsgraph_; }
-  Graph* graph() const { return jsgraph_->graph(); }
-  CommonOperatorBuilder* common() const { return jsgraph_->common(); }
-  MachineOperatorBuilder* machine() const { return jsgraph_->machine(); }
-  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
-
+  Reduction ReduceMathAbs(Node* node);
   Reduction ReduceMathSqrt(Node* node);
   Reduction ReduceMathMax(Node* node);
   Reduction ReduceMathImul(Node* node);
+  Reduction ReduceMathFround(Node* node);
+  Reduction ReduceMathFloor(Node* node);
+  Reduction ReduceMathCeil(Node* node);
+
+  JSGraph* jsgraph() const { return jsgraph_; }
+  Graph* graph() const;
+  CommonOperatorBuilder* common() const;
+  MachineOperatorBuilder* machine() const;
+  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
 
   JSGraph* jsgraph_;
   SimplifiedOperatorBuilder simplified_;
diff --git a/src/compiler/js-context-specialization.cc b/src/compiler/js-context-specialization.cc
index cd8932b..a700b47 100644
--- a/src/compiler/js-context-specialization.cc
+++ b/src/compiler/js-context-specialization.cc
@@ -2,12 +2,12 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/compiler/common-operator.h"
-#include "src/compiler/generic-node-inl.h"
-#include "src/compiler/graph-inl.h"
 #include "src/compiler/js-context-specialization.h"
+
+#include "src/compiler.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-inl.h"
 #include "src/compiler/js-operator.h"
-#include "src/compiler/node-aux-data-inl.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties-inl.h"
 
@@ -15,46 +15,19 @@
 namespace internal {
 namespace compiler {
 
-class ContextSpecializationVisitor : public NullNodeVisitor {
- public:
-  explicit ContextSpecializationVisitor(JSContextSpecializer* spec)
-      : spec_(spec) {}
-
-  GenericGraphVisit::Control Post(Node* node) {
-    switch (node->opcode()) {
-      case IrOpcode::kJSLoadContext: {
-        Reduction r = spec_->ReduceJSLoadContext(node);
-        if (r.Changed() && r.replacement() != node) {
-          NodeProperties::ReplaceWithValue(node, r.replacement());
-          node->RemoveAllInputs();
-        }
-        break;
-      }
-      case IrOpcode::kJSStoreContext: {
-        Reduction r = spec_->ReduceJSStoreContext(node);
-        if (r.Changed() && r.replacement() != node) {
-          NodeProperties::ReplaceWithValue(node, r.replacement());
-          node->RemoveAllInputs();
-        }
-        break;
-      }
-      default:
-        break;
-    }
-    return GenericGraphVisit::CONTINUE;
+Reduction JSContextSpecializer::Reduce(Node* node) {
+  if (node == context_) {
+    Node* constant = jsgraph_->Constant(info_->context());
+    NodeProperties::ReplaceWithValue(node, constant);
+    return Replace(constant);
   }
-
- private:
-  JSContextSpecializer* spec_;
-};
-
-
-void JSContextSpecializer::SpecializeToContext() {
-  NodeProperties::ReplaceWithValue(context_,
-                                   jsgraph_->Constant(info_->context()));
-
-  ContextSpecializationVisitor visitor(this);
-  jsgraph_->graph()->VisitNodeInputsFromEnd(&visitor);
+  if (node->opcode() == IrOpcode::kJSLoadContext) {
+    return ReduceJSLoadContext(node);
+  }
+  if (node->opcode() == IrOpcode::kJSStoreContext) {
+    return ReduceJSStoreContext(node);
+  }
+  return NoChange();
 }
 
 
@@ -64,14 +37,14 @@
   HeapObjectMatcher<Context> m(NodeProperties::GetValueInput(node, 0));
   // If the context is not constant, no reduction can occur.
   if (!m.HasValue()) {
-    return Reducer::NoChange();
+    return NoChange();
   }
 
-  ContextAccess access = OpParameter<ContextAccess>(node);
+  const ContextAccess& access = ContextAccessOf(node->op());
 
   // Find the right parent context.
   Context* context = *m.Value().handle();
-  for (int i = access.depth(); i > 0; --i) {
+  for (size_t i = access.depth(); i > 0; --i) {
     context = context->previous();
   }
 
@@ -79,30 +52,32 @@
   if (!access.immutable()) {
     // The access does not have to look up a parent, nothing to fold.
     if (access.depth() == 0) {
-      return Reducer::NoChange();
+      return NoChange();
     }
     const Operator* op = jsgraph_->javascript()->LoadContext(
         0, access.index(), access.immutable());
     node->set_op(op);
     Handle<Object> context_handle = Handle<Object>(context, info_->isolate());
     node->ReplaceInput(0, jsgraph_->Constant(context_handle));
-    return Reducer::Changed(node);
+    return Changed(node);
   }
-  Handle<Object> value =
-      Handle<Object>(context->get(access.index()), info_->isolate());
+  Handle<Object> value = Handle<Object>(
+      context->get(static_cast<int>(access.index())), info_->isolate());
 
   // Even though the context slot is immutable, the context might have escaped
   // before the function to which it belongs has initialized the slot.
   // We must be conservative and check if the value in the slot is currently the
   // hole or undefined. If it is neither of these, then it must be initialized.
   if (value->IsUndefined() || value->IsTheHole()) {
-    return Reducer::NoChange();
+    return NoChange();
   }
 
   // Success. The context load can be replaced with the constant.
   // TODO(titzer): record the specialization for sharing code across multiple
   // contexts that have the same value in the corresponding context slot.
-  return Reducer::Replace(jsgraph_->Constant(value));
+  Node* constant = jsgraph_->Constant(value);
+  NodeProperties::ReplaceWithValue(node, constant);
+  return Replace(constant);
 }
 
 
@@ -112,19 +87,19 @@
   HeapObjectMatcher<Context> m(NodeProperties::GetValueInput(node, 0));
   // If the context is not constant, no reduction can occur.
   if (!m.HasValue()) {
-    return Reducer::NoChange();
+    return NoChange();
   }
 
-  ContextAccess access = OpParameter<ContextAccess>(node);
+  const ContextAccess& access = ContextAccessOf(node->op());
 
   // The access does not have to look up a parent, nothing to fold.
   if (access.depth() == 0) {
-    return Reducer::NoChange();
+    return NoChange();
   }
 
   // Find the right parent context.
   Context* context = *m.Value().handle();
-  for (int i = access.depth(); i > 0; --i) {
+  for (size_t i = access.depth(); i > 0; --i) {
     context = context->previous();
   }
 
@@ -133,7 +108,7 @@
   Handle<Object> new_context_handle = Handle<Object>(context, info_->isolate());
   node->ReplaceInput(0, jsgraph_->Constant(new_context_handle));
 
-  return Reducer::Changed(node);
+  return Changed(node);
 }
 
 }  // namespace compiler
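
A standalone sketch of the two steps ReduceJSLoadContext performs above: hop
depth() links up the context chain, then fold only when the immutable slot
already holds an initialized value. Types here are illustrative stand-ins, not
the V8 heap objects:

    #include <cstddef>

    struct Ctx {
      Ctx* previous;          // Enclosing context.
      const void* slots[16];  // nullptr stands in for hole/undefined.
    };

    // Mirrors the loop above: walk |depth| times to the right parent context.
    const Ctx* WalkUp(const Ctx* context, size_t depth) {
      for (size_t i = depth; i > 0; --i) context = context->previous;
      return context;
    }

    // Conservative check: an immutable slot may still be uninitialized when
    // the context escaped before its owner wrote it, so fold only real values.
    bool CanFold(const Ctx* context, size_t index) {
      return context->slots[index] != nullptr;
    }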
diff --git a/src/compiler/js-context-specialization.h b/src/compiler/js-context-specialization.h
index b8b50ed..298d3a3 100644
--- a/src/compiler/js-context-specialization.h
+++ b/src/compiler/js-context-specialization.h
@@ -16,12 +16,14 @@
 
 // Specializes a given JSGraph to a given context, potentially constant folding
 // some {LoadContext} nodes or strength reducing some {StoreContext} nodes.
-class JSContextSpecializer {
+class JSContextSpecializer : public Reducer {
  public:
   JSContextSpecializer(CompilationInfo* info, JSGraph* jsgraph, Node* context)
       : info_(info), jsgraph_(jsgraph), context_(context) {}
 
-  void SpecializeToContext();
+  Reduction Reduce(Node* node) OVERRIDE;
+
+  // Visible for unit testing.
   Reduction ReduceJSLoadContext(Node* node);
   Reduction ReduceJSStoreContext(Node* node);
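
With this change the specializer runs under the generic graph reducer instead
of driving its own traversal. A simplified skeleton of the contract a Reducer
implements, illustrative rather than the exact V8 interface:

    struct Node;

    class Reduction {
     public:
      explicit Reduction(Node* replacement = nullptr)
          : replacement_(replacement) {}
      Node* replacement() const { return replacement_; }
      bool Changed() const { return replacement_ != nullptr; }

     private:
      Node* replacement_;
    };

    // Implementations answer one question per node: NoChange(), Changed(node)
    // for an in-place rewrite, or Replace(other) for a substitution. The
    // driver keeps re-running reducers until a fixpoint is reached.
    class Reducer {
     public:
      virtual ~Reducer() {}
      virtual Reduction Reduce(Node* node) = 0;
    };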
 
diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc
index 300604e..4886442 100644
--- a/src/compiler/js-generic-lowering.cc
+++ b/src/compiler/js-generic-lowering.cc
@@ -9,6 +9,7 @@
 #include "src/compiler/js-generic-lowering.h"
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties-inl.h"
 #include "src/unique.h"
 
@@ -19,7 +20,7 @@
 JSGenericLowering::JSGenericLowering(CompilationInfo* info, JSGraph* jsgraph)
     : info_(info),
       jsgraph_(jsgraph),
-      linkage_(new (jsgraph->zone()) Linkage(info)) {}
+      linkage_(new (jsgraph->zone()) Linkage(jsgraph->zone(), info)) {}
 
 
 void JSGenericLowering::PatchOperator(Node* node, const Operator* op) {
@@ -32,40 +33,25 @@
 }
 
 
-Node* JSGenericLowering::SmiConstant(int32_t immediate) {
-  return jsgraph()->SmiConstant(immediate);
-}
-
-
-Node* JSGenericLowering::Int32Constant(int immediate) {
-  return jsgraph()->Int32Constant(immediate);
-}
-
-
-Node* JSGenericLowering::CodeConstant(Handle<Code> code) {
-  return jsgraph()->HeapConstant(code);
-}
-
-
-Node* JSGenericLowering::FunctionConstant(Handle<JSFunction> function) {
-  return jsgraph()->HeapConstant(function);
-}
-
-
-Node* JSGenericLowering::ExternalConstant(ExternalReference ref) {
-  return jsgraph()->ExternalConstant(ref);
-}
-
-
 Reduction JSGenericLowering::Reduce(Node* node) {
   switch (node->opcode()) {
-#define DECLARE_CASE(x) \
-  case IrOpcode::k##x:  \
-    Lower##x(node);     \
-    break;
-    DECLARE_CASE(Branch)
+#define DECLARE_CASE(x)  \
+    case IrOpcode::k##x: \
+      Lower##x(node);    \
+      break;
     JS_OP_LIST(DECLARE_CASE)
 #undef DECLARE_CASE
+    case IrOpcode::kBranch:
+      // TODO(mstarzinger): If typing is enabled then simplified lowering will
+      // have inserted the correct ChangeBoolToBit, otherwise we need to perform
+      // poor-man's representation inference here and insert a manual change.
+      if (!info()->is_typing_enabled()) {
+        Node* test = graph()->NewNode(machine()->WordEqual(), node->InputAt(0),
+                                      jsgraph()->TrueConstant());
+        node->ReplaceInput(0, test);
+        break;
+      }
+      // Fall-through.
     default:
       // Nothing to see.
       return NoChange();
@@ -93,18 +79,18 @@
 #undef REPLACE_BINARY_OP_IC_CALL
 
 
-#define REPLACE_COMPARE_IC_CALL(op, token, pure)  \
+#define REPLACE_COMPARE_IC_CALL(op, token)        \
   void JSGenericLowering::Lower##op(Node* node) { \
-    ReplaceWithCompareIC(node, token, pure);      \
+    ReplaceWithCompareIC(node, token);            \
   }
-REPLACE_COMPARE_IC_CALL(JSEqual, Token::EQ, false)
-REPLACE_COMPARE_IC_CALL(JSNotEqual, Token::NE, false)
-REPLACE_COMPARE_IC_CALL(JSStrictEqual, Token::EQ_STRICT, true)
-REPLACE_COMPARE_IC_CALL(JSStrictNotEqual, Token::NE_STRICT, true)
-REPLACE_COMPARE_IC_CALL(JSLessThan, Token::LT, false)
-REPLACE_COMPARE_IC_CALL(JSGreaterThan, Token::GT, false)
-REPLACE_COMPARE_IC_CALL(JSLessThanOrEqual, Token::LTE, false)
-REPLACE_COMPARE_IC_CALL(JSGreaterThanOrEqual, Token::GTE, false)
+REPLACE_COMPARE_IC_CALL(JSEqual, Token::EQ)
+REPLACE_COMPARE_IC_CALL(JSNotEqual, Token::NE)
+REPLACE_COMPARE_IC_CALL(JSStrictEqual, Token::EQ_STRICT)
+REPLACE_COMPARE_IC_CALL(JSStrictNotEqual, Token::NE_STRICT)
+REPLACE_COMPARE_IC_CALL(JSLessThan, Token::LT)
+REPLACE_COMPARE_IC_CALL(JSGreaterThan, Token::GT)
+REPLACE_COMPARE_IC_CALL(JSLessThanOrEqual, Token::LTE)
+REPLACE_COMPARE_IC_CALL(JSGreaterThanOrEqual, Token::GTE)
 #undef REPLACE_COMPARE_IC_CALL
 
 
@@ -119,7 +105,7 @@
 REPLACE_RUNTIME_CALL(JSCreateWithContext, Runtime::kPushWithContext)
 REPLACE_RUNTIME_CALL(JSCreateBlockContext, Runtime::kPushBlockContext)
 REPLACE_RUNTIME_CALL(JSCreateModuleContext, Runtime::kPushModuleContext)
-REPLACE_RUNTIME_CALL(JSCreateGlobalContext, Runtime::kAbort)
+REPLACE_RUNTIME_CALL(JSCreateScriptContext, Runtime::kAbort)
 #undef REPLACE_RUNTIME
 
 
@@ -140,8 +126,7 @@
 }
 
 
-void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token,
-                                             bool pure) {
+void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token) {
   Callable callable = CodeFactory::CompareIC(isolate(), token);
   bool has_frame_state = OperatorProperties::HasFrameStateInput(node->op());
   CallDescriptor* desc_compare = linkage()->GetStubCallDescriptor(
@@ -149,11 +134,11 @@
       CallDescriptor::kPatchableCallSiteWithNop | FlagsForNode(node));
   NodeVector inputs(zone());
   inputs.reserve(node->InputCount() + 1);
-  inputs.push_back(CodeConstant(callable.code()));
+  inputs.push_back(jsgraph()->HeapConstant(callable.code()));
   inputs.push_back(NodeProperties::GetValueInput(node, 0));
   inputs.push_back(NodeProperties::GetValueInput(node, 1));
   inputs.push_back(NodeProperties::GetContextInput(node));
-  if (pure) {
+  if (node->op()->HasProperty(Operator::kPure)) {
     // A pure (strict) comparison doesn't have an effect, control or frame
     // state.  But for the graph, we need to add control and effect inputs.
     DCHECK(!has_frame_state);
@@ -172,7 +157,7 @@
                        static_cast<int>(inputs.size()), &inputs.front());
 
   node->ReplaceInput(0, compare);
-  node->ReplaceInput(1, SmiConstant(token));
+  node->ReplaceInput(1, jsgraph()->SmiConstant(token));
 
   if (has_frame_state) {
     // Remove the frame state from inputs.
@@ -185,9 +170,10 @@
 
 void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
                                             CallDescriptor::Flags flags) {
+  Operator::Properties properties = node->op()->properties();
   CallDescriptor* desc = linkage()->GetStubCallDescriptor(
-      callable.descriptor(), 0, flags | FlagsForNode(node));
-  Node* stub_code = CodeConstant(callable.code());
+      callable.descriptor(), 0, flags | FlagsForNode(node), properties);
+  Node* stub_code = jsgraph()->HeapConstant(callable.code());
   PatchInsertInput(node, 0, stub_code);
   PatchOperator(node, common()->Call(desc));
 }
@@ -196,16 +182,17 @@
 void JSGenericLowering::ReplaceWithBuiltinCall(Node* node,
                                                Builtins::JavaScript id,
                                                int nargs) {
+  Operator::Properties properties = node->op()->properties();
   Callable callable =
       CodeFactory::CallFunction(isolate(), nargs - 1, NO_CALL_FUNCTION_FLAGS);
-  CallDescriptor* desc =
-      linkage()->GetStubCallDescriptor(callable.descriptor(), nargs);
+  CallDescriptor* desc = linkage()->GetStubCallDescriptor(
+      callable.descriptor(), nargs, FlagsForNode(node), properties);
   // TODO(mstarzinger): Accessing the builtins object this way prevents sharing
   // of code across native contexts. Fix this by loading from the given context.
   Handle<JSFunction> function(
       JSFunction::cast(info()->context()->builtins()->javascript_builtin(id)));
-  Node* stub_code = CodeConstant(callable.code());
-  Node* function_node = FunctionConstant(function);
+  Node* stub_code = jsgraph()->HeapConstant(callable.code());
+  Node* function_node = jsgraph()->HeapConstant(function);
   PatchInsertInput(node, 0, stub_code);
   PatchInsertInput(node, 1, function_node);
   PatchOperator(node, common()->Call(desc));
@@ -220,30 +207,15 @@
   int nargs = (nargs_override < 0) ? fun->nargs : nargs_override;
   CallDescriptor* desc =
       linkage()->GetRuntimeCallDescriptor(f, nargs, properties);
-  Node* ref = ExternalConstant(ExternalReference(f, isolate()));
-  Node* arity = Int32Constant(nargs);
-  if (!centrystub_constant_.is_set()) {
-    centrystub_constant_.set(CodeConstant(CEntryStub(isolate(), 1).GetCode()));
-  }
-  PatchInsertInput(node, 0, centrystub_constant_.get());
+  Node* ref = jsgraph()->ExternalConstant(ExternalReference(f, isolate()));
+  Node* arity = jsgraph()->Int32Constant(nargs);
+  PatchInsertInput(node, 0, jsgraph()->CEntryStubConstant(fun->result_size));
   PatchInsertInput(node, nargs + 1, ref);
   PatchInsertInput(node, nargs + 2, arity);
   PatchOperator(node, common()->Call(desc));
 }
 
 
-void JSGenericLowering::LowerBranch(Node* node) {
-  if (!info()->is_typing_enabled()) {
-    // TODO(mstarzinger): If typing is enabled then simplified lowering will
-    // have inserted the correct ChangeBoolToBit, otherwise we need to perform
-    // poor-man's representation inference here and insert manual change.
-    Node* test = graph()->NewNode(machine()->WordEqual(), node->InputAt(0),
-                                  jsgraph()->TrueConstant());
-    node->ReplaceInput(0, test);
-  }
-}
-
-
 void JSGenericLowering::LowerJSUnaryNot(Node* node) {
   Callable callable = CodeFactory::ToBoolean(
       isolate(), ToBooleanStub::RESULT_AS_INVERSE_ODDBALL);
@@ -260,7 +232,7 @@
 
 void JSGenericLowering::LowerJSToNumber(Node* node) {
   Callable callable = CodeFactory::ToNumber(isolate());
-  ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags);
+  ReplaceWithStubCall(node, callable, FlagsForNode(node));
 }
 
 
@@ -275,15 +247,25 @@
 
 
 void JSGenericLowering::LowerJSLoadProperty(Node* node) {
-  Callable callable = CodeFactory::KeyedLoadIC(isolate());
+  const LoadPropertyParameters& p = LoadPropertyParametersOf(node->op());
+  Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
+  if (FLAG_vector_ics) {
+    PatchInsertInput(node, 2, jsgraph()->SmiConstant(p.feedback().index()));
+    PatchInsertInput(node, 3, jsgraph()->HeapConstant(p.feedback().vector()));
+  }
   ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
 }
 
 
 void JSGenericLowering::LowerJSLoadNamed(Node* node) {
-  LoadNamedParameters p = OpParameter<LoadNamedParameters>(node);
-  Callable callable = CodeFactory::LoadIC(isolate(), p.contextual_mode);
-  PatchInsertInput(node, 1, jsgraph()->HeapConstant(p.name));
+  const LoadNamedParameters& p = LoadNamedParametersOf(node->op());
+  Callable callable =
+      CodeFactory::LoadICInOptimizedCode(isolate(), p.contextual_mode());
+  PatchInsertInput(node, 1, jsgraph()->HeapConstant(p.name()));
+  if (FLAG_vector_ics) {
+    PatchInsertInput(node, 2, jsgraph()->SmiConstant(p.feedback().index()));
+    PatchInsertInput(node, 3, jsgraph()->HeapConstant(p.feedback().vector()));
+  }
   ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
 }
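
When --vector-ics is enabled, the feedback slot index and the feedback vector become explicit call inputs, as the PatchInsertInput calls above show. A toy sketch of the resulting input layout (string labels stand in for graph nodes; positions are approximate):

#include <iostream>
#include <string>
#include <vector>

// Builds the approximate value-input list for a named load.
std::vector<std::string> LoadNamedInputs(bool vector_ics) {
  std::vector<std::string> inputs = {"receiver", "name"};
  if (vector_ics) {
    inputs.push_back("SmiConstant(feedback.index())");
    inputs.push_back("HeapConstant(feedback.vector())");
  }
  inputs.insert(inputs.begin(), "stub_code");  // ReplaceWithStubCall prepends
  return inputs;
}

int main() {
  for (const std::string& in : LoadNamedInputs(true)) std::cout << in << "\n";
}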
 
@@ -296,16 +278,16 @@
 
 
 void JSGenericLowering::LowerJSStoreNamed(Node* node) {
-  StoreNamedParameters params = OpParameter<StoreNamedParameters>(node);
-  Callable callable = CodeFactory::StoreIC(isolate(), params.strict_mode);
-  PatchInsertInput(node, 1, jsgraph()->HeapConstant(params.name));
+  const StoreNamedParameters& p = StoreNamedParametersOf(node->op());
+  Callable callable = CodeFactory::StoreIC(isolate(), p.strict_mode());
+  PatchInsertInput(node, 1, jsgraph()->HeapConstant(p.name()));
   ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
 }
 
 
 void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
   StrictMode strict_mode = OpParameter<StrictMode>(node);
-  PatchInsertInput(node, 2, SmiConstant(strict_mode));
+  PatchInsertInput(node, 2, jsgraph()->SmiConstant(strict_mode));
   ReplaceWithBuiltinCall(node, Builtins::DELETE, 3);
 }
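
SmiConstant wraps a small integer in V8's tagged representation. A runnable sketch of the 64-bit encoding, where the payload lives in the upper 32 bits and the low tag bit stays clear (the scheme is illustrated here standalone, not copied from V8):

#include <cassert>
#include <cstdint>

// Payload in the upper 32 bits; low tag bit 0 distinguishes a Smi from a
// heap pointer.
int64_t MakeSmi(int32_t value) { return static_cast<int64_t>(value) << 32; }
int32_t SmiValue(int64_t smi) { return static_cast<int32_t>(smi >> 32); }

int main() {
  assert(SmiValue(MakeSmi(42)) == 42);
  assert(SmiValue(MakeSmi(-7)) == -7);
  assert((MakeSmi(42) & 1) == 0);  // Smi tag bit is 0
}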
 
@@ -321,44 +303,46 @@
       InstanceofStub::kArgsInRegisters);
   InstanceofStub stub(isolate(), flags);
   CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
-  CallDescriptor* desc = linkage()->GetStubCallDescriptor(d, 0);
-  Node* stub_code = CodeConstant(stub.GetCode());
+  CallDescriptor* desc =
+      linkage()->GetStubCallDescriptor(d, 0, FlagsForNode(node));
+  Node* stub_code = jsgraph()->HeapConstant(stub.GetCode());
   PatchInsertInput(node, 0, stub_code);
   PatchOperator(node, common()->Call(desc));
 }
 
 
 void JSGenericLowering::LowerJSLoadContext(Node* node) {
-  ContextAccess access = OpParameter<ContextAccess>(node);
-  // TODO(mstarzinger): Use simplified operators instead of machine operators
-  // here so that load/store optimization can be applied afterwards.
-  for (int i = 0; i < access.depth(); ++i) {
+  const ContextAccess& access = ContextAccessOf(node->op());
+  for (size_t i = 0; i < access.depth(); ++i) {
     node->ReplaceInput(
-        0, graph()->NewNode(
-               machine()->Load(kMachAnyTagged),
-               NodeProperties::GetValueInput(node, 0),
-               Int32Constant(Context::SlotOffset(Context::PREVIOUS_INDEX)),
-               NodeProperties::GetEffectInput(node)));
+        0, graph()->NewNode(machine()->Load(kMachAnyTagged),
+                            NodeProperties::GetValueInput(node, 0),
+                            jsgraph()->Int32Constant(
+                                Context::SlotOffset(Context::PREVIOUS_INDEX)),
+                            NodeProperties::GetEffectInput(node),
+                            graph()->start()));
   }
-  node->ReplaceInput(1, Int32Constant(Context::SlotOffset(access.index())));
+  node->ReplaceInput(1, jsgraph()->Int32Constant(Context::SlotOffset(
+                            static_cast<int>(access.index()))));
+  node->AppendInput(zone(), graph()->start());
   PatchOperator(node, machine()->Load(kMachAnyTagged));
 }
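
JSLoadContext above first hoists one load per level of context nesting, then reads the requested slot. The same walk, modeled with ordinary data structures (the struct layout is an assumption for illustration):

#include <cassert>
#include <cstddef>
#include <vector>

struct Context {
  Context* previous;       // corresponds to the PREVIOUS_INDEX slot
  std::vector<int> slots;
};

int LoadContextSlot(Context* ctx, size_t depth, size_t index) {
  // One load per level of nesting, exactly like the ReplaceInput loop above.
  for (size_t i = 0; i < depth; ++i) ctx = ctx->previous;
  return ctx->slots[index];
}

int main() {
  Context outer{nullptr, {10, 20}};
  Context inner{&outer, {30}};
  assert(LoadContextSlot(&inner, 0, 0) == 30);
  assert(LoadContextSlot(&inner, 1, 1) == 20);  // one hop out, slot 1
}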
 
 
 void JSGenericLowering::LowerJSStoreContext(Node* node) {
-  ContextAccess access = OpParameter<ContextAccess>(node);
-  // TODO(mstarzinger): Use simplified operators instead of machine operators
-  // here so that load/store optimization can be applied afterwards.
-  for (int i = 0; i < access.depth(); ++i) {
+  const ContextAccess& access = ContextAccessOf(node->op());
+  for (size_t i = 0; i < access.depth(); ++i) {
     node->ReplaceInput(
-        0, graph()->NewNode(
-               machine()->Load(kMachAnyTagged),
-               NodeProperties::GetValueInput(node, 0),
-               Int32Constant(Context::SlotOffset(Context::PREVIOUS_INDEX)),
-               NodeProperties::GetEffectInput(node)));
+        0, graph()->NewNode(machine()->Load(kMachAnyTagged),
+                            NodeProperties::GetValueInput(node, 0),
+                            jsgraph()->Int32Constant(
+                                Context::SlotOffset(Context::PREVIOUS_INDEX)),
+                            NodeProperties::GetEffectInput(node),
+                            graph()->start()));
   }
   node->ReplaceInput(2, NodeProperties::GetValueInput(node, 1));
-  node->ReplaceInput(1, Int32Constant(Context::SlotOffset(access.index())));
+  node->ReplaceInput(1, jsgraph()->Int32Constant(Context::SlotOffset(
+                            static_cast<int>(access.index()))));
   PatchOperator(node, machine()->Store(StoreRepresentation(kMachAnyTagged,
                                                            kFullWriteBarrier)));
 }
@@ -370,32 +354,73 @@
   CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
   CallDescriptor* desc =
       linkage()->GetStubCallDescriptor(d, arity, FlagsForNode(node));
-  Node* stub_code = CodeConstant(stub.GetCode());
+  Node* stub_code = jsgraph()->HeapConstant(stub.GetCode());
   Node* construct = NodeProperties::GetValueInput(node, 0);
   PatchInsertInput(node, 0, stub_code);
-  PatchInsertInput(node, 1, Int32Constant(arity - 1));
+  PatchInsertInput(node, 1, jsgraph()->Int32Constant(arity - 1));
   PatchInsertInput(node, 2, construct);
   PatchInsertInput(node, 3, jsgraph()->UndefinedConstant());
   PatchOperator(node, common()->Call(desc));
 }
 
 
+bool JSGenericLowering::TryLowerDirectJSCall(Node* node) {
+  // Lower to a direct call to a constant JSFunction if legal.
+  const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
+  int arg_count = static_cast<int>(p.arity() - 2);
+
+  // Check the function is a constant and is really a JSFunction.
+  HeapObjectMatcher<Object> function_const(node->InputAt(0));
+  if (!function_const.HasValue()) return false;  // not a constant.
+  Handle<Object> func = function_const.Value().handle();
+  if (!func->IsJSFunction()) return false;  // not a function.
+  Handle<JSFunction> function = Handle<JSFunction>::cast(func);
+  if (arg_count != function->shared()->formal_parameter_count()) return false;
+
+  // Check the receiver doesn't need to be wrapped.
+  Node* receiver = node->InputAt(1);
+  if (!NodeProperties::IsTyped(receiver)) return false;
+  Type* ok_receiver = Type::Union(Type::Undefined(), Type::Receiver(), zone());
+  if (!NodeProperties::GetBounds(receiver).upper->Is(ok_receiver)) return false;
+
+  int index = NodeProperties::FirstContextIndex(node);
+
+  // TODO(titzer): total hack to share function context constants.
+  // Remove this when the JSGraph canonicalizes heap constants.
+  Node* context = node->InputAt(index);
+  HeapObjectMatcher<Context> context_const(context);
+  if (!context_const.HasValue() ||
+      *(context_const.Value().handle()) != function->context()) {
+    context = jsgraph()->HeapConstant(Handle<Context>(function->context()));
+  }
+  node->ReplaceInput(index, context);
+  CallDescriptor* desc = linkage()->GetJSCallDescriptor(
+      1 + arg_count, jsgraph()->zone(), FlagsForNode(node));
+  PatchOperator(node, common()->Call(desc));
+  return true;
+}
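
TryLowerDirectJSCall only fires when the callee is a known constant JSFunction, the arity matches exactly (so no arguments adaptor frame is needed), and the receiver provably needs no wrapping. The guards, restated as a standalone predicate with placeholder fields:

struct CallSite {
  bool callee_is_constant_js_function;     // HeapObjectMatcher succeeded
  int formal_parameter_count;              // from the callee's shared info
  int argument_count;                      // arity minus code and receiver
  bool receiver_is_undefined_or_receiver;  // typed, and in the safe union
};

bool CanCallDirectly(const CallSite& site) {
  if (!site.callee_is_constant_js_function) return false;  // unknown target
  if (site.argument_count != site.formal_parameter_count)
    return false;                                  // would need adaptor frame
  return site.receiver_is_undefined_or_receiver;   // no wrapping needed
}

int main() { return CanCallDirectly({true, 2, 2, true}) ? 0 : 1; }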
+
+
 void JSGenericLowering::LowerJSCallFunction(Node* node) {
-  CallParameters p = OpParameter<CallParameters>(node);
-  CallFunctionStub stub(isolate(), p.arity - 2, p.flags);
+  // Fast case: call function directly.
+  if (TryLowerDirectJSCall(node)) return;
+
+  // General case: CallFunctionStub.
+  const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
+  int arg_count = static_cast<int>(p.arity() - 2);
+  CallFunctionStub stub(isolate(), arg_count, p.flags());
   CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
-  CallDescriptor* desc =
-      linkage()->GetStubCallDescriptor(d, p.arity - 1, FlagsForNode(node));
-  Node* stub_code = CodeConstant(stub.GetCode());
+  CallDescriptor* desc = linkage()->GetStubCallDescriptor(
+      d, static_cast<int>(p.arity() - 1), FlagsForNode(node));
+  Node* stub_code = jsgraph()->HeapConstant(stub.GetCode());
   PatchInsertInput(node, 0, stub_code);
   PatchOperator(node, common()->Call(desc));
 }
 
 
 void JSGenericLowering::LowerJSCallRuntime(Node* node) {
-  Runtime::FunctionId function = OpParameter<Runtime::FunctionId>(node);
-  int arity = OperatorProperties::GetValueInputCount(node->op());
-  ReplaceWithRuntimeCall(node, function, arity);
+  const CallRuntimeParameters& p = CallRuntimeParametersOf(node->op());
+  ReplaceWithRuntimeCall(node, p.id(), static_cast<int>(p.arity()));
 }
 
 }  // namespace compiler
diff --git a/src/compiler/js-generic-lowering.h b/src/compiler/js-generic-lowering.h
index 400f806..f626338 100644
--- a/src/compiler/js-generic-lowering.h
+++ b/src/compiler/js-generic-lowering.h
@@ -5,13 +5,12 @@
 #ifndef V8_COMPILER_JS_GENERIC_LOWERING_H_
 #define V8_COMPILER_JS_GENERIC_LOWERING_H_
 
-#include "src/v8.h"
-
 #include "src/allocation.h"
 #include "src/code-factory.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/graph-reducer.h"
 #include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
 #include "src/compiler/opcodes.h"
 
 namespace v8 {
@@ -23,37 +22,34 @@
 class MachineOperatorBuilder;
 class Linkage;
 
+
 // Lowers JS-level operators to runtime and IC calls in the "generic" case.
-class JSGenericLowering : public Reducer {
+class JSGenericLowering FINAL : public Reducer {
  public:
   JSGenericLowering(CompilationInfo* info, JSGraph* graph);
-  virtual ~JSGenericLowering() {}
+  ~JSGenericLowering() FINAL {}
 
-  virtual Reduction Reduce(Node* node);
+  Reduction Reduce(Node* node) FINAL;
 
  protected:
 #define DECLARE_LOWER(x) void Lower##x(Node* node);
   // Dispatched depending on opcode.
-  ALL_OP_LIST(DECLARE_LOWER)
+  JS_OP_LIST(DECLARE_LOWER)
 #undef DECLARE_LOWER
 
-  // Helpers to create new constant nodes.
-  Node* SmiConstant(int immediate);
-  Node* Int32Constant(int immediate);
-  Node* CodeConstant(Handle<Code> code);
-  Node* FunctionConstant(Handle<JSFunction> function);
-  Node* ExternalConstant(ExternalReference ref);
-
   // Helpers to patch existing nodes in the graph.
   void PatchOperator(Node* node, const Operator* new_op);
   void PatchInsertInput(Node* node, int index, Node* input);
 
   // Helpers to replace existing nodes with a generic call.
-  void ReplaceWithCompareIC(Node* node, Token::Value token, bool pure);
+  void ReplaceWithCompareIC(Node* node, Token::Value token);
   void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags);
   void ReplaceWithBuiltinCall(Node* node, Builtins::JavaScript id, int args);
   void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1);
 
+  // Helper for optimization of JSCallFunction.
+  bool TryLowerDirectJSCall(Node* node);
+
   Zone* zone() const { return graph()->zone(); }
   Isolate* isolate() const { return zone()->isolate(); }
   JSGraph* jsgraph() const { return jsgraph_; }
@@ -67,7 +63,6 @@
   CompilationInfo* info_;
   JSGraph* jsgraph_;
   Linkage* linkage_;
-  SetOncePointer<Node> centrystub_constant_;
 };
 
 }  // namespace compiler
diff --git a/src/compiler/js-graph.cc b/src/compiler/js-graph.cc
index 1309531..7759ba1 100644
--- a/src/compiler/js-graph.cc
+++ b/src/compiler/js-graph.cc
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/code-stubs.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/node-properties-inl.h"
 #include "src/compiler/typer.h"
@@ -10,25 +11,22 @@
 namespace internal {
 namespace compiler {
 
-Node* JSGraph::ImmovableHeapConstant(Handle<Object> object) {
-  Unique<Object> unique = Unique<Object>::CreateImmovable(object);
-  return NewNode(common()->HeapConstant(unique));
+Node* JSGraph::ImmovableHeapConstant(Handle<HeapObject> object) {
+  Unique<HeapObject> unique = Unique<HeapObject>::CreateImmovable(object);
+  return graph()->NewNode(common()->HeapConstant(unique));
 }
 
 
-Node* JSGraph::NewNode(const Operator* op) {
-  Node* node = graph()->NewNode(op);
-  typer_->Init(node);
-  return node;
-}
-
-
-Node* JSGraph::CEntryStubConstant() {
-  if (!c_entry_stub_constant_.is_set()) {
-    c_entry_stub_constant_.set(
-        ImmovableHeapConstant(CEntryStub(isolate(), 1).GetCode()));
+Node* JSGraph::CEntryStubConstant(int result_size) {
+  if (result_size == 1) {
+    if (!c_entry_stub_constant_.is_set()) {
+      c_entry_stub_constant_.set(
+          ImmovableHeapConstant(CEntryStub(isolate(), 1).GetCode()));
+    }
+    return c_entry_stub_constant_.get();
   }
-  return c_entry_stub_constant_.get();
+
+  return ImmovableHeapConstant(CEntryStub(isolate(), result_size).GetCode());
 }
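
CEntryStubConstant now memoizes only the result_size == 1 stub, which is by far the common case; other sizes are rebuilt on demand. A sketch of that policy with simplified stand-in types:

#include <string>

struct Node { std::string label; };

Node* NewCEntryStubNode(int result_size) {
  return new Node{"CEntryStub(" + std::to_string(result_size) + ")"};
}

// Memoize the result_size == 1 node once; rebuild anything else on demand.
Node* CEntryStubConstant(int result_size) {
  static Node* cached = nullptr;  // SetOncePointer<Node> stand-in
  if (result_size == 1) {
    if (cached == nullptr) cached = NewCEntryStubNode(1);
    return cached;  // shared, canonicalized node
  }
  return NewCEntryStubNode(result_size);  // uncached, rare case
}

int main() { return CEntryStubConstant(1) == CEntryStubConstant(1) ? 0 : 1; }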
 
 
@@ -93,13 +91,13 @@
 }
 
 
-Node* JSGraph::HeapConstant(Unique<Object> value) {
+Node* JSGraph::HeapConstant(Unique<HeapObject> value) {
   // TODO(turbofan): canonicalize heap constants using Unique<T>
-  return NewNode(common()->HeapConstant(value));
+  return graph()->NewNode(common()->HeapConstant(value));
 }
 
 
-Node* JSGraph::HeapConstant(Handle<Object> value) {
+Node* JSGraph::HeapConstant(Handle<HeapObject> value) {
   // TODO(titzer): We could also match against the addresses of immortal
   // immovables here, even without access to the heap, thus always
   // canonicalizing references to them.
@@ -107,7 +105,8 @@
   // TODO(turbofan): This is a work-around to make Unique::HashCode() work for
   // value numbering. We need some sane way to compute a unique hash code for
   // arbitrary handles here.
-  Unique<Object> unique(reinterpret_cast<Address>(*value.location()), value);
+  Unique<HeapObject> unique(reinterpret_cast<Address>(*value.location()),
+                            value);
   return HeapConstant(unique);
 }
 
@@ -128,7 +127,7 @@
   } else if (value->IsTheHole()) {
     return TheHoleConstant();
   } else {
-    return HeapConstant(value);
+    return HeapConstant(Handle<HeapObject>::cast(value));
   }
 }
 
@@ -150,7 +149,16 @@
 Node* JSGraph::Int32Constant(int32_t value) {
   Node** loc = cache_.FindInt32Constant(value);
   if (*loc == NULL) {
-    *loc = NewNode(common()->Int32Constant(value));
+    *loc = graph()->NewNode(common()->Int32Constant(value));
+  }
+  return *loc;
+}
+
+
+Node* JSGraph::Int64Constant(int64_t value) {
+  Node** loc = cache_.FindInt64Constant(value);
+  if (*loc == NULL) {
+    *loc = graph()->NewNode(common()->Int64Constant(value));
   }
   return *loc;
 }
@@ -159,7 +167,16 @@
 Node* JSGraph::NumberConstant(double value) {
   Node** loc = cache_.FindNumberConstant(value);
   if (*loc == NULL) {
-    *loc = NewNode(common()->NumberConstant(value));
+    *loc = graph()->NewNode(common()->NumberConstant(value));
+  }
+  return *loc;
+}
+
+
+Node* JSGraph::Float32Constant(float value) {
+  Node** loc = cache_.FindFloat32Constant(value);
+  if (*loc == NULL) {
+    *loc = graph()->NewNode(common()->Float32Constant(value));
   }
   return *loc;
 }
@@ -168,7 +185,7 @@
 Node* JSGraph::Float64Constant(double value) {
   Node** loc = cache_.FindFloat64Constant(value);
   if (*loc == NULL) {
-    *loc = NewNode(common()->Float64Constant(value));
+    *loc = graph()->NewNode(common()->Float64Constant(value));
   }
   return *loc;
 }
@@ -177,10 +194,23 @@
 Node* JSGraph::ExternalConstant(ExternalReference reference) {
   Node** loc = cache_.FindExternalConstant(reference);
   if (*loc == NULL) {
-    *loc = NewNode(common()->ExternalConstant(reference));
+    *loc = graph()->NewNode(common()->ExternalConstant(reference));
   }
   return *loc;
 }
+
+
+void JSGraph::GetCachedNodes(NodeVector* nodes) {
+  cache_.GetCachedNodes(nodes);
+  SetOncePointer<Node>* ptrs[] = {
+      &c_entry_stub_constant_, &undefined_constant_, &the_hole_constant_,
+      &true_constant_,         &false_constant_,     &null_constant_,
+      &zero_constant_,         &one_constant_,       &nan_constant_};
+  for (size_t i = 0; i < arraysize(ptrs); i++) {
+    if (ptrs[i]->is_set()) nodes->push_back(ptrs[i]->get());
+  }
+}
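
GetCachedNodes walks the set-once constant slots and reports only the ones that have materialized. The same shape, with std::optional standing in for SetOncePointer:

#include <optional>
#include <vector>

using Node = int;  // placeholder for a graph node

void GetCachedNodes(const std::vector<std::optional<Node>>& slots,
                    std::vector<Node>* nodes) {
  for (const auto& slot : slots) {
    if (slot.has_value()) nodes->push_back(*slot);  // only materialized ones
  }
}

int main() {
  std::vector<std::optional<Node>> slots = {1, std::nullopt, 3};
  std::vector<Node> out;
  GetCachedNodes(slots, &out);
  return out.size() == 2 ? 0 : 1;
}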
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/js-graph.h b/src/compiler/js-graph.h
index 2b2dfd1..040a745 100644
--- a/src/compiler/js-graph.h
+++ b/src/compiler/js-graph.h
@@ -24,17 +24,15 @@
 class JSGraph : public ZoneObject {
  public:
   JSGraph(Graph* graph, CommonOperatorBuilder* common,
-          JSOperatorBuilder* javascript, Typer* typer,
-          MachineOperatorBuilder* machine)
+          JSOperatorBuilder* javascript, MachineOperatorBuilder* machine)
       : graph_(graph),
         common_(common),
         javascript_(javascript),
-        typer_(typer),
         machine_(machine),
         cache_(zone()) {}
 
   // Canonicalized global constants.
-  Node* CEntryStubConstant();
+  Node* CEntryStubConstant(int result_size);
   Node* UndefinedConstant();
   Node* TheHoleConstant();
   Node* TrueConstant();
@@ -46,11 +44,11 @@
 
   // Creates a HeapConstant node, possibly canonicalized, without inspecting the
   // object.
-  Node* HeapConstant(Unique<Object> value);
+  Node* HeapConstant(Unique<HeapObject> value);
 
   // Creates a HeapConstant node, possibly canonicalized, and may access the
   // heap to inspect the object.
-  Node* HeapConstant(Handle<Object> value);
+  Node* HeapConstant(Handle<HeapObject> value);
 
   // Creates a Constant node of the appropriate type for the given object.
   // Accesses the heap to inspect the object and determine whether one of the
@@ -69,6 +67,33 @@
     return Int32Constant(bit_cast<int32_t>(value));
   }
 
+  // Creates a HeapConstant node for either true or false.
+  Node* BooleanConstant(bool is_true) {
+    return is_true ? TrueConstant() : FalseConstant();
+  }
+
+  // Creates an Int64Constant node, usually canonicalized.
+  Node* Int64Constant(int64_t value);
+  Node* Uint64Constant(uint64_t value) {
+    return Int64Constant(bit_cast<int64_t>(value));
+  }
+
+  // Creates an Int32Constant/Int64Constant node, depending on the word size
+  // of the target machine.
+  // TODO(turbofan): Code using Int32Constant/Int64Constant to store pointer
+  // constants is probably not serializable.
+  Node* IntPtrConstant(intptr_t value) {
+    return machine()->Is32() ? Int32Constant(static_cast<int32_t>(value))
+                             : Int64Constant(static_cast<int64_t>(value));
+  }
+  template <typename T>
+  Node* PointerConstant(T* value) {
+    return IntPtrConstant(bit_cast<intptr_t>(value));
+  }
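
IntPtrConstant dispatches on the target word size. A host-side approximation of that choice, using sizeof(void*) in place of machine()->Is32():

#include <iostream>

const char* IntPtrConstantKind() {
  return sizeof(void*) == 4 ? "Int32Constant" : "Int64Constant";
}

int main() { std::cout << IntPtrConstantKind() << "\n"; }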
+
+  // Creates a Float32Constant node, usually canonicalized.
+  Node* Float32Constant(float value);
+
   // Creates a Float64Constant node, usually canonicalized.
   Node* Float64Constant(double value);
 
@@ -80,20 +105,27 @@
     return Constant(immediate);
   }
 
+  // Creates a dummy Constant node, used to satisfy calling conventions of
+  // stubs and runtime functions that do not require a context.
+  Node* NoContextConstant() { return ZeroConstant(); }
+
   JSOperatorBuilder* javascript() { return javascript_; }
   CommonOperatorBuilder* common() { return common_; }
   MachineOperatorBuilder* machine() { return machine_; }
   Graph* graph() { return graph_; }
   Zone* zone() { return graph()->zone(); }
   Isolate* isolate() { return zone()->isolate(); }
+  Factory* factory() { return isolate()->factory(); }
+
+  void GetCachedNodes(NodeVector* nodes);
 
  private:
   Graph* graph_;
   CommonOperatorBuilder* common_;
   JSOperatorBuilder* javascript_;
-  Typer* typer_;
   MachineOperatorBuilder* machine_;
 
+  // TODO(titzer): make this into a simple array.
   SetOncePointer<Node> c_entry_stub_constant_;
   SetOncePointer<Node> undefined_constant_;
   SetOncePointer<Node> the_hole_constant_;
@@ -106,11 +138,10 @@
 
   CommonNodeCache cache_;
 
-  Node* ImmovableHeapConstant(Handle<Object> value);
+  Node* ImmovableHeapConstant(Handle<HeapObject> value);
   Node* NumberConstant(double value);
-  Node* NewNode(const Operator* op);
 
-  Factory* factory() { return isolate()->factory(); }
+  DISALLOW_COPY_AND_ASSIGN(JSGraph);
 };
 
 }  // namespace compiler
diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc
index af02145..d143382 100644
--- a/src/compiler/js-inlining.cc
+++ b/src/compiler/js-inlining.cc
@@ -2,13 +2,15 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/ast.h"
+#include "src/ast-numbering.h"
 #include "src/compiler/access-builder.h"
 #include "src/compiler/ast-graph-builder.h"
 #include "src/compiler/common-operator.h"
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/graph-inl.h"
 #include "src/compiler/graph-visualizer.h"
 #include "src/compiler/js-inlining.h"
+#include "src/compiler/js-intrinsic-builder.h"
 #include "src/compiler/js-operator.h"
 #include "src/compiler/node-aux-data-inl.h"
 #include "src/compiler/node-matchers.h"
@@ -29,15 +31,19 @@
  public:
   explicit InlinerVisitor(JSInliner* inliner) : inliner_(inliner) {}
 
-  GenericGraphVisit::Control Post(Node* node) {
+  void Post(Node* node) {
     switch (node->opcode()) {
       case IrOpcode::kJSCallFunction:
-        inliner_->TryInlineCall(node);
+        inliner_->TryInlineJSCall(node);
+        break;
+      case IrOpcode::kJSCallRuntime:
+        if (FLAG_turbo_inlining_intrinsics) {
+          inliner_->TryInlineRuntimeCall(node);
+        }
         break;
       default:
         break;
     }
-    return GenericGraphVisit::CONTINUE;
   }
 
  private:
@@ -51,16 +57,6 @@
 }
 
 
-// TODO(sigurds) Find a home for this function and reuse it everywhere (esp. in
-// test cases, where similar code is currently duplicated).
-static void Parse(Handle<JSFunction> function, CompilationInfoWithZone* info) {
-  CHECK(Parser::Parse(info));
-  CHECK(Rewriter::Rewrite(info));
-  CHECK(Scope::Analyze(info));
-  CHECK(Compiler::EnsureDeoptimizationSupport(info));
-}
-
-
 // A facade on a JSFunction's graph to facilitate inlining. It assumes that
 // the function graph has only one return statement, and provides
 // {UnifyReturn} to convert a function graph to that end.
@@ -90,7 +86,7 @@
   }
 
   // Counts JSFunction, Receiver, arguments, context but not effect, control.
-  size_t total_parameters() { return start_->op()->OutputCount(); }
+  size_t total_parameters() { return start_->op()->ValueOutputCount(); }
 
   // Counts only formal parameters.
   size_t formal_parameters() {
@@ -121,8 +117,7 @@
   }
   DCHECK_EQ(IrOpcode::kMerge, final_merge->opcode());
 
-  int predecessors =
-      OperatorProperties::GetControlInputCount(final_merge->op());
+  int predecessors = final_merge->op()->ControlInputCount();
 
   const Operator* op_phi = jsgraph->common()->Phi(kMachAnyTagged, predecessors);
   const Operator* op_ephi = jsgraph->common()->EffectPhi(predecessors);
@@ -131,19 +126,17 @@
   NodeVector effects(jsgraph->zone());
   // Iterate over all control flow predecessors,
   // which must be return statements.
-  InputIter iter = final_merge->inputs().begin();
-  while (iter != final_merge->inputs().end()) {
-    Node* input = *iter;
+  for (Edge edge : final_merge->input_edges()) {
+    Node* input = edge.to();
     switch (input->opcode()) {
       case IrOpcode::kReturn:
         values.push_back(NodeProperties::GetValueInput(input, 0));
         effects.push_back(NodeProperties::GetEffectInput(input));
-        iter.UpdateToAndIncrement(NodeProperties::GetControlInput(input));
+        edge.UpdateTo(NodeProperties::GetControlInput(input));
         input->RemoveAllInputs();
         break;
       default:
         UNREACHABLE();
-        ++iter;
         break;
     }
   }
@@ -167,14 +160,13 @@
         source_graph_(source_graph),
         target_graph_(target_graph),
         temp_zone_(temp_zone),
-        sentinel_op_(IrOpcode::kDead, Operator::kNoProperties, 0, 0,
-                     "sentinel") {}
+        sentinel_op_(IrOpcode::kDead, Operator::kNoProperties, "sentinel", 0, 0,
+                     0, 0, 0, 0) {}
 
-  GenericGraphVisit::Control Post(Node* original) {
+  void Post(Node* original) {
     NodeVector inputs(temp_zone_);
-    for (InputIter it = original->inputs().begin();
-         it != original->inputs().end(); ++it) {
-      inputs.push_back(GetCopy(*it));
+    for (Node* const node : original->inputs()) {
+      inputs.push_back(GetCopy(node));
     }
 
     // Reuse the operator in the copy. This assumes that op lives in a zone
@@ -183,7 +175,6 @@
         target_graph_->NewNode(original->op(), static_cast<int>(inputs.size()),
                                (inputs.empty() ? NULL : &inputs.front()));
     copies_[original->id()] = copy;
-    return GenericGraphVisit::CONTINUE;
   }
 
   Node* GetCopy(Node* original) {
@@ -214,11 +205,10 @@
   }
 
   Node* GetSentinel(Node* original) {
-    Node* sentinel = sentinels_[original->id()];
-    if (sentinel == NULL) {
-      sentinel = target_graph_->NewNode(&sentinel_op_);
+    if (sentinels_[original->id()] == NULL) {
+      sentinels_[original->id()] = target_graph_->NewNode(&sentinel_op_);
     }
-    return sentinel;
+    return sentinels_[original->id()];
   }
 
   NodeVector copies_;
@@ -226,7 +216,7 @@
   Graph* source_graph_;
   Graph* target_graph_;
   Zone* temp_zone_;
-  SimpleOperator sentinel_op_;
+  Operator sentinel_op_;
 };
 
 
@@ -241,43 +231,41 @@
   Node* context = jsgraph->graph()->NewNode(
       simplified.LoadField(AccessBuilder::ForJSFunctionContext()),
       NodeProperties::GetValueInput(call, 0),
-      NodeProperties::GetEffectInput(call));
+      NodeProperties::GetEffectInput(call), control);
 
   // Context is last argument.
   int inlinee_context_index = static_cast<int>(total_parameters()) - 1;
   // {inliner_inputs} counts JSFunction, Receiver, arguments, but not
   // context, effect, control.
-  int inliner_inputs = OperatorProperties::GetValueInputCount(call->op());
+  int inliner_inputs = call->op()->ValueInputCount();
   // Iterate over all uses of the start node.
-  UseIter iter = start_->uses().begin();
-  while (iter != start_->uses().end()) {
-    Node* use = *iter;
+  for (Edge edge : start_->use_edges()) {
+    Node* use = edge.from();
     switch (use->opcode()) {
       case IrOpcode::kParameter: {
         int index = 1 + OpParameter<int>(use->op());
         if (index < inliner_inputs && index < inlinee_context_index) {
           // There is an input from the call, and the index is a value
           // projection but not the context, so rewire the input.
-          NodeProperties::ReplaceWithValue(*iter, call->InputAt(index));
+          NodeProperties::ReplaceWithValue(use, call->InputAt(index));
         } else if (index == inlinee_context_index) {
           // This is the context projection, rewire it to the context from the
           // JSFunction object.
-          NodeProperties::ReplaceWithValue(*iter, context);
+          NodeProperties::ReplaceWithValue(use, context);
         } else if (index < inlinee_context_index) {
           // Call has fewer arguments than required, fill with undefined.
-          NodeProperties::ReplaceWithValue(*iter, jsgraph->UndefinedConstant());
+          NodeProperties::ReplaceWithValue(use, jsgraph->UndefinedConstant());
         } else {
           // We got too many arguments, discard for now.
           // TODO(sigurds): Fix to treat arguments array correctly.
         }
-        ++iter;
         break;
       }
       default:
-        if (NodeProperties::IsEffectEdge(iter.edge())) {
-          iter.UpdateToAndIncrement(context);
-        } else if (NodeProperties::IsControlEdge(iter.edge())) {
-          iter.UpdateToAndIncrement(control);
+        if (NodeProperties::IsEffectEdge(edge)) {
+          edge.UpdateTo(context);
+        } else if (NodeProperties::IsControlEdge(edge)) {
+          edge.UpdateTo(control);
         } else {
           UNREACHABLE();
         }
@@ -285,22 +273,9 @@
     }
   }
 
-  // Iterate over all uses of the call node.
-  iter = call->uses().begin();
-  while (iter != call->uses().end()) {
-    if (NodeProperties::IsEffectEdge(iter.edge())) {
-      iter.UpdateToAndIncrement(effect_output());
-    } else if (NodeProperties::IsControlEdge(iter.edge())) {
-      UNREACHABLE();
-    } else {
-      DCHECK(NodeProperties::IsValueEdge(iter.edge()));
-      iter.UpdateToAndIncrement(value_output());
-    }
-  }
+  NodeProperties::ReplaceWithValue(call, value_output(), effect_output());
   call->RemoveAllInputs();
   DCHECK_EQ(0, call->UseCount());
-  // TODO(sigurds) Remove this once we copy.
-  unique_return()->RemoveAllInputs();
 }
 
 
@@ -323,7 +298,7 @@
 
   size_t formal_arguments() {
     // {value_inputs} includes jsfunction and receiver.
-    size_t value_inputs = OperatorProperties::GetValueInputCount(call_->op());
+    size_t value_inputs = call_->op()->ValueInputCount();
     DCHECK_GE(call_->InputCount(), 2);
     return value_inputs - 2;
   }
@@ -348,9 +323,9 @@
 Node* JSInliner::CreateArgumentsAdaptorFrameState(JSCallFunctionAccessor* call,
                                                   Handle<JSFunction> jsfunction,
                                                   Zone* temp_zone) {
-  const Operator* op =
-      jsgraph_->common()->FrameState(FrameStateType::ARGUMENTS_ADAPTOR,
-                                     BailoutId(-1), kIgnoreOutput, jsfunction);
+  const Operator* op = jsgraph_->common()->FrameState(
+      FrameStateType::ARGUMENTS_ADAPTOR, BailoutId(-1),
+      OutputFrameStateCombine::Ignore(), jsfunction);
   const Operator* op0 = jsgraph_->common()->StateValues(0);
   Node* node0 = jsgraph_->graph()->NewNode(op0);
   NodeVector params(temp_zone);
@@ -368,7 +343,7 @@
 }
 
 
-void JSInliner::TryInlineCall(Node* call_node) {
+void JSInliner::TryInlineJSCall(Node* call_node) {
   JSCallFunctionAccessor call(call_node);
 
   HeapObjectMatcher<JSFunction> match(call.jsfunction());
@@ -389,9 +364,11 @@
   }
 
   CompilationInfoWithZone info(function);
-  Parse(function, &info);
+  // TODO(wingo): ParseAndAnalyze can fail due to stack overflow.
+  CHECK(Compiler::ParseAndAnalyze(&info));
+  CHECK(Compiler::EnsureDeoptimizationSupport(&info));
 
-  if (info.scope()->arguments() != NULL) {
+  if (info.scope()->arguments() != NULL && info.strict_mode() != STRICT) {
     // For now do not inline functions that use their arguments array.
     SmartArrayPointer<char> name = function->shared()->DebugName()->ToCString();
     if (FLAG_trace_turbo_inlining) {
@@ -410,11 +387,10 @@
   }
 
   Graph graph(info.zone());
-  Typer typer(info.zone());
-  JSGraph jsgraph(&graph, jsgraph_->common(), jsgraph_->javascript(), &typer,
+  JSGraph jsgraph(&graph, jsgraph_->common(), jsgraph_->javascript(),
                   jsgraph_->machine());
 
-  AstGraphBuilder graph_builder(&info, &jsgraph);
+  AstGraphBuilder graph_builder(local_zone_, &info, &jsgraph);
   graph_builder.CreateGraph();
   Inlinee::UnifyReturn(&jsgraph);
 
@@ -423,24 +399,91 @@
 
   Inlinee inlinee(visitor.GetCopy(graph.start()), visitor.GetCopy(graph.end()));
 
-  Node* outer_frame_state = call.frame_state();
-  // Insert argument adaptor frame if required.
-  if (call.formal_arguments() != inlinee.formal_parameters()) {
-    outer_frame_state =
-        CreateArgumentsAdaptorFrameState(&call, function, info.zone());
-  }
+  if (FLAG_turbo_deoptimization) {
+    Node* outer_frame_state = call.frame_state();
+    // Insert argument adaptor frame if required.
+    if (call.formal_arguments() != inlinee.formal_parameters()) {
+      outer_frame_state =
+          CreateArgumentsAdaptorFrameState(&call, function, info.zone());
+    }
 
-  for (NodeVectorConstIter it = visitor.copies().begin();
-       it != visitor.copies().end(); ++it) {
-    Node* node = *it;
-    if (node != NULL && node->opcode() == IrOpcode::kFrameState) {
-      AddClosureToFrameState(node, function);
-      NodeProperties::ReplaceFrameStateInput(node, outer_frame_state);
+    for (NodeVectorConstIter it = visitor.copies().begin();
+         it != visitor.copies().end(); ++it) {
+      Node* node = *it;
+      if (node != NULL && node->opcode() == IrOpcode::kFrameState) {
+        AddClosureToFrameState(node, function);
+        NodeProperties::ReplaceFrameStateInput(node, outer_frame_state);
+      }
     }
   }
 
   inlinee.InlineAtCall(jsgraph_, call_node);
 }
+
+
+class JSCallRuntimeAccessor {
+ public:
+  explicit JSCallRuntimeAccessor(Node* call) : call_(call) {
+    DCHECK_EQ(IrOpcode::kJSCallRuntime, call->opcode());
+  }
+
+  Node* formal_argument(size_t index) {
+    DCHECK(index < formal_arguments());
+    return call_->InputAt(static_cast<int>(index));
+  }
+
+  size_t formal_arguments() {
+    size_t value_inputs = call_->op()->ValueInputCount();
+    return value_inputs;
+  }
+
+  Node* frame_state() const {
+    return NodeProperties::GetFrameStateInput(call_);
+  }
+  Node* context() const { return NodeProperties::GetContextInput(call_); }
+  Node* control() const { return NodeProperties::GetControlInput(call_); }
+  Node* effect() const { return NodeProperties::GetEffectInput(call_); }
+
+  const Runtime::Function* function() const {
+    return Runtime::FunctionForId(CallRuntimeParametersOf(call_->op()).id());
+  }
+
+  NodeVector inputs(Zone* zone) const {
+    NodeVector inputs(zone);
+    for (Node* const node : call_->inputs()) {
+      inputs.push_back(node);
+    }
+    return inputs;
+  }
+
+ private:
+  Node* call_;
+};
+
+
+void JSInliner::TryInlineRuntimeCall(Node* call_node) {
+  JSCallRuntimeAccessor call(call_node);
+  const Runtime::Function* f = call.function();
+
+  if (f->intrinsic_type != Runtime::IntrinsicType::INLINE) {
+    return;
+  }
+
+  JSIntrinsicBuilder intrinsic_builder(jsgraph_);
+
+  ResultAndEffect r = intrinsic_builder.BuildGraphFor(
+      f->function_id, call.inputs(jsgraph_->zone()));
+
+  if (r.first != NULL) {
+    if (FLAG_trace_turbo_inlining) {
+      PrintF("Inlining %s into %s\n", f->name,
+             info_->shared_info()->DebugName()->ToCString().get());
+    }
+    NodeProperties::ReplaceWithValue(call_node, r.first, r.second);
+    call_node->RemoveAllInputs();
+    DCHECK_EQ(0, call_node->UseCount());
+  }
+}
 }
 }
 }  // namespace v8::internal::compiler
diff --git a/src/compiler/js-inlining.h b/src/compiler/js-inlining.h
index f135170..eef29d6 100644
--- a/src/compiler/js-inlining.h
+++ b/src/compiler/js-inlining.h
@@ -16,14 +16,16 @@
 
 class JSInliner {
  public:
-  JSInliner(CompilationInfo* info, JSGraph* jsgraph)
-      : info_(info), jsgraph_(jsgraph) {}
+  JSInliner(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph)
+      : local_zone_(local_zone), info_(info), jsgraph_(jsgraph) {}
 
   void Inline();
-  void TryInlineCall(Node* node);
+  void TryInlineJSCall(Node* node);
+  void TryInlineRuntimeCall(Node* node);
 
  private:
   friend class InlinerVisitor;
+  Zone* local_zone_;
   CompilationInfo* info_;
   JSGraph* jsgraph_;
 
diff --git a/src/compiler/js-intrinsic-builder.cc b/src/compiler/js-intrinsic-builder.cc
new file mode 100644
index 0000000..80b6968
--- /dev/null
+++ b/src/compiler/js-intrinsic-builder.cc
@@ -0,0 +1,140 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/diamond.h"
+#include "src/compiler/js-intrinsic-builder.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/simplified-operator.h"
+
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+ResultAndEffect JSIntrinsicBuilder::BuildGraphFor(Runtime::FunctionId id,
+                                                  const NodeVector& arguments) {
+  switch (id) {
+    case Runtime::kInlineIsSmi:
+      return BuildGraphFor_IsSmi(arguments);
+    case Runtime::kInlineIsNonNegativeSmi:
+      return BuildGraphFor_IsNonNegativeSmi(arguments);
+    case Runtime::kInlineIsArray:
+      return BuildMapCheck(arguments[0], arguments[2], JS_ARRAY_TYPE);
+    case Runtime::kInlineIsRegExp:
+      return BuildMapCheck(arguments[0], arguments[2], JS_REGEXP_TYPE);
+    case Runtime::kInlineIsFunction:
+      return BuildMapCheck(arguments[0], arguments[2], JS_FUNCTION_TYPE);
+    case Runtime::kInlineValueOf:
+      return BuildGraphFor_ValueOf(arguments);
+    default:
+      break;
+  }
+  return ResultAndEffect();
+}
+
+ResultAndEffect JSIntrinsicBuilder::BuildGraphFor_IsSmi(
+    const NodeVector& arguments) {
+  Node* object = arguments[0];
+  SimplifiedOperatorBuilder simplified(jsgraph_->zone());
+  Node* condition = graph()->NewNode(simplified.ObjectIsSmi(), object);
+
+  return ResultAndEffect(condition, arguments[2]);
+}
+
+
+ResultAndEffect JSIntrinsicBuilder::BuildGraphFor_IsNonNegativeSmi(
+    const NodeVector& arguments) {
+  Node* object = arguments[0];
+  SimplifiedOperatorBuilder simplified(jsgraph_->zone());
+  Node* condition =
+      graph()->NewNode(simplified.ObjectIsNonNegativeSmi(), object);
+
+  return ResultAndEffect(condition, arguments[2]);
+}
+
+
+/*
+ * if (%_IsSmi(object)) {
+ *   return false;
+ * } else {
+ *   return %_GetMapInstanceType(object) == map_type;
+ * }
+ */
+ResultAndEffect JSIntrinsicBuilder::BuildMapCheck(Node* object, Node* effect,
+                                                  InstanceType map_type) {
+  SimplifiedOperatorBuilder simplified(jsgraph_->zone());
+
+  Node* is_smi = graph()->NewNode(simplified.ObjectIsSmi(), object);
+  Diamond d(graph(), common(), is_smi);
+
+  Node* map = graph()->NewNode(simplified.LoadField(AccessBuilder::ForMap()),
+                               object, effect, d.if_false);
+
+  Node* instance_type = graph()->NewNode(
+      simplified.LoadField(AccessBuilder::ForMapInstanceType()), map, map,
+      d.if_false);
+
+  Node* has_map_type =
+      graph()->NewNode(jsgraph_->machine()->Word32Equal(), instance_type,
+                       jsgraph_->Int32Constant(map_type));
+
+  Node* phi = d.Phi(static_cast<MachineType>(kTypeBool | kRepTagged),
+                    jsgraph_->FalseConstant(), has_map_type);
+
+  Node* ephi = d.EffectPhi(effect, instance_type);
+
+  return ResultAndEffect(phi, ephi);
+}
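
BuildMapCheck encodes the pseudocode above as a diamond: the Smi path yields false without touching the map, and only the non-Smi path loads the instance type. An executable model of the semantics (Object here is a toy tagged value, not V8's):

#include <cassert>

enum InstanceType { JS_ARRAY_TYPE, JS_REGEXP_TYPE, JS_FUNCTION_TYPE, OTHER };

struct Object {
  bool is_smi;
  InstanceType instance_type;  // only meaningful when !is_smi
};

bool MapCheck(const Object& o, InstanceType expected) {
  if (o.is_smi) return false;          // diamond true branch: never a match
  return o.instance_type == expected;  // false branch: load map and compare
}

int main() {
  assert(!MapCheck({true, OTHER}, JS_ARRAY_TYPE));
  assert(MapCheck({false, JS_ARRAY_TYPE}, JS_ARRAY_TYPE));
}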
+
+
+/*
+ * if (%_IsSmi(object)) {
+ *   return object;
+ * } else if (%_GetMapInstanceType(object) == JS_VALUE_TYPE) {
+ *   return %_LoadValueField(object);
+ * } else {
+ *   return object;
+ * }
+ */
+ResultAndEffect JSIntrinsicBuilder::BuildGraphFor_ValueOf(
+    const NodeVector& arguments) {
+  Node* object = arguments[0];
+  Node* effect = arguments[2];
+  SimplifiedOperatorBuilder simplified(jsgraph_->zone());
+
+  Node* is_smi = graph()->NewNode(simplified.ObjectIsSmi(), object);
+
+  Diamond if_is_smi(graph(), common(), is_smi);
+
+  Node* map = graph()->NewNode(simplified.LoadField(AccessBuilder::ForMap()),
+                               object, effect, if_is_smi.if_false);
+
+  Node* instance_type = graph()->NewNode(
+      simplified.LoadField(AccessBuilder::ForMapInstanceType()), map, map,
+      if_is_smi.if_false);
+
+  Node* is_value =
+      graph()->NewNode(jsgraph_->machine()->Word32Equal(), instance_type,
+                       jsgraph_->Constant(JS_VALUE_TYPE));
+
+  Diamond if_is_value(graph(), common(), is_value);
+  if_is_value.Nest(if_is_smi, false);
+
+  Node* value =
+      graph()->NewNode(simplified.LoadField(AccessBuilder::ForValue()), object,
+                       instance_type, if_is_value.if_true);
+
+  Node* phi_is_value = if_is_value.Phi(kTypeAny, value, object);
+
+  Node* phi = if_is_smi.Phi(kTypeAny, object, phi_is_value);
+
+  Node* ephi = if_is_smi.EffectPhi(effect, instance_type);
+
+  return ResultAndEffect(phi, ephi);
+}
+}
+}
+}  // namespace v8::internal::compiler
diff --git a/src/compiler/js-intrinsic-builder.h b/src/compiler/js-intrinsic-builder.h
new file mode 100644
index 0000000..9336be6
--- /dev/null
+++ b/src/compiler/js-intrinsic-builder.h
@@ -0,0 +1,40 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_INTRINSIC_BUILDER_H_
+#define V8_COMPILER_JS_INTRINSIC_BUILDER_H_
+
+#include "src/compiler/js-graph.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef std::pair<Node*, Node*> ResultAndEffect;
+
+class JSIntrinsicBuilder {
+ public:
+  explicit JSIntrinsicBuilder(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
+
+  ResultAndEffect BuildGraphFor(Runtime::FunctionId id,
+                                const NodeVector& arguments);
+
+ private:
+  ResultAndEffect BuildMapCheck(Node* object, Node* effect,
+                                InstanceType map_type);
+  ResultAndEffect BuildGraphFor_IsSmi(const NodeVector& arguments);
+  ResultAndEffect BuildGraphFor_IsNonNegativeSmi(const NodeVector& arguments);
+  ResultAndEffect BuildGraphFor_ValueOf(const NodeVector& arguments);
+
+
+  Graph* graph() const { return jsgraph_->graph(); }
+  CommonOperatorBuilder* common() const { return jsgraph_->common(); }
+  JSGraph* jsgraph_;
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_JS_INTRINSIC_BUILDER_H_
diff --git a/src/compiler/js-operator.cc b/src/compiler/js-operator.cc
new file mode 100644
index 0000000..aa76a3b
--- /dev/null
+++ b/src/compiler/js-operator.cc
@@ -0,0 +1,411 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-operator.h"
+
+#include <limits>
+
+#include "src/base/lazy-instance.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool operator==(CallFunctionParameters const& lhs,
+                CallFunctionParameters const& rhs) {
+  return lhs.arity() == rhs.arity() && lhs.flags() == rhs.flags();
+}
+
+
+bool operator!=(CallFunctionParameters const& lhs,
+                CallFunctionParameters const& rhs) {
+  return !(lhs == rhs);
+}
+
+
+size_t hash_value(CallFunctionParameters const& p) {
+  return base::hash_combine(p.arity(), p.flags());
+}
+
+
+std::ostream& operator<<(std::ostream& os, CallFunctionParameters const& p) {
+  return os << p.arity() << ", " << p.flags();
+}
+
+
+const CallFunctionParameters& CallFunctionParametersOf(const Operator* op) {
+  DCHECK_EQ(IrOpcode::kJSCallFunction, op->opcode());
+  return OpParameter<CallFunctionParameters>(op);
+}
+
+
+bool operator==(CallRuntimeParameters const& lhs,
+                CallRuntimeParameters const& rhs) {
+  return lhs.id() == rhs.id() && lhs.arity() == rhs.arity();
+}
+
+
+bool operator!=(CallRuntimeParameters const& lhs,
+                CallRuntimeParameters const& rhs) {
+  return !(lhs == rhs);
+}
+
+
+size_t hash_value(CallRuntimeParameters const& p) {
+  return base::hash_combine(p.id(), p.arity());
+}
+
+
+std::ostream& operator<<(std::ostream& os, CallRuntimeParameters const& p) {
+  return os << p.id() << ", " << p.arity();
+}
+
+
+const CallRuntimeParameters& CallRuntimeParametersOf(const Operator* op) {
+  DCHECK_EQ(IrOpcode::kJSCallRuntime, op->opcode());
+  return OpParameter<CallRuntimeParameters>(op);
+}
+
+
+ContextAccess::ContextAccess(size_t depth, size_t index, bool immutable)
+    : immutable_(immutable),
+      depth_(static_cast<uint16_t>(depth)),
+      index_(static_cast<uint32_t>(index)) {
+  DCHECK(depth <= std::numeric_limits<uint16_t>::max());
+  DCHECK(index <= std::numeric_limits<uint32_t>::max());
+}
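
ContextAccess packs depth into 16 bits and index into 32, with the DCHECKs guarding the narrowing casts. The same packing, checked standalone with asserts:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <limits>

struct PackedContextAccess {
  bool immutable;
  uint16_t depth;
  uint32_t index;
};

PackedContextAccess Pack(size_t depth, size_t index, bool immutable) {
  assert(depth <= std::numeric_limits<uint16_t>::max());
  assert(index <= std::numeric_limits<uint32_t>::max());
  return {immutable, static_cast<uint16_t>(depth),
          static_cast<uint32_t>(index)};
}

int main() {
  PackedContextAccess a = Pack(2, 7, true);
  assert(a.depth == 2 && a.index == 7 && a.immutable);
}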
+
+
+bool operator==(ContextAccess const& lhs, ContextAccess const& rhs) {
+  return lhs.depth() == rhs.depth() && lhs.index() == rhs.index() &&
+         lhs.immutable() == rhs.immutable();
+}
+
+
+bool operator!=(ContextAccess const& lhs, ContextAccess const& rhs) {
+  return !(lhs == rhs);
+}
+
+
+size_t hash_value(ContextAccess const& access) {
+  return base::hash_combine(access.depth(), access.index(), access.immutable());
+}
+
+
+std::ostream& operator<<(std::ostream& os, ContextAccess const& access) {
+  return os << access.depth() << ", " << access.index() << ", "
+            << access.immutable();
+}
+
+
+ContextAccess const& ContextAccessOf(Operator const* op) {
+  DCHECK(op->opcode() == IrOpcode::kJSLoadContext ||
+         op->opcode() == IrOpcode::kJSStoreContext);
+  return OpParameter<ContextAccess>(op);
+}
+
+
+bool operator==(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
+  return lhs.slot().ToInt() == rhs.slot().ToInt() &&
+         lhs.vector().is_identical_to(rhs.vector());
+}
+
+
+size_t hash_value(VectorSlotPair const& p) {
+  // TODO(mvstanton): include the vector in the hash.
+  base::hash<int> h;
+  return h(p.slot().ToInt());
+}
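
base::hash_combine mixes each field into a running seed. A boost-style approximation of what such a combiner does (an assumption about the shape, not V8's exact implementation):

#include <cstddef>
#include <functional>
#include <iostream>

template <typename T>
void hash_combine(std::size_t& seed, const T& value) {
  // The usual golden-ratio mixing constant plus shifts to spread bits.
  seed ^= std::hash<T>{}(value) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}

int main() {
  std::size_t seed = 0;
  hash_combine(seed, 2);     // e.g. arity
  hash_combine(seed, 0x11);  // e.g. flags
  std::cout << seed << "\n";
}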
+
+
+bool operator==(LoadNamedParameters const& lhs,
+                LoadNamedParameters const& rhs) {
+  return lhs.name() == rhs.name() &&
+         lhs.contextual_mode() == rhs.contextual_mode() &&
+         lhs.feedback() == rhs.feedback();
+}
+
+
+bool operator!=(LoadNamedParameters const& lhs,
+                LoadNamedParameters const& rhs) {
+  return !(lhs == rhs);
+}
+
+
+size_t hash_value(LoadNamedParameters const& p) {
+  return base::hash_combine(p.name(), p.contextual_mode(), p.feedback());
+}
+
+
+std::ostream& operator<<(std::ostream& os, LoadNamedParameters const& p) {
+  return os << Brief(*p.name().handle()) << ", " << p.contextual_mode();
+}
+
+
+std::ostream& operator<<(std::ostream& os, LoadPropertyParameters const& p) {
+  // Nothing special to print.
+  return os;
+}
+
+
+bool operator==(LoadPropertyParameters const& lhs,
+                LoadPropertyParameters const& rhs) {
+  return lhs.feedback() == rhs.feedback();
+}
+
+
+bool operator!=(LoadPropertyParameters const& lhs,
+                LoadPropertyParameters const& rhs) {
+  return !(lhs == rhs);
+}
+
+
+const LoadPropertyParameters& LoadPropertyParametersOf(const Operator* op) {
+  DCHECK_EQ(IrOpcode::kJSLoadProperty, op->opcode());
+  return OpParameter<LoadPropertyParameters>(op);
+}
+
+
+size_t hash_value(LoadPropertyParameters const& p) {
+  return hash_value(p.feedback());
+}
+
+
+const LoadNamedParameters& LoadNamedParametersOf(const Operator* op) {
+  DCHECK_EQ(IrOpcode::kJSLoadNamed, op->opcode());
+  return OpParameter<LoadNamedParameters>(op);
+}
+
+
+bool operator==(StoreNamedParameters const& lhs,
+                StoreNamedParameters const& rhs) {
+  return lhs.strict_mode() == rhs.strict_mode() && lhs.name() == rhs.name();
+}
+
+
+bool operator!=(StoreNamedParameters const& lhs,
+                StoreNamedParameters const& rhs) {
+  return !(lhs == rhs);
+}
+
+
+size_t hash_value(StoreNamedParameters const& p) {
+  return base::hash_combine(p.strict_mode(), p.name());
+}
+
+
+std::ostream& operator<<(std::ostream& os, StoreNamedParameters const& p) {
+  return os << p.strict_mode() << ", " << Brief(*p.name().handle());
+}
+
+
+const StoreNamedParameters& StoreNamedParametersOf(const Operator* op) {
+  DCHECK_EQ(IrOpcode::kJSStoreNamed, op->opcode());
+  return OpParameter<StoreNamedParameters>(op);
+}
+
+
+#define CACHED_OP_LIST(V)                                 \
+  V(Equal, Operator::kNoProperties, 2, 1)                 \
+  V(NotEqual, Operator::kNoProperties, 2, 1)              \
+  V(StrictEqual, Operator::kPure, 2, 1)                   \
+  V(StrictNotEqual, Operator::kPure, 2, 1)                \
+  V(LessThan, Operator::kNoProperties, 2, 1)              \
+  V(GreaterThan, Operator::kNoProperties, 2, 1)           \
+  V(LessThanOrEqual, Operator::kNoProperties, 2, 1)       \
+  V(GreaterThanOrEqual, Operator::kNoProperties, 2, 1)    \
+  V(BitwiseOr, Operator::kNoProperties, 2, 1)             \
+  V(BitwiseXor, Operator::kNoProperties, 2, 1)            \
+  V(BitwiseAnd, Operator::kNoProperties, 2, 1)            \
+  V(ShiftLeft, Operator::kNoProperties, 2, 1)             \
+  V(ShiftRight, Operator::kNoProperties, 2, 1)            \
+  V(ShiftRightLogical, Operator::kNoProperties, 2, 1)     \
+  V(Add, Operator::kNoProperties, 2, 1)                   \
+  V(Subtract, Operator::kNoProperties, 2, 1)              \
+  V(Multiply, Operator::kNoProperties, 2, 1)              \
+  V(Divide, Operator::kNoProperties, 2, 1)                \
+  V(Modulus, Operator::kNoProperties, 2, 1)               \
+  V(UnaryNot, Operator::kPure, 1, 1)                      \
+  V(ToBoolean, Operator::kPure, 1, 1)                     \
+  V(ToNumber, Operator::kNoProperties, 1, 1)              \
+  V(ToString, Operator::kNoProperties, 1, 1)              \
+  V(ToName, Operator::kNoProperties, 1, 1)                \
+  V(ToObject, Operator::kNoProperties, 1, 1)              \
+  V(Yield, Operator::kNoProperties, 1, 1)                 \
+  V(Create, Operator::kEliminatable, 0, 1)                \
+  V(HasProperty, Operator::kNoProperties, 2, 1)           \
+  V(TypeOf, Operator::kPure, 1, 1)                        \
+  V(InstanceOf, Operator::kNoProperties, 2, 1)            \
+  V(Debugger, Operator::kNoProperties, 0, 0)              \
+  V(CreateFunctionContext, Operator::kNoProperties, 1, 1) \
+  V(CreateWithContext, Operator::kNoProperties, 2, 1)     \
+  V(CreateBlockContext, Operator::kNoProperties, 2, 1)    \
+  V(CreateModuleContext, Operator::kNoProperties, 2, 1)   \
+  V(CreateScriptContext, Operator::kNoProperties, 2, 1)
+
+
+struct JSOperatorGlobalCache FINAL {
+#define CACHED(Name, properties, value_input_count, value_output_count)  \
+  struct Name##Operator FINAL : public Operator {                        \
+    Name##Operator()                                                     \
+        : Operator(IrOpcode::kJS##Name, properties, "JS" #Name,          \
+                   value_input_count, Operator::ZeroIfPure(properties),  \
+                   Operator::ZeroIfPure(properties), value_output_count, \
+                   Operator::ZeroIfPure(properties), 0) {}               \
+  };                                                                     \
+  Name##Operator k##Name##Operator;
+  CACHED_OP_LIST(CACHED)
+#undef CACHED
+
+  template <StrictMode kStrictMode>
+  struct StorePropertyOperator FINAL : public Operator1<StrictMode> {
+    StorePropertyOperator()
+        : Operator1<StrictMode>(IrOpcode::kJSStoreProperty,
+                                Operator::kNoProperties, "JSStoreProperty", 3,
+                                1, 1, 0, 1, 0, kStrictMode) {}
+  };
+  StorePropertyOperator<SLOPPY> kStorePropertySloppyOperator;
+  StorePropertyOperator<STRICT> kStorePropertyStrictOperator;
+};
+
+
+static base::LazyInstance<JSOperatorGlobalCache>::type kCache =
+    LAZY_INSTANCE_INITIALIZER;
+
+
+JSOperatorBuilder::JSOperatorBuilder(Zone* zone)
+    : cache_(kCache.Get()), zone_(zone) {}
+
+
+#define CACHED(Name, properties, value_input_count, value_output_count) \
+  const Operator* JSOperatorBuilder::Name() {                           \
+    return &cache_.k##Name##Operator;                                   \
+  }
+CACHED_OP_LIST(CACHED)
+#undef CACHED
+
+
+const Operator* JSOperatorBuilder::CallFunction(size_t arity,
+                                                CallFunctionFlags flags) {
+  CallFunctionParameters parameters(arity, flags);
+  return new (zone()) Operator1<CallFunctionParameters>(   // --
+      IrOpcode::kJSCallFunction, Operator::kNoProperties,  // opcode
+      "JSCallFunction",                                    // name
+      parameters.arity(), 1, 1, 1, 1, 0,                   // inputs/outputs
+      parameters);                                         // parameter
+}
+
+
+const Operator* JSOperatorBuilder::CallRuntime(Runtime::FunctionId id,
+                                               size_t arity) {
+  CallRuntimeParameters parameters(id, arity);
+  const Runtime::Function* f = Runtime::FunctionForId(parameters.id());
+  DCHECK(f->nargs == -1 || f->nargs == static_cast<int>(parameters.arity()));
+  return new (zone()) Operator1<CallRuntimeParameters>(   // --
+      IrOpcode::kJSCallRuntime, Operator::kNoProperties,  // opcode
+      "JSCallRuntime",                                    // name
+      parameters.arity(), 1, 1, f->result_size, 1, 0,     // inputs/outputs
+      parameters);                                        // parameter
+}
+
+
+const Operator* JSOperatorBuilder::CallConstruct(int arguments) {
+  return new (zone()) Operator1<int>(                       // --
+      IrOpcode::kJSCallConstruct, Operator::kNoProperties,  // opcode
+      "JSCallConstruct",                                    // name
+      arguments, 1, 1, 1, 1, 0,                             // counts
+      arguments);                                           // parameter
+}
+
+
+const Operator* JSOperatorBuilder::LoadNamed(const Unique<Name>& name,
+                                             const VectorSlotPair& feedback,
+                                             ContextualMode contextual_mode) {
+  LoadNamedParameters parameters(name, feedback, contextual_mode);
+  return new (zone()) Operator1<LoadNamedParameters>(   // --
+      IrOpcode::kJSLoadNamed, Operator::kNoProperties,  // opcode
+      "JSLoadNamed",                                    // name
+      1, 1, 1, 1, 1, 0,                                 // counts
+      parameters);                                      // parameter
+}
+
+
+const Operator* JSOperatorBuilder::LoadProperty(
+    const VectorSlotPair& feedback) {
+  LoadPropertyParameters parameters(feedback);
+  return new (zone()) Operator1<LoadPropertyParameters>(   // --
+      IrOpcode::kJSLoadProperty, Operator::kNoProperties,  // opcode
+      "JSLoadProperty",                                    // name
+      2, 1, 1, 1, 1, 0,                                    // counts
+      parameters);                                         // parameter
+}
+
+
+const Operator* JSOperatorBuilder::StoreProperty(StrictMode strict_mode) {
+  switch (strict_mode) {
+    case SLOPPY:
+      return &cache_.kStorePropertySloppyOperator;
+    case STRICT:
+      return &cache_.kStorePropertyStrictOperator;
+  }
+  UNREACHABLE();
+  return nullptr;
+}
+
+
+const Operator* JSOperatorBuilder::StoreNamed(StrictMode strict_mode,
+                                              const Unique<Name>& name) {
+  StoreNamedParameters parameters(strict_mode, name);
+  return new (zone()) Operator1<StoreNamedParameters>(   // --
+      IrOpcode::kJSStoreNamed, Operator::kNoProperties,  // opcode
+      "JSStoreNamed",                                    // name
+      2, 1, 1, 0, 1, 0,                                  // counts
+      parameters);                                       // parameter
+}
+
+
+const Operator* JSOperatorBuilder::DeleteProperty(StrictMode strict_mode) {
+  return new (zone()) Operator1<StrictMode>(                 // --
+      IrOpcode::kJSDeleteProperty, Operator::kNoProperties,  // opcode
+      "JSDeleteProperty",                                    // name
+      2, 1, 1, 1, 1, 0,                                      // counts
+      strict_mode);                                          // parameter
+}
+
+
+const Operator* JSOperatorBuilder::LoadContext(size_t depth, size_t index,
+                                               bool immutable) {
+  ContextAccess access(depth, index, immutable);
+  return new (zone()) Operator1<ContextAccess>(      // --
+      IrOpcode::kJSLoadContext, Operator::kNoWrite,  // opcode
+      "JSLoadContext",                               // name
+      1, 1, 0, 1, 1, 0,                              // counts
+      access);                                       // parameter
+}
+
+
+const Operator* JSOperatorBuilder::StoreContext(size_t depth, size_t index) {
+  ContextAccess access(depth, index, false);
+  return new (zone()) Operator1<ContextAccess>(      // --
+      IrOpcode::kJSStoreContext, Operator::kNoRead,  // opcode
+      "JSStoreContext",                              // name
+      2, 1, 1, 0, 1, 0,                              // counts
+      access);                                       // parameter
+}
+
+
+const Operator* JSOperatorBuilder::CreateCatchContext(
+    const Unique<String>& name) {
+  return new (zone()) Operator1<Unique<String>>(                 // --
+      IrOpcode::kJSCreateCatchContext, Operator::kNoProperties,  // opcode
+      "JSCreateCatchContext",                                    // name
+      1, 1, 1, 1, 1, 0,                                          // counts
+      name);                                                     // parameter
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
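
A note on the pattern introduced above: CACHED_OP_LIST is an X-macro that
stamps out one statically allocated Operator subclass per parameterless JS
operator, and base::LazyInstance constructs the whole set exactly once, so
every JSOperatorBuilder hands out shared pointers without touching its Zone.
A standalone sketch of the idea follows; the types and list entries are
simplified stand-ins for illustration, not the actual V8 definitions.

#include <cstdio>

struct Operator {
  const char* name;
  int value_inputs;
  int value_outputs;
  Operator(const char* n, int in, int out)
      : name(n), value_inputs(in), value_outputs(out) {}
};

// The operator list is written once; each client expands it with its own V.
#define CACHED_OP_LIST(V) \
  V(Equal, 2, 1)          \
  V(NotEqual, 2, 1)       \
  V(ToNumber, 1, 1)

struct OperatorGlobalCache {
#define CACHED(Name, inputs, outputs) \
  Operator k##Name##Operator{"JS" #Name, inputs, outputs};
  CACHED_OP_LIST(CACHED)
#undef CACHED
};

// One cache shared by all graphs; a function-local static stands in for
// base::LazyInstance in this sketch.
const OperatorGlobalCache& Cache() {
  static OperatorGlobalCache cache;
  return cache;
}

int main() {
  const Operator* op = &Cache().kEqualOperator;
  std::printf("%s: %d value inputs, %d value outputs\n", op->name,
              op->value_inputs, op->value_outputs);
  return 0;
}
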
diff --git a/src/compiler/js-operator.h b/src/compiler/js-operator.h
index b95467f..e716a8e 100644
--- a/src/compiler/js-operator.h
+++ b/src/compiler/js-operator.h
@@ -5,28 +5,77 @@
 #ifndef V8_COMPILER_JS_OPERATOR_H_
 #define V8_COMPILER_JS_OPERATOR_H_
 
-#include "src/compiler/linkage.h"
-#include "src/compiler/opcodes.h"
-#include "src/compiler/operator.h"
+#include "src/runtime/runtime.h"
 #include "src/unique.h"
-#include "src/zone.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
+// Forward declarations.
+class Operator;
+struct JSOperatorGlobalCache;
+
+
+// Defines the arity and the call flags for a JavaScript function call. This is
+// used as a parameter by JSCallFunction operators.
+class CallFunctionParameters FINAL {
+ public:
+  CallFunctionParameters(size_t arity, CallFunctionFlags flags)
+      : arity_(arity), flags_(flags) {}
+
+  size_t arity() const { return arity_; }
+  CallFunctionFlags flags() const { return flags_; }
+
+ private:
+  const size_t arity_;
+  const CallFunctionFlags flags_;
+};
+
+bool operator==(CallFunctionParameters const&, CallFunctionParameters const&);
+bool operator!=(CallFunctionParameters const&, CallFunctionParameters const&);
+
+size_t hash_value(CallFunctionParameters const&);
+
+std::ostream& operator<<(std::ostream&, CallFunctionParameters const&);
+
+const CallFunctionParameters& CallFunctionParametersOf(const Operator* op);
+
+
+// Defines the arity and the ID for a runtime function call. This is used as a
+// parameter by JSCallRuntime operators.
+class CallRuntimeParameters FINAL {
+ public:
+  CallRuntimeParameters(Runtime::FunctionId id, size_t arity)
+      : id_(id), arity_(arity) {}
+
+  Runtime::FunctionId id() const { return id_; }
+  size_t arity() const { return arity_; }
+
+ private:
+  const Runtime::FunctionId id_;
+  const size_t arity_;
+};
+
+bool operator==(CallRuntimeParameters const&, CallRuntimeParameters const&);
+bool operator!=(CallRuntimeParameters const&, CallRuntimeParameters const&);
+
+size_t hash_value(CallRuntimeParameters const&);
+
+std::ostream& operator<<(std::ostream&, CallRuntimeParameters const&);
+
+const CallRuntimeParameters& CallRuntimeParametersOf(const Operator* op);
+
+
 // Defines the location of a context slot relative to a specific scope. This is
 // used as a parameter by JSLoadContext and JSStoreContext operators and allows
 // accessing a context-allocated variable without keeping track of the scope.
-class ContextAccess {
+class ContextAccess FINAL {
  public:
-  ContextAccess(int depth, int index, bool immutable)
-      : immutable_(immutable), depth_(depth), index_(index) {
-    DCHECK(0 <= depth && depth <= kMaxUInt16);
-    DCHECK(0 <= index && static_cast<uint32_t>(index) <= kMaxUInt32);
-  }
-  int depth() const { return depth_; }
-  int index() const { return index_; }
+  ContextAccess(size_t depth, size_t index, bool immutable);
+
+  size_t depth() const { return depth_; }
+  size_t index() const { return index_; }
   bool immutable() const { return immutable_; }
 
  private:
@@ -37,193 +86,188 @@
   const uint32_t index_;
 };
 
-// Defines the property being loaded from an object by a named load. This is
-// used as a parameter by JSLoadNamed operators.
-struct LoadNamedParameters {
-  Unique<Name> name;
-  ContextualMode contextual_mode;
+bool operator==(ContextAccess const&, ContextAccess const&);
+bool operator!=(ContextAccess const&, ContextAccess const&);
+
+size_t hash_value(ContextAccess const&);
+
+std::ostream& operator<<(std::ostream&, ContextAccess const&);
+
+ContextAccess const& ContextAccessOf(Operator const*);
+
+
+class VectorSlotPair {
+ public:
+  VectorSlotPair(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
+      : vector_(vector), slot_(slot) {}
+
+  Handle<TypeFeedbackVector> vector() const { return vector_; }
+  FeedbackVectorICSlot slot() const { return slot_; }
+
+  int index() const { return vector_->GetIndex(slot_); }
+
+ private:
+  const Handle<TypeFeedbackVector> vector_;
+  const FeedbackVectorICSlot slot_;
 };
 
-// Defines the arity and the call flags for a JavaScript function call. This is
-// used as a parameter by JSCall operators.
-struct CallParameters {
-  int arity;
-  CallFunctionFlags flags;
+
+bool operator==(VectorSlotPair const& lhs, VectorSlotPair const& rhs);
+
+
+// Defines the property being loaded from an object by a named load. This is
+// used as a parameter by JSLoadNamed operators.
+class LoadNamedParameters FINAL {
+ public:
+  LoadNamedParameters(const Unique<Name>& name, const VectorSlotPair& feedback,
+                      ContextualMode contextual_mode)
+      : name_(name), contextual_mode_(contextual_mode), feedback_(feedback) {}
+
+  const Unique<Name>& name() const { return name_; }
+  ContextualMode contextual_mode() const { return contextual_mode_; }
+
+  const VectorSlotPair& feedback() const { return feedback_; }
+
+ private:
+  const Unique<Name> name_;
+  const ContextualMode contextual_mode_;
+  const VectorSlotPair feedback_;
 };
 
+bool operator==(LoadNamedParameters const&, LoadNamedParameters const&);
+bool operator!=(LoadNamedParameters const&, LoadNamedParameters const&);
+
+size_t hash_value(LoadNamedParameters const&);
+
+std::ostream& operator<<(std::ostream&, LoadNamedParameters const&);
+
+const LoadNamedParameters& LoadNamedParametersOf(const Operator* op);
+
+
+// Defines the property being loaded from an object. This is
+// used as a parameter by JSLoadProperty operators.
+class LoadPropertyParameters FINAL {
+ public:
+  explicit LoadPropertyParameters(const VectorSlotPair& feedback)
+      : feedback_(feedback) {}
+
+  const VectorSlotPair& feedback() const { return feedback_; }
+
+ private:
+  const VectorSlotPair feedback_;
+};
+
+bool operator==(LoadPropertyParameters const&, LoadPropertyParameters const&);
+bool operator!=(LoadPropertyParameters const&, LoadPropertyParameters const&);
+
+size_t hash_value(LoadPropertyParameters const&);
+
+std::ostream& operator<<(std::ostream&, LoadPropertyParameters const&);
+
+const LoadPropertyParameters& LoadPropertyParametersOf(const Operator* op);
+
+
 // Defines the property being stored to an object by a named store. This is
 // used as a parameter by JSStoreNamed operators.
-struct StoreNamedParameters {
-  StrictMode strict_mode;
-  Unique<Name> name;
+class StoreNamedParameters FINAL {
+ public:
+  StoreNamedParameters(StrictMode strict_mode, const Unique<Name>& name)
+      : strict_mode_(strict_mode), name_(name) {}
+
+  StrictMode strict_mode() const { return strict_mode_; }
+  const Unique<Name>& name() const { return name_; }
+
+ private:
+  const StrictMode strict_mode_;
+  const Unique<Name> name_;
 };
 
+bool operator==(StoreNamedParameters const&, StoreNamedParameters const&);
+bool operator!=(StoreNamedParameters const&, StoreNamedParameters const&);
+
+size_t hash_value(StoreNamedParameters const&);
+
+std::ostream& operator<<(std::ostream&, StoreNamedParameters const&);
+
+const StoreNamedParameters& StoreNamedParametersOf(const Operator* op);
+
+
 // Interface for building JavaScript-level operators, e.g. directly from the
 // AST. Most operators have no parameters, thus can be globally shared for all
 // graphs.
-class JSOperatorBuilder {
+class JSOperatorBuilder FINAL : public ZoneObject {
  public:
-  explicit JSOperatorBuilder(Zone* zone) : zone_(zone) {}
+  explicit JSOperatorBuilder(Zone* zone);
 
-#define SIMPLE(name, properties, inputs, outputs) \
-  return new (zone_)                              \
-      SimpleOperator(IrOpcode::k##name, properties, inputs, outputs, #name);
+  const Operator* Equal();
+  const Operator* NotEqual();
+  const Operator* StrictEqual();
+  const Operator* StrictNotEqual();
+  const Operator* LessThan();
+  const Operator* GreaterThan();
+  const Operator* LessThanOrEqual();
+  const Operator* GreaterThanOrEqual();
+  const Operator* BitwiseOr();
+  const Operator* BitwiseXor();
+  const Operator* BitwiseAnd();
+  const Operator* ShiftLeft();
+  const Operator* ShiftRight();
+  const Operator* ShiftRightLogical();
+  const Operator* Add();
+  const Operator* Subtract();
+  const Operator* Multiply();
+  const Operator* Divide();
+  const Operator* Modulus();
 
-#define NOPROPS(name, inputs, outputs) \
-  SIMPLE(name, Operator::kNoProperties, inputs, outputs)
+  const Operator* UnaryNot();
+  const Operator* ToBoolean();
+  const Operator* ToNumber();
+  const Operator* ToString();
+  const Operator* ToName();
+  const Operator* ToObject();
+  const Operator* Yield();
 
-#define OP1(name, ptype, pname, properties, inputs, outputs)                 \
-  return new (zone_) Operator1<ptype>(IrOpcode::k##name, properties, inputs, \
-                                      outputs, #name, pname)
+  const Operator* Create();
 
-#define BINOP(name) NOPROPS(name, 2, 1)
-#define UNOP(name) NOPROPS(name, 1, 1)
+  const Operator* CallFunction(size_t arity, CallFunctionFlags flags);
+  const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
 
-#define PURE_BINOP(name) SIMPLE(name, Operator::kPure, 2, 1)
+  const Operator* CallConstruct(int arguments);
 
-  const Operator* Equal() { BINOP(JSEqual); }
-  const Operator* NotEqual() { BINOP(JSNotEqual); }
-  const Operator* StrictEqual() { PURE_BINOP(JSStrictEqual); }
-  const Operator* StrictNotEqual() { PURE_BINOP(JSStrictNotEqual); }
-  const Operator* LessThan() { BINOP(JSLessThan); }
-  const Operator* GreaterThan() { BINOP(JSGreaterThan); }
-  const Operator* LessThanOrEqual() { BINOP(JSLessThanOrEqual); }
-  const Operator* GreaterThanOrEqual() { BINOP(JSGreaterThanOrEqual); }
-  const Operator* BitwiseOr() { BINOP(JSBitwiseOr); }
-  const Operator* BitwiseXor() { BINOP(JSBitwiseXor); }
-  const Operator* BitwiseAnd() { BINOP(JSBitwiseAnd); }
-  const Operator* ShiftLeft() { BINOP(JSShiftLeft); }
-  const Operator* ShiftRight() { BINOP(JSShiftRight); }
-  const Operator* ShiftRightLogical() { BINOP(JSShiftRightLogical); }
-  const Operator* Add() { BINOP(JSAdd); }
-  const Operator* Subtract() { BINOP(JSSubtract); }
-  const Operator* Multiply() { BINOP(JSMultiply); }
-  const Operator* Divide() { BINOP(JSDivide); }
-  const Operator* Modulus() { BINOP(JSModulus); }
+  const Operator* LoadProperty(const VectorSlotPair& feedback);
+  const Operator* LoadNamed(const Unique<Name>& name,
+                            const VectorSlotPair& feedback,
+                            ContextualMode contextual_mode = NOT_CONTEXTUAL);
 
-  const Operator* UnaryNot() { UNOP(JSUnaryNot); }
-  const Operator* ToBoolean() { UNOP(JSToBoolean); }
-  const Operator* ToNumber() { UNOP(JSToNumber); }
-  const Operator* ToString() { UNOP(JSToString); }
-  const Operator* ToName() { UNOP(JSToName); }
-  const Operator* ToObject() { UNOP(JSToObject); }
-  const Operator* Yield() { UNOP(JSYield); }
+  const Operator* StoreProperty(StrictMode strict_mode);
+  const Operator* StoreNamed(StrictMode strict_mode, const Unique<Name>& name);
 
-  const Operator* Create() { SIMPLE(JSCreate, Operator::kEliminatable, 0, 1); }
+  const Operator* DeleteProperty(StrictMode strict_mode);
 
-  const Operator* Call(int arguments, CallFunctionFlags flags) {
-    CallParameters parameters = {arguments, flags};
-    OP1(JSCallFunction, CallParameters, parameters, Operator::kNoProperties,
-        arguments, 1);
-  }
+  const Operator* HasProperty();
 
-  const Operator* CallNew(int arguments) {
-    return new (zone_)
-        Operator1<int>(IrOpcode::kJSCallConstruct, Operator::kNoProperties,
-                       arguments, 1, "JSCallConstruct", arguments);
-  }
+  const Operator* LoadContext(size_t depth, size_t index, bool immutable);
+  const Operator* StoreContext(size_t depth, size_t index);
 
-  const Operator* LoadProperty() { BINOP(JSLoadProperty); }
-  const Operator* LoadNamed(Unique<Name> name,
-                            ContextualMode contextual_mode = NOT_CONTEXTUAL) {
-    LoadNamedParameters parameters = {name, contextual_mode};
-    OP1(JSLoadNamed, LoadNamedParameters, parameters, Operator::kNoProperties,
-        1, 1);
-  }
-
-  const Operator* StoreProperty(StrictMode strict_mode) {
-    OP1(JSStoreProperty, StrictMode, strict_mode, Operator::kNoProperties, 3,
-        0);
-  }
-
-  const Operator* StoreNamed(StrictMode strict_mode, Unique<Name> name) {
-    StoreNamedParameters parameters = {strict_mode, name};
-    OP1(JSStoreNamed, StoreNamedParameters, parameters, Operator::kNoProperties,
-        2, 0);
-  }
-
-  const Operator* DeleteProperty(StrictMode strict_mode) {
-    OP1(JSDeleteProperty, StrictMode, strict_mode, Operator::kNoProperties, 2,
-        1);
-  }
-
-  const Operator* HasProperty() { NOPROPS(JSHasProperty, 2, 1); }
-
-  const Operator* LoadContext(uint16_t depth, uint32_t index, bool immutable) {
-    ContextAccess access(depth, index, immutable);
-    OP1(JSLoadContext, ContextAccess, access,
-        Operator::kEliminatable | Operator::kNoWrite, 1, 1);
-  }
-  const Operator* StoreContext(uint16_t depth, uint32_t index) {
-    ContextAccess access(depth, index, false);
-    OP1(JSStoreContext, ContextAccess, access, Operator::kNoProperties, 2, 0);
-  }
-
-  const Operator* TypeOf() { SIMPLE(JSTypeOf, Operator::kPure, 1, 1); }
-  const Operator* InstanceOf() { NOPROPS(JSInstanceOf, 2, 1); }
-  const Operator* Debugger() { NOPROPS(JSDebugger, 0, 0); }
+  const Operator* TypeOf();
+  const Operator* InstanceOf();
+  const Operator* Debugger();
 
   // TODO(titzer): nail down the static parts of each of these context flavors.
-  const Operator* CreateFunctionContext() {
-    NOPROPS(JSCreateFunctionContext, 1, 1);
-  }
-  const Operator* CreateCatchContext(Unique<String> name) {
-    OP1(JSCreateCatchContext, Unique<String>, name, Operator::kNoProperties, 1,
-        1);
-  }
-  const Operator* CreateWithContext() { NOPROPS(JSCreateWithContext, 2, 1); }
-  const Operator* CreateBlockContext() { NOPROPS(JSCreateBlockContext, 2, 1); }
-  const Operator* CreateModuleContext() {
-    NOPROPS(JSCreateModuleContext, 2, 1);
-  }
-  const Operator* CreateGlobalContext() {
-    NOPROPS(JSCreateGlobalContext, 2, 1);
-  }
-
-  const Operator* Runtime(Runtime::FunctionId function, int arguments) {
-    const Runtime::Function* f = Runtime::FunctionForId(function);
-    DCHECK(f->nargs == -1 || f->nargs == arguments);
-    OP1(JSCallRuntime, Runtime::FunctionId, function, Operator::kNoProperties,
-        arguments, f->result_size);
-  }
-
-#undef SIMPLE
-#undef NOPROPS
-#undef OP1
-#undef BINOP
-#undef UNOP
+  const Operator* CreateFunctionContext();
+  const Operator* CreateCatchContext(const Unique<String>& name);
+  const Operator* CreateWithContext();
+  const Operator* CreateBlockContext();
+  const Operator* CreateModuleContext();
+  const Operator* CreateScriptContext();
 
  private:
-  Zone* zone_;
-};
+  Zone* zone() const { return zone_; }
 
-// Specialization for static parameters of type {ContextAccess}.
-template <>
-struct StaticParameterTraits<ContextAccess> {
-  static OStream& PrintTo(OStream& os, ContextAccess val) {  // NOLINT
-    return os << val.depth() << "," << val.index()
-              << (val.immutable() ? ",imm" : "");
-  }
-  static int HashCode(ContextAccess val) {
-    return (val.depth() << 16) | (val.index() & 0xffff);
-  }
-  static bool Equals(ContextAccess a, ContextAccess b) {
-    return a.immutable() == b.immutable() && a.depth() == b.depth() &&
-           a.index() == b.index();
-  }
-};
+  const JSOperatorGlobalCache& cache_;
+  Zone* const zone_;
 
-// Specialization for static parameters of type {Runtime::FunctionId}.
-template <>
-struct StaticParameterTraits<Runtime::FunctionId> {
-  static OStream& PrintTo(OStream& os, Runtime::FunctionId val) {  // NOLINT
-    const Runtime::Function* f = Runtime::FunctionForId(val);
-    return os << (f->name ? f->name : "?Runtime?");
-  }
-  static int HashCode(Runtime::FunctionId val) { return static_cast<int>(val); }
-  static bool Equals(Runtime::FunctionId a, Runtime::FunctionId b) {
-    return a == b;
-  }
+  DISALLOW_COPY_AND_ASSIGN(JSOperatorBuilder);
 };
 
 }  // namespace compiler
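
Each parameter class declared above pairs with a free accessor
(CallFunctionParametersOf, ContextAccessOf, and so on) that recovers the
parameter from a plain const Operator*. The idiom is a downcast to
Operator1<T> guarded by the opcode. A minimal self-contained sketch, using
simplified stand-ins rather than V8's real classes:

#include <cassert>
#include <cstddef>
#include <cstdio>

struct Operator {
  explicit Operator(int opcode) : opcode_(opcode) {}
  virtual ~Operator() {}
  int opcode() const { return opcode_; }

 private:
  int opcode_;
};

// An operator carrying one static parameter of type T.
template <typename T>
struct Operator1 : Operator {
  Operator1(int opcode, const T& parameter)
      : Operator(opcode), parameter_(parameter) {}
  const T& parameter() const { return parameter_; }

 private:
  T parameter_;
};

struct CallFunctionParameters {
  size_t arity;
  int flags;
};

const int kJSCallFunction = 42;  // arbitrary opcode for the sketch

// Mirrors CallFunctionParametersOf(): the opcode check guards the downcast.
const CallFunctionParameters& CallFunctionParametersOf(const Operator* op) {
  assert(op->opcode() == kJSCallFunction);
  return static_cast<const Operator1<CallFunctionParameters>*>(op)
      ->parameter();
}

int main() {
  Operator1<CallFunctionParameters> call(kJSCallFunction,
                                         CallFunctionParameters{3, 0});
  std::printf("arity = %zu\n", CallFunctionParametersOf(&call).arity);
  return 0;
}
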
diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc
index be12534..7618375 100644
--- a/src/compiler/js-typed-lowering.cc
+++ b/src/compiler/js-typed-lowering.cc
@@ -4,9 +4,10 @@
 
 #include "src/compiler/access-builder.h"
 #include "src/compiler/graph-inl.h"
-#include "src/compiler/js-builtin-reducer.h"
+#include "src/compiler/js-graph.h"
 #include "src/compiler/js-typed-lowering.h"
 #include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties-inl.h"
 #include "src/types.h"
 
@@ -17,7 +18,6 @@
 // TODO(turbofan): js-typed-lowering improvements possible
 // - immediately put in type bounds for all new nodes
 // - relax effects from generic but not-side-effecting operations
-// - relax effects for ToNumber(mixed)
 
 
 // Relax the effects of {node} by immediately replacing effect uses of {node}
@@ -29,7 +29,32 @@
 }
 
 
-JSTypedLowering::~JSTypedLowering() {}
+JSTypedLowering::JSTypedLowering(JSGraph* jsgraph, Zone* zone)
+    : jsgraph_(jsgraph), simplified_(graph()->zone()), conversions_(zone) {
+  Handle<Object> zero = factory()->NewNumber(0.0);
+  Handle<Object> one = factory()->NewNumber(1.0);
+  zero_range_ = Type::Range(zero, zero, graph()->zone());
+  one_range_ = Type::Range(one, one, graph()->zone());
+  Handle<Object> thirtyone = factory()->NewNumber(31.0);
+  zero_thirtyone_range_ = Type::Range(zero, thirtyone, graph()->zone());
+  // TODO(jarin): Can the type system express these shifted ranges directly,
+  // so that the work-arounds below become unnecessary?
+  shifted_int32_ranges_[0] = Type::Signed32();
+  if (SmiValuesAre31Bits()) {
+    shifted_int32_ranges_[1] = Type::SignedSmall();
+    for (size_t k = 2; k < arraysize(shifted_int32_ranges_); ++k) {
+      Handle<Object> min = factory()->NewNumber(kMinInt / (1 << k));
+      Handle<Object> max = factory()->NewNumber(kMaxInt / (1 << k));
+      shifted_int32_ranges_[k] = Type::Range(min, max, graph()->zone());
+    }
+  } else {
+    for (size_t k = 1; k < arraysize(shifted_int32_ranges_); ++k) {
+      Handle<Object> min = factory()->NewNumber(kMinInt / (1 << k));
+      Handle<Object> max = factory()->NewNumber(kMaxInt / (1 << k));
+      shifted_int32_ranges_[k] = Type::Range(min, max, graph()->zone());
+    }
+  }
+}
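
The shifted_int32_ranges_ table built by this constructor records, for each
element-size log2 k, the range of keys whose byte offset key << k still fits
in int32; the typed-array lowerings below use it to accept a key without
further conversion. A standalone illustration of the arithmetic (plain C++,
not part of the patch):

#include <climits>
#include <cstdio>

int main() {
  for (int k = 0; k < 4; ++k) {
    long long min = static_cast<long long>(INT_MIN) / (1 << k);
    long long max = static_cast<long long>(INT_MAX) / (1 << k);
    // Scaling the endpoints by the element size stays inside int32, so a
    // key typed into [min, max] is safe to shift left by k bits.
    std::printf("k=%d: keys [%lld, %lld] -> offsets [%lld, %lld]\n", k, min,
                max, min * (1 << k), max * (1 << k));
  }
  return 0;
}
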
 
 
 Reduction JSTypedLowering::ReplaceEagerly(Node* old, Node* node) {
@@ -42,7 +67,7 @@
 // JSOperator. This class manages the rewriting of context, control, and effect
 // dependencies during lowering of a binop and contains numerous helper
 // functions for matching the types of inputs to an operation.
-class JSBinopReduction {
+class JSBinopReduction FINAL {
  public:
   JSBinopReduction(JSTypedLowering* lowering, Node* node)
       : lowering_(lowering),
@@ -55,9 +80,10 @@
     node_->ReplaceInput(1, ConvertToNumber(right()));
   }
 
-  void ConvertInputsToInt32(bool left_signed, bool right_signed) {
-    node_->ReplaceInput(0, ConvertToI32(left_signed, left()));
-    node_->ReplaceInput(1, ConvertToI32(right_signed, right()));
+  void ConvertInputsToUI32(Signedness left_signedness,
+                           Signedness right_signedness) {
+    node_->ReplaceInput(0, ConvertToUI32(left(), left_signedness));
+    node_->ReplaceInput(1, ConvertToUI32(right(), right_signedness));
   }
 
   void ConvertInputsToString() {
@@ -66,11 +92,15 @@
   }
 
   // Convert inputs for bitwise shift operation (ES5 spec 11.7).
-  void ConvertInputsForShift(bool left_signed) {
-    node_->ReplaceInput(0, ConvertToI32(left_signed, left()));
-    Node* rnum = ConvertToI32(false, right());
-    node_->ReplaceInput(1, graph()->NewNode(machine()->Word32And(), rnum,
-                                            jsgraph()->Int32Constant(0x1F)));
+  void ConvertInputsForShift(Signedness left_signedness) {
+    node_->ReplaceInput(0, ConvertToUI32(left(), left_signedness));
+    Node* rnum = ConvertToUI32(right(), kUnsigned);
+    Type* rnum_type = NodeProperties::GetBounds(rnum).upper;
+    if (!rnum_type->Is(lowering_->zero_thirtyone_range_)) {
+      rnum = graph()->NewNode(machine()->Word32And(), rnum,
+                              jsgraph()->Int32Constant(0x1F));
+    }
+    node_->ReplaceInput(1, rnum);
   }
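
ConvertInputsForShift masks the shift count with 0x1F because ECMA-262
defines shift counts modulo 32; the type check added above lets it skip the
Word32And entirely when the count is already provably within [0, 31] (the
zero_thirtyone_range_ built in the constructor). A standalone illustration,
not part of the patch:

#include <cstdint>
#include <cstdio>

// What JSShiftLeft must compute: ES5 11.7 masks the count to five bits.
uint32_t MaskedShl(uint32_t x, uint32_t count) {
  return x << (count & 0x1F);
}

int main() {
  std::printf("%u\n", MaskedShl(1, 37));  // 37 & 0x1F == 5, prints 32
  std::printf("%u\n", MaskedShl(1, 5));   // prints 32 as well
  // When the count's type already lies within [0, 31], the mask is a no-op
  // and the lowering emits the bare machine shift.
  return 0;
}
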
 
   void SwapInputs() {
@@ -83,14 +113,15 @@
 
   // Remove all effect and control inputs and outputs to this node and change
   // to the pure operator {op}, possibly inserting a boolean inversion.
-  Reduction ChangeToPureOperator(const Operator* op, bool invert = false) {
-    DCHECK_EQ(0, OperatorProperties::GetEffectInputCount(op));
+  Reduction ChangeToPureOperator(const Operator* op, bool invert = false,
+                                 Type* type = Type::Any()) {
+    DCHECK_EQ(0, op->EffectInputCount());
     DCHECK_EQ(false, OperatorProperties::HasContextInput(op));
-    DCHECK_EQ(0, OperatorProperties::GetControlInputCount(op));
-    DCHECK_EQ(2, OperatorProperties::GetValueInputCount(op));
+    DCHECK_EQ(0, op->ControlInputCount());
+    DCHECK_EQ(2, op->ValueInputCount());
 
     // Remove the effects from the node, if any, and update its effect usages.
-    if (OperatorProperties::GetEffectInputCount(node_->op()) > 0) {
+    if (node_->op()->EffectInputCount() > 0) {
       RelaxEffects(node_);
     }
     // Remove the inputs corresponding to context, effect, and control.
@@ -98,17 +129,26 @@
     // Finally, update the operator to the new one.
     node_->set_op(op);
 
+    // TODO(jarin): Replace the explicit typing hack with a call to some method
+    // that encapsulates changing the operator and re-typing.
+    Bounds const bounds = NodeProperties::GetBounds(node_);
+    NodeProperties::SetBounds(node_, Bounds::NarrowUpper(bounds, type, zone()));
+
     if (invert) {
      // Insert a boolean not to invert the value.
       Node* value = graph()->NewNode(simplified()->BooleanNot(), node_);
       node_->ReplaceUses(value);
       // Note: ReplaceUses() smashes all uses, so smash it back here.
       value->ReplaceInput(0, node_);
-      return lowering_->ReplaceWith(value);
+      return lowering_->Replace(value);
     }
     return lowering_->Changed(node_);
   }
 
+  Reduction ChangeToPureOperator(const Operator* op, Type* type) {
+    return ChangeToPureOperator(op, false, type);
+  }
+
   bool OneInputIs(Type* t) { return left_type_->Is(t) || right_type_->Is(t); }
 
   bool BothInputsAre(Type* t) {
@@ -132,10 +172,11 @@
   Type* right_type() { return right_type_; }
 
   SimplifiedOperatorBuilder* simplified() { return lowering_->simplified(); }
-  Graph* graph() { return lowering_->graph(); }
+  Graph* graph() const { return lowering_->graph(); }
   JSGraph* jsgraph() { return lowering_->jsgraph(); }
   JSOperatorBuilder* javascript() { return lowering_->javascript(); }
   MachineOperatorBuilder* machine() { return lowering_->machine(); }
+  Zone* zone() const { return graph()->zone(); }
 
  private:
   JSTypedLowering* lowering_;  // The containing lowering instance.
@@ -154,65 +195,30 @@
   }
 
   Node* ConvertToNumber(Node* node) {
-    // Avoid introducing too many eager ToNumber() operations.
-    Reduction reduced = lowering_->ReduceJSToNumberInput(node);
-    if (reduced.Changed()) return reduced.replacement();
+    if (NodeProperties::GetBounds(node).upper->Is(Type::PlainPrimitive())) {
+      return lowering_->ConvertToNumber(node);
+    }
     Node* n = graph()->NewNode(javascript()->ToNumber(), node, context(),
                                effect(), control());
     update_effect(n);
     return n;
   }
 
-  // Try to narrowing a double or number operation to an Int32 operation.
-  bool TryNarrowingToI32(Type* type, Node* node) {
-    switch (node->opcode()) {
-      case IrOpcode::kFloat64Add:
-      case IrOpcode::kNumberAdd: {
-        JSBinopReduction r(lowering_, node);
-        if (r.BothInputsAre(Type::Integral32())) {
-          node->set_op(lowering_->machine()->Int32Add());
-          // TODO(titzer): narrow bounds instead of overwriting.
-          NodeProperties::SetBounds(node, Bounds(type));
-          return true;
-        }
-      }
-      case IrOpcode::kFloat64Sub:
-      case IrOpcode::kNumberSubtract: {
-        JSBinopReduction r(lowering_, node);
-        if (r.BothInputsAre(Type::Integral32())) {
-          node->set_op(lowering_->machine()->Int32Sub());
-          // TODO(titzer): narrow bounds instead of overwriting.
-          NodeProperties::SetBounds(node, Bounds(type));
-          return true;
-        }
-      }
-      default:
-        return false;
-    }
-  }
-
-  Node* ConvertToI32(bool is_signed, Node* node) {
-    Type* type = is_signed ? Type::Signed32() : Type::Unsigned32();
-    if (node->OwnedBy(node_)) {
-      // If this node {node_} has the only edge to {node}, then try narrowing
-      // its operation to an Int32 add or subtract.
-      if (TryNarrowingToI32(type, node)) return node;
-    } else {
-      // Otherwise, {node} has multiple uses. Leave it as is and let the
-      // further lowering passes deal with it, which use a full backwards
-      // fixpoint.
-    }
-
+  Node* ConvertToUI32(Node* node, Signedness signedness) {
     // Avoid introducing too many eager NumberToInt32/NumberToUint32
     // operations.
     node = ConvertToNumber(node);
-    Type* input_type = NodeProperties::GetBounds(node).upper;
-
-    if (input_type->Is(type)) return node;  // already in the value range.
-
-    const Operator* op = is_signed ? simplified()->NumberToInt32()
-                                   : simplified()->NumberToUint32();
-    Node* n = graph()->NewNode(op, node);
-    return n;
+    Type* type = NodeProperties::GetBounds(node).upper;
+    if (signedness == kSigned) {
+      if (!type->Is(Type::Signed32())) {
+        node = graph()->NewNode(simplified()->NumberToInt32(), node);
+      }
+    } else {
+      DCHECK_EQ(kUnsigned, signedness);
+      if (!type->Is(Type::Unsigned32())) {
+        node = graph()->NewNode(simplified()->NumberToUint32(), node);
+      }
+    }
+    return node;
   }
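
The NumberToInt32/NumberToUint32 fallbacks above implement the ECMA-262
ToInt32/ToUint32 conversions: truncate toward zero, then wrap modulo 2^32.
A standalone model of the signed case (an illustration, not V8's
implementation):

#include <cmath>
#include <cstdint>
#include <cstdio>

// Truncate, wrap modulo 2^32, reinterpret as signed. The final signed cast
// wraps on all mainstream compilers and is guaranteed since C++20.
int32_t JSToInt32(double d) {
  if (!std::isfinite(d)) return 0;  // NaN and +/-Infinity map to 0
  double m = std::fmod(std::trunc(d), 4294967296.0);
  if (m < 0) m += 4294967296.0;
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}

int main() {
  std::printf("%d\n", JSToInt32(4294967301.0));  // 5: wrapped modulo 2^32
  std::printf("%d\n", JSToInt32(-1.9));          // -1: truncation, not floor
  std::printf("%d\n", JSToInt32(2147483648.0));  // -2147483648
  return 0;
}
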
 
   void update_effect(Node* effect) {
@@ -225,15 +231,24 @@
   JSBinopReduction r(this, node);
   if (r.BothInputsAre(Type::Number())) {
     // JSAdd(x:number, y:number) => NumberAdd(x, y)
-    return r.ChangeToPureOperator(simplified()->NumberAdd());
+    return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
   }
-  Type* maybe_string = Type::Union(Type::String(), Type::Receiver(), zone());
-  if (r.NeitherInputCanBe(maybe_string)) {
+  if (r.BothInputsAre(Type::Primitive()) &&
+      r.NeitherInputCanBe(Type::StringOrReceiver())) {
     // JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
     r.ConvertInputsToNumber();
-    return r.ChangeToPureOperator(simplified()->NumberAdd());
+    return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
   }
 #if 0
+  // TODO(turbofan): General ToNumber disabled for now because:
+  //   a) The inserted ToNumber operation screws up observability of valueOf.
+  //   b) Deoptimization at ToNumber doesn't have corresponding bailout id.
+  Type* maybe_string = Type::Union(Type::String(), Type::Receiver(), zone());
+  if (r.NeitherInputCanBe(maybe_string)) {
+    ...
+  }
+#endif
+#if 0
   // TODO(turbofan): Lowering of StringAdd is disabled for now because:
   //   a) The inserted ToString operation screws up valueOf vs. toString order.
   //   b) Deoptimization at ToString doesn't have corresponding bailout id.
@@ -250,9 +265,48 @@
 }
 
 
+Reduction JSTypedLowering::ReduceJSBitwiseOr(Node* node) {
+  JSBinopReduction r(this, node);
+  if (r.BothInputsAre(Type::Primitive()) || r.OneInputIs(zero_range_)) {
+    // TODO(jarin): Propagate frame state input from non-primitive input node to
+    // JSToNumber node.
+    // TODO(titzer): some Smi bitwise operations don't really require going
+    // all the way to int32, which can save tagging/untagging for some
+    // operations on some platforms.
+    // TODO(turbofan): make this heuristic configurable for code size.
+    r.ConvertInputsToUI32(kSigned, kSigned);
+    return r.ChangeToPureOperator(machine()->Word32Or(), Type::Integral32());
+  }
+  return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSMultiply(Node* node) {
+  JSBinopReduction r(this, node);
+  if (r.BothInputsAre(Type::Primitive()) || r.OneInputIs(one_range_)) {
+    // TODO(jarin): Propagate frame state input from non-primitive input node to
+    // JSToNumber node.
+    r.ConvertInputsToNumber();
+    return r.ChangeToPureOperator(simplified()->NumberMultiply(),
+                                  Type::Number());
+  }
+  // TODO(turbofan): relax/remove the effects of this operator in other cases.
+  return NoChange();
+}
+
+
 Reduction JSTypedLowering::ReduceNumberBinop(Node* node,
                                              const Operator* numberOp) {
   JSBinopReduction r(this, node);
+  if (r.BothInputsAre(Type::Primitive())) {
+    r.ConvertInputsToNumber();
+    return r.ChangeToPureOperator(numberOp, Type::Number());
+  }
+#if 0
+  // TODO(turbofan): General ToNumber disabled for now because:
+  //   a) The inserted ToNumber operation screws up observability of valueOf.
+  //   b) Deoptimization at ToNumber doesn't have corresponding bailout id.
   if (r.OneInputIs(Type::Primitive())) {
     // If at least one input is a primitive, then insert appropriate conversions
     // to number and reduce this operator to the given numeric one.
@@ -260,29 +314,36 @@
     r.ConvertInputsToNumber();
     return r.ChangeToPureOperator(numberOp);
   }
+#endif
   // TODO(turbofan): relax/remove the effects of this operator in other cases.
   return NoChange();
 }
 
 
-Reduction JSTypedLowering::ReduceI32Binop(Node* node, bool left_signed,
-                                          bool right_signed,
-                                          const Operator* intOp) {
+Reduction JSTypedLowering::ReduceInt32Binop(Node* node, const Operator* intOp) {
   JSBinopReduction r(this, node);
-  // TODO(titzer): some Smi bitwise operations don't really require going
-  // all the way to int32, which can save tagging/untagging for some operations
-  // on some platforms.
-  // TODO(turbofan): make this heuristic configurable for code size.
-  r.ConvertInputsToInt32(left_signed, right_signed);
-  return r.ChangeToPureOperator(intOp);
+  if (r.BothInputsAre(Type::Primitive())) {
+    // TODO(titzer): some Smi bitwise operations don't really require going
+    // all the way to int32, which can save tagging/untagging for some
+    // operations on some platforms.
+    // TODO(turbofan): make this heuristic configurable for code size.
+    r.ConvertInputsToUI32(kSigned, kSigned);
+    return r.ChangeToPureOperator(intOp, Type::Integral32());
+  }
+  return NoChange();
 }
 
 
-Reduction JSTypedLowering::ReduceI32Shift(Node* node, bool left_signed,
-                                          const Operator* shift_op) {
+Reduction JSTypedLowering::ReduceUI32Shift(Node* node,
+                                           Signedness left_signedness,
+                                           const Operator* shift_op) {
   JSBinopReduction r(this, node);
-  r.ConvertInputsForShift(left_signed);
-  return r.ChangeToPureOperator(shift_op);
+  if (r.BothInputsAre(Type::Primitive())) {
+    r.ConvertInputsForShift(left_signedness);
+    return r.ChangeToPureOperator(shift_op, Type::Integral32());
+  }
+  return NoChange();
 }
 
 
@@ -311,9 +372,18 @@
     }
     return r.ChangeToPureOperator(stringOp);
   }
+#if 0
+  // TODO(turbofan): General ToNumber disabled for now because:
+  //   a) The inserted ToNumber operation screws up observability of valueOf.
+  //   b) Deoptimization at ToNumber doesn't have corresponding bailout id.
   Type* maybe_string = Type::Union(Type::String(), Type::Receiver(), zone());
   if (r.OneInputCannotBe(maybe_string)) {
     // If one input cannot be a string, then emit a number comparison.
+    ...
+  }
+#endif
+  if (r.BothInputsAre(Type::Primitive()) &&
+      r.OneInputCannotBe(Type::StringOrReceiver())) {
     const Operator* less_than;
     const Operator* less_than_or_equal;
     if (r.BothInputsAre(Type::Unsigned32())) {
@@ -379,16 +449,14 @@
   if (r.left() == r.right()) {
     // x === x is always true unless x might be NaN
     if (!r.left_type()->Maybe(Type::NaN())) {
-      return ReplaceEagerly(node, invert ? jsgraph()->FalseConstant()
-                                         : jsgraph()->TrueConstant());
+      return ReplaceEagerly(node, jsgraph()->BooleanConstant(!invert));
     }
   }
-  if (!r.left_type()->Maybe(r.right_type())) {
-    // Type intersection is empty; === is always false unless both
-    // inputs could be strings (one internalized and one not).
-    if (r.OneInputCannotBe(Type::String())) {
-      return ReplaceEagerly(node, invert ? jsgraph()->TrueConstant()
-                                         : jsgraph()->FalseConstant());
+  if (r.OneInputCannotBe(Type::NumberOrString())) {
+    // For values with a canonical representation (i.e. neither string nor
+    // number), an empty type intersection means the values cannot be
+    // strictly equal.
+    if (!r.left_type()->Maybe(r.right_type())) {
+      return ReplaceEagerly(node, jsgraph()->BooleanConstant(invert));
     }
   }
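
Both folds above rest on JS value semantics: NaN is the only value not
strictly equal to itself, and an empty type intersection implies inequality
only for values with a canonical heap representation. The NaN half as a
standalone check (not part of the patch):

#include <cmath>
#include <cstdio>

int main() {
  double nan = std::nan("");
  // NaN is the one value for which x === x is false, so the fold must
  // first prove the input cannot be NaN.
  std::printf("nan == nan: %d\n", nan == nan);  // prints 0
  return 0;
}
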
   if (r.OneInputIs(Type::Undefined())) {
@@ -422,16 +490,47 @@
 }
 
 
+Reduction JSTypedLowering::ReduceJSUnaryNot(Node* node) {
+  Node* input = node->InputAt(0);
+  Type* input_type = NodeProperties::GetBounds(input).upper;
+  if (input_type->Is(Type::Boolean())) {
+    // JSUnaryNot(x:boolean,context) => BooleanNot(x)
+    node->set_op(simplified()->BooleanNot());
+    node->TrimInputCount(1);
+    return Changed(node);
+  }
+  // JSUnaryNot(x,context) => BooleanNot(AnyToBoolean(x))
+  node->set_op(simplified()->BooleanNot());
+  node->ReplaceInput(0, graph()->NewNode(simplified()->AnyToBoolean(), input));
+  node->TrimInputCount(1);
+  return Changed(node);
+}
+
+
+Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
+  Node* input = node->InputAt(0);
+  Type* input_type = NodeProperties::GetBounds(input).upper;
+  if (input_type->Is(Type::Boolean())) {
+    // JSToBoolean(x:boolean,context) => x
+    return Replace(input);
+  }
+  // JSToBoolean(x,context) => AnyToBoolean(x)
+  node->set_op(simplified()->AnyToBoolean());
+  node->TrimInputCount(1);
+  return Changed(node);
+}
+
+
 Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
   if (input->opcode() == IrOpcode::kJSToNumber) {
     // Recursively try to reduce the input first.
-    Reduction result = ReduceJSToNumberInput(input->InputAt(0));
-    if (result.Changed()) {
-      RelaxEffects(input);
-      return result;
-    }
+    Reduction result = ReduceJSToNumber(input);
+    if (result.Changed()) return result;
     return Changed(input);  // JSToNumber(JSToNumber(x)) => JSToNumber(x)
   }
+  // Check if we have a cached conversion.
+  Node* conversion = FindConversion<IrOpcode::kJSToNumber>(input);
+  if (conversion) return Replace(conversion);
   Type* input_type = NodeProperties::GetBounds(input).upper;
   if (input_type->Is(Type::Number())) {
     // JSToNumber(x:number) => x
@@ -439,30 +538,113 @@
   }
   if (input_type->Is(Type::Undefined())) {
     // JSToNumber(undefined) => #NaN
-    return ReplaceWith(jsgraph()->NaNConstant());
+    return Replace(jsgraph()->NaNConstant());
   }
   if (input_type->Is(Type::Null())) {
     // JSToNumber(null) => #0
-    return ReplaceWith(jsgraph()->ZeroConstant());
+    return Replace(jsgraph()->ZeroConstant());
   }
   if (input_type->Is(Type::Boolean())) {
     // JSToNumber(x:boolean) => BooleanToNumber(x)
-    return ReplaceWith(
-        graph()->NewNode(simplified()->BooleanToNumber(), input));
+    return Replace(graph()->NewNode(simplified()->BooleanToNumber(), input));
   }
   // TODO(turbofan): js-typed-lowering of ToNumber(x:string)
   return NoChange();
 }
 
 
+Reduction JSTypedLowering::ReduceJSToNumber(Node* node) {
+  // Try to reduce the input first.
+  Node* const input = node->InputAt(0);
+  Reduction reduction = ReduceJSToNumberInput(input);
+  if (reduction.Changed()) {
+    NodeProperties::ReplaceWithValue(node, reduction.replacement());
+    return reduction;
+  }
+  Type* const input_type = NodeProperties::GetBounds(input).upper;
+  if (input_type->Is(Type::PlainPrimitive())) {
+    if (input->opcode() == IrOpcode::kPhi) {
+      // JSToNumber(phi(x1,...,xn,control):plain-primitive,context)
+      //   => phi(JSToNumber(x1,no-context),
+      //          ...,
+      //          JSToNumber(xn,no-context),control)
+      int const input_count = input->InputCount() - 1;
+      Node* const control = input->InputAt(input_count);
+      DCHECK_LE(0, input_count);
+      DCHECK(NodeProperties::IsControl(control));
+      DCHECK(NodeProperties::GetBounds(node).upper->Is(Type::Number()));
+      DCHECK(!NodeProperties::GetBounds(input).upper->Is(Type::Number()));
+      RelaxEffects(node);
+      node->set_op(common()->Phi(kMachAnyTagged, input_count));
+      for (int i = 0; i < input_count; ++i) {
+        // We must be very careful not to introduce cycles when pushing
+        // operations into phis. It is safe for {value}, since it appears
+        // as input to the phi that we are replacing, but it's not safe
+        // to simply reuse the context of the {node}. However, ToNumber()
+        // does not require a context anyway, so it's safe to discard it
+        // here and pass the dummy context.
+        Node* const value = ConvertToNumber(input->InputAt(i));
+        if (i < node->InputCount()) {
+          node->ReplaceInput(i, value);
+        } else {
+          node->AppendInput(graph()->zone(), value);
+        }
+      }
+      if (input_count < node->InputCount()) {
+        node->ReplaceInput(input_count, control);
+      } else {
+        node->AppendInput(graph()->zone(), control);
+      }
+      node->TrimInputCount(input_count + 1);
+      return Changed(node);
+    }
+    if (input->opcode() == IrOpcode::kSelect) {
+      // JSToNumber(select(c,x1,x2):plain-primitive,context)
+      //   => select(c,JSToNumber(x1,no-context),JSToNumber(x2,no-context))
+      int const input_count = input->InputCount();
+      BranchHint const input_hint = SelectParametersOf(input->op()).hint();
+      DCHECK_EQ(3, input_count);
+      DCHECK(NodeProperties::GetBounds(node).upper->Is(Type::Number()));
+      DCHECK(!NodeProperties::GetBounds(input).upper->Is(Type::Number()));
+      RelaxEffects(node);
+      node->set_op(common()->Select(kMachAnyTagged, input_hint));
+      node->ReplaceInput(0, input->InputAt(0));
+      for (int i = 1; i < input_count; ++i) {
+        // We must be very careful not to introduce cycles when pushing
+        // operations into selects. It is safe for {value}, since it appears
+        // as input to the select that we are replacing, but it's not safe
+        // to simply reuse the context of the {node}. However, ToNumber()
+        // does not require a context anyway, so it's safe to discard it
+        // here and pass the dummy context.
+        Node* const value = ConvertToNumber(input->InputAt(i));
+        node->ReplaceInput(i, value);
+      }
+      node->TrimInputCount(input_count);
+      return Changed(node);
+    }
+    // Remember this conversion.
+    InsertConversion(node);
+    if (node->InputAt(1) != jsgraph()->NoContextConstant() ||
+        node->InputAt(2) != graph()->start() ||
+        node->InputAt(3) != graph()->start()) {
+      // JSToNumber(x:plain-primitive,context,effect,control)
+      //   => JSToNumber(x,no-context,start,start)
+      RelaxEffects(node);
+      node->ReplaceInput(1, jsgraph()->NoContextConstant());
+      node->ReplaceInput(2, graph()->start());
+      node->ReplaceInput(3, graph()->start());
+      return Changed(node);
+    }
+  }
+  return NoChange();
+}
+
+
 Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
   if (input->opcode() == IrOpcode::kJSToString) {
     // Recursively try to reduce the input first.
-    Reduction result = ReduceJSToStringInput(input->InputAt(0));
-    if (result.Changed()) {
-      RelaxEffects(input);
-      return result;
-    }
+    Reduction result = ReduceJSToString(input);
+    if (result.Changed()) return result;
     return Changed(input);  // JSToString(JSToString(x)) => JSToString(x)
   }
   Type* input_type = NodeProperties::GetBounds(input).upper;
@@ -470,12 +652,10 @@
     return Changed(input);  // JSToString(x:string) => x
   }
   if (input_type->Is(Type::Undefined())) {
-    return ReplaceWith(jsgraph()->HeapConstant(
-        graph()->zone()->isolate()->factory()->undefined_string()));
+    return Replace(jsgraph()->HeapConstant(factory()->undefined_string()));
   }
   if (input_type->Is(Type::Null())) {
-    return ReplaceWith(jsgraph()->HeapConstant(
-        graph()->zone()->isolate()->factory()->null_string()));
+    return Replace(jsgraph()->HeapConstant(factory()->null_string()));
   }
   // TODO(turbofan): js-typed-lowering of ToString(x:boolean)
   // TODO(turbofan): js-typed-lowering of ToString(x:number)
@@ -483,44 +663,14 @@
 }
 
 
-Reduction JSTypedLowering::ReduceJSToBooleanInput(Node* input) {
-  if (input->opcode() == IrOpcode::kJSToBoolean) {
-    // Recursively try to reduce the input first.
-    Reduction result = ReduceJSToBooleanInput(input->InputAt(0));
-    if (result.Changed()) {
-      RelaxEffects(input);
-      return result;
-    }
-    return Changed(input);  // JSToBoolean(JSToBoolean(x)) => JSToBoolean(x)
+Reduction JSTypedLowering::ReduceJSToString(Node* node) {
+  // Try to reduce the input first.
+  Node* const input = node->InputAt(0);
+  Reduction reduction = ReduceJSToStringInput(input);
+  if (reduction.Changed()) {
+    NodeProperties::ReplaceWithValue(node, reduction.replacement());
+    return reduction;
   }
-  Type* input_type = NodeProperties::GetBounds(input).upper;
-  if (input_type->Is(Type::Boolean())) {
-    return Changed(input);  // JSToBoolean(x:boolean) => x
-  }
-  if (input_type->Is(Type::Undefined())) {
-    // JSToBoolean(undefined) => #false
-    return ReplaceWith(jsgraph()->FalseConstant());
-  }
-  if (input_type->Is(Type::Null())) {
-    // JSToBoolean(null) => #false
-    return ReplaceWith(jsgraph()->FalseConstant());
-  }
-  if (input_type->Is(Type::DetectableReceiver())) {
-    // JSToBoolean(x:detectable) => #true
-    return ReplaceWith(jsgraph()->TrueConstant());
-  }
-  if (input_type->Is(Type::Undetectable())) {
-    // JSToBoolean(x:undetectable) => #false
-    return ReplaceWith(jsgraph()->FalseConstant());
-  }
-  if (input_type->Is(Type::OrderedNumber())) {
-    // JSToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x, #0))
-    Node* cmp = graph()->NewNode(simplified()->NumberEqual(), input,
-                                 jsgraph()->ZeroConstant());
-    Node* inv = graph()->NewNode(simplified()->BooleanNot(), cmp);
-    return ReplaceWith(inv);
-  }
-  // TODO(turbofan): js-typed-lowering of ToBoolean(string)
   return NoChange();
 }
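
The explicit ToBoolean cases deleted above (undefined and null to #false,
detectable receivers to #true, ordered numbers via a comparison with zero)
are now folded into the single AnyToBoolean simplified operator introduced
in ReduceJSToBoolean. For reference, a standalone model of those rules over
a hypothetical tagged value type (illustration only):

#include <cmath>
#include <cstdio>

// Hypothetical tagged value, for illustration only.
struct Value {
  enum Kind { kUndefined, kNull, kBoolean, kNumber, kString, kObject };
  Kind kind;
  double number;
  bool boolean;
  const char* string;
};

// Model of the ECMA-262 ToBoolean rules the deleted code special-cased.
bool ToBoolean(const Value& v) {
  switch (v.kind) {
    case Value::kUndefined:
    case Value::kNull:
      return false;      // JSToBoolean(undefined|null) => #false
    case Value::kBoolean:
      return v.boolean;  // JSToBoolean(x:boolean) => x
    case Value::kNumber:  // false iff zero or NaN
      return v.number != 0.0 && !std::isnan(v.number);
    case Value::kString:  // false iff empty
      return v.string != 0 && v.string[0] != '\0';
    case Value::kObject:  // detectable receivers => #true
      return true;
  }
  return false;
}

int main() {
  Value zero = {Value::kNumber, 0.0, false, 0};
  Value str = {Value::kString, 0.0, false, "x"};
  std::printf("%d %d\n", ToBoolean(zero), ToBoolean(str));  // 0 1
  return 0;
}
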
 
@@ -529,37 +679,40 @@
   Node* key = NodeProperties::GetValueInput(node, 1);
   Node* base = NodeProperties::GetValueInput(node, 0);
   Type* key_type = NodeProperties::GetBounds(key).upper;
-  Type* base_type = NodeProperties::GetBounds(base).upper;
   // TODO(mstarzinger): This lowering is not correct if:
-  //   a) The typed array turns external (i.e. MaterializeArrayBuffer)
-  //   b) The typed array or it's buffer is neutered.
-  //   c) The index is out of bounds.
-  if (base_type->IsConstant() && key_type->Is(Type::Integral32()) &&
-      base_type->AsConstant()->Value()->IsJSTypedArray()) {
-    // JSLoadProperty(typed-array, int32)
-    JSTypedArray* array = JSTypedArray::cast(*base_type->AsConstant()->Value());
-    ElementsKind elements_kind = array->map()->elements_kind();
-    ExternalArrayType type = array->type();
-    uint32_t length;
-    CHECK(array->length()->ToUint32(&length));
-    ElementAccess element_access;
-    Node* elements = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForJSObjectElements()), base,
-        NodeProperties::GetEffectInput(node));
-    if (IsExternalArrayElementsKind(elements_kind)) {
-      elements = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForExternalArrayPointer()),
-          elements, NodeProperties::GetEffectInput(node));
-      element_access = AccessBuilder::ForTypedArrayElement(type, true);
-    } else {
-      DCHECK(IsFixedTypedArrayElementsKind(elements_kind));
-      element_access = AccessBuilder::ForTypedArrayElement(type, false);
+  //   a) The typed array or its buffer is neutered.
+  HeapObjectMatcher<Object> mbase(base);
+  if (mbase.HasValue() && mbase.Value().handle()->IsJSTypedArray()) {
+    Handle<JSTypedArray> const array =
+        Handle<JSTypedArray>::cast(mbase.Value().handle());
+    array->GetBuffer()->set_is_neuterable(false);
+    BufferAccess const access(array->type());
+    size_t const k = ElementSizeLog2Of(access.machine_type());
+    double const byte_length = array->byte_length()->Number();
+    CHECK_LT(k, arraysize(shifted_int32_ranges_));
+    if (IsExternalArrayElementsKind(array->map()->elements_kind()) &&
+        key_type->Is(shifted_int32_ranges_[k]) && byte_length <= kMaxInt) {
+      // JSLoadProperty(typed-array, int32)
+      Handle<ExternalArray> elements =
+          Handle<ExternalArray>::cast(handle(array->elements()));
+      Node* buffer = jsgraph()->PointerConstant(elements->external_pointer());
+      Node* length = jsgraph()->Constant(byte_length);
+      Node* effect = NodeProperties::GetEffectInput(node);
+      Node* control = NodeProperties::GetControlInput(node);
+      // Check if we can avoid the bounds check.
+      if (key_type->Min() >= 0 && key_type->Max() < array->length()->Number()) {
+        Node* load = graph()->NewNode(
+            simplified()->LoadElement(
+                AccessBuilder::ForTypedArrayElement(array->type(), true)),
+            buffer, key, effect, control);
+        return ReplaceEagerly(node, load);
+      }
+      // Compute byte offset.
+      Node* offset = Word32Shl(key, static_cast<int>(k));
+      Node* load = graph()->NewNode(simplified()->LoadBuffer(access), buffer,
+                                    offset, length, effect, control);
+      return ReplaceEagerly(node, load);
     }
-    Node* value =
-        graph()->NewNode(simplified()->LoadElement(element_access), elements,
-                         key, jsgraph()->Uint32Constant(length),
-                         NodeProperties::GetEffectInput(node));
-    return ReplaceEagerly(node, value);
   }
   return NoChange();
 }
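
Note the two-tier structure of the lowering above: when the key's type range
already proves 0 <= key and key < length, the access becomes a plain
LoadElement with no bounds check; otherwise it falls back to LoadBuffer,
which carries the byte length and checks the offset itself. The elision
criterion, spelled out standalone (not part of the patch):

#include <cstdio>

// A bounds check is needed unless the key's type range proves safety.
bool NeedsBoundsCheck(double key_min, double key_max, double length) {
  return !(key_min >= 0 && key_max < length);
}

int main() {
  std::printf("%d\n", NeedsBoundsCheck(0, 9, 10));   // 0: range proves safety
  std::printf("%d\n", NeedsBoundsCheck(0, 10, 10));  // 1: may be out of range
  return 0;
}
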
@@ -570,65 +723,151 @@
   Node* base = NodeProperties::GetValueInput(node, 0);
   Node* value = NodeProperties::GetValueInput(node, 2);
   Type* key_type = NodeProperties::GetBounds(key).upper;
-  Type* base_type = NodeProperties::GetBounds(base).upper;
+  Type* value_type = NodeProperties::GetBounds(value).upper;
   // TODO(mstarzinger): This lowering is not correct if:
-  //   a) The typed array turns external (i.e. MaterializeArrayBuffer)
-  //   b) The typed array or it's buffer is neutered.
-  if (key_type->Is(Type::Integral32()) && base_type->IsConstant() &&
-      base_type->AsConstant()->Value()->IsJSTypedArray()) {
-    // JSStoreProperty(typed-array, int32, value)
-    JSTypedArray* array = JSTypedArray::cast(*base_type->AsConstant()->Value());
-    ElementsKind elements_kind = array->map()->elements_kind();
-    ExternalArrayType type = array->type();
-    uint32_t length;
-    CHECK(array->length()->ToUint32(&length));
-    ElementAccess element_access;
-    Node* elements = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForJSObjectElements()), base,
-        NodeProperties::GetEffectInput(node));
-    if (IsExternalArrayElementsKind(elements_kind)) {
-      elements = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForExternalArrayPointer()),
-          elements, NodeProperties::GetEffectInput(node));
-      element_access = AccessBuilder::ForTypedArrayElement(type, true);
-    } else {
-      DCHECK(IsFixedTypedArrayElementsKind(elements_kind));
-      element_access = AccessBuilder::ForTypedArrayElement(type, false);
+  //   a) The typed array or its buffer is neutered.
+  HeapObjectMatcher<Object> mbase(base);
+  if (mbase.HasValue() && mbase.Value().handle()->IsJSTypedArray()) {
+    Handle<JSTypedArray> const array =
+        Handle<JSTypedArray>::cast(mbase.Value().handle());
+    array->GetBuffer()->set_is_neuterable(false);
+    BufferAccess const access(array->type());
+    size_t const k = ElementSizeLog2Of(access.machine_type());
+    double const byte_length = array->byte_length()->Number();
+    CHECK_LT(k, arraysize(shifted_int32_ranges_));
+    if (IsExternalArrayElementsKind(array->map()->elements_kind()) &&
+        access.external_array_type() != kExternalUint8ClampedArray &&
+        key_type->Is(shifted_int32_ranges_[k]) && byte_length <= kMaxInt) {
+      // JSStoreProperty(typed-array, int32, value)
+      Handle<ExternalArray> elements =
+          Handle<ExternalArray>::cast(handle(array->elements()));
+      Node* buffer = jsgraph()->PointerConstant(elements->external_pointer());
+      Node* length = jsgraph()->Constant(byte_length);
+      Node* context = NodeProperties::GetContextInput(node);
+      Node* effect = NodeProperties::GetEffectInput(node);
+      Node* control = NodeProperties::GetControlInput(node);
+      // Convert to a number first.
+      if (!value_type->Is(Type::Number())) {
+        Reduction number_reduction = ReduceJSToNumberInput(value);
+        if (number_reduction.Changed()) {
+          value = number_reduction.replacement();
+        } else {
+          value = effect = graph()->NewNode(javascript()->ToNumber(), value,
+                                            context, effect, control);
+        }
+      }
+      // For integer-typed arrays, convert to the integer type.
+      if (TypeOf(access.machine_type()) == kTypeInt32 &&
+          !value_type->Is(Type::Signed32())) {
+        value = graph()->NewNode(simplified()->NumberToInt32(), value);
+      } else if (TypeOf(access.machine_type()) == kTypeUint32 &&
+                 !value_type->Is(Type::Unsigned32())) {
+        value = graph()->NewNode(simplified()->NumberToUint32(), value);
+      }
+      // Check if we can avoid the bounds check.
+      if (key_type->Min() >= 0 && key_type->Max() < array->length()->Number()) {
+        node->set_op(simplified()->StoreElement(
+            AccessBuilder::ForTypedArrayElement(array->type(), true)));
+        node->ReplaceInput(0, buffer);
+        DCHECK_EQ(key, node->InputAt(1));
+        node->ReplaceInput(2, value);
+        node->ReplaceInput(3, effect);
+        node->ReplaceInput(4, control);
+        node->TrimInputCount(5);
+        return Changed(node);
+      }
+      // Compute byte offset.
+      Node* offset = Word32Shl(key, static_cast<int>(k));
+      // Turn into a StoreBuffer operation.
+      node->set_op(simplified()->StoreBuffer(access));
+      node->ReplaceInput(0, buffer);
+      node->ReplaceInput(1, offset);
+      node->ReplaceInput(2, length);
+      node->ReplaceInput(3, value);
+      node->ReplaceInput(4, effect);
+      DCHECK_EQ(control, node->InputAt(5));
+      DCHECK_EQ(6, node->InputCount());
+      return Changed(node);
     }
-
-    Node* check = graph()->NewNode(machine()->Uint32LessThan(), key,
-                                   jsgraph()->Uint32Constant(length));
-    Node* branch = graph()->NewNode(common()->Branch(), check,
-                                    NodeProperties::GetControlInput(node));
-
-    Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-    Node* store =
-        graph()->NewNode(simplified()->StoreElement(element_access), elements,
-                         key, jsgraph()->Uint32Constant(length), value,
-                         NodeProperties::GetEffectInput(node), if_true);
-
-    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-
-    Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-    Node* phi = graph()->NewNode(common()->EffectPhi(2), store,
-                                 NodeProperties::GetEffectInput(node), merge);
-
-    return ReplaceWith(phi);
   }
   return NoChange();
 }
 
 
-static Reduction ReplaceWithReduction(Node* node, Reduction reduction) {
-  if (reduction.Changed()) {
-    NodeProperties::ReplaceWithValue(node, reduction.replacement());
-    return reduction;
+Reduction JSTypedLowering::ReduceJSLoadContext(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
+  ContextAccess const& access = ContextAccessOf(node->op());
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  Node* const control = graph()->start();
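+  // Walk up the context chain by {access.depth()} levels, replacing the
+  // context input with a load of the previous context slot each time.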
+  for (size_t i = 0; i < access.depth(); ++i) {
+    node->ReplaceInput(
+        0, graph()->NewNode(
+               simplified()->LoadField(
+                   AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
+               NodeProperties::GetValueInput(node, 0), effect, control));
   }
-  return Reducer::NoChange();
+  node->set_op(
+      simplified()->LoadField(AccessBuilder::ForContextSlot(access.index())));
+  node->ReplaceInput(1, effect);
+  node->ReplaceInput(2, control);
+  DCHECK_EQ(3, node->InputCount());
+  return Changed(node);
+}
+
+
+Reduction JSTypedLowering::ReduceJSStoreContext(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
+  ContextAccess const& access = ContextAccessOf(node->op());
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  Node* const control = graph()->start();
+  for (size_t i = 0; i < access.depth(); ++i) {
+    node->ReplaceInput(
+        0, graph()->NewNode(
+               simplified()->LoadField(
+                   AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
+               NodeProperties::GetValueInput(node, 0), effect, control));
+  }
+  node->set_op(
+      simplified()->StoreField(AccessBuilder::ForContextSlot(access.index())));
+  node->RemoveInput(2);
+  DCHECK_EQ(4, node->InputCount());
+  return Changed(node);
 }
 
 
 Reduction JSTypedLowering::Reduce(Node* node) {
+  // Check if the output type is a singleton.  In that case we already know the
+  // result value and can simply replace the node if it's eliminable.
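+  // For example, a node whose type is the singleton range [42, 42] can be
+  // replaced by the constant 42, provided the node has no observable effects.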
+  if (NodeProperties::IsTyped(node) &&
+      !IrOpcode::IsLeafOpcode(node->opcode()) &&
+      node->op()->HasProperty(Operator::kEliminatable)) {
+    Type* upper = NodeProperties::GetBounds(node).upper;
+    if (upper->IsConstant()) {
+      Node* replacement = jsgraph()->Constant(upper->AsConstant()->Value());
+      NodeProperties::ReplaceWithValue(node, replacement);
+      return Changed(replacement);
+    } else if (upper->Is(Type::MinusZero())) {
+      Node* replacement = jsgraph()->Constant(factory()->minus_zero_value());
+      NodeProperties::ReplaceWithValue(node, replacement);
+      return Changed(replacement);
+    } else if (upper->Is(Type::NaN())) {
+      Node* replacement = jsgraph()->NaNConstant();
+      NodeProperties::ReplaceWithValue(node, replacement);
+      return Changed(replacement);
+    } else if (upper->Is(Type::Null())) {
+      Node* replacement = jsgraph()->NullConstant();
+      NodeProperties::ReplaceWithValue(node, replacement);
+      return Changed(replacement);
+    } else if (upper->Is(Type::PlainNumber()) && upper->Min() == upper->Max()) {
+      Node* replacement = jsgraph()->Constant(upper->Min());
+      NodeProperties::ReplaceWithValue(node, replacement);
+      return Changed(replacement);
+    } else if (upper->Is(Type::Undefined())) {
+      Node* replacement = jsgraph()->UndefinedConstant();
+      NodeProperties::ReplaceWithValue(node, replacement);
+      return Changed(replacement);
+    }
+  }
   switch (node->opcode()) {
     case IrOpcode::kJSEqual:
       return ReduceJSEqual(node, false);
@@ -644,67 +883,113 @@
     case IrOpcode::kJSGreaterThanOrEqual:
       return ReduceJSComparison(node);
     case IrOpcode::kJSBitwiseOr:
-      return ReduceI32Binop(node, true, true, machine()->Word32Or());
+      return ReduceJSBitwiseOr(node);
     case IrOpcode::kJSBitwiseXor:
-      return ReduceI32Binop(node, true, true, machine()->Word32Xor());
+      return ReduceInt32Binop(node, machine()->Word32Xor());
     case IrOpcode::kJSBitwiseAnd:
-      return ReduceI32Binop(node, true, true, machine()->Word32And());
+      return ReduceInt32Binop(node, machine()->Word32And());
     case IrOpcode::kJSShiftLeft:
-      return ReduceI32Shift(node, true, machine()->Word32Shl());
+      return ReduceUI32Shift(node, kSigned, machine()->Word32Shl());
     case IrOpcode::kJSShiftRight:
-      return ReduceI32Shift(node, true, machine()->Word32Sar());
+      return ReduceUI32Shift(node, kSigned, machine()->Word32Sar());
     case IrOpcode::kJSShiftRightLogical:
-      return ReduceI32Shift(node, false, machine()->Word32Shr());
+      return ReduceUI32Shift(node, kUnsigned, machine()->Word32Shr());
     case IrOpcode::kJSAdd:
       return ReduceJSAdd(node);
     case IrOpcode::kJSSubtract:
       return ReduceNumberBinop(node, simplified()->NumberSubtract());
     case IrOpcode::kJSMultiply:
-      return ReduceNumberBinop(node, simplified()->NumberMultiply());
+      return ReduceJSMultiply(node);
     case IrOpcode::kJSDivide:
       return ReduceNumberBinop(node, simplified()->NumberDivide());
     case IrOpcode::kJSModulus:
       return ReduceNumberBinop(node, simplified()->NumberModulus());
-    case IrOpcode::kJSUnaryNot: {
-      Reduction result = ReduceJSToBooleanInput(node->InputAt(0));
-      Node* value;
-      if (result.Changed()) {
-        // JSUnaryNot(x:boolean) => BooleanNot(x)
-        value =
-            graph()->NewNode(simplified()->BooleanNot(), result.replacement());
-        NodeProperties::ReplaceWithValue(node, value);
-        return Changed(value);
-      } else {
-        // JSUnaryNot(x) => BooleanNot(JSToBoolean(x))
-        value = graph()->NewNode(simplified()->BooleanNot(), node);
-        node->set_op(javascript()->ToBoolean());
-        NodeProperties::ReplaceWithValue(node, value, node);
-        // Note: ReplaceUses() smashes all uses, so smash it back here.
-        value->ReplaceInput(0, node);
-        return Changed(node);
-      }
-    }
+    case IrOpcode::kJSUnaryNot:
+      return ReduceJSUnaryNot(node);
     case IrOpcode::kJSToBoolean:
-      return ReplaceWithReduction(node,
-                                  ReduceJSToBooleanInput(node->InputAt(0)));
+      return ReduceJSToBoolean(node);
     case IrOpcode::kJSToNumber:
-      return ReplaceWithReduction(node,
-                                  ReduceJSToNumberInput(node->InputAt(0)));
+      return ReduceJSToNumber(node);
     case IrOpcode::kJSToString:
-      return ReplaceWithReduction(node,
-                                  ReduceJSToStringInput(node->InputAt(0)));
+      return ReduceJSToString(node);
     case IrOpcode::kJSLoadProperty:
       return ReduceJSLoadProperty(node);
     case IrOpcode::kJSStoreProperty:
       return ReduceJSStoreProperty(node);
-    case IrOpcode::kJSCallFunction:
-      return JSBuiltinReducer(jsgraph()).Reduce(node);
+    case IrOpcode::kJSLoadContext:
+      return ReduceJSLoadContext(node);
+    case IrOpcode::kJSStoreContext:
+      return ReduceJSStoreContext(node);
     default:
       break;
   }
   return NoChange();
 }
 
+
+Node* JSTypedLowering::ConvertToNumber(Node* input) {
+  DCHECK(NodeProperties::GetBounds(input).upper->Is(Type::PlainPrimitive()));
+  // Avoid inserting too many eager ToNumber() operations.
+  Reduction const reduction = ReduceJSToNumberInput(input);
+  if (reduction.Changed()) return reduction.replacement();
+  Node* const conversion = graph()->NewNode(javascript()->ToNumber(), input,
+                                            jsgraph()->NoContextConstant(),
+                                            graph()->start(), graph()->start());
+  InsertConversion(conversion);
+  return conversion;
+}
+
+
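+// Looks up a previously inserted conversion of {input} with opcode {kOpcode}
+// in the conversion cache; returns nullptr on a cache miss.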
+template <IrOpcode::Value kOpcode>
+Node* JSTypedLowering::FindConversion(Node* input) {
+  size_t const input_id = input->id();
+  if (input_id < conversions_.size()) {
+    Node* const conversion = conversions_[input_id];
+    if (conversion && conversion->opcode() == kOpcode) {
+      return conversion;
+    }
+  }
+  return nullptr;
+}
+
+
+void JSTypedLowering::InsertConversion(Node* conversion) {
+  DCHECK(conversion->opcode() == IrOpcode::kJSToNumber);
+  size_t const input_id = conversion->InputAt(0)->id();
+  if (input_id >= conversions_.size()) {
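+    // Grow the cache geometrically so repeated insertions stay amortized O(1).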
+    conversions_.resize(2 * input_id + 1);
+  }
+  conversions_[input_id] = conversion;
+}
+
+
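+// Helper that emits {lhs} << {rhs} as a machine-level shift, eliding the
+// shift entirely when {rhs} is zero.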
+Node* JSTypedLowering::Word32Shl(Node* const lhs, int32_t const rhs) {
+  if (rhs == 0) return lhs;
+  return graph()->NewNode(machine()->Word32Shl(), lhs,
+                          jsgraph()->Int32Constant(rhs));
+}
+
+
+Factory* JSTypedLowering::factory() const { return jsgraph()->factory(); }
+
+
+Graph* JSTypedLowering::graph() const { return jsgraph()->graph(); }
+
+
+JSOperatorBuilder* JSTypedLowering::javascript() const {
+  return jsgraph()->javascript();
+}
+
+
+CommonOperatorBuilder* JSTypedLowering::common() const {
+  return jsgraph()->common();
+}
+
+
+MachineOperatorBuilder* JSTypedLowering::machine() const {
+  return jsgraph()->machine();
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/js-typed-lowering.h b/src/compiler/js-typed-lowering.h
index deaf1fa..838085e 100644
--- a/src/compiler/js-typed-lowering.h
+++ b/src/compiler/js-typed-lowering.h
@@ -6,55 +6,74 @@
 #define V8_COMPILER_JS_TYPED_LOWERING_H_
 
 #include "src/compiler/graph-reducer.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/machine-operator.h"
-#include "src/compiler/node.h"
 #include "src/compiler/simplified-operator.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
+// Forward declarations.
+class CommonOperatorBuilder;
+class JSGraph;
+class JSOperatorBuilder;
+class MachineOperatorBuilder;
+
+
 // Lowers JS-level operators to simplified operators based on types.
 class JSTypedLowering FINAL : public Reducer {
  public:
-  explicit JSTypedLowering(JSGraph* jsgraph)
-      : jsgraph_(jsgraph), simplified_(jsgraph->zone()) {}
-  virtual ~JSTypedLowering();
+  JSTypedLowering(JSGraph* jsgraph, Zone* zone);
+  ~JSTypedLowering() FINAL {}
 
-  virtual Reduction Reduce(Node* node) OVERRIDE;
-
-  JSGraph* jsgraph() { return jsgraph_; }
-  Graph* graph() { return jsgraph_->graph(); }
-  Zone* zone() { return jsgraph_->zone(); }
+  Reduction Reduce(Node* node) FINAL;
 
  private:
   friend class JSBinopReduction;
 
   Reduction ReplaceEagerly(Node* old, Node* node);
-  Reduction ReplaceWith(Node* node) { return Reducer::Replace(node); }
   Reduction ReduceJSAdd(Node* node);
+  Reduction ReduceJSBitwiseOr(Node* node);
+  Reduction ReduceJSMultiply(Node* node);
   Reduction ReduceJSComparison(Node* node);
   Reduction ReduceJSLoadProperty(Node* node);
   Reduction ReduceJSStoreProperty(Node* node);
+  Reduction ReduceJSLoadContext(Node* node);
+  Reduction ReduceJSStoreContext(Node* node);
   Reduction ReduceJSEqual(Node* node, bool invert);
   Reduction ReduceJSStrictEqual(Node* node, bool invert);
+  Reduction ReduceJSUnaryNot(Node* node);
+  Reduction ReduceJSToBoolean(Node* node);
   Reduction ReduceJSToNumberInput(Node* input);
+  Reduction ReduceJSToNumber(Node* node);
   Reduction ReduceJSToStringInput(Node* input);
-  Reduction ReduceJSToBooleanInput(Node* input);
+  Reduction ReduceJSToString(Node* node);
   Reduction ReduceNumberBinop(Node* node, const Operator* numberOp);
-  Reduction ReduceI32Binop(Node* node, bool left_signed, bool right_signed,
-                           const Operator* intOp);
-  Reduction ReduceI32Shift(Node* node, bool left_signed,
-                           const Operator* shift_op);
+  Reduction ReduceInt32Binop(Node* node, const Operator* intOp);
+  Reduction ReduceUI32Shift(Node* node, Signedness left_signedness,
+                            const Operator* shift_op);
 
-  JSOperatorBuilder* javascript() { return jsgraph_->javascript(); }
-  CommonOperatorBuilder* common() { return jsgraph_->common(); }
+  Node* ConvertToNumber(Node* input);
+  template <IrOpcode::Value>
+  Node* FindConversion(Node* input);
+  void InsertConversion(Node* conversion);
+
+  Node* Word32Shl(Node* const lhs, int32_t const rhs);
+
+  Factory* factory() const;
+  Graph* graph() const;
+  JSGraph* jsgraph() const { return jsgraph_; }
+  JSOperatorBuilder* javascript() const;
+  CommonOperatorBuilder* common() const;
   SimplifiedOperatorBuilder* simplified() { return &simplified_; }
-  MachineOperatorBuilder* machine() { return jsgraph_->machine(); }
+  MachineOperatorBuilder* machine() const;
 
   JSGraph* jsgraph_;
   SimplifiedOperatorBuilder simplified_;
+  ZoneVector<Node*> conversions_;  // Cache inserted JSToXXX() conversions.
+  Type* zero_range_;
+  Type* one_range_;
+  Type* zero_thirtyone_range_;
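+  // Index ranges whose scaled byte offsets (index << k) still fit into int32.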
+  Type* shifted_int32_ranges_[4];
 };
 
 }  // namespace compiler
diff --git a/src/compiler/jump-threading.cc b/src/compiler/jump-threading.cc
new file mode 100644
index 0000000..f0bb731
--- /dev/null
+++ b/src/compiler/jump-threading.cc
@@ -0,0 +1,198 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/jump-threading.h"
+#include "src/compiler/code-generator-impl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef BasicBlock::RpoNumber RpoNumber;
+
+#define TRACE(x) \
+  if (FLAG_trace_turbo_jt) PrintF x
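+// Note the double parentheses at TRACE call sites, e.g. TRACE(("fw %d\n", i)):
+// the inner pair makes the variadic argument list a single macro argument.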
+
+struct JumpThreadingState {
+  bool forwarded;
+  ZoneVector<RpoNumber>& result;
+  ZoneStack<RpoNumber>& stack;
+
+  void Clear(size_t count) { result.assign(count, unvisited()); }
+  void PushIfUnvisited(RpoNumber num) {
+    if (result[num.ToInt()] == unvisited()) {
+      stack.push(num);
+      result[num.ToInt()] = onstack();
+    }
+  }
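+  // Resolves the forwarding target for the block on top of the stack: {to}
+  // may be the block itself (a self-jump), an unvisited block (recurse into
+  // it), a block still on the stack (break the cycle), or an already-resolved
+  // block (forward to its final target).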
+  void Forward(RpoNumber to) {
+    RpoNumber from = stack.top();
+    RpoNumber to_to = result[to.ToInt()];
+    bool pop = true;
+    if (to == from) {
+      TRACE(("  xx %d\n", from.ToInt()));
+      result[from.ToInt()] = from;
+    } else if (to_to == unvisited()) {
+      TRACE(("  fw %d -> %d (recurse)\n", from.ToInt(), to.ToInt()));
+      stack.push(to);
+      result[to.ToInt()] = onstack();
+      pop = false;  // recurse.
+    } else if (to_to == onstack()) {
+      TRACE(("  fw %d -> %d (cycle)\n", from.ToInt(), to.ToInt()));
+      result[from.ToInt()] = to;  // break the cycle.
+      forwarded = true;
+    } else {
+      TRACE(("  fw %d -> %d (forward)\n", from.ToInt(), to.ToInt()));
+      result[from.ToInt()] = to_to;  // forward the block.
+      forwarded = true;
+    }
+    if (pop) stack.pop();
+  }
+  RpoNumber unvisited() { return RpoNumber::FromInt(-1); }
+  RpoNumber onstack() { return RpoNumber::FromInt(-2); }
+};
+
+
+bool JumpThreading::ComputeForwarding(Zone* local_zone,
+                                      ZoneVector<RpoNumber>& result,
+                                      InstructionSequence* code) {
+  ZoneStack<RpoNumber> stack(local_zone);
+  JumpThreadingState state = {false, result, stack};
+  state.Clear(code->InstructionBlockCount());
+
+  // Iterate over the blocks forward, pushing the blocks onto the stack.
+  for (auto const block : code->instruction_blocks()) {
+    RpoNumber current = block->rpo_number();
+    state.PushIfUnvisited(current);
+
+    // Process the stack, which implements DFS through empty blocks.
+    while (!state.stack.empty()) {
+      InstructionBlock* block = code->InstructionBlockAt(state.stack.top());
+      // Process the instructions in a block up to a non-empty instruction.
+      TRACE(("jt [%d] B%d RPO%d\n", static_cast<int>(stack.size()),
+             block->id().ToInt(), block->rpo_number().ToInt()));
+      bool fallthru = true;
+      RpoNumber fw = block->rpo_number();
+      for (int i = block->code_start(); i < block->code_end(); ++i) {
+        Instruction* instr = code->InstructionAt(i);
+        if (instr->IsGapMoves() && GapInstruction::cast(instr)->IsRedundant()) {
+          // skip redundant gap moves.
+          TRACE(("  nop gap\n"));
+          continue;
+        } else if (instr->IsSourcePosition()) {
+          // skip source positions.
+          TRACE(("  src pos\n"));
+          continue;
+        } else if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+          // can't skip instructions with flags continuations.
+          TRACE(("  flags\n"));
+          fallthru = false;
+        } else if (instr->IsNop()) {
+          // skip nops.
+          TRACE(("  nop\n"));
+          continue;
+        } else if (instr->arch_opcode() == kArchJmp) {
+          // try to forward the jump instruction.
+          TRACE(("  jmp\n"));
+          fw = code->InputRpo(instr, 0);
+          fallthru = false;
+        } else {
+          // can't skip other instructions.
+          TRACE(("  other\n"));
+          fallthru = false;
+        }
+        break;
+      }
+      if (fallthru) {
+        int next = 1 + block->rpo_number().ToInt();
+        if (next < code->InstructionBlockCount()) fw = RpoNumber::FromInt(next);
+      }
+      state.Forward(fw);
+    }
+  }
+
+#ifdef DEBUG
+  for (RpoNumber num : result) {
+    CHECK(num.IsValid());
+  }
+#endif
+
+  if (FLAG_trace_turbo_jt) {
+    for (int i = 0; i < static_cast<int>(result.size()); i++) {
+      TRACE(("RPO%d B%d ", i,
+             code->InstructionBlockAt(RpoNumber::FromInt(i))->id().ToInt()));
+      int to = result[i].ToInt();
+      if (i != to) {
+        TRACE(("-> B%d\n",
+               code->InstructionBlockAt(RpoNumber::FromInt(to))->id().ToInt()));
+      } else {
+        TRACE(("\n"));
+      }
+    }
+  }
+
+  return state.forwarded;
+}
+
+
+void JumpThreading::ApplyForwarding(ZoneVector<RpoNumber>& result,
+                                    InstructionSequence* code) {
+  if (!FLAG_turbo_jt) return;
+
+  Zone local_zone(code->zone()->isolate());
+  ZoneVector<bool> skip(static_cast<int>(result.size()), false, &local_zone);
+
+  // Skip empty blocks when the previous block doesn't fall through.
+  bool prev_fallthru = true;
+  for (auto const block : code->instruction_blocks()) {
+    int block_num = block->rpo_number().ToInt();
+    skip[block_num] = !prev_fallthru && result[block_num].ToInt() != block_num;
+
+    bool fallthru = true;
+    for (int i = block->code_start(); i < block->code_end(); ++i) {
+      Instruction* instr = code->InstructionAt(i);
+      if (FlagsModeField::decode(instr->opcode()) == kFlags_branch) {
+        fallthru = false;  // branches don't fall through to the next block.
+      } else if (instr->arch_opcode() == kArchJmp) {
+        if (skip[block_num]) {
+          // Overwrite a redundant jump with a nop.
+          TRACE(("jt-fw nop @%d\n", i));
+          instr->OverwriteWithNop();
+        }
+        fallthru = false;  // jumps don't fall through to the next block.
+      }
+    }
+    prev_fallthru = fallthru;
+  }
+
+  // Patch RPO immediates.
+  InstructionSequence::Immediates& immediates = code->immediates();
+  for (size_t i = 0; i < immediates.size(); i++) {
+    Constant constant = immediates[i];
+    if (constant.type() == Constant::kRpoNumber) {
+      RpoNumber rpo = constant.ToRpoNumber();
+      RpoNumber fw = result[rpo.ToInt()];
+      if (!(fw == rpo)) immediates[i] = Constant(fw);
+    }
+  }
+
+  // Recompute assembly order numbers.
+  int ao = 0;
+  for (auto const block : code->instruction_blocks()) {
+    if (!block->IsDeferred()) {
+      block->set_ao_number(RpoNumber::FromInt(ao));
+      if (!skip[block->rpo_number().ToInt()]) ao++;
+    }
+  }
+  for (auto const block : code->instruction_blocks()) {
+    if (block->IsDeferred()) {
+      block->set_ao_number(RpoNumber::FromInt(ao));
+      if (!skip[block->rpo_number().ToInt()]) ao++;
+    }
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/jump-threading.h b/src/compiler/jump-threading.h
new file mode 100644
index 0000000..b801fec
--- /dev/null
+++ b/src/compiler/jump-threading.h
@@ -0,0 +1,34 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JUMP_THREADING_H_
+#define V8_COMPILER_JUMP_THREADING_H_
+
+#include "src/compiler/instruction.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forwards jumps that target an empty basic block ending in another jump
+// directly to that second jump's destination, applied transitively.
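+//
+// For example, given
+//   B1: ...; jmp B2
+//   B2: jmp B3          <-- B2 is empty
+// the jump in B1 is rewritten to target B3 directly, and B2 can be skipped.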
+class JumpThreading {
+ public:
+  // Compute the forwarding map of basic blocks to their ultimate destination.
+  // Returns {true} if there is at least one block that is forwarded.
+  static bool ComputeForwarding(Zone* local_zone,
+                                ZoneVector<BasicBlock::RpoNumber>& result,
+                                InstructionSequence* code);
+
+  // Rewrite the instructions to forward jumps and branches.
+  // May also negate some branches.
+  static void ApplyForwarding(ZoneVector<BasicBlock::RpoNumber>& forwarding,
+                              InstructionSequence* code);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_JUMP_THREADING_H_
diff --git a/src/compiler/linkage-impl.h b/src/compiler/linkage-impl.h
index c32c706..c13bd74 100644
--- a/src/compiler/linkage-impl.h
+++ b/src/compiler/linkage-impl.h
@@ -5,6 +5,8 @@
 #ifndef V8_COMPILER_LINKAGE_IMPL_H_
 #define V8_COMPILER_LINKAGE_IMPL_H_
 
+#include "src/code-stubs.h"
+
 namespace v8 {
 namespace internal {
 namespace compiler {
@@ -26,8 +28,8 @@
   }
 
   // TODO(turbofan): cache call descriptors for JSFunction calls.
-  static CallDescriptor* GetJSCallDescriptor(Zone* zone,
-                                             int js_parameter_count) {
+  static CallDescriptor* GetJSCallDescriptor(Zone* zone, int js_parameter_count,
+                                             CallDescriptor::Flags flags) {
     const size_t return_count = 1;
     const size_t context_count = 1;
     const size_t parameter_count = js_parameter_count + context_count;
@@ -54,16 +56,17 @@
     // The target for JS function calls is the JSFunction object.
     MachineType target_type = kMachAnyTagged;
     LinkageLocation target_loc = regloc(LinkageTraits::JSCallFunctionReg());
-    return new (zone) CallDescriptor(CallDescriptor::kCallJSFunction,  // kind
-                                     target_type,         // target MachineType
-                                     target_loc,          // target location
-                                     types.Build(),       // machine_sig
-                                     locations.Build(),   // location_sig
-                                     js_parameter_count,  // js_parameter_count
-                                     Operator::kNoProperties,  // properties
-                                     kNoCalleeSaved,           // callee-saved
-                                     CallDescriptor::kNeedsFrameState,  // flags
-                                     "js-call");
+    return new (zone) CallDescriptor(     // --
+        CallDescriptor::kCallJSFunction,  // kind
+        target_type,                      // target MachineType
+        target_loc,                       // target location
+        types.Build(),                    // machine_sig
+        locations.Build(),                // location_sig
+        js_parameter_count,               // js_parameter_count
+        Operator::kNoProperties,          // properties
+        kNoCalleeSaved,                   // callee-saved
+        flags,                            // flags
+        "js-call");
   }
 
 
@@ -114,23 +117,25 @@
     // The target for runtime calls is a code object.
     MachineType target_type = kMachAnyTagged;
     LinkageLocation target_loc = LinkageLocation::AnyRegister();
-    return new (zone) CallDescriptor(CallDescriptor::kCallCodeObject,  // kind
-                                     target_type,         // target MachineType
-                                     target_loc,          // target location
-                                     types.Build(),       // machine_sig
-                                     locations.Build(),   // location_sig
-                                     js_parameter_count,  // js_parameter_count
-                                     properties,          // properties
-                                     kNoCalleeSaved,      // callee-saved
-                                     flags,               // flags
-                                     function->name);     // debug name
+    return new (zone) CallDescriptor(     // --
+        CallDescriptor::kCallCodeObject,  // kind
+        target_type,                      // target MachineType
+        target_loc,                       // target location
+        types.Build(),                    // machine_sig
+        locations.Build(),                // location_sig
+        js_parameter_count,               // js_parameter_count
+        properties,                       // properties
+        kNoCalleeSaved,                   // callee-saved
+        flags,                            // flags
+        function->name);                  // debug name
   }
 
 
   // TODO(turbofan): cache call descriptors for code stub calls.
   static CallDescriptor* GetStubCallDescriptor(
-      Zone* zone, CallInterfaceDescriptor descriptor, int stack_parameter_count,
-      CallDescriptor::Flags flags) {
+      Zone* zone, const CallInterfaceDescriptor& descriptor,
+      int stack_parameter_count, CallDescriptor::Flags flags,
+      Operator::Properties properties) {
     const int register_parameter_count =
         descriptor.GetEnvironmentParameterCount();
     const int js_parameter_count =
@@ -167,16 +172,17 @@
     // The target for stub calls is a code object.
     MachineType target_type = kMachAnyTagged;
     LinkageLocation target_loc = LinkageLocation::AnyRegister();
-    return new (zone) CallDescriptor(CallDescriptor::kCallCodeObject,  // kind
-                                     target_type,         // target MachineType
-                                     target_loc,          // target location
-                                     types.Build(),       // machine_sig
-                                     locations.Build(),   // location_sig
-                                     js_parameter_count,  // js_parameter_count
-                                     Operator::kNoProperties,  // properties
-                                     kNoCalleeSaved,  // callee-saved registers
-                                     flags,           // flags
-                                     descriptor.DebugName(zone->isolate()));
+    return new (zone) CallDescriptor(     // --
+        CallDescriptor::kCallCodeObject,  // kind
+        target_type,                      // target MachineType
+        target_loc,                       // target location
+        types.Build(),                    // machine_sig
+        locations.Build(),                // location_sig
+        js_parameter_count,               // js_parameter_count
+        properties,                       // properties
+        kNoCalleeSaved,                   // callee-saved registers
+        flags,                            // flags
+        descriptor.DebugName(zone->isolate()));
   }
 
   static CallDescriptor* GetSimplifiedCDescriptor(Zone* zone,
@@ -199,15 +205,16 @@
     // The target for C calls is always an address (i.e. machine pointer).
     MachineType target_type = kMachPtr;
     LinkageLocation target_loc = LinkageLocation::AnyRegister();
-    return new (zone) CallDescriptor(CallDescriptor::kCallAddress,  // kind
-                                     target_type,        // target MachineType
-                                     target_loc,         // target location
-                                     msig,               // machine_sig
-                                     locations.Build(),  // location_sig
-                                     0,                  // js_parameter_count
-                                     Operator::kNoProperties,  // properties
-                                     LinkageTraits::CCalleeSaveRegisters(),
-                                     CallDescriptor::kNoFlags, "c-call");
+    return new (zone) CallDescriptor(  // --
+        CallDescriptor::kCallAddress,  // kind
+        target_type,                   // target MachineType
+        target_loc,                    // target location
+        msig,                          // machine_sig
+        locations.Build(),             // location_sig
+        0,                             // js_parameter_count
+        Operator::kNoProperties,       // properties
+        LinkageTraits::CCalleeSaveRegisters(), CallDescriptor::kNoFlags,
+        "c-call");
   }
 
   static LinkageLocation regloc(Register reg) {
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
index 465a667..fc6b19e 100644
--- a/src/compiler/linkage.cc
+++ b/src/compiler/linkage.cc
@@ -2,10 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/compiler/linkage.h"
-
 #include "src/code-stubs.h"
 #include "src/compiler.h"
+#include "src/compiler/linkage.h"
 #include "src/compiler/node.h"
 #include "src/compiler/pipeline.h"
 #include "src/scopes.h"
@@ -15,7 +14,7 @@
 namespace compiler {
 
 
-OStream& operator<<(OStream& os, const CallDescriptor::Kind& k) {
+std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k) {
   switch (k) {
     case CallDescriptor::kCallCodeObject:
       os << "Code";
@@ -31,7 +30,7 @@
 }
 
 
-OStream& operator<<(OStream& os, const CallDescriptor& d) {
+std::ostream& operator<<(std::ostream& os, const CallDescriptor& d) {
   // TODO(svenpanne) Output properties etc. and be less cryptic.
   return os << d.kind() << ":" << d.debug_name() << ":r" << d.ReturnCount()
             << "j" << d.JSParameterCount() << "i" << d.InputCount() << "f"
@@ -39,28 +38,33 @@
 }
 
 
-Linkage::Linkage(CompilationInfo* info) : info_(info) {
+CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
   if (info->function() != NULL) {
     // If we already have the function literal, use the number of parameters
     // plus the receiver.
-    incoming_ = GetJSCallDescriptor(1 + info->function()->parameter_count());
-  } else if (!info->closure().is_null()) {
+    return GetJSCallDescriptor(1 + info->function()->parameter_count(), zone,
+                               CallDescriptor::kNoFlags);
+  }
+  if (!info->closure().is_null()) {
     // If we are compiling a JS function, use a JS call descriptor,
     // plus the receiver.
     SharedFunctionInfo* shared = info->closure()->shared();
-    incoming_ = GetJSCallDescriptor(1 + shared->formal_parameter_count());
-  } else if (info->code_stub() != NULL) {
+    return GetJSCallDescriptor(1 + shared->formal_parameter_count(), zone,
+                               CallDescriptor::kNoFlags);
+  }
+  if (info->code_stub() != NULL) {
     // Use the code stub interface descriptor.
     CallInterfaceDescriptor descriptor =
         info->code_stub()->GetCallInterfaceDescriptor();
-    incoming_ = GetStubCallDescriptor(descriptor);
-  } else {
-    incoming_ = NULL;  // TODO(titzer): ?
+    return GetStubCallDescriptor(descriptor, 0, CallDescriptor::kNoFlags,
+                                 Operator::kNoProperties, zone);
   }
+  return NULL;  // TODO(titzer): ?
 }
 
 
-FrameOffset Linkage::GetFrameOffset(int spill_slot, Frame* frame, int extra) {
+FrameOffset Linkage::GetFrameOffset(int spill_slot, Frame* frame,
+                                    int extra) const {
   if (frame->GetSpillSlotCount() > 0 || incoming_->IsJSFunctionCall() ||
       incoming_->kind() == CallDescriptor::kCallAddress) {
     int offset;
@@ -87,24 +91,24 @@
 }
 
 
-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count) {
-  return GetJSCallDescriptor(parameter_count, this->info_->zone());
+CallDescriptor* Linkage::GetJSCallDescriptor(
+    int parameter_count, CallDescriptor::Flags flags) const {
+  return GetJSCallDescriptor(parameter_count, zone_, flags);
 }
 
 
 CallDescriptor* Linkage::GetRuntimeCallDescriptor(
     Runtime::FunctionId function, int parameter_count,
-    Operator::Properties properties) {
-  return GetRuntimeCallDescriptor(function, parameter_count, properties,
-                                  this->info_->zone());
+    Operator::Properties properties) const {
+  return GetRuntimeCallDescriptor(function, parameter_count, properties, zone_);
 }
 
 
 CallDescriptor* Linkage::GetStubCallDescriptor(
-    CallInterfaceDescriptor descriptor, int stack_parameter_count,
-    CallDescriptor::Flags flags) {
+    const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
+    CallDescriptor::Flags flags, Operator::Properties properties) const {
   return GetStubCallDescriptor(descriptor, stack_parameter_count, flags,
-                               this->info_->zone());
+                               properties, zone_);
 }
 
 
@@ -116,16 +120,98 @@
   // TODO(jarin) At the moment, we only add frame state for
   // few chosen runtime functions.
   switch (function) {
-    case Runtime::kDebugBreak:
-    case Runtime::kDebugGetLoadedScripts:
-    case Runtime::kDeoptimizeFunction:
-    case Runtime::kInlineCallFunction:
-    case Runtime::kPrepareStep:
-    case Runtime::kSetScriptBreakPoint:
-    case Runtime::kStackGuard:
+    case Runtime::kApply:
+    case Runtime::kArrayBufferNeuter:
+    case Runtime::kArrayConcat:
+    case Runtime::kBasicJSONStringify:
     case Runtime::kCheckExecutionState:
-    case Runtime::kDebugEvaluate:
     case Runtime::kCollectStackTrace:
+    case Runtime::kCompileLazy:
+    case Runtime::kCompileOptimized:
+    case Runtime::kCompileString:
+    case Runtime::kCreateObjectLiteral:
+    case Runtime::kDebugBreak:
+    case Runtime::kDataViewSetInt8:
+    case Runtime::kDataViewSetUint8:
+    case Runtime::kDataViewSetInt16:
+    case Runtime::kDataViewSetUint16:
+    case Runtime::kDataViewSetInt32:
+    case Runtime::kDataViewSetUint32:
+    case Runtime::kDataViewSetFloat32:
+    case Runtime::kDataViewSetFloat64:
+    case Runtime::kDataViewGetInt8:
+    case Runtime::kDataViewGetUint8:
+    case Runtime::kDataViewGetInt16:
+    case Runtime::kDataViewGetUint16:
+    case Runtime::kDataViewGetInt32:
+    case Runtime::kDataViewGetUint32:
+    case Runtime::kDataViewGetFloat32:
+    case Runtime::kDataViewGetFloat64:
+    case Runtime::kDebugEvaluate:
+    case Runtime::kDebugEvaluateGlobal:
+    case Runtime::kDebugGetLoadedScripts:
+    case Runtime::kDebugGetPropertyDetails:
+    case Runtime::kDebugPromiseEvent:
+    case Runtime::kDefineAccessorPropertyUnchecked:
+    case Runtime::kDefineDataPropertyUnchecked:
+    case Runtime::kDeleteProperty:
+    case Runtime::kDeoptimizeFunction:
+    case Runtime::kFunctionBindArguments:
+    case Runtime::kGetDefaultReceiver:
+    case Runtime::kGetFrameCount:
+    case Runtime::kGetOwnProperty:
+    case Runtime::kGetOwnPropertyNames:
+    case Runtime::kGetPropertyNamesFast:
+    case Runtime::kGetPrototype:
+    case Runtime::kInlineArguments:
+    case Runtime::kInlineCallFunction:
+    case Runtime::kInlineDateField:
+    case Runtime::kInlineRegExpExec:
+    case Runtime::kInternalSetPrototype:
+    case Runtime::kInterrupt:
+    case Runtime::kIsPropertyEnumerable:
+    case Runtime::kIsSloppyModeFunction:
+    case Runtime::kLiveEditGatherCompileInfo:
+    case Runtime::kLoadLookupSlot:
+    case Runtime::kLoadLookupSlotNoReferenceError:
+    case Runtime::kMaterializeRegExpLiteral:
+    case Runtime::kNewObject:
+    case Runtime::kNewObjectFromBound:
+    case Runtime::kNewObjectWithAllocationSite:
+    case Runtime::kObjectFreeze:
+    case Runtime::kOwnKeys:
+    case Runtime::kParseJson:
+    case Runtime::kPrepareStep:
+    case Runtime::kPreventExtensions:
+    case Runtime::kPromiseRejectEvent:
+    case Runtime::kPromiseRevokeReject:
+    case Runtime::kRegExpInitializeAndCompile:
+    case Runtime::kRegExpExecMultiple:
+    case Runtime::kResolvePossiblyDirectEval:
+    case Runtime::kRunMicrotasks:
+    case Runtime::kSetPrototype:
+    case Runtime::kSetScriptBreakPoint:
+    case Runtime::kSparseJoinWithSeparator:
+    case Runtime::kStackGuard:
+    case Runtime::kStoreKeyedToSuper_Sloppy:
+    case Runtime::kStoreKeyedToSuper_Strict:
+    case Runtime::kStoreToSuper_Sloppy:
+    case Runtime::kStoreToSuper_Strict:
+    case Runtime::kStoreLookupSlot:
+    case Runtime::kStringBuilderConcat:
+    case Runtime::kStringBuilderJoin:
+    case Runtime::kStringMatch:
+    case Runtime::kStringReplaceGlobalRegExpWithString:
+    case Runtime::kThrowNonMethodError:
+    case Runtime::kThrowNotDateError:
+    case Runtime::kThrowReferenceError:
+    case Runtime::kThrowUnsupportedSuperError:
+    case Runtime::kThrow:
+    case Runtime::kTypedArraySetFastCases:
+    case Runtime::kTypedArrayInitializeFromArrayLike:
+#ifdef V8_I18N_SUPPORT
+    case Runtime::kGetImplFromInitializedIntlObject:
+#endif
       return true;
     default:
       return false;
@@ -137,7 +223,8 @@
 // Provide unimplemented methods on unsupported architectures, to at least link.
 //==============================================================================
 #if !V8_TURBOFAN_BACKEND
-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
+                                             CallDescriptor::Flags flags) {
   UNIMPLEMENTED();
   return NULL;
 }
@@ -152,8 +239,9 @@
 
 
 CallDescriptor* Linkage::GetStubCallDescriptor(
-    CallInterfaceDescriptor descriptor, int stack_parameter_count,
-    CallDescriptor::Flags flags, Zone* zone) {
+    const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
+    CallDescriptor::Flags flags, Operator::Properties properties,
+    Zone* zone) {
   UNIMPLEMENTED();
   return NULL;
 }
diff --git a/src/compiler/linkage.h b/src/compiler/linkage.h
index c5cef5e..0ad0761 100644
--- a/src/compiler/linkage.h
+++ b/src/compiler/linkage.h
@@ -6,15 +6,16 @@
 #define V8_COMPILER_LINKAGE_H_
 
 #include "src/base/flags.h"
-#include "src/code-stubs.h"
 #include "src/compiler/frame.h"
 #include "src/compiler/machine-type.h"
-#include "src/compiler/node.h"
 #include "src/compiler/operator.h"
 #include "src/zone.h"
 
 namespace v8 {
 namespace internal {
+
+class CallInterfaceDescriptor;
+
 namespace compiler {
 
 // Describes the location for a parameter or a return value to a call.
@@ -129,22 +130,24 @@
  private:
   friend class Linkage;
 
-  Kind kind_;
-  MachineType target_type_;
-  LinkageLocation target_loc_;
-  MachineSignature* machine_sig_;
-  LocationSignature* location_sig_;
-  size_t js_param_count_;
-  Operator::Properties properties_;
-  RegList callee_saved_registers_;
-  Flags flags_;
-  const char* debug_name_;
+  const Kind kind_;
+  const MachineType target_type_;
+  const LinkageLocation target_loc_;
+  const MachineSignature* const machine_sig_;
+  const LocationSignature* const location_sig_;
+  const size_t js_param_count_;
+  const Operator::Properties properties_;
+  const RegList callee_saved_registers_;
+  const Flags flags_;
+  const char* const debug_name_;
+
+  DISALLOW_COPY_AND_ASSIGN(CallDescriptor);
 };
 
 DEFINE_OPERATORS_FOR_FLAGS(CallDescriptor::Flags)
 
-OStream& operator<<(OStream& os, const CallDescriptor& d);
-OStream& operator<<(OStream& os, const CallDescriptor::Kind& k);
+std::ostream& operator<<(std::ostream& os, const CallDescriptor& d);
+std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k);
 
 // Defines the linkage for a compilation, including the calling conventions
 // for incoming parameters and return value(s) as well as the outgoing calling
@@ -161,28 +164,34 @@
 // Call[Runtime]    CEntryStub, arg 1, arg 2, arg 3, [...], fun, #arg, context
 class Linkage : public ZoneObject {
  public:
-  explicit Linkage(CompilationInfo* info);
-  explicit Linkage(CompilationInfo* info, CallDescriptor* incoming)
-      : info_(info), incoming_(incoming) {}
+  Linkage(Zone* zone, CompilationInfo* info)
+      : zone_(zone), incoming_(ComputeIncoming(zone, info)) {}
+  Linkage(Zone* zone, CallDescriptor* incoming)
+      : zone_(zone), incoming_(incoming) {}
+
+  static CallDescriptor* ComputeIncoming(Zone* zone, CompilationInfo* info);
 
   // The call descriptor for this compilation unit describes the locations
   // of incoming parameters and the outgoing return value(s).
-  CallDescriptor* GetIncomingDescriptor() { return incoming_; }
-  CallDescriptor* GetJSCallDescriptor(int parameter_count);
-  static CallDescriptor* GetJSCallDescriptor(int parameter_count, Zone* zone);
-  CallDescriptor* GetRuntimeCallDescriptor(Runtime::FunctionId function,
-                                           int parameter_count,
-                                           Operator::Properties properties);
+  CallDescriptor* GetIncomingDescriptor() const { return incoming_; }
+  CallDescriptor* GetJSCallDescriptor(int parameter_count,
+                                      CallDescriptor::Flags flags) const;
+  static CallDescriptor* GetJSCallDescriptor(int parameter_count, Zone* zone,
+                                             CallDescriptor::Flags flags);
+  CallDescriptor* GetRuntimeCallDescriptor(
+      Runtime::FunctionId function, int parameter_count,
+      Operator::Properties properties) const;
   static CallDescriptor* GetRuntimeCallDescriptor(
       Runtime::FunctionId function, int parameter_count,
       Operator::Properties properties, Zone* zone);
 
   CallDescriptor* GetStubCallDescriptor(
-      CallInterfaceDescriptor descriptor, int stack_parameter_count = 0,
-      CallDescriptor::Flags flags = CallDescriptor::kNoFlags);
+      const CallInterfaceDescriptor& descriptor, int stack_parameter_count = 0,
+      CallDescriptor::Flags flags = CallDescriptor::kNoFlags,
+      Operator::Properties properties = Operator::kNoProperties) const;
   static CallDescriptor* GetStubCallDescriptor(
-      CallInterfaceDescriptor descriptor, int stack_parameter_count,
-      CallDescriptor::Flags flags, Zone* zone);
+      const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
+      CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone);
 
   // Creates a call descriptor for simplified C calls that is appropriate
   // for the host platform. This simplified calling convention only supports
@@ -192,37 +201,37 @@
                                                   MachineSignature* sig);
 
   // Get the location of an (incoming) parameter to this function.
-  LinkageLocation GetParameterLocation(int index) {
+  LinkageLocation GetParameterLocation(int index) const {
     return incoming_->GetInputLocation(index + 1);  // + 1 to skip target.
   }
 
   // Get the machine type of an (incoming) parameter to this function.
-  MachineType GetParameterType(int index) {
+  MachineType GetParameterType(int index) const {
     return incoming_->GetInputType(index + 1);  // + 1 to skip target.
   }
 
   // Get the location where this function should place its return value.
-  LinkageLocation GetReturnLocation() {
+  LinkageLocation GetReturnLocation() const {
     return incoming_->GetReturnLocation(0);
   }
 
   // Get the machine type of this function's return value.
-  MachineType GetReturnType() { return incoming_->GetReturnType(0); }
+  MachineType GetReturnType() const { return incoming_->GetReturnType(0); }
 
   // Get the frame offset for a given spill slot. The location depends on the
   // calling convention and the specific frame layout, and may thus be
   // architecture-specific. Negative spill slots indicate arguments on the
   // caller's frame. The {extra} parameter indicates an additional offset from
   // the frame offset, e.g. to index into part of a double slot.
-  FrameOffset GetFrameOffset(int spill_slot, Frame* frame, int extra = 0);
-
-  CompilationInfo* info() const { return info_; }
+  FrameOffset GetFrameOffset(int spill_slot, Frame* frame, int extra = 0) const;
 
   static bool NeedsFrameState(Runtime::FunctionId function);
 
  private:
-  CompilationInfo* info_;
-  CallDescriptor* incoming_;
+  Zone* const zone_;
+  CallDescriptor* const incoming_;
+
+  DISALLOW_COPY_AND_ASSIGN(Linkage);
 };
 
 }  // namespace compiler
diff --git a/src/compiler/load-elimination.cc b/src/compiler/load-elimination.cc
new file mode 100644
index 0000000..fe0714e
--- /dev/null
+++ b/src/compiler/load-elimination.cc
@@ -0,0 +1,76 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/load-elimination.h"
+
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+LoadElimination::~LoadElimination() {}
+
+
+Reduction LoadElimination::Reduce(Node* node) {
+  switch (node->opcode()) {
+    case IrOpcode::kLoadField:
+      return ReduceLoadField(node);
+    default:
+      break;
+  }
+  return NoChange();
+}
+
+
+Reduction LoadElimination::ReduceLoadField(Node* node) {
+  DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
+  FieldAccess const access = FieldAccessOf(node->op());
+  Node* const object = NodeProperties::GetValueInput(node, 0);
+  for (Node* effect = NodeProperties::GetEffectInput(node);;
+       effect = NodeProperties::GetEffectInput(effect)) {
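+    // Walk up the effect chain looking for an earlier load or store of the
+    // same field on the same object; give up at any effect that might write.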
+    switch (effect->opcode()) {
+      case IrOpcode::kLoadField: {
+        if (object == NodeProperties::GetValueInput(effect, 0) &&
+            access == FieldAccessOf(effect->op())) {
+          Node* const value = effect;
+          NodeProperties::ReplaceWithValue(node, value);
+          return Replace(value);
+        }
+        break;
+      }
+      case IrOpcode::kStoreField: {
+        if (access == FieldAccessOf(effect->op())) {
+          if (object == NodeProperties::GetValueInput(effect, 0)) {
+            Node* const value = NodeProperties::GetValueInput(effect, 1);
+            NodeProperties::ReplaceWithValue(node, value);
+            return Replace(value);
+          }
+          // TODO(turbofan): Alias analysis to the rescue?
+          return NoChange();
+        }
+        break;
+      }
+      case IrOpcode::kStoreBuffer:
+      case IrOpcode::kStoreElement: {
+        // These can never interfere with field loads.
+        break;
+      }
+      default: {
+        if (!effect->op()->HasProperty(Operator::kNoWrite) ||
+            effect->op()->EffectInputCount() != 1) {
+          return NoChange();
+        }
+        break;
+      }
+    }
+  }
+  UNREACHABLE();
+  return NoChange();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/load-elimination.h b/src/compiler/load-elimination.h
new file mode 100644
index 0000000..6917ce3
--- /dev/null
+++ b/src/compiler/load-elimination.h
@@ -0,0 +1,29 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_LOAD_ELIMINATION_H_
+#define V8_COMPILER_LOAD_ELIMINATION_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class LoadElimination FINAL : public Reducer {
+ public:
+  LoadElimination() {}
+  ~LoadElimination() FINAL;
+
+  Reduction Reduce(Node* node) FINAL;
+
+ private:
+  Reduction ReduceLoadField(Node* node);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_LOAD_ELIMINATION_H_
diff --git a/src/compiler/loop-analysis.cc b/src/compiler/loop-analysis.cc
new file mode 100644
index 0000000..e1b703e
--- /dev/null
+++ b/src/compiler/loop-analysis.cc
@@ -0,0 +1,411 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph.h"
+#include "src/compiler/loop-analysis.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef uint32_t LoopMarks;
+
+
+// TODO(titzer): don't assume entry edges have a particular index.
+// TODO(titzer): use a BitMatrix to generalize this algorithm.
+static const size_t kMaxLoops = 31;
+static const int kAssumedLoopEntryIndex = 0;  // assume loops are entered here.
+static const LoopMarks kVisited = 1;          // loop #0 is reserved.
+
+
+// Temporary information for each node during marking.
+struct NodeInfo {
+  Node* node;
+  NodeInfo* next;       // link in chaining loop members
+  LoopMarks forward;    // accumulated marks in the forward direction
+  LoopMarks backward;   // accumulated marks in the backward direction
+  LoopMarks loop_mark;  // loop mark for header nodes; encodes loop_num
+
+  bool MarkBackward(LoopMarks bw) {
+    LoopMarks prev = backward;
+    LoopMarks next = backward | bw;
+    backward = next;
+    return prev != next;
+  }
+
+  bool MarkForward(LoopMarks fw) {
+    LoopMarks prev = forward;
+    LoopMarks next = forward | fw;
+    forward = next;
+    return prev != next;
+  }
+
+  bool IsInLoop(size_t loop_num) {
+    DCHECK(loop_num > 0 && loop_num <= kMaxLoops);
+    return forward & backward & (1 << loop_num);
+  }
+
+  bool IsLoopHeader() { return loop_mark != 0; }
+  bool IsInAnyLoop() { return (forward & backward) > kVisited; }
+
+  bool IsInHeaderForLoop(size_t loop_num) {
+    DCHECK(loop_num > 0);
+    return loop_mark == (kVisited | (1 << loop_num));
+  }
+};
+
+
+// Temporary loop info needed during traversal and building the loop tree.
+struct LoopInfo {
+  Node* header;
+  NodeInfo* header_list;
+  NodeInfo* body_list;
+  LoopTree::Loop* loop;
+};
+
+
+static const NodeInfo kEmptyNodeInfo = {nullptr, nullptr, 0, 0, 0};
+
+
+// Encapsulation of the loop finding algorithm.
+// -----------------------------------------------------------------------------
+// Conceptually, the contents of a loop are those nodes that are "between" the
+// loop header and the backedges of the loop. Graphs in the soup of nodes can
+// form improper cycles, so standard loop finding algorithms that work on CFGs
+// aren't sufficient. However, in valid TurboFan graphs, all cycles involve
+// either a {Loop} node or a phi. The {Loop} node itself and its accompanying
+// phis are treated together as a set referred to here as the loop header.
+// This loop finding algorithm works by traversing the graph in two directions,
+// first from nodes to their inputs, starting at {end}, then in the reverse
+// direction, from nodes to their uses, starting at loop headers.
+// One bit per loop per node per direction is needed during the marking phase.
+// To handle nested loops correctly, the algorithm must filter some reachability
+// marks on edges into/out-of the loop header nodes.
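+//
+// A node belongs to loop {i} exactly when bit {i} is set in both its forward
+// and backward marks, i.e. it lies on a path from the header to a backedge.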
+class LoopFinderImpl {
+ public:
+  LoopFinderImpl(Graph* graph, LoopTree* loop_tree, Zone* zone)
+      : end_(graph->end()),
+        queue_(zone),
+        queued_(graph, 2),
+        info_(graph->NodeCount(), kEmptyNodeInfo, zone),
+        loops_(zone),
+        loop_tree_(loop_tree),
+        loops_found_(0) {}
+
+  void Run() {
+    PropagateBackward();
+    PropagateForward();
+    FinishLoopTree();
+  }
+
+  void Print() {
+    // Print out the results.
+    for (NodeInfo& ni : info_) {
+      if (ni.node == nullptr) continue;
+      for (size_t i = 1; i <= loops_.size(); i++) {
+        if (ni.IsInLoop(i)) {
+          PrintF("X");
+        } else if (ni.forward & (1 << i)) {
+          PrintF("/");
+        } else if (ni.backward & (1 << i)) {
+          PrintF("\\");
+        } else {
+          PrintF(" ");
+        }
+      }
+      PrintF(" #%d:%s\n", ni.node->id(), ni.node->op()->mnemonic());
+    }
+
+    int i = 0;
+    for (LoopInfo& li : loops_) {
+      PrintF("Loop %d headed at #%d\n", i, li.header->id());
+      i++;
+    }
+
+    for (LoopTree::Loop* loop : loop_tree_->outer_loops_) {
+      PrintLoop(loop);
+    }
+  }
+
+ private:
+  Node* end_;
+  NodeDeque queue_;
+  NodeMarker<bool> queued_;
+  ZoneVector<NodeInfo> info_;
+  ZoneVector<LoopInfo> loops_;
+  LoopTree* loop_tree_;
+  size_t loops_found_;
+
+  // Propagate marks backward from loop headers.
+  void PropagateBackward() {
+    PropagateBackward(end_, kVisited);
+
+    while (!queue_.empty()) {
+      Node* node = queue_.front();
+      queue_.pop_front();
+      queued_.Set(node, false);
+
+      // Setup loop headers first.
+      if (node->opcode() == IrOpcode::kLoop) {
+        // found the loop node first.
+        CreateLoopInfo(node);
+      } else if (node->opcode() == IrOpcode::kPhi ||
+                 node->opcode() == IrOpcode::kEffectPhi) {
+        // found a phi first.
+        Node* merge = node->InputAt(node->InputCount() - 1);
+        if (merge->opcode() == IrOpcode::kLoop) CreateLoopInfo(merge);
+      }
+
+      // Propagate reachability marks backwards from this node.
+      NodeInfo& ni = info(node);
+      if (ni.IsLoopHeader()) {
+        // Handle edges from loop header nodes specially.
+        for (int i = 0; i < node->InputCount(); i++) {
+          if (i == kAssumedLoopEntryIndex) {
+            // Don't propagate the loop mark backwards on the entry edge.
+            PropagateBackward(node->InputAt(0),
+                              kVisited | (ni.backward & ~ni.loop_mark));
+          } else {
+            // Only propagate the loop mark on backedges.
+            PropagateBackward(node->InputAt(i), ni.loop_mark);
+          }
+        }
+      } else {
+        // Propagate all loop marks backwards for a normal node.
+        for (Node* const input : node->inputs()) {
+          PropagateBackward(input, ni.backward);
+        }
+      }
+    }
+  }
+
+  // Make a new loop header for the given node.
+  void CreateLoopInfo(Node* node) {
+    NodeInfo& ni = info(node);
+    if (ni.IsLoopHeader()) return;  // loop already set up.
+
+    loops_found_++;
+    size_t loop_num = loops_.size() + 1;
+    CHECK(loops_found_ <= kMaxLoops);  // TODO(titzer): don't crash.
+    // Create a new loop.
+    loops_.push_back({node, nullptr, nullptr, nullptr});
+    loop_tree_->NewLoop();
+    LoopMarks loop_mark = kVisited | (1 << loop_num);
+    ni.node = node;
+    ni.loop_mark = loop_mark;
+
+    // Setup loop mark for phis attached to loop header.
+    for (Node* use : node->uses()) {
+      if (use->opcode() == IrOpcode::kPhi ||
+          use->opcode() == IrOpcode::kEffectPhi) {
+        info(use).loop_mark = loop_mark;
+      }
+    }
+  }
+
+  // Propagate marks forward from loops.
+  void PropagateForward() {
+    for (LoopInfo& li : loops_) {
+      queued_.Set(li.header, true);
+      queue_.push_back(li.header);
+      NodeInfo& ni = info(li.header);
+      ni.forward = ni.loop_mark;
+    }
+    // Propagate forward on paths that were backward reachable from backedges.
+    while (!queue_.empty()) {
+      Node* node = queue_.front();
+      queue_.pop_front();
+      queued_.Set(node, false);
+      NodeInfo& ni = info(node);
+      for (Edge edge : node->use_edges()) {
+        Node* use = edge.from();
+        NodeInfo& ui = info(use);
+        if (IsBackedge(use, ui, edge)) continue;  // skip backedges.
+        LoopMarks both = ni.forward & ui.backward;
+        if (ui.MarkForward(both) && !queued_.Get(use)) {
+          queued_.Set(use, true);
+          queue_.push_back(use);
+        }
+      }
+    }
+  }
+
+  bool IsBackedge(Node* use, NodeInfo& ui, Edge& edge) {
+    // TODO(titzer): checking for backedges here is ugly.
+    if (!ui.IsLoopHeader()) return false;
+    if (edge.index() == kAssumedLoopEntryIndex) return false;
+    if (use->opcode() == IrOpcode::kPhi ||
+        use->opcode() == IrOpcode::kEffectPhi) {
+      return !NodeProperties::IsControlEdge(edge);
+    }
+    return true;
+  }
+
+  NodeInfo& info(Node* node) {
+    NodeInfo& i = info_[node->id()];
+    if (i.node == nullptr) i.node = node;
+    return i;
+  }
+
+  void PropagateBackward(Node* node, LoopMarks marks) {
+    if (info(node).MarkBackward(marks) && !queued_.Get(node)) {
+      queue_.push_back(node);
+      queued_.Set(node, true);
+    }
+  }
+
+  void FinishLoopTree() {
+    // Degenerate cases.
+    if (loops_.size() == 0) return;
+    if (loops_.size() == 1) return FinishSingleLoop();
+
+    for (size_t i = 1; i <= loops_.size(); i++) ConnectLoopTree(i);
+
+    size_t count = 0;
+    // Place each node into the innermost nested loop of which it is a member.
+    for (NodeInfo& ni : info_) {
+      if (ni.node == nullptr || !ni.IsInAnyLoop()) continue;
+
+      LoopInfo* innermost = nullptr;
+      size_t index = 0;
+      for (size_t i = 1; i <= loops_.size(); i++) {
+        if (ni.IsInLoop(i)) {
+          LoopInfo* loop = &loops_[i - 1];
+          if (innermost == nullptr ||
+              loop->loop->depth_ > innermost->loop->depth_) {
+            innermost = loop;
+            index = i;
+          }
+        }
+      }
+      if (ni.IsInHeaderForLoop(index)) {
+        ni.next = innermost->header_list;
+        innermost->header_list = &ni;
+      } else {
+        ni.next = innermost->body_list;
+        innermost->body_list = &ni;
+      }
+      count++;
+    }
+
+    // Serialize the node lists for loops into the loop tree.
+    loop_tree_->loop_nodes_.reserve(count);
+    for (LoopTree::Loop* loop : loop_tree_->outer_loops_) {
+      SerializeLoop(loop);
+    }
+  }
+
+  // Handle the simpler case of a single loop (no checks for nesting necessary).
+  void FinishSingleLoop() {
+    DCHECK(loops_.size() == 1);
+    DCHECK(loop_tree_->all_loops_.size() == 1);
+
+    // Place nodes into the loop header and body.
+    LoopInfo* li = &loops_[0];
+    li->loop = &loop_tree_->all_loops_[0];
+    loop_tree_->SetParent(nullptr, li->loop);
+    size_t count = 0;
+    for (NodeInfo& ni : info_) {
+      if (ni.node == nullptr || !ni.IsInAnyLoop()) continue;
+      DCHECK(ni.IsInLoop(1));
+      if (ni.IsInHeaderForLoop(1)) {
+        ni.next = li->header_list;
+        li->header_list = &ni;
+      } else {
+        ni.next = li->body_list;
+        li->body_list = &ni;
+      }
+      count++;
+    }
+
+    // Serialize the node lists for the loop into the loop tree.
+    loop_tree_->loop_nodes_.reserve(count);
+    SerializeLoop(li->loop);
+  }
+
+  // Recursively serialize the list of header nodes and body nodes
+  // so that nested loops occupy nested intervals.
+  void SerializeLoop(LoopTree::Loop* loop) {
+    size_t loop_num = loop_tree_->LoopNum(loop);
+    LoopInfo& li = loops_[loop_num - 1];
+
+    // Serialize the header.
+    loop->header_start_ = static_cast<int>(loop_tree_->loop_nodes_.size());
+    for (NodeInfo* ni = li.header_list; ni != nullptr; ni = ni->next) {
+      loop_tree_->loop_nodes_.push_back(ni->node);
+      // TODO(titzer): lift loop count restriction.
+      loop_tree_->node_to_loop_num_[ni->node->id()] =
+          static_cast<uint8_t>(loop_num);
+    }
+
+    // Serialize the body.
+    loop->body_start_ = static_cast<int>(loop_tree_->loop_nodes_.size());
+    for (NodeInfo* ni = li.body_list; ni != nullptr; ni = ni->next) {
+      loop_tree_->loop_nodes_.push_back(ni->node);
+      // TODO(titzer): lift loop count restriction.
+      loop_tree_->node_to_loop_num_[ni->node->id()] =
+          static_cast<uint8_t>(loop_num);
+    }
+
+    // Serialize nested loops.
+    for (LoopTree::Loop* child : loop->children_) SerializeLoop(child);
+
+    loop->body_end_ = static_cast<int>(loop_tree_->loop_nodes_.size());
+  }
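
For intuition, a hypothetical layout (not taken from the patch) for an outer loop #1 containing a single inner loop #2; SerializeLoop emits:

  loop_nodes_:  [ H1 H1 | B1 | H2 | B2 B2 ]
                  ^ header_start_ of #1
                          ^ body_start_ of #1
                               ^ header_start_ of #2
                                          ^ body_end_ of #2 == body_end_ of #1

Each nested loop thus occupies a sub-interval of its parent's body interval, so the HeaderNodes/BodyNodes queries in the accompanying header reduce to plain pointer ranges over loop_nodes_.
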
+
+  // Connect the LoopTree loops to their parents recursively.
+  LoopTree::Loop* ConnectLoopTree(size_t loop_num) {
+    LoopInfo& li = loops_[loop_num - 1];
+    if (li.loop != nullptr) return li.loop;
+
+    NodeInfo& ni = info(li.header);
+    LoopTree::Loop* parent = nullptr;
+    for (size_t i = 1; i <= loops_.size(); i++) {
+      if (i == loop_num) continue;
+      if (ni.IsInLoop(i)) {
+        // Recursively create potential parent loops first.
+        LoopTree::Loop* upper = ConnectLoopTree(i);
+        if (parent == nullptr || upper->depth_ > parent->depth_) {
+          parent = upper;
+        }
+      }
+    }
+    li.loop = &loop_tree_->all_loops_[loop_num - 1];
+    loop_tree_->SetParent(parent, li.loop);
+    return li.loop;
+  }
+
+  void PrintLoop(LoopTree::Loop* loop) {
+    for (int i = 0; i < loop->depth_; i++) PrintF("  ");
+    PrintF("Loop depth = %d ", loop->depth_);
+    int i = loop->header_start_;
+    while (i < loop->body_start_) {
+      PrintF(" H#%d", loop_tree_->loop_nodes_[i++]->id());
+    }
+    while (i < loop->body_end_) {
+      PrintF(" B#%d", loop_tree_->loop_nodes_[i++]->id());
+    }
+    PrintF("\n");
+    for (LoopTree::Loop* child : loop->children_) PrintLoop(child);
+  }
+};
+
+
+LoopTree* LoopFinder::BuildLoopTree(Graph* graph, Zone* zone) {
+  LoopTree* loop_tree =
+      new (graph->zone()) LoopTree(graph->NodeCount(), graph->zone());
+  LoopFinderImpl finder(graph, loop_tree, zone);
+  finder.Run();
+  if (FLAG_trace_turbo_graph) {
+    finder.Print();
+  }
+  return loop_tree;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/loop-analysis.h b/src/compiler/loop-analysis.h
new file mode 100644
index 0000000..8c8d19a
--- /dev/null
+++ b/src/compiler/loop-analysis.h
@@ -0,0 +1,135 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_LOOP_ANALYSIS_H_
+#define V8_COMPILER_LOOP_ANALYSIS_H_
+
+#include "src/base/iterator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/node.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class LoopFinderImpl;
+
+typedef base::iterator_range<Node**> NodeRange;
+
+// Represents a tree of loops in a graph.
+class LoopTree : public ZoneObject {
+ public:
+  LoopTree(size_t num_nodes, Zone* zone)
+      : zone_(zone),
+        outer_loops_(zone),
+        all_loops_(zone),
+        node_to_loop_num_(static_cast<int>(num_nodes), 0, zone),
+        loop_nodes_(zone) {}
+
+  // Represents a loop in the tree of loops, including the header nodes,
+  // the body, and any nested loops.
+  class Loop {
+   public:
+    Loop* parent() const { return parent_; }
+    const ZoneVector<Loop*>& children() const { return children_; }
+    size_t HeaderSize() const { return body_start_ - header_start_; }
+    size_t BodySize() const { return body_end_ - body_start_; }
+    size_t TotalSize() const { return body_end_ - header_start_; }
+
+   private:
+    friend class LoopTree;
+    friend class LoopFinderImpl;
+
+    explicit Loop(Zone* zone)
+        : parent_(nullptr),
+          depth_(0),
+          children_(zone),
+          header_start_(-1),
+          body_start_(-1),
+          body_end_(-1) {}
+    Loop* parent_;
+    int depth_;
+    ZoneVector<Loop*> children_;
+    int header_start_;
+    int body_start_;
+    int body_end_;
+  };
+
+  // Return the innermost nested loop, if any, that contains {node}.
+  Loop* ContainingLoop(Node* node) {
+    if (node->id() >= static_cast<int>(node_to_loop_num_.size()))
+      return nullptr;
+    uint8_t num = node_to_loop_num_[node->id()];
+    return num > 0 ? &all_loops_[num - 1] : nullptr;
+  }
+
+  // Check if the {loop} contains the {node}, either directly or by containing
+  // a nested loop that contains {node}.
+  bool Contains(Loop* loop, Node* node) {
+    for (Loop* c = ContainingLoop(node); c != nullptr; c = c->parent_) {
+      if (c == loop) return true;
+    }
+    return false;
+  }
+
+  // Return the list of outer loops.
+  const ZoneVector<Loop*>& outer_loops() const { return outer_loops_; }
+
+  // Return the unique loop number for a given loop. Loop numbers start at {1}.
+  int LoopNum(Loop* loop) const {
+    return 1 + static_cast<int>(loop - &all_loops_[0]);
+  }
+
+  // Return a range which can iterate over the header nodes of {loop}.
+  NodeRange HeaderNodes(Loop* loop) {
+    return NodeRange(&loop_nodes_[0] + loop->header_start_,
+                     &loop_nodes_[0] + loop->body_start_);
+  }
+
+  // Return a range which can iterate over the body nodes of {loop}.
+  NodeRange BodyNodes(Loop* loop) {
+    return NodeRange(&loop_nodes_[0] + loop->body_start_,
+                     &loop_nodes_[0] + loop->body_end_);
+  }
+
+ private:
+  friend class LoopFinderImpl;
+
+  Loop* NewLoop() {
+    all_loops_.push_back(Loop(zone_));
+    Loop* result = &all_loops_.back();
+    return result;
+  }
+
+  void SetParent(Loop* parent, Loop* child) {
+    if (parent != nullptr) {
+      parent->children_.push_back(child);
+      child->parent_ = parent;
+      child->depth_ = parent->depth_ + 1;
+    } else {
+      outer_loops_.push_back(child);
+    }
+  }
+
+  Zone* zone_;
+  ZoneVector<Loop*> outer_loops_;
+  ZoneVector<Loop> all_loops_;
+  // TODO(titzer): lift loop count restriction.
+  ZoneVector<uint8_t> node_to_loop_num_;
+  ZoneVector<Node*> loop_nodes_;
+};
+
+
+class LoopFinder {
+ public:
+  // Build a loop tree for the entire graph.
+  static LoopTree* BuildLoopTree(Graph* graph, Zone* temp_zone);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_LOOP_ANALYSIS_H_
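
A hypothetical usage sketch of this interface (graph and temp_zone are assumed to be a constructed Graph* and a scratch Zone*; not part of the patch):

LoopTree* tree = LoopFinder::BuildLoopTree(graph, temp_zone);
for (LoopTree::Loop* loop : tree->outer_loops()) {
  for (Node* node : tree->HeaderNodes(loop)) {
    PrintF(" H#%d", node->id());  // the kLoop node and its phis
  }
  for (Node* node : tree->BodyNodes(loop)) {
    PrintF(" B#%d", node->id());  // body nodes, including nested loops
  }
}
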
diff --git a/src/compiler/machine-operator-reducer-unittest.cc b/src/compiler/machine-operator-reducer-unittest.cc
deleted file mode 100644
index f3073ab..0000000
--- a/src/compiler/machine-operator-reducer-unittest.cc
+++ /dev/null
@@ -1,616 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/base/bits.h"
-#include "src/compiler/graph-unittest.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/machine-operator-reducer.h"
-#include "src/compiler/typer.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class MachineOperatorReducerTest : public GraphTest {
- public:
-  explicit MachineOperatorReducerTest(int num_parameters = 2)
-      : GraphTest(num_parameters) {}
-
- protected:
-  Reduction Reduce(Node* node) {
-    Typer typer(zone());
-    JSOperatorBuilder javascript(zone());
-    JSGraph jsgraph(graph(), common(), &javascript, &typer, &machine_);
-    MachineOperatorReducer reducer(&jsgraph);
-    return reducer.Reduce(node);
-  }
-
-  MachineOperatorBuilder* machine() { return &machine_; }
-
- private:
-  MachineOperatorBuilder machine_;
-};
-
-
-template <typename T>
-class MachineOperatorReducerTestWithParam
-    : public MachineOperatorReducerTest,
-      public ::testing::WithParamInterface<T> {
- public:
-  explicit MachineOperatorReducerTestWithParam(int num_parameters = 2)
-      : MachineOperatorReducerTest(num_parameters) {}
-  virtual ~MachineOperatorReducerTestWithParam() {}
-};
-
-
-namespace {
-
-static const float kFloat32Values[] = {
-    -std::numeric_limits<float>::infinity(), -2.70497e+38f, -1.4698e+37f,
-    -1.22813e+35f,                           -1.20555e+35f, -1.34584e+34f,
-    -1.0079e+32f,                            -6.49364e+26f, -3.06077e+25f,
-    -1.46821e+25f,                           -1.17658e+23f, -1.9617e+22f,
-    -2.7357e+20f,                            -1.48708e+13f, -1.89633e+12f,
-    -4.66622e+11f,                           -2.22581e+11f, -1.45381e+10f,
-    -1.3956e+09f,                            -1.32951e+09f, -1.30721e+09f,
-    -1.19756e+09f,                           -9.26822e+08f, -6.35647e+08f,
-    -4.00037e+08f,                           -1.81227e+08f, -5.09256e+07f,
-    -964300.0f,                              -192446.0f,    -28455.0f,
-    -27194.0f,                               -26401.0f,     -20575.0f,
-    -17069.0f,                               -9167.0f,      -960.178f,
-    -113.0f,                                 -62.0f,        -15.0f,
-    -7.0f,                                   -0.0256635f,   -4.60374e-07f,
-    -3.63759e-10f,                           -4.30175e-14f, -5.27385e-15f,
-    -1.48084e-15f,                           -1.05755e-19f, -3.2995e-21f,
-    -1.67354e-23f,                           -1.11885e-23f, -1.78506e-30f,
-    -5.07594e-31f,                           -3.65799e-31f, -1.43718e-34f,
-    -1.27126e-38f,                           -0.0f,         0.0f,
-    1.17549e-38f,                            1.56657e-37f,  4.08512e-29f,
-    3.31357e-28f,                            6.25073e-22f,  4.1723e-13f,
-    1.44343e-09f,                            5.27004e-08f,  9.48298e-08f,
-    5.57888e-07f,                            4.89988e-05f,  0.244326f,
-    12.4895f,                                19.0f,         47.0f,
-    106.0f,                                  538.324f,      564.536f,
-    819.124f,                                7048.0f,       12611.0f,
-    19878.0f,                                20309.0f,      797056.0f,
-    1.77219e+09f,                            1.51116e+11f,  4.18193e+13f,
-    3.59167e+16f,                            3.38211e+19f,  2.67488e+20f,
-    1.78831e+21f,                            9.20914e+21f,  8.35654e+23f,
-    1.4495e+24f,                             5.94015e+25f,  4.43608e+30f,
-    2.44502e+33f,                            2.61152e+33f,  1.38178e+37f,
-    1.71306e+37f,                            3.31899e+38f,  3.40282e+38f,
-    std::numeric_limits<float>::infinity()};
-
-
-static const double kFloat64Values[] = {
-    -V8_INFINITY,  -4.23878e+275, -5.82632e+265, -6.60355e+220, -6.26172e+212,
-    -2.56222e+211, -4.82408e+201, -1.84106e+157, -1.63662e+127, -1.55772e+100,
-    -1.67813e+72,  -2.3382e+55,   -3.179e+30,    -1.441e+09,    -1.0647e+09,
-    -7.99361e+08,  -5.77375e+08,  -2.20984e+08,  -32757,        -13171,
-    -9970,         -3984,         -107,          -105,          -92,
-    -77,           -61,           -0.000208163,  -1.86685e-06,  -1.17296e-10,
-    -9.26358e-11,  -5.08004e-60,  -1.74753e-65,  -1.06561e-71,  -5.67879e-79,
-    -5.78459e-130, -2.90989e-171, -7.15489e-243, -3.76242e-252, -1.05639e-263,
-    -4.40497e-267, -2.19666e-273, -4.9998e-276,  -5.59821e-278, -2.03855e-282,
-    -5.99335e-283, -7.17554e-284, -3.11744e-309, -0.0,          0.0,
-    2.22507e-308,  1.30127e-270,  7.62898e-260,  4.00313e-249,  3.16829e-233,
-    1.85244e-228,  2.03544e-129,  1.35126e-110,  1.01182e-106,  5.26333e-94,
-    1.35292e-90,   2.85394e-83,   1.78323e-77,   5.4967e-57,    1.03207e-25,
-    4.57401e-25,   1.58738e-05,   2,             125,           2310,
-    9636,          14802,         17168,         28945,         29305,
-    4.81336e+07,   1.41207e+08,   4.65962e+08,   1.40499e+09,   2.12648e+09,
-    8.80006e+30,   1.4446e+45,    1.12164e+54,   2.48188e+89,   6.71121e+102,
-    3.074e+112,    4.9699e+152,   5.58383e+166,  4.30654e+172,  7.08824e+185,
-    9.6586e+214,   2.028e+223,    6.63277e+243,  1.56192e+261,  1.23202e+269,
-    5.72883e+289,  8.5798e+290,   1.40256e+294,  1.79769e+308,  V8_INFINITY};
-
-
-static const int32_t kInt32Values[] = {
-    -2147483647 - 1, -1914954528, -1698749618, -1578693386, -1577976073,
-    -1573998034,     -1529085059, -1499540537, -1299205097, -1090814845,
-    -938186388,      -806828902,  -750927650,  -520676892,  -513661538,
-    -453036354,      -433622833,  -282638793,  -28375,      -27788,
-    -22770,          -18806,      -14173,      -11956,      -11200,
-    -10212,          -8160,       -3751,       -2758,       -1522,
-    -121,            -120,        -118,        -117,        -106,
-    -84,             -80,         -74,         -59,         -52,
-    -48,             -39,         -35,         -17,         -11,
-    -10,             -9,          -7,          -5,          0,
-    9,               12,          17,          23,          29,
-    31,              33,          35,          40,          47,
-    55,              56,          62,          64,          67,
-    68,              69,          74,          79,          84,
-    89,              90,          97,          104,         118,
-    124,             126,         127,         7278,        17787,
-    24136,           24202,       25570,       26680,       30242,
-    32399,           420886487,   642166225,   821912648,   822577803,
-    851385718,       1212241078,  1411419304,  1589626102,  1596437184,
-    1876245816,      1954730266,  2008792749,  2045320228,  2147483647};
-
-
-static const int64_t kInt64Values[] = {
-    V8_INT64_C(-9223372036854775807) - 1, V8_INT64_C(-8974392461363618006),
-    V8_INT64_C(-8874367046689588135),     V8_INT64_C(-8269197512118230839),
-    V8_INT64_C(-8146091527100606733),     V8_INT64_C(-7550917981466150848),
-    V8_INT64_C(-7216590251577894337),     V8_INT64_C(-6464086891160048440),
-    V8_INT64_C(-6365616494908257190),     V8_INT64_C(-6305630541365849726),
-    V8_INT64_C(-5982222642272245453),     V8_INT64_C(-5510103099058504169),
-    V8_INT64_C(-5496838675802432701),     V8_INT64_C(-4047626578868642657),
-    V8_INT64_C(-4033755046900164544),     V8_INT64_C(-3554299241457877041),
-    V8_INT64_C(-2482258764588614470),     V8_INT64_C(-1688515425526875335),
-    V8_INT64_C(-924784137176548532),      V8_INT64_C(-725316567157391307),
-    V8_INT64_C(-439022654781092241),      V8_INT64_C(-105545757668917080),
-    V8_INT64_C(-2088319373),              V8_INT64_C(-2073699916),
-    V8_INT64_C(-1844949911),              V8_INT64_C(-1831090548),
-    V8_INT64_C(-1756711933),              V8_INT64_C(-1559409497),
-    V8_INT64_C(-1281179700),              V8_INT64_C(-1211513985),
-    V8_INT64_C(-1182371520),              V8_INT64_C(-785934753),
-    V8_INT64_C(-767480697),               V8_INT64_C(-705745662),
-    V8_INT64_C(-514362436),               V8_INT64_C(-459916580),
-    V8_INT64_C(-312328082),               V8_INT64_C(-302949707),
-    V8_INT64_C(-285499304),               V8_INT64_C(-125701262),
-    V8_INT64_C(-95139843),                V8_INT64_C(-32768),
-    V8_INT64_C(-27542),                   V8_INT64_C(-23600),
-    V8_INT64_C(-18582),                   V8_INT64_C(-17770),
-    V8_INT64_C(-9086),                    V8_INT64_C(-9010),
-    V8_INT64_C(-8244),                    V8_INT64_C(-2890),
-    V8_INT64_C(-103),                     V8_INT64_C(-34),
-    V8_INT64_C(-27),                      V8_INT64_C(-25),
-    V8_INT64_C(-9),                       V8_INT64_C(-7),
-    V8_INT64_C(0),                        V8_INT64_C(2),
-    V8_INT64_C(38),                       V8_INT64_C(58),
-    V8_INT64_C(65),                       V8_INT64_C(93),
-    V8_INT64_C(111),                      V8_INT64_C(1003),
-    V8_INT64_C(1267),                     V8_INT64_C(12797),
-    V8_INT64_C(23122),                    V8_INT64_C(28200),
-    V8_INT64_C(30888),                    V8_INT64_C(42648848),
-    V8_INT64_C(116836693),                V8_INT64_C(263003643),
-    V8_INT64_C(571039860),                V8_INT64_C(1079398689),
-    V8_INT64_C(1145196402),               V8_INT64_C(1184846321),
-    V8_INT64_C(1758281648),               V8_INT64_C(1859991374),
-    V8_INT64_C(1960251588),               V8_INT64_C(2042443199),
-    V8_INT64_C(296220586027987448),       V8_INT64_C(1015494173071134726),
-    V8_INT64_C(1151237951914455318),      V8_INT64_C(1331941174616854174),
-    V8_INT64_C(2022020418667972654),      V8_INT64_C(2450251424374977035),
-    V8_INT64_C(3668393562685561486),      V8_INT64_C(4858229301215502171),
-    V8_INT64_C(4919426235170669383),      V8_INT64_C(5034286595330341762),
-    V8_INT64_C(5055797915536941182),      V8_INT64_C(6072389716149252074),
-    V8_INT64_C(6185309910199801210),      V8_INT64_C(6297328311011094138),
-    V8_INT64_C(6932372858072165827),      V8_INT64_C(8483640924987737210),
-    V8_INT64_C(8663764179455849203),      V8_INT64_C(8877197042645298254),
-    V8_INT64_C(8901543506779157333),      V8_INT64_C(9223372036854775807)};
-
-
-static const uint32_t kUint32Values[] = {
-    0x00000000, 0x00000001, 0xffffffff, 0x1b09788b, 0x04c5fce8, 0xcc0de5bf,
-    0x273a798e, 0x187937a3, 0xece3af83, 0x5495a16b, 0x0b668ecc, 0x11223344,
-    0x0000009e, 0x00000043, 0x0000af73, 0x0000116b, 0x00658ecc, 0x002b3b4c,
-    0x88776655, 0x70000000, 0x07200000, 0x7fffffff, 0x56123761, 0x7fffff00,
-    0x761c4761, 0x80000000, 0x88888888, 0xa0000000, 0xdddddddd, 0xe0000000,
-    0xeeeeeeee, 0xfffffffd, 0xf0000000, 0x007fffff, 0x003fffff, 0x001fffff,
-    0x000fffff, 0x0007ffff, 0x0003ffff, 0x0001ffff, 0x0000ffff, 0x00007fff,
-    0x00003fff, 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff};
-
-}  // namespace
-
-
-// -----------------------------------------------------------------------------
-// Unary operators
-
-
-namespace {
-
-struct UnaryOperator {
-  const Operator* (MachineOperatorBuilder::*constructor)();
-  const char* constructor_name;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const UnaryOperator& unop) {
-  return os << unop.constructor_name;
-}
-
-
-static const UnaryOperator kUnaryOperators[] = {
-    {&MachineOperatorBuilder::ChangeInt32ToFloat64, "ChangeInt32ToFloat64"},
-    {&MachineOperatorBuilder::ChangeUint32ToFloat64, "ChangeUint32ToFloat64"},
-    {&MachineOperatorBuilder::ChangeFloat64ToInt32, "ChangeFloat64ToInt32"},
-    {&MachineOperatorBuilder::ChangeFloat64ToUint32, "ChangeFloat64ToUint32"},
-    {&MachineOperatorBuilder::ChangeInt32ToInt64, "ChangeInt32ToInt64"},
-    {&MachineOperatorBuilder::ChangeUint32ToUint64, "ChangeUint32ToUint64"},
-    {&MachineOperatorBuilder::TruncateFloat64ToInt32, "TruncateFloat64ToInt32"},
-    {&MachineOperatorBuilder::TruncateInt64ToInt32, "TruncateInt64ToInt32"}};
-
-}  // namespace
-
-
-typedef MachineOperatorReducerTestWithParam<UnaryOperator>
-    MachineUnaryOperatorReducerTest;
-
-
-TEST_P(MachineUnaryOperatorReducerTest, Parameter) {
-  const UnaryOperator unop = GetParam();
-  Reduction reduction =
-      Reduce(graph()->NewNode((machine()->*unop.constructor)(), Parameter(0)));
-  EXPECT_FALSE(reduction.Changed());
-}
-
-
-INSTANTIATE_TEST_CASE_P(MachineOperatorReducerTest,
-                        MachineUnaryOperatorReducerTest,
-                        ::testing::ValuesIn(kUnaryOperators));
-
-
-// -----------------------------------------------------------------------------
-// ChangeFloat64ToFloat32
-
-
-TEST_F(MachineOperatorReducerTest, ChangeFloat64ToFloat32WithConstant) {
-  TRACED_FOREACH(float, x, kFloat32Values) {
-    Reduction reduction = Reduce(graph()->NewNode(
-        machine()->ChangeFloat32ToFloat64(), Float32Constant(x)));
-    ASSERT_TRUE(reduction.Changed());
-    EXPECT_THAT(reduction.replacement(), IsFloat64Constant(x));
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// ChangeFloat64ToInt32
-
-
-TEST_F(MachineOperatorReducerTest,
-       ChangeFloat64ToInt32WithChangeInt32ToFloat64) {
-  Node* value = Parameter(0);
-  Reduction reduction = Reduce(graph()->NewNode(
-      machine()->ChangeFloat64ToInt32(),
-      graph()->NewNode(machine()->ChangeInt32ToFloat64(), value)));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_EQ(value, reduction.replacement());
-}
-
-
-TEST_F(MachineOperatorReducerTest, ChangeFloat64ToInt32WithConstant) {
-  TRACED_FOREACH(int32_t, x, kInt32Values) {
-    Reduction reduction = Reduce(graph()->NewNode(
-        machine()->ChangeFloat64ToInt32(), Float64Constant(FastI2D(x))));
-    ASSERT_TRUE(reduction.Changed());
-    EXPECT_THAT(reduction.replacement(), IsInt32Constant(x));
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// ChangeFloat64ToUint32
-
-
-TEST_F(MachineOperatorReducerTest,
-       ChangeFloat64ToUint32WithChangeUint32ToFloat64) {
-  Node* value = Parameter(0);
-  Reduction reduction = Reduce(graph()->NewNode(
-      machine()->ChangeFloat64ToUint32(),
-      graph()->NewNode(machine()->ChangeUint32ToFloat64(), value)));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_EQ(value, reduction.replacement());
-}
-
-
-TEST_F(MachineOperatorReducerTest, ChangeFloat64ToUint32WithConstant) {
-  TRACED_FOREACH(uint32_t, x, kUint32Values) {
-    Reduction reduction = Reduce(graph()->NewNode(
-        machine()->ChangeFloat64ToUint32(), Float64Constant(FastUI2D(x))));
-    ASSERT_TRUE(reduction.Changed());
-    EXPECT_THAT(reduction.replacement(), IsInt32Constant(bit_cast<int32_t>(x)));
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// ChangeInt32ToFloat64
-
-
-TEST_F(MachineOperatorReducerTest, ChangeInt32ToFloat64WithConstant) {
-  TRACED_FOREACH(int32_t, x, kInt32Values) {
-    Reduction reduction = Reduce(
-        graph()->NewNode(machine()->ChangeInt32ToFloat64(), Int32Constant(x)));
-    ASSERT_TRUE(reduction.Changed());
-    EXPECT_THAT(reduction.replacement(), IsFloat64Constant(FastI2D(x)));
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// ChangeInt32ToInt64
-
-
-TEST_F(MachineOperatorReducerTest, ChangeInt32ToInt64WithConstant) {
-  TRACED_FOREACH(int32_t, x, kInt32Values) {
-    Reduction reduction = Reduce(
-        graph()->NewNode(machine()->ChangeInt32ToInt64(), Int32Constant(x)));
-    ASSERT_TRUE(reduction.Changed());
-    EXPECT_THAT(reduction.replacement(), IsInt64Constant(x));
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// ChangeUint32ToFloat64
-
-
-TEST_F(MachineOperatorReducerTest, ChangeUint32ToFloat64WithConstant) {
-  TRACED_FOREACH(uint32_t, x, kUint32Values) {
-    Reduction reduction =
-        Reduce(graph()->NewNode(machine()->ChangeUint32ToFloat64(),
-                                Int32Constant(bit_cast<int32_t>(x))));
-    ASSERT_TRUE(reduction.Changed());
-    EXPECT_THAT(reduction.replacement(), IsFloat64Constant(FastUI2D(x)));
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// ChangeUint32ToUint64
-
-
-TEST_F(MachineOperatorReducerTest, ChangeUint32ToUint64WithConstant) {
-  TRACED_FOREACH(uint32_t, x, kUint32Values) {
-    Reduction reduction =
-        Reduce(graph()->NewNode(machine()->ChangeUint32ToUint64(),
-                                Int32Constant(bit_cast<int32_t>(x))));
-    ASSERT_TRUE(reduction.Changed());
-    EXPECT_THAT(reduction.replacement(),
-                IsInt64Constant(bit_cast<int64_t>(static_cast<uint64_t>(x))));
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// TruncateFloat64ToFloat32
-
-
-TEST_F(MachineOperatorReducerTest,
-       TruncateFloat64ToFloat32WithChangeFloat32ToFloat64) {
-  Node* value = Parameter(0);
-  Reduction reduction = Reduce(graph()->NewNode(
-      machine()->TruncateFloat64ToFloat32(),
-      graph()->NewNode(machine()->ChangeFloat32ToFloat64(), value)));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_EQ(value, reduction.replacement());
-}
-
-
-TEST_F(MachineOperatorReducerTest, TruncateFloat64ToFloat32WithConstant) {
-  TRACED_FOREACH(double, x, kFloat64Values) {
-    Reduction reduction = Reduce(graph()->NewNode(
-        machine()->TruncateFloat64ToFloat32(), Float64Constant(x)));
-    ASSERT_TRUE(reduction.Changed());
-    EXPECT_THAT(reduction.replacement(), IsFloat32Constant(DoubleToFloat32(x)));
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// TruncateFloat64ToInt32
-
-
-TEST_F(MachineOperatorReducerTest,
-       TruncateFloat64ToInt32WithChangeInt32ToFloat64) {
-  Node* value = Parameter(0);
-  Reduction reduction = Reduce(graph()->NewNode(
-      machine()->TruncateFloat64ToInt32(),
-      graph()->NewNode(machine()->ChangeInt32ToFloat64(), value)));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_EQ(value, reduction.replacement());
-}
-
-
-TEST_F(MachineOperatorReducerTest, TruncateFloat64ToInt32WithConstant) {
-  TRACED_FOREACH(double, x, kFloat64Values) {
-    Reduction reduction = Reduce(graph()->NewNode(
-        machine()->TruncateFloat64ToInt32(), Float64Constant(x)));
-    ASSERT_TRUE(reduction.Changed());
-    EXPECT_THAT(reduction.replacement(), IsInt32Constant(DoubleToInt32(x)));
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// TruncateInt64ToInt32
-
-
-TEST_F(MachineOperatorReducerTest, TruncateInt64ToInt32WithChangeInt32ToInt64) {
-  Node* value = Parameter(0);
-  Reduction reduction = Reduce(graph()->NewNode(
-      machine()->TruncateInt64ToInt32(),
-      graph()->NewNode(machine()->ChangeInt32ToInt64(), value)));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_EQ(value, reduction.replacement());
-}
-
-
-TEST_F(MachineOperatorReducerTest, TruncateInt64ToInt32WithConstant) {
-  TRACED_FOREACH(int64_t, x, kInt64Values) {
-    Reduction reduction = Reduce(
-        graph()->NewNode(machine()->TruncateInt64ToInt32(), Int64Constant(x)));
-    ASSERT_TRUE(reduction.Changed());
-    EXPECT_THAT(reduction.replacement(),
-                IsInt32Constant(bit_cast<int32_t>(
-                    static_cast<uint32_t>(bit_cast<uint64_t>(x)))));
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// Word32Ror
-
-
-TEST_F(MachineOperatorReducerTest, ReduceToWord32RorWithParameters) {
-  Node* value = Parameter(0);
-  Node* shift = Parameter(1);
-  Node* shl = graph()->NewNode(machine()->Word32Shl(), value, shift);
-  Node* shr = graph()->NewNode(
-      machine()->Word32Shr(), value,
-      graph()->NewNode(machine()->Int32Sub(), Int32Constant(32), shift));
-
-  // (x << y) | (x >> (32 - y)) => x ror y
-  Node* node1 = graph()->NewNode(machine()->Word32Or(), shl, shr);
-  Reduction reduction1 = Reduce(node1);
-  EXPECT_TRUE(reduction1.Changed());
-  EXPECT_EQ(reduction1.replacement(), node1);
-  EXPECT_THAT(reduction1.replacement(), IsWord32Ror(value, shift));
-
-  // (x >> (32 - y)) | (x << y) => x ror y
-  Node* node2 = graph()->NewNode(machine()->Word32Or(), shr, shl);
-  Reduction reduction2 = Reduce(node2);
-  EXPECT_TRUE(reduction2.Changed());
-  EXPECT_EQ(reduction2.replacement(), node2);
-  EXPECT_THAT(reduction2.replacement(), IsWord32Ror(value, shift));
-}
-
-
-TEST_F(MachineOperatorReducerTest, ReduceToWord32RorWithConstant) {
-  Node* value = Parameter(0);
-  TRACED_FORRANGE(int32_t, k, 0, 31) {
-    Node* shl =
-        graph()->NewNode(machine()->Word32Shl(), value, Int32Constant(k));
-    Node* shr =
-        graph()->NewNode(machine()->Word32Shr(), value, Int32Constant(32 - k));
-
-    // (x << K) | (x >> ((32 - K) - y)) => x ror K
-    Node* node1 = graph()->NewNode(machine()->Word32Or(), shl, shr);
-    Reduction reduction1 = Reduce(node1);
-    EXPECT_TRUE(reduction1.Changed());
-    EXPECT_EQ(reduction1.replacement(), node1);
-    EXPECT_THAT(reduction1.replacement(),
-                IsWord32Ror(value, IsInt32Constant(k)));
-
-    // (x >> (32 - K)) | (x << K) => x ror K
-    Node* node2 = graph()->NewNode(machine()->Word32Or(), shr, shl);
-    Reduction reduction2 = Reduce(node2);
-    EXPECT_TRUE(reduction2.Changed());
-    EXPECT_EQ(reduction2.replacement(), node2);
-    EXPECT_THAT(reduction2.replacement(),
-                IsWord32Ror(value, IsInt32Constant(k)));
-  }
-}
-
-
-TEST_F(MachineOperatorReducerTest, Word32RorWithZeroShift) {
-  Node* value = Parameter(0);
-  Node* node =
-      graph()->NewNode(machine()->Word32Ror(), value, Int32Constant(0));
-  Reduction reduction = Reduce(node);
-  EXPECT_TRUE(reduction.Changed());
-  EXPECT_EQ(reduction.replacement(), value);
-}
-
-
-TEST_F(MachineOperatorReducerTest, Word32RorWithConstants) {
-  TRACED_FOREACH(int32_t, x, kUint32Values) {
-    TRACED_FORRANGE(int32_t, y, 0, 31) {
-      Node* node = graph()->NewNode(machine()->Word32Ror(), Int32Constant(x),
-                                    Int32Constant(y));
-      Reduction reduction = Reduce(node);
-      EXPECT_TRUE(reduction.Changed());
-      EXPECT_THAT(reduction.replacement(),
-                  IsInt32Constant(base::bits::RotateRight32(x, y)));
-    }
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// Int32AddWithOverflow
-
-
-TEST_F(MachineOperatorReducerTest, Int32AddWithOverflowWithZero) {
-  Node* p0 = Parameter(0);
-  {
-    Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(),
-                                 Int32Constant(0), p0);
-
-    Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
-    ASSERT_TRUE(r.Changed());
-    EXPECT_THAT(r.replacement(), IsInt32Constant(0));
-
-    r = Reduce(graph()->NewNode(common()->Projection(0), add));
-    ASSERT_TRUE(r.Changed());
-    EXPECT_EQ(p0, r.replacement());
-  }
-  {
-    Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), p0,
-                                 Int32Constant(0));
-
-    Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
-    ASSERT_TRUE(r.Changed());
-    EXPECT_THAT(r.replacement(), IsInt32Constant(0));
-
-    r = Reduce(graph()->NewNode(common()->Projection(0), add));
-    ASSERT_TRUE(r.Changed());
-    EXPECT_EQ(p0, r.replacement());
-  }
-}
-
-
-TEST_F(MachineOperatorReducerTest, Int32AddWithOverflowWithConstant) {
-  TRACED_FOREACH(int32_t, x, kInt32Values) {
-    TRACED_FOREACH(int32_t, y, kInt32Values) {
-      int32_t z;
-      Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(),
-                                   Int32Constant(x), Int32Constant(y));
-
-      Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
-      ASSERT_TRUE(r.Changed());
-      EXPECT_THAT(r.replacement(),
-                  IsInt32Constant(base::bits::SignedAddOverflow32(x, y, &z)));
-
-      r = Reduce(graph()->NewNode(common()->Projection(0), add));
-      ASSERT_TRUE(r.Changed());
-      EXPECT_THAT(r.replacement(), IsInt32Constant(z));
-    }
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// Int32SubWithOverflow
-
-
-TEST_F(MachineOperatorReducerTest, Int32SubWithOverflowWithZero) {
-  Node* p0 = Parameter(0);
-  Node* add =
-      graph()->NewNode(machine()->Int32SubWithOverflow(), p0, Int32Constant(0));
-
-  Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
-  ASSERT_TRUE(r.Changed());
-  EXPECT_THAT(r.replacement(), IsInt32Constant(0));
-
-  r = Reduce(graph()->NewNode(common()->Projection(0), add));
-  ASSERT_TRUE(r.Changed());
-  EXPECT_EQ(p0, r.replacement());
-}
-
-
-TEST_F(MachineOperatorReducerTest, Int32SubWithOverflowWithConstant) {
-  TRACED_FOREACH(int32_t, x, kInt32Values) {
-    TRACED_FOREACH(int32_t, y, kInt32Values) {
-      int32_t z;
-      Node* add = graph()->NewNode(machine()->Int32SubWithOverflow(),
-                                   Int32Constant(x), Int32Constant(y));
-
-      Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
-      ASSERT_TRUE(r.Changed());
-      EXPECT_THAT(r.replacement(),
-                  IsInt32Constant(base::bits::SignedSubOverflow32(x, y, &z)));
-
-      r = Reduce(graph()->NewNode(common()->Projection(0), add));
-      ASSERT_TRUE(r.Changed());
-      EXPECT_THAT(r.replacement(), IsInt32Constant(z));
-    }
-  }
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc
index 9328547..c3e45a1 100644
--- a/src/compiler/machine-operator-reducer.cc
+++ b/src/compiler/machine-operator-reducer.cc
@@ -5,7 +5,9 @@
 #include "src/compiler/machine-operator-reducer.h"
 
 #include "src/base/bits.h"
-#include "src/compiler/generic-node-inl.h"
+#include "src/base/division-by-constant.h"
+#include "src/codegen.h"
+#include "src/compiler/diamond.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/node-matchers.h"
@@ -41,81 +43,90 @@
 }
 
 
+Node* MachineOperatorReducer::Word32And(Node* lhs, Node* rhs) {
+  Node* const node = graph()->NewNode(machine()->Word32And(), lhs, rhs);
+  Reduction const reduction = ReduceWord32And(node);
+  return reduction.Changed() ? reduction.replacement() : node;
+}
+
+
+Node* MachineOperatorReducer::Word32Sar(Node* lhs, uint32_t rhs) {
+  if (rhs == 0) return lhs;
+  return graph()->NewNode(machine()->Word32Sar(), lhs, Uint32Constant(rhs));
+}
+
+
+Node* MachineOperatorReducer::Word32Shr(Node* lhs, uint32_t rhs) {
+  if (rhs == 0) return lhs;
+  return graph()->NewNode(machine()->Word32Shr(), lhs, Uint32Constant(rhs));
+}
+
+
+Node* MachineOperatorReducer::Word32Equal(Node* lhs, Node* rhs) {
+  return graph()->NewNode(machine()->Word32Equal(), lhs, rhs);
+}
+
+
+Node* MachineOperatorReducer::Int32Add(Node* lhs, Node* rhs) {
+  Node* const node = graph()->NewNode(machine()->Int32Add(), lhs, rhs);
+  Reduction const reduction = ReduceInt32Add(node);
+  return reduction.Changed() ? reduction.replacement() : node;
+}
+
+
+Node* MachineOperatorReducer::Int32Sub(Node* lhs, Node* rhs) {
+  return graph()->NewNode(machine()->Int32Sub(), lhs, rhs);
+}
+
+
+Node* MachineOperatorReducer::Int32Mul(Node* lhs, Node* rhs) {
+  return graph()->NewNode(machine()->Int32Mul(), lhs, rhs);
+}
+
+
+Node* MachineOperatorReducer::Int32Div(Node* dividend, int32_t divisor) {
+  DCHECK_NE(0, divisor);
+  DCHECK_NE(std::numeric_limits<int32_t>::min(), divisor);
+  base::MagicNumbersForDivision<uint32_t> const mag =
+      base::SignedDivisionByConstant(bit_cast<uint32_t>(divisor));
+  Node* quotient = graph()->NewNode(machine()->Int32MulHigh(), dividend,
+                                    Uint32Constant(mag.multiplier));
+  if (divisor > 0 && bit_cast<int32_t>(mag.multiplier) < 0) {
+    quotient = Int32Add(quotient, dividend);
+  } else if (divisor < 0 && bit_cast<int32_t>(mag.multiplier) > 0) {
+    quotient = Int32Sub(quotient, dividend);
+  }
+  return Int32Add(Word32Sar(quotient, mag.shift), Word32Shr(dividend, 31));
+}
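
This is the classic multiply-high division scheme (Granlund & Montgomery; Hacker's Delight, ch. 10). As a hedged illustration in plain C++ (not V8 code), here is the same computation specialized to divisor 3, whose magic numbers work out to multiplier 0x55555556 and shift 0:

#include <cassert>
#include <cstdint>

// Sketch of Int32Div(dividend, 3): take the high 32 bits of the signed
// 64-bit product (Int32MulHigh), then add the dividend's sign bit
// (the Word32Shr(dividend, 31) term above) to truncate toward zero.
int32_t DivideBy3(int32_t x) {
  int32_t hi = static_cast<int32_t>(
      (static_cast<int64_t>(x) * INT64_C(0x55555556)) >> 32);
  return hi + static_cast<int32_t>(static_cast<uint32_t>(x) >> 31);
}

int main() {
  assert(DivideBy3(7) == 2);
  assert(DivideBy3(-7) == -2);  // truncates toward zero, not to -3
  return 0;
}
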
+
+
+Node* MachineOperatorReducer::Uint32Div(Node* dividend, uint32_t divisor) {
+  DCHECK_LT(0, divisor);
+  base::MagicNumbersForDivision<uint32_t> const mag =
+      base::UnsignedDivisionByConstant(bit_cast<uint32_t>(divisor));
+  Node* quotient = graph()->NewNode(machine()->Uint32MulHigh(), dividend,
+                                    Uint32Constant(mag.multiplier));
+  if (mag.add) {
+    DCHECK_LE(1, mag.shift);
+    quotient = Word32Shr(
+        Int32Add(Word32Shr(Int32Sub(dividend, quotient), 1), quotient),
+        mag.shift - 1);
+  } else {
+    quotient = Word32Shr(quotient, mag.shift);
+  }
+  return quotient;
+}
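
The mag.add branch handles divisors whose magic multiplier does not fit in 32 bits. A hedged sketch for divisor 7, assuming multiplier 0x24924925, add == true, and shift == 3 are what base::UnsignedDivisionByConstant computes:

#include <cassert>
#include <cstdint>

// Sketch of Uint32Div(dividend, 7): the (dividend - q) >> 1 step recovers
// the 33rd multiplier bit that a 32-bit Uint32MulHigh cannot represent.
uint32_t DivideBy7(uint32_t x) {
  uint32_t q = static_cast<uint32_t>(
      (static_cast<uint64_t>(x) * UINT64_C(0x24924925)) >> 32);
  return (((x - q) >> 1) + q) >> 2;  // shift - 1 == 2
}

int main() {
  assert(DivideBy7(100u) == 14u);
  assert(DivideBy7(0xFFFFFFFFu) == 613566756u);  // 4294967295 / 7
  return 0;
}
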
+
+
 // Perform constant folding and strength reduction on machine operators.
 Reduction MachineOperatorReducer::Reduce(Node* node) {
   switch (node->opcode()) {
     case IrOpcode::kProjection:
       return ReduceProjection(OpParameter<size_t>(node), node->InputAt(0));
-    case IrOpcode::kWord32And: {
-      Int32BinopMatcher m(node);
-      if (m.right().Is(0)) return Replace(m.right().node());  // x & 0  => 0
-      if (m.right().Is(-1)) return Replace(m.left().node());  // x & -1 => x
-      if (m.IsFoldable()) {                                   // K & K  => K
-        return ReplaceInt32(m.left().Value() & m.right().Value());
-      }
-      if (m.LeftEqualsRight()) return Replace(m.left().node());  // x & x => x
-      break;
-    }
-    case IrOpcode::kWord32Or: {
-      Int32BinopMatcher m(node);
-      if (m.right().Is(0)) return Replace(m.left().node());    // x | 0  => x
-      if (m.right().Is(-1)) return Replace(m.right().node());  // x | -1 => -1
-      if (m.IsFoldable()) {                                    // K | K  => K
-        return ReplaceInt32(m.left().Value() | m.right().Value());
-      }
-      if (m.LeftEqualsRight()) return Replace(m.left().node());  // x | x => x
-      if (m.left().IsWord32Shl() && m.right().IsWord32Shr()) {
-        Int32BinopMatcher mleft(m.left().node());
-        Int32BinopMatcher mright(m.right().node());
-        if (mleft.left().node() == mright.left().node()) {
-          // (x << y) | (x >> (32 - y)) => x ror y
-          if (mright.right().IsInt32Sub()) {
-            Int32BinopMatcher mrightright(mright.right().node());
-            if (mrightright.left().Is(32) &&
-                mrightright.right().node() == mleft.right().node()) {
-              node->set_op(machine()->Word32Ror());
-              node->ReplaceInput(0, mleft.left().node());
-              node->ReplaceInput(1, mleft.right().node());
-              return Changed(node);
-            }
-          }
-          // (x << K) | (x >> (32 - K)) => x ror K
-          if (mleft.right().IsInRange(0, 31) &&
-              mright.right().Is(32 - mleft.right().Value())) {
-            node->set_op(machine()->Word32Ror());
-            node->ReplaceInput(0, mleft.left().node());
-            node->ReplaceInput(1, mleft.right().node());
-            return Changed(node);
-          }
-        }
-      }
-      if (m.left().IsWord32Shr() && m.right().IsWord32Shl()) {
-        // (x >> (32 - y)) | (x << y)  => x ror y
-        Int32BinopMatcher mleft(m.left().node());
-        Int32BinopMatcher mright(m.right().node());
-        if (mleft.left().node() == mright.left().node()) {
-          if (mleft.right().IsInt32Sub()) {
-            Int32BinopMatcher mleftright(mleft.right().node());
-            if (mleftright.left().Is(32) &&
-                mleftright.right().node() == mright.right().node()) {
-              node->set_op(machine()->Word32Ror());
-              node->ReplaceInput(0, mright.left().node());
-              node->ReplaceInput(1, mright.right().node());
-              return Changed(node);
-            }
-          }
-          // (x >> (32 - K)) | (x << K) => x ror K
-          if (mright.right().IsInRange(0, 31) &&
-              mleft.right().Is(32 - mright.right().Value())) {
-            node->set_op(machine()->Word32Ror());
-            node->ReplaceInput(0, mright.left().node());
-            node->ReplaceInput(1, mright.right().node());
-            return Changed(node);
-          }
-        }
-      }
-      break;
-    }
+    case IrOpcode::kWord32And:
+      return ReduceWord32And(node);
+    case IrOpcode::kWord32Or:
+      return ReduceWord32Or(node);
     case IrOpcode::kWord32Xor: {
       Int32BinopMatcher m(node);
       if (m.right().Is(0)) return Replace(m.left().node());  // x ^ 0 => x
@@ -123,23 +134,23 @@
         return ReplaceInt32(m.left().Value() ^ m.right().Value());
       }
       if (m.LeftEqualsRight()) return ReplaceInt32(0);  // x ^ x => 0
-      break;
-    }
-    case IrOpcode::kWord32Shl: {
-      Int32BinopMatcher m(node);
-      if (m.right().Is(0)) return Replace(m.left().node());  // x << 0 => x
-      if (m.IsFoldable()) {                                  // K << K => K
-        return ReplaceInt32(m.left().Value() << m.right().Value());
+      if (m.left().IsWord32Xor() && m.right().Is(-1)) {
+        Int32BinopMatcher mleft(m.left().node());
+        if (mleft.right().Is(-1)) {  // (x ^ -1) ^ -1 => x
+          return Replace(mleft.left().node());
+        }
       }
       break;
     }
+    case IrOpcode::kWord32Shl:
+      return ReduceWord32Shl(node);
     case IrOpcode::kWord32Shr: {
       Uint32BinopMatcher m(node);
       if (m.right().Is(0)) return Replace(m.left().node());  // x >>> 0 => x
       if (m.IsFoldable()) {                                  // K >>> K => K
         return ReplaceInt32(m.left().Value() >> m.right().Value());
       }
-      break;
+      return ReduceWord32Shifts(node);
     }
     case IrOpcode::kWord32Sar: {
       Int32BinopMatcher m(node);
@@ -147,7 +158,22 @@
       if (m.IsFoldable()) {                                  // K >> K => K
         return ReplaceInt32(m.left().Value() >> m.right().Value());
       }
-      break;
+      if (m.left().IsWord32Shl()) {
+        Int32BinopMatcher mleft(m.left().node());
+        if (mleft.left().IsLoad()) {
+          LoadRepresentation const rep =
+              OpParameter<LoadRepresentation>(mleft.left().node());
+          if (m.right().Is(24) && mleft.right().Is(24) && rep == kMachInt8) {
+            // Load[kMachInt8] << 24 >> 24 => Load[kMachInt8]
+            return Replace(mleft.left().node());
+          }
+          if (m.right().Is(16) && mleft.right().Is(16) && rep == kMachInt16) {
+            // Load[kMachInt16] << 16 >> 16 => Load[kMachInt16]
+            return Replace(mleft.left().node());
+          }
+        }
+      }
+      return ReduceWord32Shifts(node);
     }
     case IrOpcode::kWord32Ror: {
       Int32BinopMatcher m(node);
@@ -173,15 +199,23 @@
       if (m.LeftEqualsRight()) return ReplaceBool(true);  // x == x => true
       break;
     }
-    case IrOpcode::kInt32Add: {
-      Int32BinopMatcher m(node);
-      if (m.right().Is(0)) return Replace(m.left().node());  // x + 0 => x
-      if (m.IsFoldable()) {                                  // K + K => K
-        return ReplaceInt32(static_cast<uint32_t>(m.left().Value()) +
-                            static_cast<uint32_t>(m.right().Value()));
+    case IrOpcode::kWord64Equal: {
+      Int64BinopMatcher m(node);
+      if (m.IsFoldable()) {  // K == K => K
+        return ReplaceBool(m.left().Value() == m.right().Value());
       }
+      if (m.left().IsInt64Sub() && m.right().Is(0)) {  // x - y == 0 => x == y
+        Int64BinopMatcher msub(m.left().node());
+        node->ReplaceInput(0, msub.left().node());
+        node->ReplaceInput(1, msub.right().node());
+        return Changed(node);
+      }
+      // TODO(turbofan): fold HeapConstant, ExternalReference, pointer compares
+      if (m.LeftEqualsRight()) return ReplaceBool(true);  // x == x => true
       break;
     }
+    case IrOpcode::kInt32Add:
+      return ReduceInt32Add(node);
     case IrOpcode::kInt32Sub: {
       Int32BinopMatcher m(node);
       if (m.right().Is(0)) return Replace(m.left().node());  // x - 0 => x
@@ -208,74 +242,19 @@
       if (m.right().IsPowerOf2()) {  // x * 2^n => x << n
         node->set_op(machine()->Word32Shl());
         node->ReplaceInput(1, Int32Constant(WhichPowerOf2(m.right().Value())));
-        return Changed(node);
+        Reduction reduction = ReduceWord32Shl(node);
+        return reduction.Changed() ? reduction : Changed(node);
       }
       break;
     }
-    case IrOpcode::kInt32Div: {
-      Int32BinopMatcher m(node);
-      if (m.right().Is(1)) return Replace(m.left().node());  // x / 1 => x
-      // TODO(turbofan): if (m.left().Is(0))
-      // TODO(turbofan): if (m.right().IsPowerOf2())
-      // TODO(turbofan): if (m.right().Is(0))
-      // TODO(turbofan): if (m.LeftEqualsRight())
-      if (m.IsFoldable() && !m.right().Is(0)) {  // K / K => K
-        if (m.right().Is(-1)) return ReplaceInt32(-m.left().Value());
-        return ReplaceInt32(m.left().Value() / m.right().Value());
-      }
-      if (m.right().Is(-1)) {  // x / -1 => 0 - x
-        node->set_op(machine()->Int32Sub());
-        node->ReplaceInput(0, Int32Constant(0));
-        node->ReplaceInput(1, m.left().node());
-        return Changed(node);
-      }
-      break;
-    }
-    case IrOpcode::kInt32UDiv: {
-      Uint32BinopMatcher m(node);
-      if (m.right().Is(1)) return Replace(m.left().node());  // x / 1 => x
-      // TODO(turbofan): if (m.left().Is(0))
-      // TODO(turbofan): if (m.right().Is(0))
-      // TODO(turbofan): if (m.LeftEqualsRight())
-      if (m.IsFoldable() && !m.right().Is(0)) {  // K / K => K
-        return ReplaceInt32(m.left().Value() / m.right().Value());
-      }
-      if (m.right().IsPowerOf2()) {  // x / 2^n => x >> n
-        node->set_op(machine()->Word32Shr());
-        node->ReplaceInput(1, Int32Constant(WhichPowerOf2(m.right().Value())));
-        return Changed(node);
-      }
-      break;
-    }
-    case IrOpcode::kInt32Mod: {
-      Int32BinopMatcher m(node);
-      if (m.right().Is(1)) return ReplaceInt32(0);   // x % 1  => 0
-      if (m.right().Is(-1)) return ReplaceInt32(0);  // x % -1 => 0
-      // TODO(turbofan): if (m.left().Is(0))
-      // TODO(turbofan): if (m.right().IsPowerOf2())
-      // TODO(turbofan): if (m.right().Is(0))
-      // TODO(turbofan): if (m.LeftEqualsRight())
-      if (m.IsFoldable() && !m.right().Is(0)) {  // K % K => K
-        return ReplaceInt32(m.left().Value() % m.right().Value());
-      }
-      break;
-    }
-    case IrOpcode::kInt32UMod: {
-      Uint32BinopMatcher m(node);
-      if (m.right().Is(1)) return ReplaceInt32(0);  // x % 1 => 0
-      // TODO(turbofan): if (m.left().Is(0))
-      // TODO(turbofan): if (m.right().Is(0))
-      // TODO(turbofan): if (m.LeftEqualsRight())
-      if (m.IsFoldable() && !m.right().Is(0)) {  // K % K => K
-        return ReplaceInt32(m.left().Value() % m.right().Value());
-      }
-      if (m.right().IsPowerOf2()) {  // x % 2^n => x & 2^n-1
-        node->set_op(machine()->Word32And());
-        node->ReplaceInput(1, Int32Constant(m.right().Value() - 1));
-        return Changed(node);
-      }
-      break;
-    }
+    case IrOpcode::kInt32Div:
+      return ReduceInt32Div(node);
+    case IrOpcode::kUint32Div:
+      return ReduceUint32Div(node);
+    case IrOpcode::kInt32Mod:
+      return ReduceInt32Mod(node);
+    case IrOpcode::kUint32Mod:
+      return ReduceUint32Mod(node);
     case IrOpcode::kInt32LessThan: {
       Int32BinopMatcher m(node);
       if (m.IsFoldable()) {  // K < K => K
@@ -324,6 +303,21 @@
         return ReplaceBool(m.left().Value() < m.right().Value());
       }
       if (m.LeftEqualsRight()) return ReplaceBool(false);  // x < x => false
+      if (m.left().IsWord32Sar() && m.right().HasValue()) {
+        Int32BinopMatcher mleft(m.left().node());
+        if (mleft.right().HasValue()) {
+          // (x >> K) < C => x < (C << K)
+          // when C < (kMaxInt >> K)
+          const uint32_t c = m.right().Value();
+          const uint32_t k = mleft.right().Value() & 0x1f;
+          if (c < static_cast<uint32_t>(kMaxInt >> k)) {
+            node->ReplaceInput(0, mleft.left().node());
+            node->ReplaceInput(1, Uint32Constant(c << k));
+            return Changed(node);
+          }
+          // TODO(turbofan): else the comparison is always true.
+        }
+      }
       break;
     }
     case IrOpcode::kUint32LessThanOrEqual: {
@@ -338,6 +332,9 @@
     }
     case IrOpcode::kFloat64Add: {
       Float64BinopMatcher m(node);
+      if (m.right().IsNaN()) {  // x + NaN => NaN
+        return Replace(m.right().node());
+      }
       if (m.IsFoldable()) {  // K + K => K
         return ReplaceFloat64(m.left().Value() + m.right().Value());
       }
@@ -345,6 +342,15 @@
     }
     case IrOpcode::kFloat64Sub: {
       Float64BinopMatcher m(node);
+      if (m.right().Is(0) && (Double(m.right().Value()).Sign() > 0)) {
+        return Replace(m.left().node());  // x - (+0) => x
+      }
+      if (m.right().IsNaN()) {  // x - NaN => NaN
+        return Replace(m.right().node());
+      }
+      if (m.left().IsNaN()) {  // NaN - x => NaN
+        return Replace(m.left().node());
+      }
       if (m.IsFoldable()) {  // K - K => K
         return ReplaceFloat64(m.left().Value() - m.right().Value());
       }
@@ -352,6 +358,12 @@
     }
     case IrOpcode::kFloat64Mul: {
       Float64BinopMatcher m(node);
+      if (m.right().Is(-1)) {  // x * -1.0 => -0.0 - x
+        node->set_op(machine()->Float64Sub());
+        node->ReplaceInput(0, Float64Constant(-0.0));
+        node->ReplaceInput(1, m.left().node());
+        return Changed(node);
+      }
       if (m.right().Is(1)) return Replace(m.left().node());  // x * 1.0 => x
       if (m.right().IsNaN()) {                               // x * NaN => NaN
         return Replace(m.right().node());
@@ -377,6 +389,9 @@
     }
     case IrOpcode::kFloat64Mod: {
       Float64BinopMatcher m(node);
+      if (m.right().Is(0)) {  // x % 0 => NaN
+        return ReplaceFloat64(base::OS::nan_value());
+      }
       if (m.right().IsNaN()) {  // x % NaN => NaN
         return Replace(m.right().node());
       }
@@ -425,12 +440,8 @@
       if (m.HasValue()) return ReplaceInt64(static_cast<uint64_t>(m.Value()));
       break;
     }
-    case IrOpcode::kTruncateFloat64ToInt32: {
-      Float64Matcher m(node->InputAt(0));
-      if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
-      if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
-      break;
-    }
+    case IrOpcode::kTruncateFloat64ToInt32:
+      return ReduceTruncateFloat64ToInt32(node);
     case IrOpcode::kTruncateInt64ToInt32: {
       Int64Matcher m(node->InputAt(0));
       if (m.HasValue()) return ReplaceInt32(static_cast<int32_t>(m.Value()));
@@ -443,6 +454,227 @@
       if (m.IsChangeFloat32ToFloat64()) return Replace(m.node()->InputAt(0));
       break;
     }
+    case IrOpcode::kStore:
+      return ReduceStore(node);
+    default:
+      break;
+  }
+  return NoChange();
+}
+
+
+Reduction MachineOperatorReducer::ReduceInt32Add(Node* node) {
+  DCHECK_EQ(IrOpcode::kInt32Add, node->opcode());
+  Int32BinopMatcher m(node);
+  if (m.right().Is(0)) return Replace(m.left().node());  // x + 0 => x
+  if (m.IsFoldable()) {                                  // K + K => K
+    return ReplaceUint32(bit_cast<uint32_t>(m.left().Value()) +
+                         bit_cast<uint32_t>(m.right().Value()));
+  }
+  return NoChange();
+}
+
+
+Reduction MachineOperatorReducer::ReduceInt32Div(Node* node) {
+  Int32BinopMatcher m(node);
+  if (m.left().Is(0)) return Replace(m.left().node());    // 0 / x => 0
+  if (m.right().Is(0)) return Replace(m.right().node());  // x / 0 => 0
+  if (m.right().Is(1)) return Replace(m.left().node());   // x / 1 => x
+  if (m.IsFoldable()) {                                   // K / K => K
+    return ReplaceInt32(
+        base::bits::SignedDiv32(m.left().Value(), m.right().Value()));
+  }
+  if (m.LeftEqualsRight()) {  // x / x => x != 0
+    Node* const zero = Int32Constant(0);
+    return Replace(Word32Equal(Word32Equal(m.left().node(), zero), zero));
+  }
+  if (m.right().Is(-1)) {  // x / -1 => 0 - x
+    node->set_op(machine()->Int32Sub());
+    node->ReplaceInput(0, Int32Constant(0));
+    node->ReplaceInput(1, m.left().node());
+    node->TrimInputCount(2);
+    return Changed(node);
+  }
+  if (m.right().HasValue()) {
+    int32_t const divisor = m.right().Value();
+    Node* const dividend = m.left().node();
+    Node* quotient = dividend;
+    if (base::bits::IsPowerOfTwo32(Abs(divisor))) {
+      uint32_t const shift = WhichPowerOf2Abs(divisor);
+      DCHECK_NE(0, shift);
+      if (shift > 1) {
+        quotient = Word32Sar(quotient, 31);
+      }
+      quotient = Int32Add(Word32Shr(quotient, 32u - shift), dividend);
+      quotient = Word32Sar(quotient, shift);
+    } else {
+      quotient = Int32Div(quotient, Abs(divisor));
+    }
+    if (divisor < 0) {
+      node->set_op(machine()->Int32Sub());
+      node->ReplaceInput(0, Int32Constant(0));
+      node->ReplaceInput(1, quotient);
+      node->TrimInputCount(2);
+      return Changed(node);
+    }
+    return Replace(quotient);
+  }
+  return NoChange();
+}
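
The power-of-two branch above biases a negative dividend by divisor - 1 so that the arithmetic shift truncates toward zero; for divisor 2 the bias is just the sign bit, hence the shift > 1 special case, and negative divisors are finished off by the 0 - quotient rewrite. A hedged plain-C++ sketch for x / 4, i.e. shift == 2:

#include <cassert>
#include <cstdint>

// Sketch of the power-of-two path for x / 4. Assumes arithmetic right
// shift of signed values, as V8 itself does.
int32_t DivideBy4(int32_t x) {
  int32_t sign = x >> 31;  // Word32Sar(x, 31): 0 or -1
  int32_t biased =
      x + static_cast<int32_t>(static_cast<uint32_t>(sign) >> 30);  // +3 iff x < 0
  return biased >> 2;  // Word32Sar(biased, 2)
}

int main() {
  assert(DivideBy4(7) == 1);
  assert(DivideBy4(-7) == -1);  // plain -7 >> 2 would yield -2
  return 0;
}
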
+
+
+Reduction MachineOperatorReducer::ReduceUint32Div(Node* node) {
+  Uint32BinopMatcher m(node);
+  if (m.left().Is(0)) return Replace(m.left().node());    // 0 / x => 0
+  if (m.right().Is(0)) return Replace(m.right().node());  // x / 0 => 0
+  if (m.right().Is(1)) return Replace(m.left().node());   // x / 1 => x
+  if (m.IsFoldable()) {                                   // K / K => K
+    return ReplaceUint32(
+        base::bits::UnsignedDiv32(m.left().Value(), m.right().Value()));
+  }
+  if (m.LeftEqualsRight()) {  // x / x => x != 0
+    Node* const zero = Int32Constant(0);
+    return Replace(Word32Equal(Word32Equal(m.left().node(), zero), zero));
+  }
+  if (m.right().HasValue()) {
+    Node* const dividend = m.left().node();
+    uint32_t const divisor = m.right().Value();
+    if (base::bits::IsPowerOfTwo32(divisor)) {  // x / 2^n => x >> n
+      node->set_op(machine()->Word32Shr());
+      node->ReplaceInput(1, Uint32Constant(WhichPowerOf2(m.right().Value())));
+      node->TrimInputCount(2);
+      return Changed(node);
+    } else {
+      return Replace(Uint32Div(dividend, divisor));
+    }
+  }
+  return NoChange();
+}
+
+
+Reduction MachineOperatorReducer::ReduceInt32Mod(Node* node) {
+  Int32BinopMatcher m(node);
+  if (m.left().Is(0)) return Replace(m.left().node());    // 0 % x  => 0
+  if (m.right().Is(0)) return Replace(m.right().node());  // x % 0  => 0
+  if (m.right().Is(1)) return ReplaceInt32(0);            // x % 1  => 0
+  if (m.right().Is(-1)) return ReplaceInt32(0);           // x % -1 => 0
+  if (m.LeftEqualsRight()) return ReplaceInt32(0);        // x % x  => 0
+  if (m.IsFoldable()) {                                   // K % K => K
+    return ReplaceInt32(
+        base::bits::SignedMod32(m.left().Value(), m.right().Value()));
+  }
+  if (m.right().HasValue()) {
+    Node* const dividend = m.left().node();
+    int32_t const divisor = Abs(m.right().Value());
+    if (base::bits::IsPowerOfTwo32(divisor)) {
+      uint32_t const mask = divisor - 1;
+      Node* const zero = Int32Constant(0);
+      node->set_op(common()->Select(kMachInt32, BranchHint::kFalse));
+      node->ReplaceInput(
+          0, graph()->NewNode(machine()->Int32LessThan(), dividend, zero));
+      node->ReplaceInput(
+          1, Int32Sub(zero, Word32And(Int32Sub(zero, dividend), mask)));
+      node->ReplaceInput(2, Word32And(dividend, mask));
+    } else {
+      Node* quotient = Int32Div(dividend, divisor);
+      node->set_op(machine()->Int32Sub());
+      DCHECK_EQ(dividend, node->InputAt(0));
+      node->ReplaceInput(1, Int32Mul(quotient, Int32Constant(divisor)));
+      node->TrimInputCount(2);
+    }
+    return Changed(node);
+  }
+  return NoChange();
+}
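+
+// NOTE: The Select above implements JS modulus semantics for power-of-two
+// divisors: the result takes the sign of the dividend, so negative dividends
+// are negated, masked, and negated back. A plain C++ sketch of the selected
+// expression (hypothetical ModByPowerOfTwo, not V8 code):
+//
+//   #include <cassert>
+//   #include <cstdint>
+//
+//   int32_t ModByPowerOfTwo(int32_t x, uint32_t mask) {  // mask = 2^n - 1
+//     uint32_t ux = static_cast<uint32_t>(x);
+//     return x < 0 ? -static_cast<int32_t>((0u - ux) & mask)
+//                  : static_cast<int32_t>(ux & mask);
+//   }
+//
+//   int main() {
+//     assert(ModByPowerOfTwo(7, 3) == 7 % 4);    // 3
+//     assert(ModByPowerOfTwo(-7, 3) == -7 % 4);  // -3, sign of the dividend
+//   }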
+
+
+Reduction MachineOperatorReducer::ReduceUint32Mod(Node* node) {
+  Uint32BinopMatcher m(node);
+  if (m.left().Is(0)) return Replace(m.left().node());    // 0 % x => 0
+  if (m.right().Is(0)) return Replace(m.right().node());  // x % 0 => 0
+  if (m.right().Is(1)) return ReplaceUint32(0);           // x % 1 => 0
+  if (m.LeftEqualsRight()) return ReplaceUint32(0);       // x % x => 0
+  if (m.IsFoldable()) {                                   // K % K => K
+    return ReplaceUint32(
+        base::bits::UnsignedMod32(m.left().Value(), m.right().Value()));
+  }
+  if (m.right().HasValue()) {
+    Node* const dividend = m.left().node();
+    uint32_t const divisor = m.right().Value();
+    if (base::bits::IsPowerOfTwo32(divisor)) {  // x % 2^n => x & (2^n - 1)
+      node->set_op(machine()->Word32And());
+      node->ReplaceInput(1, Uint32Constant(m.right().Value() - 1));
+    } else {
+      Node* quotient = Uint32Div(dividend, divisor);
+      node->set_op(machine()->Int32Sub());
+      DCHECK_EQ(dividend, node->InputAt(0));
+      node->ReplaceInput(1, Int32Mul(quotient, Uint32Constant(divisor)));
+    }
+    node->TrimInputCount(2);
+    return Changed(node);
+  }
+  return NoChange();
+}
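+
+// NOTE: The unsigned cases reduce to single instructions. A quick standalone
+// check of both identities used by ReduceUint32Div and ReduceUint32Mod:
+//
+//   #include <cassert>
+//   #include <cstdint>
+//
+//   int main() {
+//     uint32_t x = 1234567u;
+//     assert(x / 8u == x >> 3);      // x / 2^n => x >> n     (Word32Shr)
+//     assert(x % 8u == (x & 7u));    // x % 2^n => x & (2^n-1) (Word32And)
+//   }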
+
+
+Reduction MachineOperatorReducer::ReduceTruncateFloat64ToInt32(Node* node) {
+  Float64Matcher m(node->InputAt(0));
+  if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
+  if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
+  if (m.IsPhi()) {
+    Node* const phi = m.node();
+    DCHECK_EQ(kRepFloat64, RepresentationOf(OpParameter<MachineType>(phi)));
+    if (phi->OwnedBy(node)) {
+      // TruncateFloat64ToInt32(Phi[Float64](x1,...,xn))
+      //   => Phi[Int32](TruncateFloat64ToInt32(x1),
+      //                 ...,
+      //                 TruncateFloat64ToInt32(xn))
+      const int value_input_count = phi->InputCount() - 1;
+      for (int i = 0; i < value_input_count; ++i) {
+        Node* input = graph()->NewNode(machine()->TruncateFloat64ToInt32(),
+                                       phi->InputAt(i));
+        // TODO(bmeurer): Reschedule input for reduction once we have Revisit()
+        // instead of recursing into ReduceTruncateFloat64ToInt32() here.
+        Reduction reduction = ReduceTruncateFloat64ToInt32(input);
+        if (reduction.Changed()) input = reduction.replacement();
+        phi->ReplaceInput(i, input);
+      }
+      phi->set_op(common()->Phi(kMachInt32, value_input_count));
+      return Replace(phi);
+    }
+  }
+  return NoChange();
+}
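+
+// NOTE: For the constant case, DoubleToInt32 is assumed here to follow
+// ECMAScript ToInt32: truncate toward zero, then wrap modulo 2^32 (NaN and
+// infinities map to 0). A standalone model of that fold, under that
+// assumption:
+//
+//   #include <cassert>
+//   #include <cmath>
+//   #include <cstdint>
+//
+//   int32_t ToInt32(double d) {
+//     if (!std::isfinite(d)) return 0;
+//     double t = std::trunc(d);                // round toward zero
+//     double m = std::fmod(t, 4294967296.0);   // reduce modulo 2^32
+//     if (m < 0) m += 4294967296.0;            // into [0, 2^32)
+//     return static_cast<int32_t>(static_cast<uint32_t>(m));
+//   }
+//
+//   int main() {
+//     assert(ToInt32(3.9) == 3);
+//     assert(ToInt32(-3.9) == -3);
+//     assert(ToInt32(4294967296.0 + 5.0) == 5);  // wraps modulo 2^32
+//   }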
+
+
+Reduction MachineOperatorReducer::ReduceStore(Node* node) {
+  MachineType const rep =
+      RepresentationOf(StoreRepresentationOf(node->op()).machine_type());
+  Node* const value = node->InputAt(2);
+  switch (value->opcode()) {
+    case IrOpcode::kWord32And: {
+      Uint32BinopMatcher m(value);
+      if (m.right().HasValue() &&
+          ((rep == kRepWord8 && (m.right().Value() & 0xff) == 0xff) ||
+           (rep == kRepWord16 && (m.right().Value() & 0xffff) == 0xffff))) {
+        node->ReplaceInput(2, m.left().node());
+        return Changed(node);
+      }
+      break;
+    }
+    case IrOpcode::kWord32Sar: {
+      Int32BinopMatcher m(value);
+      if (m.left().IsWord32Shl() &&
+          ((rep == kRepWord8 && m.right().IsInRange(1, 24)) ||
+           (rep == kRepWord16 && m.right().IsInRange(1, 16)))) {
+        Int32BinopMatcher mleft(m.left().node());
+        if (mleft.right().Is(m.right().Value())) {
+          node->ReplaceInput(2, mleft.left().node());
+          return Changed(node);
+        }
+      }
+      break;
+    }
     default:
       break;
   }
@@ -487,6 +719,192 @@
 }
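 
 // NOTE: Both ReduceStore patterns above drop a narrowing of the value that a
 // kRepWord8/kRepWord16 store truncates anyway. A standalone check of the
 // underlying identities for the 8-bit case:
 //
 //   #include <cassert>
 //   #include <cstdint>
 //
 //   int main() {
 //     int32_t value = 0x12345678;
 //     // An 8-bit store keeps only the low byte, so '& 0xff' is redundant.
 //     assert(static_cast<uint8_t>(value & 0xff) ==
 //            static_cast<uint8_t>(value));
 //     // (value << 24) >> 24 sign-extends the low byte, which the 8-bit
 //     // store truncates away again.
 //     assert(static_cast<uint8_t>((value << 24) >> 24) ==
 //            static_cast<uint8_t>(value));
 //   }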
 
 
+Reduction MachineOperatorReducer::ReduceWord32Shifts(Node* node) {
+  DCHECK((node->opcode() == IrOpcode::kWord32Shl) ||
+         (node->opcode() == IrOpcode::kWord32Shr) ||
+         (node->opcode() == IrOpcode::kWord32Sar));
+  if (machine()->Word32ShiftIsSafe()) {
+    // Remove the explicit 'and' with 0x1f if the shift provided by the machine
+    // instruction matches that required by JavaScript.
+    Int32BinopMatcher m(node);
+    if (m.right().IsWord32And()) {
+      Int32BinopMatcher mright(m.right().node());
+      if (mright.right().Is(0x1f)) {
+        node->ReplaceInput(1, mright.left().node());
+        return Changed(node);
+      }
+    }
+  }
+  return NoChange();
+}
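+
+// NOTE: kWord32ShiftIsSafe is a back-end promise that the native shift
+// instruction already masks its count to five bits, which is exactly what
+// JavaScript requires; the reducer can then drop the explicit '& 0x1f'. A
+// one-line model of the required semantics:
+//
+//   #include <cassert>
+//   #include <cstdint>
+//
+//   // JavaScript defines x << y as a shift by (y & 31).
+//   uint32_t JsShl(uint32_t x, uint32_t y) { return x << (y & 31); }
+//
+//   int main() {
+//     assert(JsShl(1, 3) == 8);
+//     assert(JsShl(1, 35) == 8);  // 35 & 31 == 3
+//   }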
+
+
+Reduction MachineOperatorReducer::ReduceWord32Shl(Node* node) {
+  DCHECK_EQ(IrOpcode::kWord32Shl, node->opcode());
+  Int32BinopMatcher m(node);
+  if (m.right().Is(0)) return Replace(m.left().node());  // x << 0 => x
+  if (m.IsFoldable()) {                                  // K << K => K
+    return ReplaceInt32(m.left().Value() << m.right().Value());
+  }
+  if (m.right().IsInRange(1, 31)) {
+    // (x >>> K) << K => x & ~(2^K - 1)
+    // (x >> K) << K => x & ~(2^K - 1)
+    if (m.left().IsWord32Sar() || m.left().IsWord32Shr()) {
+      Int32BinopMatcher mleft(m.left().node());
+      if (mleft.right().Is(m.right().Value())) {
+        node->set_op(machine()->Word32And());
+        node->ReplaceInput(0, mleft.left().node());
+        node->ReplaceInput(1,
+                           Uint32Constant(~((1U << m.right().Value()) - 1U)));
+        Reduction reduction = ReduceWord32And(node);
+        return reduction.Changed() ? reduction : Changed(node);
+      }
+    }
+  }
+  return ReduceWord32Shifts(node);
+}
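+
+// NOTE: Shifting right and then left by the same K just clears the low K
+// bits, so the pair collapses to a single mask. A quick standalone check
+// with K = 4:
+//
+//   #include <cassert>
+//   #include <cstdint>
+//
+//   int main() {
+//     uint32_t x = 0xDEADBEEFu;
+//     assert(((x >> 4) << 4) == (x & ~((1u << 4) - 1u)));
+//   }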
+
+
+Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
+  DCHECK_EQ(IrOpcode::kWord32And, node->opcode());
+  Int32BinopMatcher m(node);
+  if (m.right().Is(0)) return Replace(m.right().node());  // x & 0  => 0
+  if (m.right().Is(-1)) return Replace(m.left().node());  // x & -1 => x
+  if (m.IsFoldable()) {                                   // K & K  => K
+    return ReplaceInt32(m.left().Value() & m.right().Value());
+  }
+  if (m.LeftEqualsRight()) return Replace(m.left().node());  // x & x => x
+  if (m.left().IsWord32And() && m.right().HasValue()) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().HasValue()) {  // (x & K) & K => x & K
+      node->ReplaceInput(0, mleft.left().node());
+      node->ReplaceInput(
+          1, Int32Constant(m.right().Value() & mleft.right().Value()));
+      Reduction const reduction = ReduceWord32And(node);
+      return reduction.Changed() ? reduction : Changed(node);
+    }
+  }
+  if (m.left().IsInt32Add() && m.right().IsNegativePowerOf2()) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().HasValue() &&
+        (mleft.right().Value() & m.right().Value()) == mleft.right().Value()) {
+      // (x + (K << L)) & (-1 << L) => (x & (-1 << L)) + (K << L)
+      node->set_op(machine()->Int32Add());
+      node->ReplaceInput(0, Word32And(mleft.left().node(), m.right().node()));
+      node->ReplaceInput(1, mleft.right().node());
+      Reduction const reduction = ReduceInt32Add(node);
+      return reduction.Changed() ? reduction : Changed(node);
+    }
+    if (mleft.left().IsInt32Mul()) {
+      Int32BinopMatcher mleftleft(mleft.left().node());
+      if (mleftleft.right().IsMultipleOf(-m.right().Value())) {
+        // (y * (K << L) + x) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
+        node->set_op(machine()->Int32Add());
+        node->ReplaceInput(0,
+                           Word32And(mleft.right().node(), m.right().node()));
+        node->ReplaceInput(1, mleftleft.node());
+        Reduction const reduction = ReduceInt32Add(node);
+        return reduction.Changed() ? reduction : Changed(node);
+      }
+    }
+    if (mleft.right().IsInt32Mul()) {
+      Int32BinopMatcher mleftright(mleft.right().node());
+      if (mleftright.right().IsMultipleOf(-m.right().Value())) {
+        // (x + y * (K << L)) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
+        node->set_op(machine()->Int32Add());
+        node->ReplaceInput(0, Word32And(mleft.left().node(), m.right().node()));
+        node->ReplaceInput(1, mleftright.node());
+        Reduction const reduction = ReduceInt32Add(node);
+        return reduction.Changed() ? reduction : Changed(node);
+      }
+    }
+    if (mleft.left().IsWord32Shl()) {
+      Int32BinopMatcher mleftleft(mleft.left().node());
+      if (mleftleft.right().Is(
+              base::bits::CountTrailingZeros32(m.right().Value()))) {
+        // ((y << L) + x) & (-1 << L) => (x & (-1 << L)) + (y << L)
+        node->set_op(machine()->Int32Add());
+        node->ReplaceInput(0,
+                           Word32And(mleft.right().node(), m.right().node()));
+        node->ReplaceInput(1, mleftleft.node());
+        Reduction const reduction = ReduceInt32Add(node);
+        return reduction.Changed() ? reduction : Changed(node);
+      }
+    }
+    if (mleft.right().IsWord32Shl()) {
+      Int32BinopMatcher mleftright(mleft.right().node());
+      if (mleftright.right().Is(
+              base::bits::CountTrailingZeros32(m.right().Value()))) {
+        // (x + (y << L)) & (-1 << L) => (x & (-1 << L)) + (y << L)
+        node->set_op(machine()->Int32Add());
+        node->ReplaceInput(0, Word32And(mleft.left().node(), m.right().node()));
+        node->ReplaceInput(1, mleftright.node());
+        Reduction const reduction = ReduceInt32Add(node);
+        return reduction.Changed() ? reduction : Changed(node);
+      }
+    }
+  }
+  return NoChange();
+}
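+
+// NOTE: The reassociations above are valid because the added constant has
+// zero bits wherever the mask does (that is what the
+// '(mleft.right().Value() & m.right().Value()) == mleft.right().Value()'
+// guard checks), so the addition cannot disturb the masked-off low bits. A
+// standalone check of the base identity:
+//
+//   #include <cassert>
+//   #include <cstdint>
+//   #include <initializer_list>
+//
+//   int main() {
+//     // With a = K << L (low L bits zero) and M = -1 << L, masking commutes
+//     // with the addition: (x + a) & M == (x & M) + a. Here L = 4, K = 3.
+//     const uint32_t M = ~15u;     // -1 << 4
+//     const uint32_t a = 3u << 4;  // a multiple of 16
+//     for (uint32_t x : {0u, 7u, 16u, 0xDEADBEEFu}) {
+//       assert(((x + a) & M) == ((x & M) + a));
+//     }
+//   }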
+
+
+Reduction MachineOperatorReducer::ReduceWord32Or(Node* node) {
+  DCHECK_EQ(IrOpcode::kWord32Or, node->opcode());
+  Int32BinopMatcher m(node);
+  if (m.right().Is(0)) return Replace(m.left().node());    // x | 0  => x
+  if (m.right().Is(-1)) return Replace(m.right().node());  // x | -1 => -1
+  if (m.IsFoldable()) {                                    // K | K  => K
+    return ReplaceInt32(m.left().Value() | m.right().Value());
+  }
+  if (m.LeftEqualsRight()) return Replace(m.left().node());  // x | x => x
+
+  Node* shl = NULL;
+  Node* shr = NULL;
+  // Recognize rotation: we are matching either
+  //  * x << y | x >>> (32 - y) => x ror (32 - y), i.e. x rol y
+  //  * x << (32 - y) | x >>> y => x ror y
+  // as well as their commuted forms.
+  if (m.left().IsWord32Shl() && m.right().IsWord32Shr()) {
+    shl = m.left().node();
+    shr = m.right().node();
+  } else if (m.left().IsWord32Shr() && m.right().IsWord32Shl()) {
+    shl = m.right().node();
+    shr = m.left().node();
+  } else {
+    return NoChange();
+  }
+
+  Int32BinopMatcher mshl(shl);
+  Int32BinopMatcher mshr(shr);
+  if (mshl.left().node() != mshr.left().node()) return NoChange();
+
+  if (mshl.right().HasValue() && mshr.right().HasValue()) {
+    // Case where y is a constant.
+    if (mshl.right().Value() + mshr.right().Value() != 32) return NoChange();
+  } else {
+    Node* sub = NULL;
+    Node* y = NULL;
+    if (mshl.right().IsInt32Sub()) {
+      sub = mshl.right().node();
+      y = mshr.right().node();
+    } else if (mshr.right().IsInt32Sub()) {
+      sub = mshr.right().node();
+      y = mshl.right().node();
+    } else {
+      return NoChange();
+    }
+
+    Int32BinopMatcher msub(sub);
+    if (!msub.left().Is(32) || msub.right().node() != y) return NoChange();
+  }
+
+  node->set_op(machine()->Word32Ror());
+  node->ReplaceInput(0, mshl.left().node());
+  node->ReplaceInput(1, mshr.right().node());
+  return Changed(node);
+}
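+
+// NOTE: A left shift by y OR'd with an unsigned right shift of the same
+// value by 32 - y is a rotation; the reducer canonicalizes it to Word32Ror
+// with the right-shift count (rol y == ror (32 - y)). A standalone sketch
+// using the usual masked-count idiom so y == 0 stays well-defined in C++:
+//
+//   #include <cassert>
+//   #include <cstdint>
+//
+//   uint32_t Rotl(uint32_t x, uint32_t y) {
+//     y &= 31;
+//     return (x << y) | (x >> ((32 - y) & 31));
+//   }
+//
+//   int main() {
+//     assert(Rotl(0x80000001u, 1) == 0x00000003u);  // top bit wraps to bit 0
+//     assert(Rotl(0x12345678u, 0) == 0x12345678u);
+//   }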
+
+
 CommonOperatorBuilder* MachineOperatorReducer::common() const {
   return jsgraph()->common();
 }
diff --git a/src/compiler/machine-operator-reducer.h b/src/compiler/machine-operator-reducer.h
index c79ceae..8200abb 100644
--- a/src/compiler/machine-operator-reducer.h
+++ b/src/compiler/machine-operator-reducer.h
@@ -24,13 +24,28 @@
   explicit MachineOperatorReducer(JSGraph* jsgraph);
   ~MachineOperatorReducer();
 
-  virtual Reduction Reduce(Node* node) OVERRIDE;
+  Reduction Reduce(Node* node) OVERRIDE;
 
  private:
   Node* Float32Constant(volatile float value);
   Node* Float64Constant(volatile double value);
   Node* Int32Constant(int32_t value);
   Node* Int64Constant(int64_t value);
+  Node* Uint32Constant(uint32_t value) {
+    return Int32Constant(bit_cast<int32_t>(value));
+  }
+  Node* Word32And(Node* lhs, Node* rhs);
+  Node* Word32And(Node* lhs, uint32_t rhs) {
+    return Word32And(lhs, Uint32Constant(rhs));
+  }
+  Node* Word32Sar(Node* lhs, uint32_t rhs);
+  Node* Word32Shr(Node* lhs, uint32_t rhs);
+  Node* Word32Equal(Node* lhs, Node* rhs);
+  Node* Int32Add(Node* lhs, Node* rhs);
+  Node* Int32Sub(Node* lhs, Node* rhs);
+  Node* Int32Mul(Node* lhs, Node* rhs);
+  Node* Int32Div(Node* dividend, int32_t divisor);
+  Node* Uint32Div(Node* dividend, uint32_t divisor);
 
   Reduction ReplaceBool(bool value) { return ReplaceInt32(value ? 1 : 0); }
   Reduction ReplaceFloat32(volatile float value) {
@@ -42,11 +57,25 @@
   Reduction ReplaceInt32(int32_t value) {
     return Replace(Int32Constant(value));
   }
+  Reduction ReplaceUint32(uint32_t value) {
+    return Replace(Uint32Constant(value));
+  }
   Reduction ReplaceInt64(int64_t value) {
     return Replace(Int64Constant(value));
   }
 
+  Reduction ReduceInt32Add(Node* node);
+  Reduction ReduceInt32Div(Node* node);
+  Reduction ReduceUint32Div(Node* node);
+  Reduction ReduceInt32Mod(Node* node);
+  Reduction ReduceUint32Mod(Node* node);
+  Reduction ReduceTruncateFloat64ToInt32(Node* node);
+  Reduction ReduceStore(Node* node);
   Reduction ReduceProjection(size_t index, Node* node);
+  Reduction ReduceWord32Shifts(Node* node);
+  Reduction ReduceWord32Shl(Node* node);
+  Reduction ReduceWord32And(Node* node);
+  Reduction ReduceWord32Or(Node* node);
 
   Graph* graph() const;
   JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/src/compiler/machine-operator-unittest.cc b/src/compiler/machine-operator-unittest.cc
deleted file mode 100644
index cb93ce7..0000000
--- a/src/compiler/machine-operator-unittest.cc
+++ /dev/null
@@ -1,325 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/machine-operator.h"
-#include "src/compiler/operator-properties-inl.h"
-#include "testing/gtest-support.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-#if GTEST_HAS_COMBINE
-
-// TODO(bmeurer): Find a new home for these.
-inline std::ostream& operator<<(std::ostream& os, const MachineType& type) {
-  OStringStream ost;
-  ost << type;
-  return os << ost.c_str();
-}
-inline std::ostream& operator<<(std::ostream& os,
-                         const WriteBarrierKind& write_barrier_kind) {
-  OStringStream ost;
-  ost << write_barrier_kind;
-  return os << ost.c_str();
-}
-
-
-template <typename T>
-class MachineOperatorTestWithParam
-    : public ::testing::TestWithParam< ::testing::tuple<MachineType, T> > {
- protected:
-  MachineType type() const { return ::testing::get<0>(B::GetParam()); }
-  const T& GetParam() const { return ::testing::get<1>(B::GetParam()); }
-
- private:
-  typedef ::testing::TestWithParam< ::testing::tuple<MachineType, T> > B;
-};
-
-
-namespace {
-
-const MachineType kMachineReps[] = {kRepWord32, kRepWord64};
-
-
-const MachineType kMachineTypes[] = {
-    kMachFloat32, kMachFloat64,   kMachInt8,   kMachUint8,  kMachInt16,
-    kMachUint16,  kMachInt32,     kMachUint32, kMachInt64,  kMachUint64,
-    kMachPtr,     kMachAnyTagged, kRepBit,     kRepWord8,   kRepWord16,
-    kRepWord32,   kRepWord64,     kRepFloat32, kRepFloat64, kRepTagged};
-
-}  // namespace
-
-
-// -----------------------------------------------------------------------------
-// Load operator.
-
-
-typedef MachineOperatorTestWithParam<LoadRepresentation>
-    MachineLoadOperatorTest;
-
-
-TEST_P(MachineLoadOperatorTest, InstancesAreGloballyShared) {
-  MachineOperatorBuilder machine1(type());
-  MachineOperatorBuilder machine2(type());
-  EXPECT_EQ(machine1.Load(GetParam()), machine2.Load(GetParam()));
-}
-
-
-TEST_P(MachineLoadOperatorTest, NumberOfInputsAndOutputs) {
-  MachineOperatorBuilder machine(type());
-  const Operator* op = machine.Load(GetParam());
-
-  EXPECT_EQ(2, OperatorProperties::GetValueInputCount(op));
-  EXPECT_EQ(1, OperatorProperties::GetEffectInputCount(op));
-  EXPECT_EQ(0, OperatorProperties::GetControlInputCount(op));
-  EXPECT_EQ(3, OperatorProperties::GetTotalInputCount(op));
-
-  EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
-  EXPECT_EQ(1, OperatorProperties::GetEffectOutputCount(op));
-  EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
-}
-
-
-TEST_P(MachineLoadOperatorTest, OpcodeIsCorrect) {
-  MachineOperatorBuilder machine(type());
-  EXPECT_EQ(IrOpcode::kLoad, machine.Load(GetParam())->opcode());
-}
-
-
-TEST_P(MachineLoadOperatorTest, ParameterIsCorrect) {
-  MachineOperatorBuilder machine(type());
-  EXPECT_EQ(GetParam(),
-            OpParameter<LoadRepresentation>(machine.Load(GetParam())));
-}
-
-
-INSTANTIATE_TEST_CASE_P(MachineOperatorTest, MachineLoadOperatorTest,
-                        ::testing::Combine(::testing::ValuesIn(kMachineReps),
-                                           ::testing::ValuesIn(kMachineTypes)));
-
-
-// -----------------------------------------------------------------------------
-// Store operator.
-
-
-class MachineStoreOperatorTest
-    : public MachineOperatorTestWithParam<
-          ::testing::tuple<MachineType, WriteBarrierKind> > {
- protected:
-  StoreRepresentation GetParam() const {
-    return StoreRepresentation(
-        ::testing::get<0>(MachineOperatorTestWithParam<
-            ::testing::tuple<MachineType, WriteBarrierKind> >::GetParam()),
-        ::testing::get<1>(MachineOperatorTestWithParam<
-            ::testing::tuple<MachineType, WriteBarrierKind> >::GetParam()));
-  }
-};
-
-
-TEST_P(MachineStoreOperatorTest, InstancesAreGloballyShared) {
-  MachineOperatorBuilder machine1(type());
-  MachineOperatorBuilder machine2(type());
-  EXPECT_EQ(machine1.Store(GetParam()), machine2.Store(GetParam()));
-}
-
-
-TEST_P(MachineStoreOperatorTest, NumberOfInputsAndOutputs) {
-  MachineOperatorBuilder machine(type());
-  const Operator* op = machine.Store(GetParam());
-
-  EXPECT_EQ(3, OperatorProperties::GetValueInputCount(op));
-  EXPECT_EQ(1, OperatorProperties::GetEffectInputCount(op));
-  EXPECT_EQ(1, OperatorProperties::GetControlInputCount(op));
-  EXPECT_EQ(5, OperatorProperties::GetTotalInputCount(op));
-
-  EXPECT_EQ(0, OperatorProperties::GetValueOutputCount(op));
-  EXPECT_EQ(1, OperatorProperties::GetEffectOutputCount(op));
-  EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
-}
-
-
-TEST_P(MachineStoreOperatorTest, OpcodeIsCorrect) {
-  MachineOperatorBuilder machine(type());
-  EXPECT_EQ(IrOpcode::kStore, machine.Store(GetParam())->opcode());
-}
-
-
-TEST_P(MachineStoreOperatorTest, ParameterIsCorrect) {
-  MachineOperatorBuilder machine(type());
-  EXPECT_EQ(GetParam(),
-            OpParameter<StoreRepresentation>(machine.Store(GetParam())));
-}
-
-
-INSTANTIATE_TEST_CASE_P(
-    MachineOperatorTest, MachineStoreOperatorTest,
-    ::testing::Combine(
-        ::testing::ValuesIn(kMachineReps),
-        ::testing::Combine(::testing::ValuesIn(kMachineTypes),
-                           ::testing::Values(kNoWriteBarrier,
-                                             kFullWriteBarrier))));
-
-
-// -----------------------------------------------------------------------------
-// Pure operators.
-
-
-namespace {
-
-struct PureOperator {
-  const Operator* (MachineOperatorBuilder::*constructor)();
-  IrOpcode::Value opcode;
-  int value_input_count;
-  int value_output_count;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const PureOperator& pop) {
-  return os << IrOpcode::Mnemonic(pop.opcode);
-}
-
-
-const PureOperator kPureOperators[] = {
-#define PURE(Name, input_count, output_count)                      \
-  {                                                                \
-    &MachineOperatorBuilder::Name, IrOpcode::k##Name, input_count, \
-        output_count                                               \
-  }
-    PURE(Word32And, 2, 1),                PURE(Word32Or, 2, 1),
-    PURE(Word32Xor, 2, 1),                PURE(Word32Shl, 2, 1),
-    PURE(Word32Shr, 2, 1),                PURE(Word32Sar, 2, 1),
-    PURE(Word32Ror, 2, 1),                PURE(Word32Equal, 2, 1),
-    PURE(Word64And, 2, 1),                PURE(Word64Or, 2, 1),
-    PURE(Word64Xor, 2, 1),                PURE(Word64Shl, 2, 1),
-    PURE(Word64Shr, 2, 1),                PURE(Word64Sar, 2, 1),
-    PURE(Word64Ror, 2, 1),                PURE(Word64Equal, 2, 1),
-    PURE(Int32Add, 2, 1),                 PURE(Int32AddWithOverflow, 2, 2),
-    PURE(Int32Sub, 2, 1),                 PURE(Int32SubWithOverflow, 2, 2),
-    PURE(Int32Mul, 2, 1),                 PURE(Int32Div, 2, 1),
-    PURE(Int32UDiv, 2, 1),                PURE(Int32Mod, 2, 1),
-    PURE(Int32UMod, 2, 1),                PURE(Int32LessThan, 2, 1),
-    PURE(Int32LessThanOrEqual, 2, 1),     PURE(Uint32LessThan, 2, 1),
-    PURE(Uint32LessThanOrEqual, 2, 1),    PURE(Int64Add, 2, 1),
-    PURE(Int64Sub, 2, 1),                 PURE(Int64Mul, 2, 1),
-    PURE(Int64Div, 2, 1),                 PURE(Int64UDiv, 2, 1),
-    PURE(Int64Mod, 2, 1),                 PURE(Int64UMod, 2, 1),
-    PURE(Int64LessThan, 2, 1),            PURE(Int64LessThanOrEqual, 2, 1),
-    PURE(ChangeFloat32ToFloat64, 1, 1),   PURE(ChangeFloat64ToInt32, 1, 1),
-    PURE(ChangeFloat64ToUint32, 1, 1),    PURE(ChangeInt32ToInt64, 1, 1),
-    PURE(ChangeUint32ToFloat64, 1, 1),    PURE(ChangeUint32ToUint64, 1, 1),
-    PURE(TruncateFloat64ToFloat32, 1, 1), PURE(TruncateFloat64ToInt32, 1, 1),
-    PURE(TruncateInt64ToInt32, 1, 1),     PURE(Float64Add, 2, 1),
-    PURE(Float64Sub, 2, 1),               PURE(Float64Mul, 2, 1),
-    PURE(Float64Div, 2, 1),               PURE(Float64Mod, 2, 1),
-    PURE(Float64Sqrt, 1, 1),              PURE(Float64Equal, 2, 1),
-    PURE(Float64LessThan, 2, 1),          PURE(Float64LessThanOrEqual, 2, 1)
-#undef PURE
-};
-
-
-typedef MachineOperatorTestWithParam<PureOperator> MachinePureOperatorTest;
-
-}  // namespace
-
-
-TEST_P(MachinePureOperatorTest, InstancesAreGloballyShared) {
-  const PureOperator& pop = GetParam();
-  MachineOperatorBuilder machine1(type());
-  MachineOperatorBuilder machine2(type());
-  EXPECT_EQ((machine1.*pop.constructor)(), (machine2.*pop.constructor)());
-}
-
-
-TEST_P(MachinePureOperatorTest, NumberOfInputsAndOutputs) {
-  MachineOperatorBuilder machine(type());
-  const PureOperator& pop = GetParam();
-  const Operator* op = (machine.*pop.constructor)();
-
-  EXPECT_EQ(pop.value_input_count, OperatorProperties::GetValueInputCount(op));
-  EXPECT_EQ(0, OperatorProperties::GetEffectInputCount(op));
-  EXPECT_EQ(0, OperatorProperties::GetControlInputCount(op));
-  EXPECT_EQ(pop.value_input_count, OperatorProperties::GetTotalInputCount(op));
-
-  EXPECT_EQ(pop.value_output_count,
-            OperatorProperties::GetValueOutputCount(op));
-  EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op));
-  EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
-}
-
-
-TEST_P(MachinePureOperatorTest, MarkedAsPure) {
-  MachineOperatorBuilder machine(type());
-  const PureOperator& pop = GetParam();
-  const Operator* op = (machine.*pop.constructor)();
-  EXPECT_TRUE(op->HasProperty(Operator::kPure));
-}
-
-
-TEST_P(MachinePureOperatorTest, OpcodeIsCorrect) {
-  MachineOperatorBuilder machine(type());
-  const PureOperator& pop = GetParam();
-  const Operator* op = (machine.*pop.constructor)();
-  EXPECT_EQ(pop.opcode, op->opcode());
-}
-
-
-INSTANTIATE_TEST_CASE_P(
-    MachineOperatorTest, MachinePureOperatorTest,
-    ::testing::Combine(::testing::ValuesIn(kMachineReps),
-                       ::testing::ValuesIn(kPureOperators)));
-
-#endif  // GTEST_HAS_COMBINE
-
-
-// -----------------------------------------------------------------------------
-// Pseudo operators.
-
-
-TEST(MachineOperatorTest, PseudoOperatorsWhenWordSizeIs32Bit) {
-  MachineOperatorBuilder machine(kRepWord32);
-  EXPECT_EQ(machine.Word32And(), machine.WordAnd());
-  EXPECT_EQ(machine.Word32Or(), machine.WordOr());
-  EXPECT_EQ(machine.Word32Xor(), machine.WordXor());
-  EXPECT_EQ(machine.Word32Shl(), machine.WordShl());
-  EXPECT_EQ(machine.Word32Shr(), machine.WordShr());
-  EXPECT_EQ(machine.Word32Sar(), machine.WordSar());
-  EXPECT_EQ(machine.Word32Ror(), machine.WordRor());
-  EXPECT_EQ(machine.Word32Equal(), machine.WordEqual());
-  EXPECT_EQ(machine.Int32Add(), machine.IntAdd());
-  EXPECT_EQ(machine.Int32Sub(), machine.IntSub());
-  EXPECT_EQ(machine.Int32Mul(), machine.IntMul());
-  EXPECT_EQ(machine.Int32Div(), machine.IntDiv());
-  EXPECT_EQ(machine.Int32UDiv(), machine.IntUDiv());
-  EXPECT_EQ(machine.Int32Mod(), machine.IntMod());
-  EXPECT_EQ(machine.Int32UMod(), machine.IntUMod());
-  EXPECT_EQ(machine.Int32LessThan(), machine.IntLessThan());
-  EXPECT_EQ(machine.Int32LessThanOrEqual(), machine.IntLessThanOrEqual());
-}
-
-
-TEST(MachineOperatorTest, PseudoOperatorsWhenWordSizeIs64Bit) {
-  MachineOperatorBuilder machine(kRepWord64);
-  EXPECT_EQ(machine.Word64And(), machine.WordAnd());
-  EXPECT_EQ(machine.Word64Or(), machine.WordOr());
-  EXPECT_EQ(machine.Word64Xor(), machine.WordXor());
-  EXPECT_EQ(machine.Word64Shl(), machine.WordShl());
-  EXPECT_EQ(machine.Word64Shr(), machine.WordShr());
-  EXPECT_EQ(machine.Word64Sar(), machine.WordSar());
-  EXPECT_EQ(machine.Word64Ror(), machine.WordRor());
-  EXPECT_EQ(machine.Word64Equal(), machine.WordEqual());
-  EXPECT_EQ(machine.Int64Add(), machine.IntAdd());
-  EXPECT_EQ(machine.Int64Sub(), machine.IntSub());
-  EXPECT_EQ(machine.Int64Mul(), machine.IntMul());
-  EXPECT_EQ(machine.Int64Div(), machine.IntDiv());
-  EXPECT_EQ(machine.Int64UDiv(), machine.IntUDiv());
-  EXPECT_EQ(machine.Int64Mod(), machine.IntMod());
-  EXPECT_EQ(machine.Int64UMod(), machine.IntUMod());
-  EXPECT_EQ(machine.Int64LessThan(), machine.IntLessThan());
-  EXPECT_EQ(machine.Int64LessThanOrEqual(), machine.IntLessThanOrEqual());
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc
index 2f30bd2..eb034e9 100644
--- a/src/compiler/machine-operator.cc
+++ b/src/compiler/machine-operator.cc
@@ -7,13 +7,15 @@
 #include "src/base/lazy-instance.h"
 #include "src/compiler/opcodes.h"
 #include "src/compiler/operator.h"
+#include "src/v8.h"
+#include "src/zone-inl.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-OStream& operator<<(OStream& os, const WriteBarrierKind& write_barrier_kind) {
-  switch (write_barrier_kind) {
+std::ostream& operator<<(std::ostream& os, WriteBarrierKind kind) {
+  switch (kind) {
     case kNoWriteBarrier:
       return os << "NoWriteBarrier";
     case kFullWriteBarrier:
@@ -24,98 +26,113 @@
 }
 
 
-OStream& operator<<(OStream& os, const StoreRepresentation& rep) {
+bool operator==(StoreRepresentation lhs, StoreRepresentation rhs) {
+  return lhs.machine_type() == rhs.machine_type() &&
+         lhs.write_barrier_kind() == rhs.write_barrier_kind();
+}
+
+
+bool operator!=(StoreRepresentation lhs, StoreRepresentation rhs) {
+  return !(lhs == rhs);
+}
+
+
+size_t hash_value(StoreRepresentation rep) {
+  return base::hash_combine(rep.machine_type(), rep.write_barrier_kind());
+}
+
+
+std::ostream& operator<<(std::ostream& os, StoreRepresentation rep) {
   return os << "(" << rep.machine_type() << " : " << rep.write_barrier_kind()
             << ")";
 }
 
 
-template <>
-struct StaticParameterTraits<StoreRepresentation> {
-  static OStream& PrintTo(OStream& os, const StoreRepresentation& rep) {
-    return os << rep;
-  }
-  static int HashCode(const StoreRepresentation& rep) {
-    return rep.machine_type() + rep.write_barrier_kind();
-  }
-  static bool Equals(const StoreRepresentation& rep1,
-                     const StoreRepresentation& rep2) {
-    return rep1 == rep2;
-  }
-};
+StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
+  DCHECK_EQ(IrOpcode::kStore, op->opcode());
+  return OpParameter<StoreRepresentation>(op);
+}
 
 
-template <>
-struct StaticParameterTraits<LoadRepresentation> {
-  static OStream& PrintTo(OStream& os, LoadRepresentation type) {  // NOLINT
-    return os << type;
-  }
-  static int HashCode(LoadRepresentation type) { return type; }
-  static bool Equals(LoadRepresentation lhs, LoadRepresentation rhs) {
-    return lhs == rhs;
-  }
-};
+CheckedLoadRepresentation CheckedLoadRepresentationOf(Operator const* op) {
+  DCHECK_EQ(IrOpcode::kCheckedLoad, op->opcode());
+  return OpParameter<CheckedLoadRepresentation>(op);
+}
+
+
+CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
+  DCHECK_EQ(IrOpcode::kCheckedStore, op->opcode());
+  return OpParameter<CheckedStoreRepresentation>(op);
+}
 
 
 #define PURE_OP_LIST(V)                                                       \
-  V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 1)         \
-  V(Word32Or, Operator::kAssociative | Operator::kCommutative, 2, 1)          \
-  V(Word32Xor, Operator::kAssociative | Operator::kCommutative, 2, 1)         \
-  V(Word32Shl, Operator::kNoProperties, 2, 1)                                 \
-  V(Word32Shr, Operator::kNoProperties, 2, 1)                                 \
-  V(Word32Sar, Operator::kNoProperties, 2, 1)                                 \
-  V(Word32Ror, Operator::kNoProperties, 2, 1)                                 \
-  V(Word32Equal, Operator::kCommutative, 2, 1)                                \
-  V(Word64And, Operator::kAssociative | Operator::kCommutative, 2, 1)         \
-  V(Word64Or, Operator::kAssociative | Operator::kCommutative, 2, 1)          \
-  V(Word64Xor, Operator::kAssociative | Operator::kCommutative, 2, 1)         \
-  V(Word64Shl, Operator::kNoProperties, 2, 1)                                 \
-  V(Word64Shr, Operator::kNoProperties, 2, 1)                                 \
-  V(Word64Sar, Operator::kNoProperties, 2, 1)                                 \
-  V(Word64Ror, Operator::kNoProperties, 2, 1)                                 \
-  V(Word64Equal, Operator::kCommutative, 2, 1)                                \
-  V(Int32Add, Operator::kAssociative | Operator::kCommutative, 2, 1)          \
+  V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
+  V(Word32Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)       \
+  V(Word32Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
+  V(Word32Shl, Operator::kNoProperties, 2, 0, 1)                              \
+  V(Word32Shr, Operator::kNoProperties, 2, 0, 1)                              \
+  V(Word32Sar, Operator::kNoProperties, 2, 0, 1)                              \
+  V(Word32Ror, Operator::kNoProperties, 2, 0, 1)                              \
+  V(Word32Equal, Operator::kCommutative, 2, 0, 1)                             \
+  V(Word64And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
+  V(Word64Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)       \
+  V(Word64Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
+  V(Word64Shl, Operator::kNoProperties, 2, 0, 1)                              \
+  V(Word64Shr, Operator::kNoProperties, 2, 0, 1)                              \
+  V(Word64Sar, Operator::kNoProperties, 2, 0, 1)                              \
+  V(Word64Ror, Operator::kNoProperties, 2, 0, 1)                              \
+  V(Word64Equal, Operator::kCommutative, 2, 0, 1)                             \
+  V(Int32Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)       \
   V(Int32AddWithOverflow, Operator::kAssociative | Operator::kCommutative, 2, \
-    2)                                                                        \
-  V(Int32Sub, Operator::kNoProperties, 2, 1)                                  \
-  V(Int32SubWithOverflow, Operator::kNoProperties, 2, 2)                      \
-  V(Int32Mul, Operator::kAssociative | Operator::kCommutative, 2, 1)          \
-  V(Int32Div, Operator::kNoProperties, 2, 1)                                  \
-  V(Int32UDiv, Operator::kNoProperties, 2, 1)                                 \
-  V(Int32Mod, Operator::kNoProperties, 2, 1)                                  \
-  V(Int32UMod, Operator::kNoProperties, 2, 1)                                 \
-  V(Int32LessThan, Operator::kNoProperties, 2, 1)                             \
-  V(Int32LessThanOrEqual, Operator::kNoProperties, 2, 1)                      \
-  V(Uint32LessThan, Operator::kNoProperties, 2, 1)                            \
-  V(Uint32LessThanOrEqual, Operator::kNoProperties, 2, 1)                     \
-  V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 1)          \
-  V(Int64Sub, Operator::kNoProperties, 2, 1)                                  \
-  V(Int64Mul, Operator::kAssociative | Operator::kCommutative, 2, 1)          \
-  V(Int64Div, Operator::kNoProperties, 2, 1)                                  \
-  V(Int64UDiv, Operator::kNoProperties, 2, 1)                                 \
-  V(Int64Mod, Operator::kNoProperties, 2, 1)                                  \
-  V(Int64UMod, Operator::kNoProperties, 2, 1)                                 \
-  V(Int64LessThan, Operator::kNoProperties, 2, 1)                             \
-  V(Int64LessThanOrEqual, Operator::kNoProperties, 2, 1)                      \
-  V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 1)                    \
-  V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 1)                      \
-  V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 1)                     \
-  V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 1)                      \
-  V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 1)                        \
-  V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 1)                     \
-  V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 1)                      \
-  V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 1)                  \
-  V(TruncateFloat64ToInt32, Operator::kNoProperties, 1, 1)                    \
-  V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 1)                      \
-  V(Float64Add, Operator::kCommutative, 2, 1)                                 \
-  V(Float64Sub, Operator::kNoProperties, 2, 1)                                \
-  V(Float64Mul, Operator::kCommutative, 2, 1)                                 \
-  V(Float64Div, Operator::kNoProperties, 2, 1)                                \
-  V(Float64Mod, Operator::kNoProperties, 2, 1)                                \
-  V(Float64Sqrt, Operator::kNoProperties, 1, 1)                               \
-  V(Float64Equal, Operator::kCommutative, 2, 1)                               \
-  V(Float64LessThan, Operator::kNoProperties, 2, 1)                           \
-  V(Float64LessThanOrEqual, Operator::kNoProperties, 2, 1)
+    0, 2)                                                                     \
+  V(Int32Sub, Operator::kNoProperties, 2, 0, 1)                               \
+  V(Int32SubWithOverflow, Operator::kNoProperties, 2, 0, 2)                   \
+  V(Int32Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)       \
+  V(Int32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)   \
+  V(Int32Div, Operator::kNoProperties, 2, 1, 1)                               \
+  V(Int32Mod, Operator::kNoProperties, 2, 1, 1)                               \
+  V(Int32LessThan, Operator::kNoProperties, 2, 0, 1)                          \
+  V(Int32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                   \
+  V(Uint32Div, Operator::kNoProperties, 2, 1, 1)                              \
+  V(Uint32LessThan, Operator::kNoProperties, 2, 0, 1)                         \
+  V(Uint32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                  \
+  V(Uint32Mod, Operator::kNoProperties, 2, 1, 1)                              \
+  V(Uint32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
+  V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)       \
+  V(Int64Sub, Operator::kNoProperties, 2, 0, 1)                               \
+  V(Int64Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)       \
+  V(Int64Div, Operator::kNoProperties, 2, 0, 1)                               \
+  V(Int64Mod, Operator::kNoProperties, 2, 0, 1)                               \
+  V(Int64LessThan, Operator::kNoProperties, 2, 0, 1)                          \
+  V(Int64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                   \
+  V(Uint64Div, Operator::kNoProperties, 2, 0, 1)                              \
+  V(Uint64LessThan, Operator::kNoProperties, 2, 0, 1)                         \
+  V(Uint64Mod, Operator::kNoProperties, 2, 0, 1)                              \
+  V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1)                 \
+  V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1)                   \
+  V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1)                  \
+  V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1)                   \
+  V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 0, 1)                     \
+  V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 0, 1)                  \
+  V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 0, 1)                   \
+  V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 0, 1)               \
+  V(TruncateFloat64ToInt32, Operator::kNoProperties, 1, 0, 1)                 \
+  V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 0, 1)                   \
+  V(Float64Add, Operator::kCommutative, 2, 0, 1)                              \
+  V(Float64Sub, Operator::kNoProperties, 2, 0, 1)                             \
+  V(Float64Mul, Operator::kCommutative, 2, 0, 1)                              \
+  V(Float64Div, Operator::kNoProperties, 2, 0, 1)                             \
+  V(Float64Mod, Operator::kNoProperties, 2, 0, 1)                             \
+  V(Float64Sqrt, Operator::kNoProperties, 1, 0, 1)                            \
+  V(Float64Ceil, Operator::kNoProperties, 1, 0, 1)                            \
+  V(Float64Floor, Operator::kNoProperties, 1, 0, 1)                           \
+  V(Float64RoundTruncate, Operator::kNoProperties, 1, 0, 1)                   \
+  V(Float64RoundTiesAway, Operator::kNoProperties, 1, 0, 1)                   \
+  V(Float64Equal, Operator::kCommutative, 2, 0, 1)                            \
+  V(Float64LessThan, Operator::kNoProperties, 2, 0, 1)                        \
+  V(Float64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)                 \
+  V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1)
 
 
 #define MACHINE_TYPE_LIST(V) \
@@ -140,64 +157,85 @@
   V(RepTagged)
 
 
-struct MachineOperatorBuilderImpl {
-#define PURE(Name, properties, input_count, output_count)                 \
-  struct Name##Operator FINAL : public SimpleOperator {                   \
-    Name##Operator()                                                      \
-        : SimpleOperator(IrOpcode::k##Name, Operator::kPure | properties, \
-                         input_count, output_count, #Name) {}             \
-  };                                                                      \
+struct MachineOperatorGlobalCache {
+#define PURE(Name, properties, value_input_count, control_input_count,         \
+             output_count)                                                     \
+  struct Name##Operator FINAL : public Operator {                              \
+    Name##Operator()                                                           \
+        : Operator(IrOpcode::k##Name, Operator::kPure | properties, #Name,     \
+                   value_input_count, 0, control_input_count, output_count, 0, \
+                   0) {}                                                       \
+  };                                                                           \
   Name##Operator k##Name;
   PURE_OP_LIST(PURE)
 #undef PURE
 
-#define LOAD(Type)                                                            \
-  struct Load##Type##Operator FINAL : public Operator1<LoadRepresentation> {  \
-    Load##Type##Operator()                                                    \
-        : Operator1<LoadRepresentation>(                                      \
-              IrOpcode::kLoad, Operator::kNoThrow | Operator::kNoWrite, 2, 1, \
-              "Load", k##Type) {}                                             \
-  };                                                                          \
-  Load##Type##Operator k##Load##Type;
+#define LOAD(Type)                                                             \
+  struct Load##Type##Operator FINAL : public Operator1<LoadRepresentation> {   \
+    Load##Type##Operator()                                                     \
+        : Operator1<LoadRepresentation>(                                       \
+              IrOpcode::kLoad, Operator::kNoThrow | Operator::kNoWrite,        \
+              "Load", 2, 1, 1, 1, 1, 0, k##Type) {}                            \
+  };                                                                           \
+  struct CheckedLoad##Type##Operator FINAL                                     \
+      : public Operator1<CheckedLoadRepresentation> {                          \
+    CheckedLoad##Type##Operator()                                              \
+        : Operator1<CheckedLoadRepresentation>(                                \
+              IrOpcode::kCheckedLoad, Operator::kNoThrow | Operator::kNoWrite, \
+              "CheckedLoad", 3, 1, 1, 1, 1, 0, k##Type) {}                     \
+  };                                                                           \
+  Load##Type##Operator kLoad##Type;                                            \
+  CheckedLoad##Type##Operator kCheckedLoad##Type;
   MACHINE_TYPE_LIST(LOAD)
 #undef LOAD
 
-#define STORE(Type)                                                           \
-  struct Store##Type##Operator : public Operator1<StoreRepresentation> {      \
-    explicit Store##Type##Operator(WriteBarrierKind write_barrier_kind)       \
-        : Operator1<StoreRepresentation>(                                     \
-              IrOpcode::kStore, Operator::kNoRead | Operator::kNoThrow, 3, 0, \
-              "Store", StoreRepresentation(k##Type, write_barrier_kind)) {}   \
-  };                                                                          \
-  struct Store##Type##NoWriteBarrier##Operator FINAL                          \
-      : public Store##Type##Operator {                                        \
-    Store##Type##NoWriteBarrier##Operator()                                   \
-        : Store##Type##Operator(kNoWriteBarrier) {}                           \
-  };                                                                          \
-  struct Store##Type##FullWriteBarrier##Operator FINAL                        \
-      : public Store##Type##Operator {                                        \
-    Store##Type##FullWriteBarrier##Operator()                                 \
-        : Store##Type##Operator(kFullWriteBarrier) {}                         \
-  };                                                                          \
-  Store##Type##NoWriteBarrier##Operator k##Store##Type##NoWriteBarrier;       \
-  Store##Type##FullWriteBarrier##Operator k##Store##Type##FullWriteBarrier;
+#define STORE(Type)                                                            \
+  struct Store##Type##Operator : public Operator1<StoreRepresentation> {       \
+    explicit Store##Type##Operator(WriteBarrierKind write_barrier_kind)        \
+        : Operator1<StoreRepresentation>(                                      \
+              IrOpcode::kStore, Operator::kNoRead | Operator::kNoThrow,        \
+              "Store", 3, 1, 1, 0, 1, 0,                                       \
+              StoreRepresentation(k##Type, write_barrier_kind)) {}             \
+  };                                                                           \
+  struct Store##Type##NoWriteBarrier##Operator FINAL                           \
+      : public Store##Type##Operator {                                         \
+    Store##Type##NoWriteBarrier##Operator()                                    \
+        : Store##Type##Operator(kNoWriteBarrier) {}                            \
+  };                                                                           \
+  struct Store##Type##FullWriteBarrier##Operator FINAL                         \
+      : public Store##Type##Operator {                                         \
+    Store##Type##FullWriteBarrier##Operator()                                  \
+        : Store##Type##Operator(kFullWriteBarrier) {}                          \
+  };                                                                           \
+  struct CheckedStore##Type##Operator FINAL                                    \
+      : public Operator1<CheckedStoreRepresentation> {                         \
+    CheckedStore##Type##Operator()                                             \
+        : Operator1<CheckedStoreRepresentation>(                               \
+              IrOpcode::kCheckedStore, Operator::kNoRead | Operator::kNoThrow, \
+              "CheckedStore", 4, 1, 1, 0, 1, 0, k##Type) {}                    \
+  };                                                                           \
+  Store##Type##NoWriteBarrier##Operator kStore##Type##NoWriteBarrier;          \
+  Store##Type##FullWriteBarrier##Operator kStore##Type##FullWriteBarrier;      \
+  CheckedStore##Type##Operator kCheckedStore##Type;
   MACHINE_TYPE_LIST(STORE)
 #undef STORE
 };
 
 
-static base::LazyInstance<MachineOperatorBuilderImpl>::type kImpl =
+static base::LazyInstance<MachineOperatorGlobalCache>::type kCache =
     LAZY_INSTANCE_INITIALIZER;
 
 
-MachineOperatorBuilder::MachineOperatorBuilder(MachineType word)
-    : impl_(kImpl.Get()), word_(word) {
+MachineOperatorBuilder::MachineOperatorBuilder(Zone* zone, MachineType word,
+                                               Flags flags)
+    : zone_(zone), cache_(kCache.Get()), word_(word), flags_(flags) {
   DCHECK(word == kRepWord32 || word == kRepWord64);
 }
 
 
-#define PURE(Name, properties, input_count, output_count) \
-  const Operator* MachineOperatorBuilder::Name() { return &impl_.k##Name; }
+#define PURE(Name, properties, value_input_count, control_input_count, \
+             output_count)                                             \
+  const Operator* MachineOperatorBuilder::Name() { return &cache_.k##Name; }
 PURE_OP_LIST(PURE)
 #undef PURE
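 
 // NOTE: The renamed MachineOperatorGlobalCache keeps one statically
 // allocated instance per pure operator, so operator equality is pointer
 // equality across builders (what the deleted InstancesAreGloballyShared
 // tests verified). A stripped-down model of the pattern with hypothetical
 // names, LazyInstance replaced by a function-local static:
 //
 //   #include <cassert>
 //
 //   struct Op { const char* mnemonic; };
 //
 //   struct Cache { Op kInt32Add{"Int32Add"}; };
 //
 //   const Cache& GetCache() {
 //     static Cache cache;  // stand-in for base::LazyInstance
 //     return cache;
 //   }
 //
 //   struct Builder {
 //     const Op* Int32Add() const { return &GetCache().kInt32Add; }
 //   };
 //
 //   int main() {
 //     Builder a, b;
 //     assert(a.Int32Add() == b.Int32Add());  // globally shared instance
 //   }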
 
@@ -206,28 +244,29 @@
   switch (rep) {
 #define LOAD(Type) \
   case k##Type:    \
-    return &impl_.k##Load##Type;
+    return &cache_.kLoad##Type;
     MACHINE_TYPE_LIST(LOAD)
 #undef LOAD
-
     default:
       break;
   }
-  UNREACHABLE();
-  return NULL;
+  // Uncached.
+  return new (zone_) Operator1<LoadRepresentation>(  // --
+      IrOpcode::kLoad, Operator::kNoThrow | Operator::kNoWrite, "Load", 2, 1, 1,
+      1, 1, 0, rep);
 }
 
 
 const Operator* MachineOperatorBuilder::Store(StoreRepresentation rep) {
   switch (rep.machine_type()) {
-#define STORE(Type)                                     \
-  case k##Type:                                         \
-    switch (rep.write_barrier_kind()) {                 \
-      case kNoWriteBarrier:                             \
-        return &impl_.k##Store##Type##NoWriteBarrier;   \
-      case kFullWriteBarrier:                           \
-        return &impl_.k##Store##Type##FullWriteBarrier; \
-    }                                                   \
+#define STORE(Type)                                      \
+  case k##Type:                                          \
+    switch (rep.write_barrier_kind()) {                  \
+      case kNoWriteBarrier:                              \
+        return &cache_.k##Store##Type##NoWriteBarrier;   \
+      case kFullWriteBarrier:                            \
+        return &cache_.k##Store##Type##FullWriteBarrier; \
+    }                                                    \
     break;
     MACHINE_TYPE_LIST(STORE)
 #undef STORE
@@ -235,8 +274,46 @@
     default:
       break;
   }
-  UNREACHABLE();
-  return NULL;
+  // Uncached.
+  return new (zone_) Operator1<StoreRepresentation>(  // --
+      IrOpcode::kStore, Operator::kNoRead | Operator::kNoThrow, "Store", 3, 1,
+      1, 0, 1, 0, rep);
+}
+
+
+const Operator* MachineOperatorBuilder::CheckedLoad(
+    CheckedLoadRepresentation rep) {
+  switch (rep) {
+#define LOAD(Type) \
+  case k##Type:    \
+    return &cache_.kCheckedLoad##Type;
+    MACHINE_TYPE_LIST(LOAD)
+#undef LOAD
+    default:
+      break;
+  }
+  // Uncached.
+  return new (zone_) Operator1<CheckedLoadRepresentation>(
+      IrOpcode::kCheckedLoad, Operator::kNoThrow | Operator::kNoWrite,
+      "CheckedLoad", 3, 1, 1, 1, 1, 0, rep);
+}
+
+
+const Operator* MachineOperatorBuilder::CheckedStore(
+    CheckedStoreRepresentation rep) {
+  switch (rep) {
+#define STORE(Type) \
+  case k##Type:     \
+    return &cache_.kCheckedStore##Type;
+    MACHINE_TYPE_LIST(STORE)
+#undef STORE
+    default:
+      break;
+  }
+  // Uncached.
+  return new (zone_) Operator1<CheckedStoreRepresentation>(
+      IrOpcode::kCheckedStore, Operator::kNoRead | Operator::kNoThrow,
+      "CheckedStore", 4, 1, 1, 0, 1, 0, rep);
 }
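 
 // NOTE: A rough C++ analogue of the new checked-load operator as described
 // in machine-operator.h ("checked-load heap, index, length"); the zero
 // default for out-of-range indices is an assumption for illustration, not
 // the exact lowering:
 //
 //   #include <cassert>
 //   #include <cstddef>
 //   #include <cstdint>
 //
 //   int32_t CheckedLoadInt32(const int32_t* heap, size_t index,
 //                            size_t length) {
 //     return index < length ? heap[index] : 0;
 //   }
 //
 //   int main() {
 //     int32_t heap[3] = {10, 20, 30};
 //     assert(CheckedLoadInt32(heap, 1, 3) == 20);
 //     assert(CheckedLoadInt32(heap, 7, 3) == 0);  // bounds check failed
 //   }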
 
 }  // namespace compiler
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
index 92c8ac4..42f3130 100644
--- a/src/compiler/machine-operator.h
+++ b/src/compiler/machine-operator.h
@@ -5,6 +5,7 @@
 #ifndef V8_COMPILER_MACHINE_OPERATOR_H_
 #define V8_COMPILER_MACHINE_OPERATOR_H_
 
+#include "src/base/flags.h"
 #include "src/compiler/machine-type.h"
 
 namespace v8 {
@@ -12,21 +13,22 @@
 namespace compiler {
 
 // Forward declarations.
-struct MachineOperatorBuilderImpl;
+struct MachineOperatorGlobalCache;
 class Operator;
 
 
 // Supported write barrier modes.
 enum WriteBarrierKind { kNoWriteBarrier, kFullWriteBarrier };
 
-OStream& operator<<(OStream& os, const WriteBarrierKind& write_barrier_kind);
+std::ostream& operator<<(std::ostream& os, WriteBarrierKind);
 
 
+// A Load needs a MachineType.
 typedef MachineType LoadRepresentation;
 
 
-// A Store needs a MachineType and a WriteBarrierKind
-// in order to emit the correct write barrier.
+// A Store needs a MachineType and a WriteBarrierKind in order to emit the
+// correct write barrier.
 class StoreRepresentation FINAL {
  public:
   StoreRepresentation(MachineType machine_type,
@@ -41,26 +43,49 @@
   WriteBarrierKind write_barrier_kind_;
 };
 
-inline bool operator==(const StoreRepresentation& rep1,
-                       const StoreRepresentation& rep2) {
-  return rep1.machine_type() == rep2.machine_type() &&
-         rep1.write_barrier_kind() == rep2.write_barrier_kind();
-}
+bool operator==(StoreRepresentation, StoreRepresentation);
+bool operator!=(StoreRepresentation, StoreRepresentation);
 
-inline bool operator!=(const StoreRepresentation& rep1,
-                       const StoreRepresentation& rep2) {
-  return !(rep1 == rep2);
-}
+size_t hash_value(StoreRepresentation);
 
-OStream& operator<<(OStream& os, const StoreRepresentation& rep);
+std::ostream& operator<<(std::ostream&, StoreRepresentation);
+
+StoreRepresentation const& StoreRepresentationOf(Operator const*);
+
+
+// A CheckedLoad needs a MachineType.
+typedef MachineType CheckedLoadRepresentation;
+
+CheckedLoadRepresentation CheckedLoadRepresentationOf(Operator const*);
+
+
+// A CheckedStore needs a MachineType.
+typedef MachineType CheckedStoreRepresentation;
+
+CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const*);
 
 
 // Interface for building machine-level operators. These operators are
 // machine-level but machine-independent and thus define a language suitable
 // for generating code to run on architectures such as ia32, x64, arm, etc.
-class MachineOperatorBuilder FINAL {
+class MachineOperatorBuilder FINAL : public ZoneObject {
  public:
-  explicit MachineOperatorBuilder(MachineType word = kMachPtr);
+  // Flags that specify which operations are available. This is useful
+  // for operations that are unsupported by some back-ends.
+  enum Flag {
+    kNoFlags = 0u,
+    kFloat64Floor = 1u << 0,
+    kFloat64Ceil = 1u << 1,
+    kFloat64RoundTruncate = 1u << 2,
+    kFloat64RoundTiesAway = 1u << 3,
+    kInt32DivIsSafe = 1u << 4,
+    kUint32DivIsSafe = 1u << 5,
+    kWord32ShiftIsSafe = 1u << 6
+  };
+  typedef base::Flags<Flag, unsigned> Flags;
+
+  explicit MachineOperatorBuilder(Zone* zone, MachineType word = kMachPtr,
+                                  Flags supportedOperators = kNoFlags);
 
   const Operator* Word32And();
   const Operator* Word32Or();
@@ -70,6 +95,7 @@
   const Operator* Word32Sar();
   const Operator* Word32Ror();
   const Operator* Word32Equal();
+  bool Word32ShiftIsSafe() const { return flags_ & kWord32ShiftIsSafe; }
 
   const Operator* Word64And();
   const Operator* Word64Or();
@@ -85,24 +111,29 @@
   const Operator* Int32Sub();
   const Operator* Int32SubWithOverflow();
   const Operator* Int32Mul();
+  const Operator* Int32MulHigh();
   const Operator* Int32Div();
-  const Operator* Int32UDiv();
   const Operator* Int32Mod();
-  const Operator* Int32UMod();
   const Operator* Int32LessThan();
   const Operator* Int32LessThanOrEqual();
+  const Operator* Uint32Div();
   const Operator* Uint32LessThan();
   const Operator* Uint32LessThanOrEqual();
+  const Operator* Uint32Mod();
+  const Operator* Uint32MulHigh();
+  bool Int32DivIsSafe() const { return flags_ & kInt32DivIsSafe; }
+  bool Uint32DivIsSafe() const { return flags_ & kUint32DivIsSafe; }
 
   const Operator* Int64Add();
   const Operator* Int64Sub();
   const Operator* Int64Mul();
   const Operator* Int64Div();
-  const Operator* Int64UDiv();
   const Operator* Int64Mod();
-  const Operator* Int64UMod();
   const Operator* Int64LessThan();
   const Operator* Int64LessThanOrEqual();
+  const Operator* Uint64Div();
+  const Operator* Uint64LessThan();
+  const Operator* Uint64Mod();
 
   // These operators change the representation of numbers while preserving the
   // value of the number. Narrowing operators assume the input is representable
@@ -136,12 +167,30 @@
   const Operator* Float64LessThan();
   const Operator* Float64LessThanOrEqual();
 
+  // Floating point rounding.
+  const Operator* Float64Floor();
+  const Operator* Float64Ceil();
+  const Operator* Float64RoundTruncate();
+  const Operator* Float64RoundTiesAway();
+  bool HasFloat64Floor() { return flags_ & kFloat64Floor; }
+  bool HasFloat64Ceil() { return flags_ & kFloat64Ceil; }
+  bool HasFloat64RoundTruncate() { return flags_ & kFloat64RoundTruncate; }
+  bool HasFloat64RoundTiesAway() { return flags_ & kFloat64RoundTiesAway; }
+
   // load [base + index]
   const Operator* Load(LoadRepresentation rep);
 
   // store [base + index], value
   const Operator* Store(StoreRepresentation rep);
 
+  // Access to the machine stack.
+  const Operator* LoadStackPointer();
+
+  // checked-load heap, index, length
+  const Operator* CheckedLoad(CheckedLoadRepresentation);
+  // checked-store heap, index, length, value
+  const Operator* CheckedStore(CheckedStoreRepresentation);
+
   // Target machine word-size assumed by this builder.
   bool Is32() const { return word() == kRepWord32; }
   bool Is64() const { return word() == kRepWord64; }
@@ -162,11 +211,12 @@
   V(Int, Sub)             \
   V(Int, Mul)             \
   V(Int, Div)             \
-  V(Int, UDiv)            \
   V(Int, Mod)             \
-  V(Int, UMod)            \
   V(Int, LessThan)        \
-  V(Int, LessThanOrEqual)
+  V(Int, LessThanOrEqual) \
+  V(Uint, Div)            \
+  V(Uint, LessThan)       \
+  V(Uint, Mod)
 #define PSEUDO_OP(Prefix, Suffix)                                \
   const Operator* Prefix##Suffix() {                             \
     return Is32() ? Prefix##32##Suffix() : Prefix##64##Suffix(); \
@@ -176,10 +226,17 @@
 #undef PSEUDO_OP_LIST
 
  private:
-  const MachineOperatorBuilderImpl& impl_;
+  Zone* zone_;
+  const MachineOperatorGlobalCache& cache_;
   const MachineType word_;
+  const Flags flags_;
+
+  DISALLOW_COPY_AND_ASSIGN(MachineOperatorBuilder);
 };
 
+
+DEFINE_OPERATORS_FOR_FLAGS(MachineOperatorBuilder::Flags)
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
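
Note on the new Flags mechanism above: each back-end advertises its optional
operations at construction time, and the compiler can query them before
lowering. A minimal usage sketch (hypothetical caller code, not part of this
patch; `zone` is assumed to be a live Zone*):

  MachineOperatorBuilder::Flags flags =
      MachineOperatorBuilder::kFloat64Floor |
      MachineOperatorBuilder::kFloat64Ceil;
  MachineOperatorBuilder machine(zone, kMachPtr, flags);
  if (machine.HasFloat64Floor()) {
    // The back-end can lower rounding directly to the Float64Floor operator.
  }
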
diff --git a/src/compiler/machine-type.cc b/src/compiler/machine-type.cc
index 94aa124..7475a03 100644
--- a/src/compiler/machine-type.cc
+++ b/src/compiler/machine-type.cc
@@ -17,7 +17,7 @@
   }
 
 
-OStream& operator<<(OStream& os, const MachineType& type) {
+std::ostream& operator<<(std::ostream& os, const MachineType& type) {
   bool before = false;
   PRINT(kRepBit);
   PRINT(kRepWord8);
diff --git a/src/compiler/machine-type.h b/src/compiler/machine-type.h
index 88b482c..4c51a9f 100644
--- a/src/compiler/machine-type.h
+++ b/src/compiler/machine-type.h
@@ -5,15 +5,14 @@
 #ifndef V8_COMPILER_MACHINE_TYPE_H_
 #define V8_COMPILER_MACHINE_TYPE_H_
 
+#include <iosfwd>
+
 #include "src/base/bits.h"
 #include "src/globals.h"
 #include "src/zone.h"
 
 namespace v8 {
 namespace internal {
-
-class OStream;
-
 namespace compiler {
 
 // Machine-level types and representations.
@@ -40,6 +39,7 @@
 
   // Machine types.
   kMachNone = 0,
+  kMachBool = kRepBit | kTypeBool,
   kMachFloat32 = kRepFloat32 | kTypeNumber,
   kMachFloat64 = kRepFloat64 | kTypeNumber,
   kMachInt8 = kRepWord8 | kTypeInt32,
@@ -50,11 +50,13 @@
   kMachUint32 = kRepWord32 | kTypeUint32,
   kMachInt64 = kRepWord64 | kTypeInt64,
   kMachUint64 = kRepWord64 | kTypeUint64,
+  kMachIntPtr = (kPointerSize == 4) ? kMachInt32 : kMachInt64,
+  kMachUintPtr = (kPointerSize == 4) ? kMachUint32 : kMachUint64,
   kMachPtr = (kPointerSize == 4) ? kRepWord32 : kRepWord64,
   kMachAnyTagged = kRepTagged | kTypeAny
 };
 
-OStream& operator<<(OStream& os, const MachineType& type);
+std::ostream& operator<<(std::ostream& os, const MachineType& type);
 
 typedef uint16_t MachineTypeUnion;
 
@@ -79,26 +81,34 @@
   return static_cast<MachineType>(result);
 }
 
-// Gets the element size in bytes of the machine type.
-inline int ElementSizeOf(MachineType machine_type) {
+// Gets the log2 of the element size in bytes of the machine type.
+inline int ElementSizeLog2Of(MachineType machine_type) {
   switch (RepresentationOf(machine_type)) {
     case kRepBit:
     case kRepWord8:
-      return 1;
+      return 0;
     case kRepWord16:
-      return 2;
+      return 1;
     case kRepWord32:
     case kRepFloat32:
-      return 4;
+      return 2;
     case kRepWord64:
     case kRepFloat64:
-      return 8;
+      return 3;
     case kRepTagged:
-      return kPointerSize;
+      return kPointerSizeLog2;
     default:
-      UNREACHABLE();
-      return kPointerSize;
+      break;
   }
+  UNREACHABLE();
+  return -1;
+}
+
+// Gets the element size in bytes of the machine type.
+inline int ElementSizeOf(MachineType machine_type) {
+  const int shift = ElementSizeLog2Of(machine_type);
+  DCHECK_NE(-1, shift);
+  return 1 << shift;
 }
 
 // Describes the inputs and outputs of a function or call.
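
A quick worked example of the new helpers above (illustrative only):
kMachInt32 has a kRepWord32 representation, so ElementSizeLog2Of(kMachInt32)
returns 2 and ElementSizeOf(kMachInt32) returns 1 << 2 == 4; a kRepTagged
value scales by kPointerSizeLog2 instead.

  int index_shift = ElementSizeLog2Of(kMachInt32);  // == 2
  int byte_size = ElementSizeOf(kMachInt32);        // == 4
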
diff --git a/src/compiler/mips/OWNERS b/src/compiler/mips/OWNERS
new file mode 100644
index 0000000..5508ba6
--- /dev/null
+++ b/src/compiler/mips/OWNERS
@@ -0,0 +1,5 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips/code-generator-mips.cc
new file mode 100644
index 0000000..dd92837
--- /dev/null
+++ b/src/compiler/mips/code-generator-mips.cc
@@ -0,0 +1,1185 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/mips/macro-assembler-mips.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+// TODO(plind): Possibly avoid using these lithium names.
+#define kScratchReg kLithiumScratchReg
+#define kCompareReg kLithiumScratchReg2
+#define kScratchReg2 kLithiumScratchReg2
+#define kScratchDoubleReg kLithiumScratchDouble
+
+
+// TODO(plind): consider renaming these macros.
+#define TRACE_MSG(msg)                                                      \
+  PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
+         __LINE__)
+
+#define TRACE_UNIMPL()                                                       \
+  PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \
+         __LINE__)
+
+
+// Adds MIPS-specific methods to convert InstructionOperands.
+class MipsOperandConverter FINAL : public InstructionOperandConverter {
+ public:
+  MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
+      : InstructionOperandConverter(gen, instr) {}
+
+  FloatRegister OutputSingleRegister(int index = 0) {
+    return ToSingleRegister(instr_->OutputAt(index));
+  }
+
+  FloatRegister InputSingleRegister(int index) {
+    return ToSingleRegister(instr_->InputAt(index));
+  }
+
+  FloatRegister ToSingleRegister(InstructionOperand* op) {
+    // The single (float) and double register namespaces are the same on
+    // MIPS; both are typedefs of FPURegister.
+    return ToDoubleRegister(op);
+  }
+
+  Operand InputImmediate(int index) {
+    Constant constant = ToConstant(instr_->InputAt(index));
+    switch (constant.type()) {
+      case Constant::kInt32:
+        return Operand(constant.ToInt32());
+      case Constant::kFloat32:
+        return Operand(
+            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+      case Constant::kFloat64:
+        return Operand(
+            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+      case Constant::kInt64:
+      case Constant::kExternalReference:
+      case Constant::kHeapObject:
+        // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
+        //    maybe not done on arm due to const pool ??
+        break;
+      case Constant::kRpoNumber:
+        UNREACHABLE();  // TODO(titzer): RPO immediates on mips?
+        break;
+    }
+    UNREACHABLE();
+    return Operand(zero_reg);
+  }
+
+  Operand InputOperand(int index) {
+    InstructionOperand* op = instr_->InputAt(index);
+    if (op->IsRegister()) {
+      return Operand(ToRegister(op));
+    }
+    return InputImmediate(index);
+  }
+
+  MemOperand MemoryOperand(int* first_index) {
+    const int index = *first_index;
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_None:
+        break;
+      case kMode_MRI:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+      case kMode_MRR:
+        // TODO(plind): r6 address mode, to be implemented ...
+        UNREACHABLE();
+    }
+    UNREACHABLE();
+    return MemOperand(no_reg);
+  }
+
+  MemOperand MemoryOperand(int index = 0) { return MemoryOperand(&index); }
+
+  MemOperand ToMemOperand(InstructionOperand* op) const {
+    DCHECK(op != NULL);
+    DCHECK(!op->IsRegister());
+    DCHECK(!op->IsDoubleRegister());
+    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+    // The linkage computes where all spill slots are located.
+    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+  }
+};
+
+
+static inline bool HasRegisterInput(Instruction* instr, int index) {
+  return instr->InputAt(index)->IsRegister();
+}
+
+
+namespace {
+
+class OutOfLineLoadSingle FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL {
+    __ Move(result_, std::numeric_limits<float>::quiet_NaN());
+  }
+
+ private:
+  FloatRegister const result_;
+};
+
+
+class OutOfLineLoadDouble FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL {
+    __ Move(result_, std::numeric_limits<double>::quiet_NaN());
+  }
+
+ private:
+  DoubleRegister const result_;
+};
+
+
+class OutOfLineLoadInteger FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL { __ mov(result_, zero_reg); }
+
+ private:
+  Register const result_;
+};
+
+
+class OutOfLineRound : public OutOfLineCode {
+ public:
+  OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL {
+    // Handle the rounding-to-zero case, where the sign has to be preserved.
+    // The high bits of the double input are already in kScratchReg.
+    __ srl(at, kScratchReg, 31);
+    __ sll(at, at, 31);
+    __ Mthc1(at, result_);
+  }
+
+ private:
+  DoubleRegister const result_;
+};
+
+
+class OutOfLineTruncate FINAL : public OutOfLineRound {
+ public:
+  OutOfLineTruncate(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineRound(gen, result) {}
+};
+
+
+class OutOfLineFloor FINAL : public OutOfLineRound {
+ public:
+  OutOfLineFloor(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineRound(gen, result) {}
+};
+
+
+class OutOfLineCeil FINAL : public OutOfLineRound {
+ public:
+  OutOfLineCeil(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineRound(gen, result) {}
+};
+
+}  // namespace
+
+
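+// Note on the ASSEMBLE_CHECKED_* macros below: the bounds check is inlined.
+// Loads branch to an out-of-line stub when the offset is not below the
+// length (producing zero for integers, NaN for floats); stores simply branch
+// past the store. When the offset is in a register, the branch's delay slot
+// computes the effective address.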
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr)                         \
+  do {                                                                        \
+    auto result = i.Output##width##Register();                                \
+    auto ool = new (zone()) OutOfLineLoad##width(this, result);               \
+    if (instr->InputAt(0)->IsRegister()) {                                    \
+      auto offset = i.InputRegister(0);                                       \
+      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
+      __ addu(at, i.InputRegister(2), offset);                                \
+      __ asm_instr(result, MemOperand(at, 0));                                \
+    } else {                                                                  \
+      auto offset = i.InputOperand(0).immediate();                            \
+      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
+      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
+    }                                                                         \
+    __ bind(ool->exit());                                                     \
+  } while (0)
+
+
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                              \
+  do {                                                                        \
+    auto result = i.OutputRegister();                                         \
+    auto ool = new (zone()) OutOfLineLoadInteger(this, result);               \
+    if (instr->InputAt(0)->IsRegister()) {                                    \
+      auto offset = i.InputRegister(0);                                       \
+      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
+      __ addu(at, i.InputRegister(2), offset);                                \
+      __ asm_instr(result, MemOperand(at, 0));                                \
+    } else {                                                                  \
+      auto offset = i.InputOperand(0).immediate();                            \
+      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
+      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
+    }                                                                         \
+    __ bind(ool->exit());                                                     \
+  } while (0)
+
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr)                 \
+  do {                                                                 \
+    Label done;                                                        \
+    if (instr->InputAt(0)->IsRegister()) {                             \
+      auto offset = i.InputRegister(0);                                \
+      auto value = i.Input##width##Register(2);                        \
+      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
+      __ addu(at, i.InputRegister(3), offset);                         \
+      __ asm_instr(value, MemOperand(at, 0));                          \
+    } else {                                                           \
+      auto offset = i.InputOperand(0).immediate();                     \
+      auto value = i.Input##width##Register(2);                        \
+      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
+      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
+    }                                                                  \
+    __ bind(&done);                                                    \
+  } while (0)
+
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                      \
+  do {                                                                 \
+    Label done;                                                        \
+    if (instr->InputAt(0)->IsRegister()) {                             \
+      auto offset = i.InputRegister(0);                                \
+      auto value = i.InputRegister(2);                                 \
+      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
+      __ addu(at, i.InputRegister(3), offset);                         \
+      __ asm_instr(value, MemOperand(at, 0));                          \
+    } else {                                                           \
+      auto offset = i.InputOperand(0).immediate();                     \
+      auto value = i.InputRegister(2);                                 \
+      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
+      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
+    }                                                                  \
+    __ bind(&done);                                                    \
+  } while (0)
+
+
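+// ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE first tests the input's exponent: values
+// with an exponent of at least kExponentBias + kMantissaBits are already
+// integral (or NaN/Infinity) and are passed through via the delay slot.
+// Otherwise the rounding instruction yields a 64-bit integer that is
+// converted back to double; a zero result takes the OutOfLineRound path to
+// restore the sign of a negative zero.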
+#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(asm_instr, operation)                  \
+  do {                                                                         \
+    auto ool =                                                                 \
+        new (zone()) OutOfLine##operation(this, i.OutputDoubleRegister());     \
+    Label done;                                                                \
+    __ Mfhc1(kScratchReg, i.InputDoubleRegister(0));                           \
+    __ Ext(at, kScratchReg, HeapNumber::kExponentShift,                        \
+           HeapNumber::kExponentBits);                                         \
+    __ Branch(USE_DELAY_SLOT, &done, hs, at,                                   \
+              Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
+    __ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));              \
+    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));          \
+    __ Move(at, kScratchReg2, i.OutputDoubleRegister());                       \
+    __ or_(at, at, kScratchReg2);                                              \
+    __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg));        \
+    __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());            \
+    __ bind(ool->exit());                                                      \
+    __ bind(&done);                                                            \
+  } while (0)
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+  MipsOperandConverter i(this, instr);
+  InstructionCode opcode = instr->opcode();
+
+  switch (ArchOpcodeField::decode(opcode)) {
+    case kArchCallCodeObject: {
+      EnsureSpaceForLazyDeopt();
+      if (instr->InputAt(0)->IsImmediate()) {
+        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
+                RelocInfo::CODE_TARGET);
+      } else {
+        __ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
+        __ Call(at);
+      }
+      AddSafepointAndDeopt(instr);
+      break;
+    }
+    case kArchCallJSFunction: {
+      EnsureSpaceForLazyDeopt();
+      Register func = i.InputRegister(0);
+      if (FLAG_debug_code) {
+        // Check the function's context matches the context argument.
+        __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+        __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
+      }
+
+      __ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+      __ Call(at);
+      AddSafepointAndDeopt(instr);
+      break;
+    }
+    case kArchJmp:
+      AssembleArchJump(i.InputRpo(0));
+      break;
+    case kArchNop:
+      // Don't emit code for nops.
+      break;
+    case kArchRet:
+      AssembleReturn();
+      break;
+    case kArchStackPointer:
+      __ mov(i.OutputRegister(), sp);
+      break;
+    case kArchTruncateDoubleToI:
+      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+      break;
+    case kMipsAdd:
+      __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsAddOvf:
+      __ AdduAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0),
+                                 i.InputOperand(1), kCompareReg, kScratchReg);
+      break;
+    case kMipsSub:
+      __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsSubOvf:
+      __ SubuAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0),
+                                 i.InputOperand(1), kCompareReg, kScratchReg);
+      break;
+    case kMipsMul:
+      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsMulHigh:
+      __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsMulHighU:
+      __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsDiv:
+      __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsDivU:
+      __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsMod:
+      __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsModU:
+      __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsAnd:
+      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsOr:
+      __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsXor:
+      __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsShl:
+      if (instr->InputAt(1)->IsRegister()) {
+        __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        int32_t imm = i.InputOperand(1).immediate();
+        __ sll(i.OutputRegister(), i.InputRegister(0), imm);
+      }
+      break;
+    case kMipsShr:
+      if (instr->InputAt(1)->IsRegister()) {
+        __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        int32_t imm = i.InputOperand(1).immediate();
+        __ srl(i.OutputRegister(), i.InputRegister(0), imm);
+      }
+      break;
+    case kMipsSar:
+      if (instr->InputAt(1)->IsRegister()) {
+        __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        int32_t imm = i.InputOperand(1).immediate();
+        __ sra(i.OutputRegister(), i.InputRegister(0), imm);
+      }
+      break;
+    case kMipsRor:
+      __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsTst:
+      // Pseudo-instruction used for tst/branch. No opcode emitted here.
+      break;
+    case kMipsCmp:
+      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+      break;
+    case kMipsMov:
+      // TODO(plind): Should we combine mov/li like this, or use separate instr?
+      //    - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
+      if (HasRegisterInput(instr, 0)) {
+        __ mov(i.OutputRegister(), i.InputRegister(0));
+      } else {
+        __ li(i.OutputRegister(), i.InputOperand(0));
+      }
+      break;
+
+    case kMipsCmpD:
+      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
+      break;
+    case kMipsAddD:
+      // TODO(plind): add special case: combine mult & add.
+      __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+               i.InputDoubleRegister(1));
+      break;
+    case kMipsSubD:
+      __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+               i.InputDoubleRegister(1));
+      break;
+    case kMipsMulD:
+      // TODO(plind): add special case: right op is -1.0, see arm port.
+      __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+               i.InputDoubleRegister(1));
+      break;
+    case kMipsDivD:
+      __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+               i.InputDoubleRegister(1));
+      break;
+    case kMipsModD: {
+      // TODO(bmeurer): We should really get rid of this special instruction,
+      // and generate a CallAddress instruction instead.
+      FrameScope scope(masm(), StackFrame::MANUAL);
+      __ PrepareCallCFunction(0, 2, kScratchReg);
+      __ MovToFloatParameters(i.InputDoubleRegister(0),
+                              i.InputDoubleRegister(1));
+      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+                       0, 2);
+      // Move the result into the double result register.
+      __ MovFromFloatResult(i.OutputDoubleRegister());
+      break;
+    }
+    case kMipsFloat64Floor: {
+      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
+      break;
+    }
+    case kMipsFloat64Ceil: {
+      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
+      break;
+    }
+    case kMipsFloat64RoundTruncate: {
+      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate);
+      break;
+    }
+    case kMipsSqrtD: {
+      __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    }
+    case kMipsCvtSD: {
+      __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
+      break;
+    }
+    case kMipsCvtDS: {
+      __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
+      break;
+    }
+    case kMipsCvtDW: {
+      FPURegister scratch = kScratchDoubleReg;
+      __ mtc1(i.InputRegister(0), scratch);
+      __ cvt_d_w(i.OutputDoubleRegister(), scratch);
+      break;
+    }
+    case kMipsCvtDUw: {
+      FPURegister scratch = kScratchDoubleReg;
+      __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
+      break;
+    }
+    case kMipsTruncWD: {
+      FPURegister scratch = kScratchDoubleReg;
+      // Other architectures use round-to-zero here, so we follow.
+      __ trunc_w_d(scratch, i.InputDoubleRegister(0));
+      __ mfc1(i.OutputRegister(), scratch);
+      break;
+    }
+    case kMipsTruncUwD: {
+      FPURegister scratch = kScratchDoubleReg;
+      // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
+      __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
+      break;
+    }
+    // ... more basic instructions ...
+
+    case kMipsLbu:
+      __ lbu(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kMipsLb:
+      __ lb(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kMipsSb:
+      __ sb(i.InputRegister(2), i.MemoryOperand());
+      break;
+    case kMipsLhu:
+      __ lhu(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kMipsLh:
+      __ lh(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kMipsSh:
+      __ sh(i.InputRegister(2), i.MemoryOperand());
+      break;
+    case kMipsLw:
+      __ lw(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kMipsSw:
+      __ sw(i.InputRegister(2), i.MemoryOperand());
+      break;
+    case kMipsLwc1: {
+      __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
+      break;
+    }
+    case kMipsSwc1: {
+      int index = 0;
+      MemOperand operand = i.MemoryOperand(&index);
+      __ swc1(i.InputSingleRegister(index), operand);
+      break;
+    }
+    case kMipsLdc1:
+      __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
+      break;
+    case kMipsSdc1:
+      __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
+      break;
+    case kMipsPush:
+      __ Push(i.InputRegister(0));
+      break;
+    case kMipsStackClaim: {
+      int words = MiscField::decode(instr->opcode());
+      __ Subu(sp, sp, Operand(words << kPointerSizeLog2));
+      break;
+    }
+    case kMipsStoreToStackSlot: {
+      int slot = MiscField::decode(instr->opcode());
+      __ sw(i.InputRegister(0), MemOperand(sp, slot << kPointerSizeLog2));
+      break;
+    }
+    case kMipsStoreWriteBarrier: {
+      Register object = i.InputRegister(0);
+      Register index = i.InputRegister(1);
+      Register value = i.InputRegister(2);
+      __ addu(index, object, index);
+      __ sw(value, MemOperand(index));
+      SaveFPRegsMode mode =
+          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+      RAStatus ra_status = kRAHasNotBeenSaved;
+      __ RecordWrite(object, index, value, ra_status, mode);
+      break;
+    }
+    case kCheckedLoadInt8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
+      break;
+    case kCheckedLoadUint8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lbu);
+      break;
+    case kCheckedLoadInt16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lh);
+      break;
+    case kCheckedLoadUint16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lhu);
+      break;
+    case kCheckedLoadWord32:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lw);
+      break;
+    case kCheckedLoadFloat32:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
+      break;
+    case kCheckedLoadFloat64:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(Double, ldc1);
+      break;
+    case kCheckedStoreWord8:
+      ASSEMBLE_CHECKED_STORE_INTEGER(sb);
+      break;
+    case kCheckedStoreWord16:
+      ASSEMBLE_CHECKED_STORE_INTEGER(sh);
+      break;
+    case kCheckedStoreWord32:
+      ASSEMBLE_CHECKED_STORE_INTEGER(sw);
+      break;
+    case kCheckedStoreFloat32:
+      ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
+      break;
+    case kCheckedStoreFloat64:
+      ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
+      break;
+  }
+}
+
+
+#define UNSUPPORTED_COND(opcode, condition)                                  \
+  OFStream out(stdout);                                                      \
+  out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
+  UNIMPLEMENTED();
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+  MipsOperandConverter i(this, instr);
+  Label* tlabel = branch->true_label;
+  Label* flabel = branch->false_label;
+  Condition cc = kNoCondition;
+
+  // MIPS does not have condition code flags, so compare and branch are
+  // implemented differently than on other architectures. The compare
+  // operations emit MIPS pseudo-instructions, which are handled here by
+  // branch instructions that do the actual comparison. It is essential that
+  // the input registers to the compare pseudo-op are not modified before this
+  // branch op, as they are tested here.
+  // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
+  //    not separated by other instructions.
+
+  if (instr->arch_opcode() == kMipsTst) {
+    switch (branch->condition) {
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kEqual:
+        cc = eq;
+        break;
+      default:
+        UNSUPPORTED_COND(kMipsTst, branch->condition);
+        break;
+    }
+    __ And(at, i.InputRegister(0), i.InputOperand(1));
+    __ Branch(tlabel, cc, at, Operand(zero_reg));
+
+  } else if (instr->arch_opcode() == kMipsAddOvf ||
+             instr->arch_opcode() == kMipsSubOvf) {
+    // kMipsAddOvf and SubOvf emit a negative result to 'kCompareReg' on
+    // overflow.
+    switch (branch->condition) {
+      case kOverflow:
+        cc = lt;
+        break;
+      case kNotOverflow:
+        cc = ge;
+        break;
+      default:
+        UNSUPPORTED_COND(kMipsAddOvf, branch->condition);
+        break;
+    }
+    __ Branch(tlabel, cc, kCompareReg, Operand(zero_reg));
+
+  } else if (instr->arch_opcode() == kMipsCmp) {
+    switch (branch->condition) {
+      case kEqual:
+        cc = eq;
+        break;
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kSignedLessThan:
+        cc = lt;
+        break;
+      case kSignedGreaterThanOrEqual:
+        cc = ge;
+        break;
+      case kSignedLessThanOrEqual:
+        cc = le;
+        break;
+      case kSignedGreaterThan:
+        cc = gt;
+        break;
+      case kUnsignedLessThan:
+        cc = lo;
+        break;
+      case kUnsignedGreaterThanOrEqual:
+        cc = hs;
+        break;
+      case kUnsignedLessThanOrEqual:
+        cc = ls;
+        break;
+      case kUnsignedGreaterThan:
+        cc = hi;
+        break;
+      default:
+        UNSUPPORTED_COND(kMipsCmp, branch->condition);
+        break;
+    }
+    __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
+
+    if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.
+
+  } else if (instr->arch_opcode() == kMipsCmpD) {
+    // TODO(dusmil): Optimize unordered checks to use fewer instructions,
+    // even if we have to unfold the BranchF macro.
+    Label* nan = flabel;
+    switch (branch->condition) {
+      case kUnorderedEqual:
+        cc = eq;
+        break;
+      case kUnorderedNotEqual:
+        cc = ne;
+        nan = tlabel;
+        break;
+      case kUnorderedLessThan:
+        cc = lt;
+        break;
+      case kUnorderedGreaterThanOrEqual:
+        cc = ge;
+        nan = tlabel;
+        break;
+      case kUnorderedLessThanOrEqual:
+        cc = le;
+        break;
+      case kUnorderedGreaterThan:
+        cc = gt;
+        nan = tlabel;
+        break;
+      default:
+        UNSUPPORTED_COND(kMipsCmpD, branch->condition);
+        break;
+    }
+    __ BranchF(tlabel, nan, cc, i.InputDoubleRegister(0),
+               i.InputDoubleRegister(1));
+
+    if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.
+
+  } else {
+    PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
+           instr->arch_opcode());
+    UNIMPLEMENTED();
+  }
+}
+
+
+void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+  if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
+}
+
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+                                        FlagsCondition condition) {
+  MipsOperandConverter i(this, instr);
+  Label done;
+
+  // Materialize a full 32-bit 1 or 0 value. The result register is always the
+  // last output of the instruction.
+  Label false_value;
+  DCHECK_NE(0, instr->OutputCount());
+  Register result = i.OutputRegister(instr->OutputCount() - 1);
+  Condition cc = kNoCondition;
+
+  // MIPS does not have condition code flags, so compare and branch are
+  // implemented differently than on other architectures. The compare
+  // operations emit MIPS pseudo-instructions, which are checked and handled
+  // here.
+
+  // For materializations, we use the delay slot to set the result to true,
+  // and in the false case, where we fall through the branch, we reset the
+  // result to false.
+
+  // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
+  //    not separated by other instructions.
+  if (instr->arch_opcode() == kMipsTst) {
+    switch (condition) {
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kEqual:
+        cc = eq;
+        break;
+      default:
+        UNSUPPORTED_COND(kMipsTst, condition);
+        break;
+    }
+    __ And(at, i.InputRegister(0), i.InputOperand(1));
+    __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
+    __ li(result, Operand(1));  // In delay slot.
+
+  } else if (instr->arch_opcode() == kMipsAddOvf ||
+             instr->arch_opcode() == kMipsSubOvf) {
+    // kMipsAddOvf and SubOvf emit a negative result to 'kCompareReg' on
+    // overflow.
+    switch (condition) {
+      case kOverflow:
+        cc = lt;
+        break;
+      case kNotOverflow:
+        cc = ge;
+        break;
+      default:
+        UNSUPPORTED_COND(kMipsAddOvf, condition);
+        break;
+    }
+    __ Branch(USE_DELAY_SLOT, &done, cc, kCompareReg, Operand(zero_reg));
+    __ li(result, Operand(1));  // In delay slot.
+
+  } else if (instr->arch_opcode() == kMipsCmp) {
+    Register left = i.InputRegister(0);
+    Operand right = i.InputOperand(1);
+    switch (condition) {
+      case kEqual:
+        cc = eq;
+        break;
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kSignedLessThan:
+        cc = lt;
+        break;
+      case kSignedGreaterThanOrEqual:
+        cc = ge;
+        break;
+      case kSignedLessThanOrEqual:
+        cc = le;
+        break;
+      case kSignedGreaterThan:
+        cc = gt;
+        break;
+      case kUnsignedLessThan:
+        cc = lo;
+        break;
+      case kUnsignedGreaterThanOrEqual:
+        cc = hs;
+        break;
+      case kUnsignedLessThanOrEqual:
+        cc = ls;
+        break;
+      case kUnsignedGreaterThan:
+        cc = hi;
+        break;
+      default:
+        UNSUPPORTED_COND(kMipsCmp, condition);
+        break;
+    }
+    __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
+    __ li(result, Operand(1));  // In delay slot.
+
+  } else if (instr->arch_opcode() == kMipsCmpD) {
+    FPURegister left = i.InputDoubleRegister(0);
+    FPURegister right = i.InputDoubleRegister(1);
+    // TODO(plind): Provide NaN-testing macro-asm function without need for
+    // BranchF.
+    FPURegister dummy1 = f0;
+    FPURegister dummy2 = f2;
+    switch (condition) {
+      case kUnorderedEqual:
+        // TODO(plind): improve the NaN testing throughout this function.
+        __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
+        cc = eq;
+        break;
+      case kUnorderedNotEqual:
+        __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
+        __ li(result, Operand(1));  // In delay slot - returns 1 on NaN.
+        cc = ne;
+        break;
+      case kUnorderedLessThan:
+        __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
+        cc = lt;
+        break;
+      case kUnorderedGreaterThanOrEqual:
+        __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
+        __ li(result, Operand(1));  // In delay slot - returns 1 on NaN.
+        cc = ge;
+        break;
+      case kUnorderedLessThanOrEqual:
+        __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
+        cc = le;
+        break;
+      case kUnorderedGreaterThan:
+        __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
+        __ li(result, Operand(1));  // In delay slot - returns 1 on NaN.
+        cc = gt;
+        break;
+      default:
+        UNSUPPORTED_COND(kMipsCmpD, condition);
+        break;
+    }
+    __ BranchF(USE_DELAY_SLOT, &done, NULL, cc, left, right);
+    __ li(result, Operand(1));  // In delay slot - branch taken returns 1.
+                                // Fall-thru (branch not taken) returns 0.
+
+  } else {
+    PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
+           instr->arch_opcode());
+    TRACE_UNIMPL();
+    UNIMPLEMENTED();
+  }
+  // Fallthru case is the false materialization.
+  __ bind(&false_value);
+  __ li(result, Operand(0));
+  __ bind(&done);
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+      isolate(), deoptimization_id, Deoptimizer::LAZY);
+  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
+void CodeGenerator::AssemblePrologue() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    __ Push(ra, fp);
+    __ mov(fp, sp);
+    const RegList saves = descriptor->CalleeSavedRegisters();
+    if (saves != 0) {  // Save callee-saved registers.
+      // TODO(plind): make callee save size const, possibly DCHECK it.
+      int register_save_area_size = 0;
+      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+        if (!((1 << i) & saves)) continue;
+        register_save_area_size += kPointerSize;
+      }
+      frame()->SetRegisterSaveAreaSize(register_save_area_size);
+      __ MultiPush(saves);
+    }
+  } else if (descriptor->IsJSFunctionCall()) {
+    CompilationInfo* info = this->info();
+    __ Prologue(info->IsCodePreAgingActive());
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+  } else {
+    __ StubPrologue();
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+  }
+  int stack_slots = frame()->GetSpillSlotCount();
+  if (stack_slots > 0) {
+    __ Subu(sp, sp, Operand(stack_slots * kPointerSize));
+  }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    if (frame()->GetRegisterSaveAreaSize() > 0) {
+      // Remove this frame's spill slots first.
+      int stack_slots = frame()->GetSpillSlotCount();
+      if (stack_slots > 0) {
+        __ Addu(sp, sp, Operand(stack_slots * kPointerSize));
+      }
+      // Restore registers.
+      const RegList saves = descriptor->CalleeSavedRegisters();
+      if (saves != 0) {
+        __ MultiPop(saves);
+      }
+    }
+    __ mov(sp, fp);
+    __ Pop(ra, fp);
+    __ Ret();
+  } else {
+    __ mov(sp, fp);
+    __ Pop(ra, fp);
+    int pop_count = descriptor->IsJSFunctionCall()
+                        ? static_cast<int>(descriptor->JSParameterCount())
+                        : 0;
+    __ DropAndRet(pop_count);
+  }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  MipsOperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      __ mov(g.ToRegister(destination), src);
+    } else {
+      __ sw(src, g.ToMemOperand(destination));
+    }
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    MemOperand src = g.ToMemOperand(source);
+    if (destination->IsRegister()) {
+      __ lw(g.ToRegister(destination), src);
+    } else {
+      Register temp = kScratchReg;
+      __ lw(temp, src);
+      __ sw(temp, g.ToMemOperand(destination));
+    }
+  } else if (source->IsConstant()) {
+    Constant src = g.ToConstant(source);
+    if (destination->IsRegister() || destination->IsStackSlot()) {
+      Register dst =
+          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
+      switch (src.type()) {
+        case Constant::kInt32:
+          __ li(dst, Operand(src.ToInt32()));
+          break;
+        case Constant::kFloat32:
+          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+          break;
+        case Constant::kInt64:
+          UNREACHABLE();
+          break;
+        case Constant::kFloat64:
+          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+          break;
+        case Constant::kExternalReference:
+          __ li(dst, Operand(src.ToExternalReference()));
+          break;
+        case Constant::kHeapObject:
+          __ li(dst, src.ToHeapObject());
+          break;
+        case Constant::kRpoNumber:
+          UNREACHABLE();  // TODO(titzer): loading RPO numbers on mips.
+          break;
+      }
+      if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination));
+    } else if (src.type() == Constant::kFloat32) {
+      if (destination->IsDoubleStackSlot()) {
+        MemOperand dst = g.ToMemOperand(destination);
+        __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
+        __ sw(at, dst);
+      } else {
+        FloatRegister dst = g.ToSingleRegister(destination);
+        __ Move(dst, src.ToFloat32());
+      }
+    } else {
+      DCHECK_EQ(Constant::kFloat64, src.type());
+      DoubleRegister dst = destination->IsDoubleRegister()
+                               ? g.ToDoubleRegister(destination)
+                               : kScratchDoubleReg;
+      __ Move(dst, src.ToFloat64());
+      if (destination->IsDoubleStackSlot()) {
+        __ sdc1(dst, g.ToMemOperand(destination));
+      }
+    }
+  } else if (source->IsDoubleRegister()) {
+    FPURegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      FPURegister dst = g.ToDoubleRegister(destination);
+      __ Move(dst, src);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      __ sdc1(src, g.ToMemOperand(destination));
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+    MemOperand src = g.ToMemOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ ldc1(g.ToDoubleRegister(destination), src);
+    } else {
+      FPURegister temp = kScratchDoubleReg;
+      __ ldc1(temp, src);
+      __ sdc1(temp, g.ToMemOperand(destination));
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  MipsOperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    // Register-register.
+    Register temp = kScratchReg;
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      Register dst = g.ToRegister(destination);
+      __ Move(temp, src);
+      __ Move(src, dst);
+      __ Move(dst, temp);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      MemOperand dst = g.ToMemOperand(destination);
+      __ mov(temp, src);
+      __ lw(src, dst);
+      __ sw(temp, dst);
+    }
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsStackSlot());
+    Register temp_0 = kScratchReg;
+    Register temp_1 = kCompareReg;
+    MemOperand src = g.ToMemOperand(source);
+    MemOperand dst = g.ToMemOperand(destination);
+    __ lw(temp_0, src);
+    __ lw(temp_1, dst);
+    __ sw(temp_0, dst);
+    __ sw(temp_1, src);
+  } else if (source->IsDoubleRegister()) {
+    FPURegister temp = kScratchDoubleReg;
+    FPURegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      FPURegister dst = g.ToDoubleRegister(destination);
+      __ Move(temp, src);
+      __ Move(src, dst);
+      __ Move(dst, temp);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      MemOperand dst = g.ToMemOperand(destination);
+      __ Move(temp, src);
+      __ ldc1(src, dst);
+      __ sdc1(temp, dst);
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleStackSlot());
+    Register temp_0 = kScratchReg;
+    FPURegister temp_1 = kScratchDoubleReg;
+    MemOperand src0 = g.ToMemOperand(source);
+    MemOperand src1(src0.rm(), src0.offset() + kPointerSize);
+    MemOperand dst0 = g.ToMemOperand(destination);
+    MemOperand dst1(dst0.rm(), dst0.offset() + kPointerSize);
+    __ ldc1(temp_1, dst0);  // Save destination in temp_1.
+    __ lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
+    __ sw(temp_0, dst0);
+    __ lw(temp_0, src1);
+    __ sw(temp_0, dst1);
+    __ sdc1(temp_1, src0);
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() {
+  // Unused on 32-bit ARM. Still exists on 64-bit ARM.
+  // TODO(plind): Unclear when this is called now. Understand, fix if needed.
+  __ nop();  // Maybe PROPERTY_ACCESS_INLINED?
+}
+
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+  int space_needed = Deoptimizer::patch_size();
+  if (!info()->IsStub()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      // Block trampoline pool emission for the duration of padding.
+      v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+          masm());
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+      while (padding_size > 0) {
+        __ nop();
+        padding_size -= v8::internal::Assembler::kInstrSize;
+      }
+    }
+  }
+  MarkLazyDeoptSite();
+}
+
+#undef __
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
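
The kCheckedLoad*/kCheckedStore* cases above are the MIPS lowering of the
CheckedLoad/CheckedStore operators added to machine-operator.h earlier in this
patch; the representation carried by the operator is just a MachineType. A
sketch of creating one (assuming `machine` is a MachineOperatorBuilder as in
the earlier sketch):

  // Bounds-checked 32-bit load; out-of-bounds accesses produce zero for
  // integer loads and NaN for float loads via the OutOfLineLoad* stubs.
  const Operator* checked_load = machine.CheckedLoad(kMachInt32);
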
diff --git a/src/compiler/mips/instruction-codes-mips.h b/src/compiler/mips/instruction-codes-mips.h
new file mode 100644
index 0000000..3aa508f
--- /dev/null
+++ b/src/compiler/mips/instruction-codes-mips.h
@@ -0,0 +1,93 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MIPS_INSTRUCTION_CODES_MIPS_H_
+#define V8_COMPILER_MIPS_INSTRUCTION_CODES_MIPS_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// MIPS-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+  V(MipsAdd)                       \
+  V(MipsAddOvf)                    \
+  V(MipsSub)                       \
+  V(MipsSubOvf)                    \
+  V(MipsMul)                       \
+  V(MipsMulHigh)                   \
+  V(MipsMulHighU)                  \
+  V(MipsDiv)                       \
+  V(MipsDivU)                      \
+  V(MipsMod)                       \
+  V(MipsModU)                      \
+  V(MipsAnd)                       \
+  V(MipsOr)                        \
+  V(MipsXor)                       \
+  V(MipsShl)                       \
+  V(MipsShr)                       \
+  V(MipsSar)                       \
+  V(MipsRor)                       \
+  V(MipsMov)                       \
+  V(MipsTst)                       \
+  V(MipsCmp)                       \
+  V(MipsCmpD)                      \
+  V(MipsAddD)                      \
+  V(MipsSubD)                      \
+  V(MipsMulD)                      \
+  V(MipsDivD)                      \
+  V(MipsModD)                      \
+  V(MipsSqrtD)                     \
+  V(MipsFloat64Floor)              \
+  V(MipsFloat64Ceil)               \
+  V(MipsFloat64RoundTruncate)      \
+  V(MipsCvtSD)                     \
+  V(MipsCvtDS)                     \
+  V(MipsTruncWD)                   \
+  V(MipsTruncUwD)                  \
+  V(MipsCvtDW)                     \
+  V(MipsCvtDUw)                    \
+  V(MipsLb)                        \
+  V(MipsLbu)                       \
+  V(MipsSb)                        \
+  V(MipsLh)                        \
+  V(MipsLhu)                       \
+  V(MipsSh)                        \
+  V(MipsLw)                        \
+  V(MipsSw)                        \
+  V(MipsLwc1)                      \
+  V(MipsSwc1)                      \
+  V(MipsLdc1)                      \
+  V(MipsSdc1)                      \
+  V(MipsPush)                      \
+  V(MipsStoreToStackSlot)          \
+  V(MipsStackClaim)                \
+  V(MipsStoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+// TODO(plind): Add the new r6 address modes.
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+  V(MRI) /* [%r0 + K] */               \
+  V(MRR) /* [%r0 + %r1] */
+
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_MIPS_INSTRUCTION_CODES_MIPS_H_
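
The addressing mode is OR-ed into the InstructionCode when the selector emits
an instruction, and decoded again in MipsOperandConverter::MemoryOperand in
the code generator above. For example, the kMode_MRI form used throughout the
selector below:

  Emit(kMipsLw | AddressingModeField::encode(kMode_MRI),
       g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
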
diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc
new file mode 100644
index 0000000..5e8e3b1
--- /dev/null
+++ b/src/compiler/mips/instruction-selector-mips.cc
@@ -0,0 +1,831 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bits.h"
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define TRACE_UNIMPL() \
+  PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+
+// Adds MIPS-specific methods for generating InstructionOperands.
+class MipsOperandGenerator FINAL : public OperandGenerator {
+ public:
+  explicit MipsOperandGenerator(InstructionSelector* selector)
+      : OperandGenerator(selector) {}
+
+  InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
+    if (CanBeImmediate(node, opcode)) {
+      return UseImmediate(node);
+    }
+    return UseRegister(node);
+  }
+
+  bool CanBeImmediate(Node* node, InstructionCode opcode) {
+    Int32Matcher m(node);
+    if (!m.HasValue()) return false;
+    int32_t value = m.Value();
+    switch (ArchOpcodeField::decode(opcode)) {
+      case kMipsShl:
+      case kMipsSar:
+      case kMipsShr:
+        return is_uint5(value);
+      case kMipsXor:
+        return is_uint16(value);
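+      // These FP accesses may be split into two word accesses on MIPS32, so
+      // the offset of the second word (value + kIntSize) must also fit.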
+      case kMipsLdc1:
+      case kMipsSdc1:
+      case kCheckedLoadFloat32:
+      case kCheckedLoadFloat64:
+      case kCheckedStoreFloat32:
+      case kCheckedStoreFloat64:
+        return is_int16(value + kIntSize);
+      default:
+        return is_int16(value);
+    }
+  }
+
+ private:
+  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
+    TRACE_UNIMPL();
+    return false;
+  }
+};
+
+
+static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
+                     Node* node) {
+  MipsOperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseRegister(node->InputAt(1)));
+}
+
+
+static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
+                    Node* node) {
+  MipsOperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)));
+}
+
+
+static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
+                     Node* node) {
+  MipsOperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseOperand(node->InputAt(1), opcode));
+}
+
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode, FlagsContinuation* cont) {
+  MipsOperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  InstructionOperand* inputs[4];
+  size_t input_count = 0;
+  InstructionOperand* outputs[2];
+  size_t output_count = 0;
+
+  inputs[input_count++] = g.UseRegister(m.left().node());
+  inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  }
+
+  outputs[output_count++] = g.DefineAsRegister(node);
+  if (cont->IsSet()) {
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0, input_count);
+  DCHECK_NE(0, output_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+
+  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+                                      outputs, input_count, inputs);
+  if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode) {
+  FlagsContinuation cont;
+  VisitBinop(selector, node, opcode, &cont);
+}
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
+  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+  MipsOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kMipsLwc1;
+      break;
+    case kRepFloat64:
+      opcode = kMipsLdc1;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = typ == kTypeUint32 ? kMipsLbu : kMipsLb;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeUint32 ? kMipsLhu : kMipsLh;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord32:
+      opcode = kMipsLw;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+  } else {
+    InstructionOperand* addr_reg = g.TempRegister();
+    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+         g.UseRegister(index), g.UseRegister(base));
+    // Emit desired load opcode, using temp addr_reg.
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+  }
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+  MipsOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+  MachineType rep = RepresentationOf(store_rep.machine_type());
+  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+    DCHECK(rep == kRepTagged);
+    // TODO(dcarney): refactor RecordWrite function to take temp registers
+    //                and pass them here instead of using fixed regs
+    // TODO(dcarney): handle immediate indices.
+    InstructionOperand* temps[] = {g.TempRegister(t1), g.TempRegister(t2)};
+    Emit(kMipsStoreWriteBarrier, NULL, g.UseFixed(base, t0),
+         g.UseFixed(index, t1), g.UseFixed(value, t2), arraysize(temps), temps);
+    return;
+  }
+  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kMipsSwc1;
+      break;
+    case kRepFloat64:
+      opcode = kMipsSdc1;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = kMipsSb;
+      break;
+    case kRepWord16:
+      opcode = kMipsSh;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord32:
+      opcode = kMipsSw;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+  } else {
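+    // As in VisitLoad: fold base + index into a temporary register, since
+    // stores also use the register + immediate (kMode_MRI) mode here.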
+    InstructionOperand* addr_reg = g.TempRegister();
+    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+         g.UseRegister(index), g.UseRegister(base));
+    // Emit desired store opcode, using temp addr_reg.
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL, addr_reg,
+         g.TempImmediate(0), g.UseRegister(value));
+  }
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+  VisitBinop(this, node, kMipsAnd);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+  VisitBinop(this, node, kMipsOr);
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+  VisitBinop(this, node, kMipsXor);
+}
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+  VisitRRO(this, kMipsShl, node);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+  VisitRRO(this, kMipsShr, node);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+  VisitRRO(this, kMipsSar, node);
+}
+
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+  VisitRRO(this, kMipsRor, node);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+  MipsOperandGenerator g(this);
+
+  // TODO(plind): Consider multiply & add optimization from arm port.
+  VisitBinop(this, node, kMipsAdd);
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+  VisitBinop(this, node, kMipsSub);
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+  MipsOperandGenerator g(this);
+  Int32BinopMatcher m(node);
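+  // Strength-reduce multiplication by suitable constants, e.g.:
+  //   x * 8 => x << 3
+  //   x * 9 => (x << 3) + x
+  //   x * 7 => (x << 3) - x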
+  if (m.right().HasValue() && m.right().Value() > 0) {
+    int32_t value = m.right().Value();
+    if (base::bits::IsPowerOfTwo32(value)) {
+      Emit(kMipsShl | AddressingModeField::encode(kMode_None),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value)));
+      return;
+    }
+    if (base::bits::IsPowerOfTwo32(value - 1)) {
+      InstructionOperand* temp = g.TempRegister();
+      Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
+           g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value - 1)));
+      Emit(kMipsAdd | AddressingModeField::encode(kMode_None),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
+      return;
+    }
+    if (base::bits::IsPowerOfTwo32(value + 1)) {
+      InstructionOperand* temp = g.TempRegister();
+      Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
+           g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value + 1)));
+      Emit(kMipsSub | AddressingModeField::encode(kMode_None),
+           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
+      return;
+    }
+  }
+  Emit(kMipsMul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsMulHigh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+       g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsMulHighU, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+       g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+  MipsOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  Emit(kMipsDiv, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitUint32Div(Node* node) {
+  MipsOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  Emit(kMipsDivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+  MipsOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  Emit(kMipsMod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitUint32Mod(Node* node) {
+  MipsOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  Emit(kMipsModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsCvtDS, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsCvtDW, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsCvtDUw, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsTruncWD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsTruncUwD, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsCvtSD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+  VisitRRR(this, kMipsAddD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+  VisitRRR(this, kMipsSubD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+  VisitRRR(this, kMipsMulD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+  VisitRRR(this, kMipsDivD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsModD, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12),
+       g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsSqrtD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Floor(Node* node) {
+  VisitRR(this, kMipsFloat64Floor, node);
+}
+
+
+void InstructionSelector::VisitFloat64Ceil(Node* node) {
+  VisitRR(this, kMipsFloat64Ceil, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+  VisitRR(this, kMipsFloat64RoundTruncate, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+  UNREACHABLE();
+}
+
+
+void InstructionSelector::VisitCall(Node* node) {
+  MipsOperandGenerator g(this);
+  const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
+
+  FrameStateDescriptor* frame_state_descriptor = NULL;
+  if (descriptor->NeedsFrameState()) {
+    frame_state_descriptor =
+        GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
+  }
+
+  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+  // Compute InstructionOperands for inputs and outputs.
+  InitializeCallBuffer(node, &buffer, true, false);
+  // Possibly align stack here for functions.
+  int push_count = static_cast<int>(buffer.pushed_nodes.size());
+  if (push_count > 0) {
+    Emit(kMipsStackClaim | MiscField::encode(push_count), NULL);
+  }
+  int slot = push_count - 1;
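+  // Store the pushed arguments into the claimed stack slots, starting with
+  // the last argument in the highest slot.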
+  for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
+       input != buffer.pushed_nodes.rend(); input++) {
+    Emit(kMipsStoreToStackSlot | MiscField::encode(slot), NULL,
+         g.UseRegister(*input));
+    slot--;
+  }
+
+  // Select the appropriate opcode based on the call type.
+  InstructionCode opcode;
+  switch (descriptor->kind()) {
+    case CallDescriptor::kCallCodeObject: {
+      opcode = kArchCallCodeObject;
+      break;
+    }
+    case CallDescriptor::kCallJSFunction:
+      opcode = kArchCallJSFunction;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  opcode |= MiscField::encode(descriptor->flags());
+
+  // Emit the call instruction.
+  InstructionOperand** first_output =
+      buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
+  Instruction* call_instr =
+      Emit(opcode, buffer.outputs.size(), first_output,
+           buffer.instruction_args.size(), &buffer.instruction_args.front());
+  call_instr->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  MachineType typ = TypeOf(OpParameter<MachineType>(node));
+  MipsOperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedLoadWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedLoadFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedLoadFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
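+  // The offset is used as an immediate when possible; the length may only
+  // be an immediate when the offset is in a register, presumably so the
+  // emitted bounds check always has at least one register operand.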
+  InstructionOperand* offset_operand = g.CanBeImmediate(offset, opcode)
+                                           ? g.UseImmediate(offset)
+                                           : g.UseRegister(offset);
+
+  InstructionOperand* length_operand =
+      (!g.CanBeImmediate(offset, opcode) && g.CanBeImmediate(length, opcode))
+          ? g.UseImmediate(length)
+          : g.UseRegister(length);
+
+  Emit(opcode | AddressingModeField::encode(kMode_MRI),
+       g.DefineAsRegister(node), offset_operand, length_operand,
+       g.UseRegister(buffer));
+}
+
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  MipsOperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  Node* const value = node->InputAt(3);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = kCheckedStoreWord8;
+      break;
+    case kRepWord16:
+      opcode = kCheckedStoreWord16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedStoreWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedStoreFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedStoreFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
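+  // Operand selection mirrors VisitCheckedLoad above: the length may only
+  // be an immediate when the offset is in a register.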
+  InstructionOperand* offset_operand = g.CanBeImmediate(offset, opcode)
+                                           ? g.UseImmediate(offset)
+                                           : g.UseRegister(offset);
+
+  InstructionOperand* length_operand =
+      (!g.CanBeImmediate(offset, opcode) && g.CanBeImmediate(length, opcode))
+          ? g.UseImmediate(length)
+          : g.UseRegister(length);
+
+  Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr, offset_operand,
+       length_operand, g.UseRegister(value), g.UseRegister(buffer));
+}
+
+
+namespace {
+
+// Shared routine for multiple compare operations.
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+                         InstructionOperand* left, InstructionOperand* right,
+                         FlagsContinuation* cont) {
+  MipsOperandGenerator g(selector);
+  opcode = cont->Encode(opcode);
+  if (cont->IsBranch()) {
+    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
+                   g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    DCHECK(cont->IsSet());
+    // TODO(plind): Revisit and test this path.
+    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  }
+}
+
+
+// Shared routine for multiple float compare operations.
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+                         FlagsContinuation* cont) {
+  MipsOperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  VisitCompare(selector, kMipsCmpD, g.UseRegister(left), g.UseRegister(right),
+               cont);
+}
+
+
+// Shared routine for multiple word compare operations.
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+                      InstructionCode opcode, FlagsContinuation* cont,
+                      bool commutative) {
+  MipsOperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // Match immediates on left or right side of comparison.
+  if (g.CanBeImmediate(right, opcode)) {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+                 cont);
+  } else if (g.CanBeImmediate(left, opcode)) {
+    if (!commutative) cont->Commute();
+    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+                 cont);
+  } else {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+                 cont);
+  }
+}
+
+
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+                      FlagsContinuation* cont) {
+  VisitWordCompare(selector, node, kMipsCmp, cont, false);
+}
+
+}  // namespace
+
+
+// Shared routine for word comparisons against zero.
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+                          Node* value, FlagsContinuation* cont) {
+  while (selector->CanCover(user, value)) {
+    switch (value->opcode()) {
+      case IrOpcode::kWord32Equal: {
+        // Combine with comparisons against 0 by simply inverting the
+        // continuation.
+        Int32BinopMatcher m(value);
+        if (m.right().Is(0)) {
+          user = value;
+          value = m.left().node();
+          cont->Negate();
+          continue;
+        }
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitWordCompare(selector, value, cont);
+      }
+      case IrOpcode::kInt32LessThan:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWordCompare(selector, value, cont);
+      case IrOpcode::kInt32LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWordCompare(selector, value, cont);
+      case IrOpcode::kUint32LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWordCompare(selector, value, cont);
+      case IrOpcode::kUint32LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWordCompare(selector, value, cont);
+      case IrOpcode::kFloat64Equal:
+        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kFloat64LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnorderedLessThan);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kFloat64LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kProjection:
+        // Check if this is the overflow output projection of an
+        // <Operation>WithOverflow node.
+        if (OpParameter<size_t>(value) == 1u) {
+          // We cannot combine the <Operation>WithOverflow with this branch
+          // unless the 0th projection (the use of the actual value of the
+          // <Operation>) is either NULL, which means there's no use of the
+          // actual value, or was already defined, which means it is
+          // scheduled *AFTER* this branch.
+          Node* const node = value->InputAt(0);
+          Node* const result = node->FindProjection(0);
+          if (!result || selector->IsDefined(result)) {
+            switch (node->opcode()) {
+              case IrOpcode::kInt32AddWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kMipsAddOvf, cont);
+              case IrOpcode::kInt32SubWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kMipsSubOvf, cont);
+              default:
+                break;
+            }
+          }
+        }
+        break;
+      case IrOpcode::kWord32And:
+        return VisitWordCompare(selector, value, kMipsTst, cont, true);
+      default:
+        break;
+    }
+    break;
+  }
+
+  // Continuation could not be combined with a compare; emit a compare
+  // against 0.
+  MipsOperandGenerator g(selector);
+  InstructionCode const opcode = cont->Encode(kMipsCmp);
+  InstructionOperand* const value_operand = g.UseRegister(value);
+  if (cont->IsBranch()) {
+    selector->Emit(opcode, nullptr, value_operand, g.TempImmediate(0),
+                   g.Label(cont->true_block()),
+                   g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
+                   g.TempImmediate(0));
+  }
+}
+
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+                                      BasicBlock* fbranch) {
+  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
+}
+
+
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+  FlagsContinuation cont(kEqual, node);
+  Int32BinopMatcher m(node);
+  if (m.right().Is(0)) {
+    return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
+  }
+  VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+  FlagsContinuation cont(kSignedLessThan, node);
+  VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThan, node);
+  VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+  if (Node* ovf = node->FindProjection(1)) {
+    FlagsContinuation cont(kOverflow, ovf);
+    return VisitBinop(this, node, kMipsAddOvf, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop(this, node, kMipsAddOvf, &cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+  if (Node* ovf = node->FindProjection(1)) {
+    FlagsContinuation cont(kOverflow, ovf);
+    return VisitBinop(this, node, kMipsSubOvf, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop(this, node, kMipsSubOvf, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+  FlagsContinuation cont(kUnorderedEqual, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+  FlagsContinuation cont(kUnorderedLessThan, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+    return MachineOperatorBuilder::kFloat64Floor |
+           MachineOperatorBuilder::kFloat64Ceil |
+           MachineOperatorBuilder::kFloat64RoundTruncate;
+  }
+  return MachineOperatorBuilder::kNoFlags;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/mips/linkage-mips.cc b/src/compiler/mips/linkage-mips.cc
new file mode 100644
index 0000000..2b314a2
--- /dev/null
+++ b/src/compiler/mips/linkage-mips.cc
@@ -0,0 +1,67 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct MipsLinkageHelperTraits {
+  static Register ReturnValueReg() { return v0; }
+  static Register ReturnValue2Reg() { return v1; }
+  static Register JSCallFunctionReg() { return a1; }
+  static Register ContextReg() { return cp; }
+  static Register RuntimeCallFunctionReg() { return a1; }
+  static Register RuntimeCallArgCountReg() { return a0; }
+  static RegList CCalleeSaveRegisters() {
+    return s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() |
+           s6.bit() | s7.bit();
+  }
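+  // In the MIPS O32 ABI, the first four integer arguments are passed in
+  // registers a0..a3.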
+  static Register CRegisterParameter(int i) {
+    static Register register_parameters[] = {a0, a1, a2, a3};
+    return register_parameters[i];
+  }
+  static int CRegisterParametersLength() { return 4; }
+};
+
+
+typedef LinkageHelper<MipsLinkageHelperTraits> LH;
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
+                                             CallDescriptor::Flags flags) {
+  return LH::GetJSCallDescriptor(zone, parameter_count, flags);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+    Runtime::FunctionId function, int parameter_count,
+    Operator::Properties properties, Zone* zone) {
+  return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
+                                      properties);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+    const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
+    CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
+  return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
+                                   flags, properties);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+                                                  MachineSignature* sig) {
+  return LH::GetSimplifiedCDescriptor(zone, sig);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/mips64/OWNERS b/src/compiler/mips64/OWNERS
new file mode 100644
index 0000000..5508ba6
--- /dev/null
+++ b/src/compiler/mips64/OWNERS
@@ -0,0 +1,5 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
diff --git a/src/compiler/mips64/code-generator-mips64.cc b/src/compiler/mips64/code-generator-mips64.cc
new file mode 100644
index 0000000..dee7705
--- /dev/null
+++ b/src/compiler/mips64/code-generator-mips64.cc
@@ -0,0 +1,1444 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/mips/macro-assembler-mips.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+// TODO(plind): Possibly avoid using these lithium names.
+#define kScratchReg kLithiumScratchReg
+#define kScratchReg2 kLithiumScratchReg2
+#define kScratchDoubleReg kLithiumScratchDouble
+
+
+// TODO(plind): consider renaming these macros.
+#define TRACE_MSG(msg)                                                      \
+  PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
+         __LINE__)
+
+#define TRACE_UNIMPL()                                                        \
+  PrintF("UNIMPLEMENTED code_generator_mips64: %s at line %d\n",              \
+         __FUNCTION__, __LINE__)
+
+
+// Adds Mips-specific methods to convert InstructionOperands.
+class MipsOperandConverter FINAL : public InstructionOperandConverter {
+ public:
+  MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
+      : InstructionOperandConverter(gen, instr) {}
+
+  FloatRegister OutputSingleRegister(int index = 0) {
+    return ToSingleRegister(instr_->OutputAt(index));
+  }
+
+  FloatRegister InputSingleRegister(int index) {
+    return ToSingleRegister(instr_->InputAt(index));
+  }
+
+  FloatRegister ToSingleRegister(InstructionOperand* op) {
+    // The single (float) and double register namespaces are the same on
+    // MIPS; both are typedefs of FPURegister.
+    return ToDoubleRegister(op);
+  }
+
+  Operand InputImmediate(int index) {
+    Constant constant = ToConstant(instr_->InputAt(index));
+    switch (constant.type()) {
+      case Constant::kInt32:
+        return Operand(constant.ToInt32());
+      case Constant::kInt64:
+        return Operand(constant.ToInt64());
+      case Constant::kFloat32:
+        return Operand(
+            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+      case Constant::kFloat64:
+        return Operand(
+            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+      case Constant::kExternalReference:
+      case Constant::kHeapObject:
+        // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
+        //    maybe not done on arm due to const pool ??
+        break;
+      case Constant::kRpoNumber:
+        UNREACHABLE();  // TODO(titzer): RPO immediates on mips?
+        break;
+    }
+    UNREACHABLE();
+    return Operand(zero_reg);
+  }
+
+  Operand InputOperand(int index) {
+    InstructionOperand* op = instr_->InputAt(index);
+    if (op->IsRegister()) {
+      return Operand(ToRegister(op));
+    }
+    return InputImmediate(index);
+  }
+
+  MemOperand MemoryOperand(int* first_index) {
+    const int index = *first_index;
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_None:
+        break;
+      case kMode_MRI:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+      case kMode_MRR:
+        // TODO(plind): r6 address mode, to be implemented ...
+        UNREACHABLE();
+    }
+    UNREACHABLE();
+    return MemOperand(no_reg);
+  }
+
+  MemOperand MemoryOperand(int index = 0) { return MemoryOperand(&index); }
+
+  MemOperand ToMemOperand(InstructionOperand* op) const {
+    DCHECK(op != NULL);
+    DCHECK(!op->IsRegister());
+    DCHECK(!op->IsDoubleRegister());
+    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+    // The linkage computes where all spill slots are located.
+    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+  }
+};
+
+
+static inline bool HasRegisterInput(Instruction* instr, int index) {
+  return instr->InputAt(index)->IsRegister();
+}
+
+
+namespace {
+
+class OutOfLineLoadSingle FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL {
+    __ Move(result_, std::numeric_limits<float>::quiet_NaN());
+  }
+
+ private:
+  FloatRegister const result_;
+};
+
+
+class OutOfLineLoadDouble FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL {
+    __ Move(result_, std::numeric_limits<double>::quiet_NaN());
+  }
+
+ private:
+  DoubleRegister const result_;
+};
+
+
+class OutOfLineLoadInteger FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL { __ mov(result_, zero_reg); }
+
+ private:
+  Register const result_;
+};
+
+
+class OutOfLineRound : public OutOfLineCode {
+ public:
+  OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL {
+    // Handle the case where the input rounds to zero: the sign has to be
+    // preserved. The high bits of the double input are already in
+    // kScratchReg.
+    __ dsrl(at, kScratchReg, 31);
+    __ dsll(at, at, 31);
+    __ mthc1(at, result_);
+  }
+
+ private:
+  DoubleRegister const result_;
+};
+
+
+class OutOfLineTruncate FINAL : public OutOfLineRound {
+ public:
+  OutOfLineTruncate(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineRound(gen, result) {}
+};
+
+
+class OutOfLineFloor FINAL : public OutOfLineRound {
+ public:
+  OutOfLineFloor(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineRound(gen, result) {}
+};
+
+
+class OutOfLineCeil FINAL : public OutOfLineRound {
+ public:
+  OutOfLineCeil(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineRound(gen, result) {}
+};
+
+
+}  // namespace
+
+
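+// The checked-load macros below emit an unsigned bounds check of the offset
+// against the length and branch to an out-of-line handler (which yields zero
+// or NaN) when the access is out of bounds; in-bounds accesses fall through
+// to the actual load.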
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr)                         \
+  do {                                                                        \
+    auto result = i.Output##width##Register();                                \
+    auto ool = new (zone()) OutOfLineLoad##width(this, result);               \
+    if (instr->InputAt(0)->IsRegister()) {                                    \
+      auto offset = i.InputRegister(0);                                       \
+      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
+      __ Daddu(at, i.InputRegister(2), offset);                               \
+      __ asm_instr(result, MemOperand(at, 0));                                \
+    } else {                                                                  \
+      auto offset = i.InputOperand(0).immediate();                            \
+      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
+      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
+    }                                                                         \
+    __ bind(ool->exit());                                                     \
+  } while (0)
+
+
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                              \
+  do {                                                                        \
+    auto result = i.OutputRegister();                                         \
+    auto ool = new (zone()) OutOfLineLoadInteger(this, result);               \
+    if (instr->InputAt(0)->IsRegister()) {                                    \
+      auto offset = i.InputRegister(0);                                       \
+      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
+      __ Daddu(at, i.InputRegister(2), offset);                               \
+      __ asm_instr(result, MemOperand(at, 0));                                \
+    } else {                                                                  \
+      auto offset = i.InputOperand(0).immediate();                            \
+      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
+      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
+    }                                                                         \
+    __ bind(ool->exit());                                                     \
+  } while (0)
+
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr)                 \
+  do {                                                                 \
+    Label done;                                                        \
+    if (instr->InputAt(0)->IsRegister()) {                             \
+      auto offset = i.InputRegister(0);                                \
+      auto value = i.Input##width##Register(2);                        \
+      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
+      __ Daddu(at, i.InputRegister(3), offset);                        \
+      __ asm_instr(value, MemOperand(at, 0));                          \
+    } else {                                                           \
+      auto offset = i.InputOperand(0).immediate();                     \
+      auto value = i.Input##width##Register(2);                        \
+      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
+      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
+    }                                                                  \
+    __ bind(&done);                                                    \
+  } while (0)
+
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                      \
+  do {                                                                 \
+    Label done;                                                        \
+    if (instr->InputAt(0)->IsRegister()) {                             \
+      auto offset = i.InputRegister(0);                                \
+      auto value = i.InputRegister(2);                                 \
+      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
+      __ Daddu(at, i.InputRegister(3), offset);                        \
+      __ asm_instr(value, MemOperand(at, 0));                          \
+    } else {                                                           \
+      auto offset = i.InputOperand(0).immediate();                     \
+      auto value = i.InputRegister(2);                                 \
+      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
+      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
+    }                                                                  \
+    __ bind(&done);                                                    \
+  } while (0)
+
+
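+// Rounds a double via the 64-bit integer rounding instructions. Inputs whose
+// exponent is large enough to already be integral (including NaN/Infinity)
+// are returned unchanged. A zero integer result dispatches to the
+// out-of-line handler, which restores the sign for inputs that round to a
+// signed zero.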
+#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(asm_instr, operation)                  \
+  do {                                                                         \
+    auto ool =                                                                 \
+        new (zone()) OutOfLine##operation(this, i.OutputDoubleRegister());     \
+    Label done;                                                                \
+    __ mfhc1(kScratchReg, i.InputDoubleRegister(0));                           \
+    __ Ext(at, kScratchReg, HeapNumber::kExponentShift,                        \
+           HeapNumber::kExponentBits);                                         \
+    __ Branch(USE_DELAY_SLOT, &done, hs, at,                                   \
+              Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
+    __ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));              \
+    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));          \
+    __ dmfc1(at, i.OutputDoubleRegister());                                    \
+    __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg));        \
+    __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());            \
+    __ bind(ool->exit());                                                      \
+    __ bind(&done);                                                            \
+  } while (0)
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+  MipsOperandConverter i(this, instr);
+  InstructionCode opcode = instr->opcode();
+
+  switch (ArchOpcodeField::decode(opcode)) {
+    case kArchCallCodeObject: {
+      EnsureSpaceForLazyDeopt();
+      if (instr->InputAt(0)->IsImmediate()) {
+        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
+                RelocInfo::CODE_TARGET);
+      } else {
+        __ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
+        __ Call(at);
+      }
+      AddSafepointAndDeopt(instr);
+      break;
+    }
+    case kArchCallJSFunction: {
+      EnsureSpaceForLazyDeopt();
+      Register func = i.InputRegister(0);
+      if (FLAG_debug_code) {
+        // Check the function's context matches the context argument.
+        __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+        __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
+      }
+
+      __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+      __ Call(at);
+      AddSafepointAndDeopt(instr);
+      break;
+    }
+    case kArchJmp:
+      AssembleArchJump(i.InputRpo(0));
+      break;
+    case kArchNop:
+      // don't emit code for nops.
+      break;
+    case kArchRet:
+      AssembleReturn();
+      break;
+    case kArchStackPointer:
+      __ mov(i.OutputRegister(), sp);
+      break;
+    case kArchTruncateDoubleToI:
+      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+      break;
+    case kMips64Add:
+      __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Dadd:
+      __ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Sub:
+      __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Dsub:
+      __ Dsubu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Mul:
+      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64MulHigh:
+      __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64MulHighU:
+      __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Div:
+      __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64DivU:
+      __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Mod:
+      __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64ModU:
+      __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Dmul:
+      __ Dmul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Ddiv:
+      __ Ddiv(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64DdivU:
+      __ Ddivu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Dmod:
+      __ Dmod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64DmodU:
+      __ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64And:
+      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Or:
+      __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Xor:
+      __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Shl:
+      if (instr->InputAt(1)->IsRegister()) {
+        __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        int32_t imm = i.InputOperand(1).immediate();
+        __ sll(i.OutputRegister(), i.InputRegister(0), imm);
+      }
+      break;
+    case kMips64Shr:
+      if (instr->InputAt(1)->IsRegister()) {
+        __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        int32_t imm = i.InputOperand(1).immediate();
+        __ srl(i.OutputRegister(), i.InputRegister(0), imm);
+      }
+      break;
+    case kMips64Sar:
+      if (instr->InputAt(1)->IsRegister()) {
+        __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        int32_t imm = i.InputOperand(1).immediate();
+        __ sra(i.OutputRegister(), i.InputRegister(0), imm);
+      }
+      break;
+    case kMips64Ext:
+      __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+             i.InputInt8(2));
+      break;
+    case kMips64Dext:
+      __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+              i.InputInt8(2));
+      break;
+    case kMips64Dshl:
+      if (instr->InputAt(1)->IsRegister()) {
+        __ dsllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        int32_t imm = i.InputOperand(1).immediate();
+        if (imm < 32) {
+          __ dsll(i.OutputRegister(), i.InputRegister(0), imm);
+        } else {
+          __ dsll32(i.OutputRegister(), i.InputRegister(0), imm - 32);
+        }
+      }
+      break;
+    case kMips64Dshr:
+      if (instr->InputAt(1)->IsRegister()) {
+        __ dsrlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        int32_t imm = i.InputOperand(1).immediate();
+        if (imm < 32) {
+          __ dsrl(i.OutputRegister(), i.InputRegister(0), imm);
+        } else {
+          __ dsrl32(i.OutputRegister(), i.InputRegister(0), imm - 32);
+        }
+      }
+      break;
+    case kMips64Dsar:
+      if (instr->InputAt(1)->IsRegister()) {
+        __ dsrav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        int32_t imm = i.InputOperand(1).immediate();
+        if (imm < 32) {
+          __ dsra(i.OutputRegister(), i.InputRegister(0), imm);
+        } else {
+          __ dsra32(i.OutputRegister(), i.InputRegister(0), imm - 32);
+        }
+      }
+      break;
+    case kMips64Ror:
+      __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Dror:
+      __ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Tst:
+    case kMips64Tst32:
+      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+      break;
+    case kMips64Cmp:
+    case kMips64Cmp32:
+      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+      break;
+    case kMips64Mov:
+      // TODO(plind): Should we combine mov/li like this, or use separate instr?
+      //    - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
+      if (HasRegisterInput(instr, 0)) {
+        __ mov(i.OutputRegister(), i.InputRegister(0));
+      } else {
+        __ li(i.OutputRegister(), i.InputOperand(0));
+      }
+      break;
+
+    case kMips64CmpD:
+      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
+      break;
+    case kMips64AddD:
+      // TODO(plind): add special case: combine mult & add.
+      __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+               i.InputDoubleRegister(1));
+      break;
+    case kMips64SubD:
+      __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+               i.InputDoubleRegister(1));
+      break;
+    case kMips64MulD:
+      // TODO(plind): add special case: right op is -1.0, see arm port.
+      __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+               i.InputDoubleRegister(1));
+      break;
+    case kMips64DivD:
+      __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+               i.InputDoubleRegister(1));
+      break;
+    case kMips64ModD: {
+      // TODO(bmeurer): We should really get rid of this special instruction,
+      // and generate a CallAddress instruction instead.
+      FrameScope scope(masm(), StackFrame::MANUAL);
+      __ PrepareCallCFunction(0, 2, kScratchReg);
+      __ MovToFloatParameters(i.InputDoubleRegister(0),
+                              i.InputDoubleRegister(1));
+      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+                       0, 2);
+      // Move the result in the double result register.
+      __ MovFromFloatResult(i.OutputDoubleRegister());
+      break;
+    }
+    case kMips64Float64Floor: {
+      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
+      break;
+    }
+    case kMips64Float64Ceil: {
+      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
+      break;
+    }
+    case kMips64Float64RoundTruncate: {
+      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate);
+      break;
+    }
+    case kMips64SqrtD: {
+      __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    }
+    case kMips64CvtSD: {
+      __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
+      break;
+    }
+    case kMips64CvtDS: {
+      __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
+      break;
+    }
+    case kMips64CvtDW: {
+      FPURegister scratch = kScratchDoubleReg;
+      __ mtc1(i.InputRegister(0), scratch);
+      __ cvt_d_w(i.OutputDoubleRegister(), scratch);
+      break;
+    }
+    case kMips64CvtDUw: {
+      FPURegister scratch = kScratchDoubleReg;
+      __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
+      break;
+    }
+    case kMips64TruncWD: {
+      FPURegister scratch = kScratchDoubleReg;
+      // Other arches use round to zero here, so we follow.
+      __ trunc_w_d(scratch, i.InputDoubleRegister(0));
+      __ mfc1(i.OutputRegister(), scratch);
+      break;
+    }
+    case kMips64TruncUwD: {
+      FPURegister scratch = kScratchDoubleReg;
+      // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
+      __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
+      break;
+    }
+    // ... more basic instructions ...
+
+    case kMips64Lbu:
+      __ lbu(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kMips64Lb:
+      __ lb(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kMips64Sb:
+      __ sb(i.InputRegister(2), i.MemoryOperand());
+      break;
+    case kMips64Lhu:
+      __ lhu(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kMips64Lh:
+      __ lh(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kMips64Sh:
+      __ sh(i.InputRegister(2), i.MemoryOperand());
+      break;
+    case kMips64Lw:
+      __ lw(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kMips64Ld:
+      __ ld(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kMips64Sw:
+      __ sw(i.InputRegister(2), i.MemoryOperand());
+      break;
+    case kMips64Sd:
+      __ sd(i.InputRegister(2), i.MemoryOperand());
+      break;
+    case kMips64Lwc1: {
+      __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
+      break;
+    }
+    case kMips64Swc1: {
+      int index = 0;
+      MemOperand operand = i.MemoryOperand(&index);
+      __ swc1(i.InputSingleRegister(index), operand);
+      break;
+    }
+    case kMips64Ldc1:
+      __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
+      break;
+    case kMips64Sdc1:
+      __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
+      break;
+    case kMips64Push:
+      __ Push(i.InputRegister(0));
+      break;
+    case kMips64StackClaim: {
+      int words = MiscField::decode(instr->opcode());
+      __ Dsubu(sp, sp, Operand(words << kPointerSizeLog2));
+      break;
+    }
+    case kMips64StoreToStackSlot: {
+      int slot = MiscField::decode(instr->opcode());
+      __ sd(i.InputRegister(0), MemOperand(sp, slot << kPointerSizeLog2));
+      break;
+    }
+    case kMips64StoreWriteBarrier: {
+      Register object = i.InputRegister(0);
+      Register index = i.InputRegister(1);
+      Register value = i.InputRegister(2);
+      __ daddu(index, object, index);
+      __ sd(value, MemOperand(index));
+      SaveFPRegsMode mode =
+          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+      RAStatus ra_status = kRAHasNotBeenSaved;
+      __ RecordWrite(object, index, value, ra_status, mode);
+      break;
+    }
+    case kCheckedLoadInt8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
+      break;
+    case kCheckedLoadUint8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lbu);
+      break;
+    case kCheckedLoadInt16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lh);
+      break;
+    case kCheckedLoadUint16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lhu);
+      break;
+    case kCheckedLoadWord32:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lw);
+      break;
+    case kCheckedLoadFloat32:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
+      break;
+    case kCheckedLoadFloat64:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(Double, ldc1);
+      break;
+    case kCheckedStoreWord8:
+      ASSEMBLE_CHECKED_STORE_INTEGER(sb);
+      break;
+    case kCheckedStoreWord16:
+      ASSEMBLE_CHECKED_STORE_INTEGER(sh);
+      break;
+    case kCheckedStoreWord32:
+      ASSEMBLE_CHECKED_STORE_INTEGER(sw);
+      break;
+    case kCheckedStoreFloat32:
+      ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
+      break;
+    case kCheckedStoreFloat64:
+      ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
+      break;
+  }
+}
+
+
+#define UNSUPPORTED_COND(opcode, condition)                                  \
+  OFStream out(stdout);                                                      \
+  out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
+  UNIMPLEMENTED();
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+  MipsOperandConverter i(this, instr);
+  Label* tlabel = branch->true_label;
+  Label* flabel = branch->false_label;
+  Condition cc = kNoCondition;
+
+  // MIPS does not have condition code flags, so compare and branch are
+  // implemented differently than on other architectures. The compare
+  // operations emit MIPS pseudo-instructions, which are handled here by
+  // branch instructions that do the actual comparison. It is essential that
+  // the input registers of the compare pseudo-op are not modified before
+  // this branch op, as they are tested here.
+  // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
+  //    not separated by other instructions.
+
+  if (instr->arch_opcode() == kMips64Tst) {
+    switch (branch->condition) {
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kEqual:
+        cc = eq;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Tst, branch->condition);
+        break;
+    }
+    __ And(at, i.InputRegister(0), i.InputOperand(1));
+    __ Branch(tlabel, cc, at, Operand(zero_reg));
+  } else if (instr->arch_opcode() == kMips64Tst32) {
+    switch (branch->condition) {
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kEqual:
+        cc = eq;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Tst32, branch->condition);
+        break;
+    }
+    // Zero-extend the registers: on MIPS64 only 64-bit operand branch and
+    // compare ops are available, which is a disadvantage when performing
+    // 32-bit operations. It may be worth forcing the front end to prefer the
+    // Word64 representation globally on MIPS64, even for Word32 values.
+    __ And(at, i.InputRegister(0), i.InputOperand(1));
+    __ Dext(at, at, 0, 32);
+    __ Branch(tlabel, cc, at, Operand(zero_reg));
+  } else if (instr->arch_opcode() == kMips64Dadd ||
+             instr->arch_opcode() == kMips64Dsub) {
+    switch (branch->condition) {
+      case kOverflow:
+        cc = ne;
+        break;
+      case kNotOverflow:
+        cc = eq;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Dadd, branch->condition);
+        break;
+    }
+
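+    // 32-bit overflow check: after a 64-bit add/sub of sign-extended 32-bit
+    // operands, overflow occurred iff the upper 32 bits of the result differ
+    // from the sign extension of its lower 32 bits.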
+    __ dsra32(kScratchReg, i.OutputRegister(), 0);
+    __ sra(at, i.OutputRegister(), 31);
+    __ Branch(tlabel, cc, at, Operand(kScratchReg));
+  } else if (instr->arch_opcode() == kMips64Cmp) {
+    switch (branch->condition) {
+      case kEqual:
+        cc = eq;
+        break;
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kSignedLessThan:
+        cc = lt;
+        break;
+      case kSignedGreaterThanOrEqual:
+        cc = ge;
+        break;
+      case kSignedLessThanOrEqual:
+        cc = le;
+        break;
+      case kSignedGreaterThan:
+        cc = gt;
+        break;
+      case kUnsignedLessThan:
+        cc = lo;
+        break;
+      case kUnsignedGreaterThanOrEqual:
+        cc = hs;
+        break;
+      case kUnsignedLessThanOrEqual:
+        cc = ls;
+        break;
+      case kUnsignedGreaterThan:
+        cc = hi;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Cmp, branch->condition);
+        break;
+    }
+    __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
+
+    if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.
+
+  } else if (instr->arch_opcode() == kMips64Cmp32) {
+    switch (branch->condition) {
+      case kEqual:
+        cc = eq;
+        break;
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kSignedLessThan:
+        cc = lt;
+        break;
+      case kSignedGreaterThanOrEqual:
+        cc = ge;
+        break;
+      case kSignedLessThanOrEqual:
+        cc = le;
+        break;
+      case kSignedGreaterThan:
+        cc = gt;
+        break;
+      case kUnsignedLessThan:
+        cc = lo;
+        break;
+      case kUnsignedGreaterThanOrEqual:
+        cc = hs;
+        break;
+      case kUnsignedLessThanOrEqual:
+        cc = ls;
+        break;
+      case kUnsignedGreaterThan:
+        cc = hi;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Cmp32, branch->condition);
+        break;
+    }
+
+    switch (branch->condition) {
+      case kEqual:
+      case kNotEqual:
+      case kSignedLessThan:
+      case kSignedGreaterThanOrEqual:
+      case kSignedLessThanOrEqual:
+      case kSignedGreaterThan:
+        // Sign-extend the registers: on MIPS64 only 64-bit operand branch
+        // and compare ops are available.
+        __ sll(i.InputRegister(0), i.InputRegister(0), 0);
+        if (instr->InputAt(1)->IsRegister()) {
+          __ sll(i.InputRegister(1), i.InputRegister(1), 0);
+        }
+        break;
+      case kUnsignedLessThan:
+      case kUnsignedGreaterThanOrEqual:
+      case kUnsignedLessThanOrEqual:
+      case kUnsignedGreaterThan:
+        // Zero-extend the registers: on MIPS64 only 64-bit operand branch
+        // and compare ops are available.
+        __ Dext(i.InputRegister(0), i.InputRegister(0), 0, 32);
+        if (instr->InputAt(1)->IsRegister()) {
+          __ Dext(i.InputRegister(1), i.InputRegister(1), 0, 32);
+        }
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Cmp32, branch->condition);
+        break;
+    }
+    __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
+
+    if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.
+  } else if (instr->arch_opcode() == kMips64CmpD) {
+    // TODO(dusmil): Optimize unordered checks to use fewer instructions,
+    // even if we have to unfold the BranchF macro.
+    Label* nan = flabel;
+    switch (branch->condition) {
+      case kUnorderedEqual:
+        cc = eq;
+        break;
+      case kUnorderedNotEqual:
+        cc = ne;
+        nan = tlabel;
+        break;
+      case kUnorderedLessThan:
+        cc = lt;
+        break;
+      case kUnorderedGreaterThanOrEqual:
+        cc = ge;
+        nan = tlabel;
+        break;
+      case kUnorderedLessThanOrEqual:
+        cc = le;
+        break;
+      case kUnorderedGreaterThan:
+        cc = gt;
+        nan = tlabel;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64CmpD, branch->condition);
+        break;
+    }
+    __ BranchF(tlabel, nan, cc, i.InputDoubleRegister(0),
+               i.InputDoubleRegister(1));
+
+    if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.
+  } else {
+    PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
+           instr->arch_opcode());
+    UNIMPLEMENTED();
+  }
+}
+
+
+void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+  if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
+}
+
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+                                        FlagsCondition condition) {
+  MipsOperandConverter i(this, instr);
+  Label done;
+
+  // Materialize a full 32-bit 1 or 0 value. The result register is always the
+  // last output of the instruction.
+  Label false_value;
+  DCHECK_NE(0, instr->OutputCount());
+  Register result = i.OutputRegister(instr->OutputCount() - 1);
+  Condition cc = kNoCondition;
+
+  // MIPS does not have condition code flags, so compare and branch are
+  // implemented differently from the other architectures. The compare
+  // operations emit MIPS pseudo-instructions, which are checked and handled
+  // here.
+
+  // For materializations, we use the delay slot to set the result to true,
+  // and in the false case, where we fall through the branch, we reset the
+  // result to false.
+
+  if (instr->arch_opcode() == kMips64Tst) {
+    switch (condition) {
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kEqual:
+        cc = eq;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Tst, condition);
+        break;
+    }
+    __ And(at, i.InputRegister(0), i.InputOperand(1));
+    __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
+    __ li(result, Operand(1));  // In delay slot.
+  } else if (instr->arch_opcode() == kMips64Tst32) {
+    switch (condition) {
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kEqual:
+        cc = eq;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Tst32, condition);
+        break;
+    }
+    // Zero-extend the register: on MIPS64 only a 64-bit operand
+    // branch-and-compare is available.
+    __ And(at, i.InputRegister(0), i.InputOperand(1));
+    __ Dext(at, at, 0, 32);
+    __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
+    __ li(result, Operand(1));  // In delay slot.
+  } else if (instr->arch_opcode() == kMips64Dadd ||
+             instr->arch_opcode() == kMips64Dsub) {
+    switch (condition) {
+      case kOverflow:
+        cc = ne;
+        break;
+      case kNotOverflow:
+        cc = eq;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Dadd, condition);
+        break;
+    }
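+    // As in AssembleArchBranch: the 32-bit operation overflowed iff the
+    // upper word of the 64-bit result differs from the sign replication of
+    // bit 31 of the lower word.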
+    __ dsra32(kScratchReg, i.OutputRegister(), 0);
+    __ sra(at, i.OutputRegister(), 31);
+    __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(kScratchReg));
+    __ li(result, Operand(1));  // In delay slot.
+  } else if (instr->arch_opcode() == kMips64Cmp) {
+    Register left = i.InputRegister(0);
+    Operand right = i.InputOperand(1);
+    switch (condition) {
+      case kEqual:
+        cc = eq;
+        break;
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kSignedLessThan:
+        cc = lt;
+        break;
+      case kSignedGreaterThanOrEqual:
+        cc = ge;
+        break;
+      case kSignedLessThanOrEqual:
+        cc = le;
+        break;
+      case kSignedGreaterThan:
+        cc = gt;
+        break;
+      case kUnsignedLessThan:
+        cc = lo;
+        break;
+      case kUnsignedGreaterThanOrEqual:
+        cc = hs;
+        break;
+      case kUnsignedLessThanOrEqual:
+        cc = ls;
+        break;
+      case kUnsignedGreaterThan:
+        cc = hi;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Cmp, condition);
+        break;
+    }
+    __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
+    __ li(result, Operand(1));  // In delay slot.
+  } else if (instr->arch_opcode() == kMips64Cmp32) {
+    Register left = i.InputRegister(0);
+    Operand right = i.InputOperand(1);
+    switch (condition) {
+      case kEqual:
+        cc = eq;
+        break;
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kSignedLessThan:
+        cc = lt;
+        break;
+      case kSignedGreaterThanOrEqual:
+        cc = ge;
+        break;
+      case kSignedLessThanOrEqual:
+        cc = le;
+        break;
+      case kSignedGreaterThan:
+        cc = gt;
+        break;
+      case kUnsignedLessThan:
+        cc = lo;
+        break;
+      case kUnsignedGreaterThanOrEqual:
+        cc = hs;
+        break;
+      case kUnsignedLessThanOrEqual:
+        cc = ls;
+        break;
+      case kUnsignedGreaterThan:
+        cc = hi;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Cmp32, condition);
+        break;
+    }
+
+    switch (condition) {
+      case kEqual:
+      case kNotEqual:
+      case kSignedLessThan:
+      case kSignedGreaterThanOrEqual:
+      case kSignedLessThanOrEqual:
+      case kSignedGreaterThan:
+        // Sign-extend the registers: on MIPS64 only a 64-bit operand
+        // branch-and-compare is available.
+        __ sll(left, left, 0);
+        if (instr->InputAt(1)->IsRegister()) {
+          __ sll(i.InputRegister(1), i.InputRegister(1), 0);
+        }
+        break;
+      case kUnsignedLessThan:
+      case kUnsignedGreaterThanOrEqual:
+      case kUnsignedLessThanOrEqual:
+      case kUnsignedGreaterThan:
+        // Zero-extend the registers: on MIPS64 only a 64-bit operand
+        // branch-and-compare is available.
+        __ Dext(left, left, 0, 32);
+        if (instr->InputAt(1)->IsRegister()) {
+          __ Dext(i.InputRegister(1), i.InputRegister(1), 0, 32);
+        }
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Cmp32, condition);
+        break;
+    }
+    __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
+    __ li(result, Operand(1));  // In delay slot.
+  } else if (instr->arch_opcode() == kMips64CmpD) {
+    FPURegister left = i.InputDoubleRegister(0);
+    FPURegister right = i.InputDoubleRegister(1);
+    // TODO(plind): Provide NaN-testing macro-asm function without need for
+    // BranchF.
+    FPURegister dummy1 = f0;
+    FPURegister dummy2 = f2;
+    switch (condition) {
+      case kUnorderedEqual:
+        // TODO(plind): Improve the NaN testing throughout this function.
+        __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
+        cc = eq;
+        break;
+      case kUnorderedNotEqual:
+        __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
+        __ li(result, Operand(1));  // In delay slot - returns 1 on NaN.
+        cc = ne;
+        break;
+      case kUnorderedLessThan:
+        __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
+        cc = lt;
+        break;
+      case kUnorderedGreaterThanOrEqual:
+        __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
+        __ li(result, Operand(1));  // In delay slot - returns 1 on NaN.
+        cc = ge;
+        break;
+      case kUnorderedLessThanOrEqual:
+        __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
+        cc = le;
+        break;
+      case kUnorderedGreaterThan:
+        __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
+        __ li(result, Operand(1));  // In delay slot - returns 1 on NaN.
+        cc = gt;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64CmpD, condition);
+        break;
+    }
+    __ BranchF(USE_DELAY_SLOT, &done, NULL, cc, left, right);
+    __ li(result, Operand(1));  // In delay slot - branch taken returns 1.
+                                // Fall-thru (branch not taken) returns 0.
+
+  } else {
+    PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
+           instr->arch_opcode());
+    TRACE_UNIMPL();
+    UNIMPLEMENTED();
+  }
+  // Fallthru case is the false materialization.
+  __ bind(&false_value);
+  __ li(result, Operand(static_cast<int64_t>(0)));
+  __ bind(&done);
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+      isolate(), deoptimization_id, Deoptimizer::LAZY);
+  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
+void CodeGenerator::AssemblePrologue() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    __ Push(ra, fp);
+    __ mov(fp, sp);
+    const RegList saves = descriptor->CalleeSavedRegisters();
+    if (saves != 0) {  // Save callee-saved registers.
+      // TODO(plind): make callee save size const, possibly DCHECK it.
+      int register_save_area_size = 0;
+      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+        if (!((1 << i) & saves)) continue;
+        register_save_area_size += kPointerSize;
+      }
+      frame()->SetRegisterSaveAreaSize(register_save_area_size);
+      __ MultiPush(saves);
+    }
+  } else if (descriptor->IsJSFunctionCall()) {
+    CompilationInfo* info = this->info();
+    __ Prologue(info->IsCodePreAgingActive());
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+
+    // Sloppy mode functions and builtins need to replace the receiver with the
+    // global proxy when called as functions (without an explicit receiver
+    // object).
+    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+    if (info->strict_mode() == SLOPPY && !info->is_native()) {
+      Label ok;
+      // +2 for return address and saved frame pointer.
+      int receiver_slot = info->scope()->num_parameters() + 2;
+      __ ld(a2, MemOperand(fp, receiver_slot * kPointerSize));
+      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+      __ Branch(&ok, ne, a2, Operand(at));
+
+      __ ld(a2, GlobalObjectOperand());
+      __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
+      __ sd(a2, MemOperand(fp, receiver_slot * kPointerSize));
+      __ bind(&ok);
+    }
+  } else {
+    __ StubPrologue();
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+  }
+  int stack_slots = frame()->GetSpillSlotCount();
+  if (stack_slots > 0) {
+    __ Dsubu(sp, sp, Operand(stack_slots * kPointerSize));
+  }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    if (frame()->GetRegisterSaveAreaSize() > 0) {
+      // Remove this frame's spill slots first.
+      int stack_slots = frame()->GetSpillSlotCount();
+      if (stack_slots > 0) {
+        __ Daddu(sp, sp, Operand(stack_slots * kPointerSize));
+      }
+      // Restore registers.
+      const RegList saves = descriptor->CalleeSavedRegisters();
+      if (saves != 0) {
+        __ MultiPop(saves);
+      }
+    }
+    __ mov(sp, fp);
+    __ Pop(ra, fp);
+    __ Ret();
+  } else {
+    __ mov(sp, fp);
+    __ Pop(ra, fp);
+    int pop_count = descriptor->IsJSFunctionCall()
+                        ? static_cast<int>(descriptor->JSParameterCount())
+                        : 0;
+    __ DropAndRet(pop_count);
+  }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  MipsOperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      __ mov(g.ToRegister(destination), src);
+    } else {
+      __ sd(src, g.ToMemOperand(destination));
+    }
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    MemOperand src = g.ToMemOperand(source);
+    if (destination->IsRegister()) {
+      __ ld(g.ToRegister(destination), src);
+    } else {
+      Register temp = kScratchReg;
+      __ ld(temp, src);
+      __ sd(temp, g.ToMemOperand(destination));
+    }
+  } else if (source->IsConstant()) {
+    Constant src = g.ToConstant(source);
+    if (destination->IsRegister() || destination->IsStackSlot()) {
+      Register dst =
+          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
+      switch (src.type()) {
+        case Constant::kInt32:
+          __ li(dst, Operand(src.ToInt32()));
+          break;
+        case Constant::kFloat32:
+          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+          break;
+        case Constant::kInt64:
+          __ li(dst, Operand(src.ToInt64()));
+          break;
+        case Constant::kFloat64:
+          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+          break;
+        case Constant::kExternalReference:
+          __ li(dst, Operand(src.ToExternalReference()));
+          break;
+        case Constant::kHeapObject:
+          __ li(dst, src.ToHeapObject());
+          break;
+        case Constant::kRpoNumber:
+          UNREACHABLE();  // TODO(titzer): loading RPO numbers on mips64.
+          break;
+      }
+      if (destination->IsStackSlot()) __ sd(dst, g.ToMemOperand(destination));
+    } else if (src.type() == Constant::kFloat32) {
+      if (destination->IsDoubleStackSlot()) {
+        MemOperand dst = g.ToMemOperand(destination);
+        __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
+        __ sw(at, dst);
+      } else {
+        FloatRegister dst = g.ToSingleRegister(destination);
+        __ Move(dst, src.ToFloat32());
+      }
+    } else {
+      DCHECK_EQ(Constant::kFloat64, src.type());
+      DoubleRegister dst = destination->IsDoubleRegister()
+                               ? g.ToDoubleRegister(destination)
+                               : kScratchDoubleReg;
+      __ Move(dst, src.ToFloat64());
+      if (destination->IsDoubleStackSlot()) {
+        __ sdc1(dst, g.ToMemOperand(destination));
+      }
+    }
+  } else if (source->IsDoubleRegister()) {
+    FPURegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      FPURegister dst = g.ToDoubleRegister(destination);
+      __ Move(dst, src);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      __ sdc1(src, g.ToMemOperand(destination));
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+    MemOperand src = g.ToMemOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ ldc1(g.ToDoubleRegister(destination), src);
+    } else {
+      FPURegister temp = kScratchDoubleReg;
+      __ ldc1(temp, src);
+      __ sdc1(temp, g.ToMemOperand(destination));
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  MipsOperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    // Register-register.
+    Register temp = kScratchReg;
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      Register dst = g.ToRegister(destination);
+      __ Move(temp, src);
+      __ Move(src, dst);
+      __ Move(dst, temp);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      MemOperand dst = g.ToMemOperand(destination);
+      __ mov(temp, src);
+      __ ld(src, dst);
+      __ sd(temp, dst);
+    }
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsStackSlot());
+    Register temp_0 = kScratchReg;
+    Register temp_1 = kScratchReg2;
+    MemOperand src = g.ToMemOperand(source);
+    MemOperand dst = g.ToMemOperand(destination);
+    __ ld(temp_0, src);
+    __ ld(temp_1, dst);
+    __ sd(temp_0, dst);
+    __ sd(temp_1, src);
+  } else if (source->IsDoubleRegister()) {
+    FPURegister temp = kScratchDoubleReg;
+    FPURegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      FPURegister dst = g.ToDoubleRegister(destination);
+      __ Move(temp, src);
+      __ Move(src, dst);
+      __ Move(dst, temp);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      MemOperand dst = g.ToMemOperand(destination);
+      __ Move(temp, src);
+      __ ldc1(src, dst);
+      __ sdc1(temp, dst);
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleStackSlot());
+    Register temp_0 = kScratchReg;
+    FPURegister temp_1 = kScratchDoubleReg;
+    MemOperand src0 = g.ToMemOperand(source);
+    MemOperand src1(src0.rm(), src0.offset() + kPointerSize);
+    MemOperand dst0 = g.ToMemOperand(destination);
+    MemOperand dst1(dst0.rm(), dst0.offset() + kPointerSize);
+    __ ldc1(temp_1, dst0);  // Save destination in temp_1.
+    __ lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
+    __ sw(temp_0, dst0);
+    __ lw(temp_0, src1);
+    __ sw(temp_0, dst1);
+    __ sdc1(temp_1, src0);
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() {
+  // Left over from the ARM port: unused on 32-bit ARM, still present on
+  // 64-bit ARM.
+  // TODO(plind): Unclear when this is called now. Understand, fix if needed.
+  __ nop();  // Maybe PROPERTY_ACCESS_INLINED?
+}
+
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+  int space_needed = Deoptimizer::patch_size();
+  if (!info()->IsStub()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      // Block trampoline pool emission for the duration of the padding.
+      v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+          masm());
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+      while (padding_size > 0) {
+        __ nop();
+        padding_size -= v8::internal::Assembler::kInstrSize;
+      }
+    }
+  }
+  MarkLazyDeoptSite();
+}
+
+#undef __
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/mips64/instruction-codes-mips64.h b/src/compiler/mips64/instruction-codes-mips64.h
new file mode 100644
index 0000000..dd019f9
--- /dev/null
+++ b/src/compiler/mips64/instruction-codes-mips64.h
@@ -0,0 +1,108 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MIPS64_INSTRUCTION_CODES_MIPS64_H_
+#define V8_COMPILER_MIPS64_INSTRUCTION_CODES_MIPS64_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// MIPS64-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+  V(Mips64Add)                     \
+  V(Mips64Dadd)                    \
+  V(Mips64Sub)                     \
+  V(Mips64Dsub)                    \
+  V(Mips64Mul)                     \
+  V(Mips64MulHigh)                 \
+  V(Mips64MulHighU)                \
+  V(Mips64Dmul)                    \
+  V(Mips64Div)                     \
+  V(Mips64Ddiv)                    \
+  V(Mips64DivU)                    \
+  V(Mips64DdivU)                   \
+  V(Mips64Mod)                     \
+  V(Mips64Dmod)                    \
+  V(Mips64ModU)                    \
+  V(Mips64DmodU)                   \
+  V(Mips64And)                     \
+  V(Mips64Or)                      \
+  V(Mips64Xor)                     \
+  V(Mips64Shl)                     \
+  V(Mips64Shr)                     \
+  V(Mips64Sar)                     \
+  V(Mips64Ext)                     \
+  V(Mips64Dext)                    \
+  V(Mips64Dshl)                    \
+  V(Mips64Dshr)                    \
+  V(Mips64Dsar)                    \
+  V(Mips64Ror)                     \
+  V(Mips64Dror)                    \
+  V(Mips64Mov)                     \
+  V(Mips64Tst)                     \
+  V(Mips64Tst32)                   \
+  V(Mips64Cmp)                     \
+  V(Mips64Cmp32)                   \
+  V(Mips64CmpD)                    \
+  V(Mips64AddD)                    \
+  V(Mips64SubD)                    \
+  V(Mips64MulD)                    \
+  V(Mips64DivD)                    \
+  V(Mips64ModD)                    \
+  V(Mips64SqrtD)                   \
+  V(Mips64Float64Floor)            \
+  V(Mips64Float64Ceil)             \
+  V(Mips64Float64RoundTruncate)    \
+  V(Mips64CvtSD)                   \
+  V(Mips64CvtDS)                   \
+  V(Mips64TruncWD)                 \
+  V(Mips64TruncUwD)                \
+  V(Mips64CvtDW)                   \
+  V(Mips64CvtDUw)                  \
+  V(Mips64Lb)                      \
+  V(Mips64Lbu)                     \
+  V(Mips64Sb)                      \
+  V(Mips64Lh)                      \
+  V(Mips64Lhu)                     \
+  V(Mips64Sh)                      \
+  V(Mips64Ld)                      \
+  V(Mips64Lw)                      \
+  V(Mips64Sw)                      \
+  V(Mips64Sd)                      \
+  V(Mips64Lwc1)                    \
+  V(Mips64Swc1)                    \
+  V(Mips64Ldc1)                    \
+  V(Mips64Sdc1)                    \
+  V(Mips64Push)                    \
+  V(Mips64StoreToStackSlot)        \
+  V(Mips64StackClaim)              \
+  V(Mips64StoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+// TODO(plind): Add the new r6 address modes.
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+  V(MRI) /* [%r0 + K] */               \
+  V(MRR) /* [%r0 + %r1] */
+
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_MIPS64_INSTRUCTION_CODES_MIPS64_H_
diff --git a/src/compiler/mips64/instruction-selector-mips64.cc b/src/compiler/mips64/instruction-selector-mips64.cc
new file mode 100644
index 0000000..35ad16b
--- /dev/null
+++ b/src/compiler/mips64/instruction-selector-mips64.cc
@@ -0,0 +1,1079 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bits.h"
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define TRACE_UNIMPL() \
+  PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+
+// Adds MIPS64-specific methods for generating InstructionOperands.
+class Mips64OperandGenerator FINAL : public OperandGenerator {
+ public:
+  explicit Mips64OperandGenerator(InstructionSelector* selector)
+      : OperandGenerator(selector) {}
+
+  InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
+    if (CanBeImmediate(node, opcode)) {
+      return UseImmediate(node);
+    }
+    return UseRegister(node);
+  }
+
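+  // Returns true when |node| is an integer constant that fits the immediate
+  // field of |opcode|: 5- or 6-bit shift amounts, a zero-extended 16-bit
+  // value for xori, and a signed 16-bit offset otherwise (for double loads
+  // and stores the offset of the second word must fit as well).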
+  bool CanBeImmediate(Node* node, InstructionCode opcode) {
+    int64_t value;
+    if (node->opcode() == IrOpcode::kInt32Constant)
+      value = OpParameter<int32_t>(node);
+    else if (node->opcode() == IrOpcode::kInt64Constant)
+      value = OpParameter<int64_t>(node);
+    else
+      return false;
+    switch (ArchOpcodeField::decode(opcode)) {
+      case kMips64Shl:
+      case kMips64Sar:
+      case kMips64Shr:
+        return is_uint5(value);
+      case kMips64Dshl:
+      case kMips64Dsar:
+      case kMips64Dshr:
+        return is_uint6(value);
+      case kMips64Xor:
+        return is_uint16(value);
+      case kMips64Ldc1:
+      case kMips64Sdc1:
+        return is_int16(value + kIntSize);
+      default:
+        return is_int16(value);
+    }
+  }
+
+
+  bool CanBeImmediate(Node* node, InstructionCode opcode,
+                      FlagsContinuation* cont) {
+    int64_t value;
+    if (node->opcode() == IrOpcode::kInt32Constant)
+      value = OpParameter<int32_t>(node);
+    else if (node->opcode() == IrOpcode::kInt64Constant)
+      value = OpParameter<int64_t>(node);
+    else
+      return false;
+    switch (ArchOpcodeField::decode(opcode)) {
+      case kMips64Cmp32:
+        switch (cont->condition()) {
+          case kUnsignedLessThan:
+          case kUnsignedGreaterThanOrEqual:
+          case kUnsignedLessThanOrEqual:
+          case kUnsignedGreaterThan:
+            // Immediate operands for unsigned 32-bit compare operations
+            // should not be sign-extended.
+            return is_uint15(value);
+          default:
+            return false;
+        }
+      default:
+        return is_int16(value);
+    }
+  }
+
+
+ private:
+  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
+    TRACE_UNIMPL();
+    return false;
+  }
+};
+
+
+static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
+                    Node* node) {
+  Mips64OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)));
+}
+
+
+static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
+                     Node* node) {
+  Mips64OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseRegister(node->InputAt(1)));
+}
+
+
+static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
+                     Node* node) {
+  Mips64OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseOperand(node->InputAt(1), opcode));
+}
+
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode, FlagsContinuation* cont) {
+  Mips64OperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  InstructionOperand* inputs[4];
+  size_t input_count = 0;
+  InstructionOperand* outputs[2];
+  size_t output_count = 0;
+
+  inputs[input_count++] = g.UseRegister(m.left().node());
+  inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  }
+
+  outputs[output_count++] = g.DefineAsRegister(node);
+  if (cont->IsSet()) {
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0, input_count);
+  DCHECK_NE(0, output_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+
+  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+                                      outputs, input_count, inputs);
+  if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode) {
+  FlagsContinuation cont;
+  VisitBinop(selector, node, opcode, &cont);
+}
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
+  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+  Mips64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kMips64Lwc1;
+      break;
+    case kRepFloat64:
+      opcode = kMips64Ldc1;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = typ == kTypeUint32 ? kMips64Lbu : kMips64Lb;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeUint32 ? kMips64Lhu : kMips64Lh;
+      break;
+    case kRepWord32:
+      opcode = kMips64Lw;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord64:
+      opcode = kMips64Ld;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+  } else {
+    InstructionOperand* addr_reg = g.TempRegister();
+    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
+         g.UseRegister(index), g.UseRegister(base));
+    // Emit desired load opcode, using temp addr_reg.
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+  }
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+  Mips64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+  MachineType rep = RepresentationOf(store_rep.machine_type());
+  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+    DCHECK(rep == kRepTagged);
+    // TODO(dcarney): refactor RecordWrite function to take temp registers
+    //                and pass them here instead of using fixed regs
+    // TODO(dcarney): handle immediate indices.
+    InstructionOperand* temps[] = {g.TempRegister(t1), g.TempRegister(t2)};
+    Emit(kMips64StoreWriteBarrier, NULL, g.UseFixed(base, t0),
+         g.UseFixed(index, t1), g.UseFixed(value, t2), arraysize(temps), temps);
+    return;
+  }
+  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kMips64Swc1;
+      break;
+    case kRepFloat64:
+      opcode = kMips64Sdc1;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = kMips64Sb;
+      break;
+    case kRepWord16:
+      opcode = kMips64Sh;
+      break;
+    case kRepWord32:
+      opcode = kMips64Sw;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord64:
+      opcode = kMips64Sd;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+  } else {
+    InstructionOperand* addr_reg = g.TempRegister();
+    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
+         g.UseRegister(index), g.UseRegister(base));
+    // Emit desired store opcode, using temp addr_reg.
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL, addr_reg,
+         g.TempImmediate(0), g.UseRegister(value));
+  }
+}
+
+
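+// The 32-bit and 64-bit logical operations below map to the same MIPS64
+// instructions, since bitwise operations do not depend on operand width.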
+void InstructionSelector::VisitWord32And(Node* node) {
+  VisitBinop(this, node, kMips64And);
+}
+
+
+void InstructionSelector::VisitWord64And(Node* node) {
+  VisitBinop(this, node, kMips64And);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+  VisitBinop(this, node, kMips64Or);
+}
+
+
+void InstructionSelector::VisitWord64Or(Node* node) {
+  VisitBinop(this, node, kMips64Or);
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+  VisitBinop(this, node, kMips64Xor);
+}
+
+
+void InstructionSelector::VisitWord64Xor(Node* node) {
+  VisitBinop(this, node, kMips64Xor);
+}
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+  VisitRRO(this, kMips64Shl, node);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+  VisitRRO(this, kMips64Shr, node);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+  VisitRRO(this, kMips64Sar, node);
+}
+
+
+void InstructionSelector::VisitWord64Shl(Node* node) {
+  VisitRRO(this, kMips64Dshl, node);
+}
+
+
+void InstructionSelector::VisitWord64Shr(Node* node) {
+  VisitRRO(this, kMips64Dshr, node);
+}
+
+
+void InstructionSelector::VisitWord64Sar(Node* node) {
+  VisitRRO(this, kMips64Dsar, node);
+}
+
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+  VisitRRO(this, kMips64Ror, node);
+}
+
+
+void InstructionSelector::VisitWord64Ror(Node* node) {
+  VisitRRO(this, kMips64Dror, node);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+  Mips64OperandGenerator g(this);
+  // TODO(plind): Consider multiply & add optimization from arm port.
+  VisitBinop(this, node, kMips64Add);
+}
+
+
+void InstructionSelector::VisitInt64Add(Node* node) {
+  Mips64OperandGenerator g(this);
+  // TODO(plind): Consider multiply & add optimization from arm port.
+  VisitBinop(this, node, kMips64Dadd);
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+  VisitBinop(this, node, kMips64Sub);
+}
+
+
+void InstructionSelector::VisitInt64Sub(Node* node) {
+  VisitBinop(this, node, kMips64Dsub);
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+  Mips64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
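+  // Strength-reduce multiplications by 2^k, 2^k + 1 and 2^k - 1 into a
+  // shift, a shift-and-add and a shift-and-subtract, respectively.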
+  if (m.right().HasValue() && m.right().Value() > 0) {
+    int32_t value = m.right().Value();
+    if (base::bits::IsPowerOfTwo32(value)) {
+      Emit(kMips64Shl | AddressingModeField::encode(kMode_None),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value)));
+      return;
+    }
+    if (base::bits::IsPowerOfTwo32(value - 1)) {
+      InstructionOperand* temp = g.TempRegister();
+      Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
+           g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value - 1)));
+      Emit(kMips64Add | AddressingModeField::encode(kMode_None),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
+      return;
+    }
+    if (base::bits::IsPowerOfTwo32(value + 1)) {
+      InstructionOperand* temp = g.TempRegister();
+      Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
+           g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value + 1)));
+      Emit(kMips64Sub | AddressingModeField::encode(kMode_None),
+           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
+      return;
+    }
+  }
+  Emit(kMips64Mul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64MulHigh, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+  Mips64OperandGenerator g(this);
+  InstructionOperand* const dmul_operand = g.TempRegister();
+  Emit(kMips64MulHighU, dmul_operand, g.UseRegister(node->InputAt(0)),
+       g.UseRegister(node->InputAt(1)));
+  Emit(kMips64Ext, g.DefineAsRegister(node), dmul_operand, g.TempImmediate(0),
+       g.TempImmediate(32));
+}
+
+
+void InstructionSelector::VisitInt64Mul(Node* node) {
+  Mips64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  // TODO(dusmil): Add optimization for shifts larger than 32.
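+  // As in VisitInt32Mul, strength-reduce multiplications by 2^k, 2^k + 1
+  // and 2^k - 1 into shift-based sequences.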
+  if (m.right().HasValue() && m.right().Value() > 0) {
+    int64_t value = m.right().Value();
+    if (base::bits::IsPowerOfTwo32(value)) {
+      Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value)));
+      return;
+    }
+    if (base::bits::IsPowerOfTwo32(value - 1)) {
+      InstructionOperand* temp = g.TempRegister();
+      Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
+           g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value - 1)));
+      Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
+      return;
+    }
+    if (base::bits::IsPowerOfTwo32(value + 1)) {
+      InstructionOperand* temp = g.TempRegister();
+      Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
+           g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value + 1)));
+      Emit(kMips64Dsub | AddressingModeField::encode(kMode_None),
+           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
+      return;
+    }
+  }
+  Emit(kMips64Dmul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+  Mips64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  Emit(kMips64Div, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitUint32Div(Node* node) {
+  Mips64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  Emit(kMips64DivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+  Mips64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  Emit(kMips64Mod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitUint32Mod(Node* node) {
+  Mips64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  Emit(kMips64ModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt64Div(Node* node) {
+  Mips64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  Emit(kMips64Ddiv, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitUint64Div(Node* node) {
+  Mips64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  Emit(kMips64DdivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt64Mod(Node* node) {
+  Mips64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  Emit(kMips64Dmod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitUint64Mod(Node* node) {
+  Mips64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  Emit(kMips64DmodU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64CvtDS, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64CvtDW, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64CvtDUw, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64TruncWD, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64TruncUwD, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+  Mips64OperandGenerator g(this);
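+  // On MIPS64 the 32-bit shift instructions sign-extend their result to 64
+  // bits, so shifting left by zero implements the conversion.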
+  Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+       g.TempImmediate(0));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+  Mips64OperandGenerator g(this);
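+  // Dext with position 0 and size 32 zero-extends the lower word, which is
+  // exactly the uint32 -> uint64 conversion.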
+  Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+       g.TempImmediate(0), g.TempImmediate(32));
+}
+
+
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+       g.TempImmediate(0), g.TempImmediate(32));
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64CvtSD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+  VisitRRR(this, kMips64AddD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+  VisitRRR(this, kMips64SubD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+  VisitRRR(this, kMips64MulD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+  VisitRRR(this, kMips64DivD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64ModD, g.DefineAsFixed(node, f0),
+       g.UseFixed(node->InputAt(0), f12),
+       g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64SqrtD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Floor(Node* node) {
+  VisitRR(this, kMips64Float64Floor, node);
+}
+
+
+void InstructionSelector::VisitFloat64Ceil(Node* node) {
+  VisitRR(this, kMips64Float64Ceil, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+  VisitRR(this, kMips64Float64RoundTruncate, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+  UNREACHABLE();
+}
+
+
+void InstructionSelector::VisitCall(Node* node) {
+  Mips64OperandGenerator g(this);
+  const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
+
+  FrameStateDescriptor* frame_state_descriptor = NULL;
+  if (descriptor->NeedsFrameState()) {
+    frame_state_descriptor =
+        GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
+  }
+
+  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+  // Compute InstructionOperands for inputs and outputs.
+  InitializeCallBuffer(node, &buffer, true, false);
+
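+  // Claim stack space for all pushed arguments at once, then store them
+  // into their slots from last to first.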
+  int push_count = buffer.pushed_nodes.size();
+  if (push_count > 0) {
+    Emit(kMips64StackClaim | MiscField::encode(push_count), NULL);
+  }
+  int slot = buffer.pushed_nodes.size() - 1;
+  for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
+       input != buffer.pushed_nodes.rend(); input++) {
+    Emit(kMips64StoreToStackSlot | MiscField::encode(slot), NULL,
+         g.UseRegister(*input));
+    slot--;
+  }
+
+  // Select the appropriate opcode based on the call type.
+  InstructionCode opcode;
+  switch (descriptor->kind()) {
+    case CallDescriptor::kCallCodeObject: {
+      opcode = kArchCallCodeObject;
+      break;
+    }
+    case CallDescriptor::kCallJSFunction:
+      opcode = kArchCallJSFunction;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  opcode |= MiscField::encode(descriptor->flags());
+
+  // Emit the call instruction.
+  Instruction* call_instr =
+      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+           buffer.instruction_args.size(), &buffer.instruction_args.front());
+
+  call_instr->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  MachineType typ = TypeOf(OpParameter<MachineType>(node));
+  Mips64OperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedLoadWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedLoadFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedLoadFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  InstructionOperand* offset_operand = g.CanBeImmediate(offset, opcode)
+                                           ? g.UseImmediate(offset)
+                                           : g.UseRegister(offset);
+
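+  // The length may only be used as an immediate when the offset is in a
+  // register; with an immediate offset the length is forced into a register.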
+  InstructionOperand* length_operand =
+      (!g.CanBeImmediate(offset, opcode) && g.CanBeImmediate(length, opcode))
+          ? g.UseImmediate(length)
+          : g.UseRegister(length);
+
+  Emit(opcode | AddressingModeField::encode(kMode_MRI),
+       g.DefineAsRegister(node), offset_operand, length_operand,
+       g.UseRegister(buffer));
+}
+
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  Mips64OperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  Node* const value = node->InputAt(3);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = kCheckedStoreWord8;
+      break;
+    case kRepWord16:
+      opcode = kCheckedStoreWord16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedStoreWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedStoreFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedStoreFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  InstructionOperand* offset_operand = g.CanBeImmediate(offset, opcode)
+                                           ? g.UseImmediate(offset)
+                                           : g.UseRegister(offset);
+
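+  // Operand selection mirrors VisitCheckedLoad above.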
+  InstructionOperand* length_operand =
+      (!g.CanBeImmediate(offset, opcode) && g.CanBeImmediate(length, opcode))
+          ? g.UseImmediate(length)
+          : g.UseRegister(length);
+
+  Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr, offset_operand,
+       length_operand, g.UseRegister(value), g.UseRegister(buffer));
+}
+
+
+namespace {
+
+// Shared routine for multiple compare operations.
+static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+                         InstructionOperand* left, InstructionOperand* right,
+                         FlagsContinuation* cont) {
+  Mips64OperandGenerator g(selector);
+  opcode = cont->Encode(opcode);
+  if (cont->IsBranch()) {
+    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
+                   g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    DCHECK(cont->IsSet());
+    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  }
+}
+
+
+// Shared routine for multiple float compare operations.
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+                         FlagsContinuation* cont) {
+  Mips64OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  VisitCompare(selector, kMips64CmpD, g.UseRegister(left), g.UseRegister(right),
+               cont);
+}
+
+
+// Shared routine for multiple word compare operations.
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+                      InstructionCode opcode, FlagsContinuation* cont,
+                      bool commutative) {
+  Mips64OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // Match immediates on left or right side of comparison.
+  if (g.CanBeImmediate(right, opcode, cont)) {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+                 cont);
+  } else if (g.CanBeImmediate(left, opcode, cont)) {
+    if (!commutative) cont->Commute();
+    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+                 cont);
+  } else {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+                 cont);
+  }
+}
+
+
+void VisitWord32Compare(InstructionSelector* selector, Node* node,
+                        FlagsContinuation* cont) {
+  VisitWordCompare(selector, node, kMips64Cmp32, cont, false);
+}
+
+
+void VisitWord64Compare(InstructionSelector* selector, Node* node,
+                        FlagsContinuation* cont) {
+  VisitWordCompare(selector, node, kMips64Cmp, cont, false);
+}
+
+}  // namespace
+
+
+void EmitWordCompareZero(InstructionSelector* selector, InstructionCode opcode,
+                         Node* value, FlagsContinuation* cont) {
+  Mips64OperandGenerator g(selector);
+  opcode = cont->Encode(opcode);
+  InstructionOperand* const value_operand = g.UseRegister(value);
+  if (cont->IsBranch()) {
+    selector->Emit(opcode, nullptr, value_operand, g.TempImmediate(0),
+                   g.Label(cont->true_block()),
+                   g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
+                   g.TempImmediate(0));
+  }
+}
+
+
+// Shared routine for word comparisons against zero.
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+                          Node* value, FlagsContinuation* cont) {
+  // Initially set the comparison against 0 to the 64-bit variant, for
+  // branches that cannot be combined.
+  InstructionCode opcode = kMips64Cmp;
+  while (selector->CanCover(user, value)) {
+    if (user->opcode() == IrOpcode::kWord32Equal) {
+      opcode = kMips64Cmp32;
+    }
+    switch (value->opcode()) {
+      case IrOpcode::kWord32Equal: {
+        // Combine with comparisons against 0 by simply inverting the
+        // continuation.
+        Int32BinopMatcher m(value);
+        if (m.right().Is(0)) {
+          user = value;
+          value = m.left().node();
+          cont->Negate();
+          opcode = kMips64Cmp32;
+          continue;
+        }
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitWord32Compare(selector, value, cont);
+      }
+      case IrOpcode::kInt32LessThan:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWord32Compare(selector, value, cont);
+      case IrOpcode::kInt32LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWord32Compare(selector, value, cont);
+      case IrOpcode::kUint32LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWord32Compare(selector, value, cont);
+      case IrOpcode::kUint32LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWord32Compare(selector, value, cont);
+      case IrOpcode::kWord64Equal: {
+        // Combine with comparisons against 0 by simply inverting the
+        // continuation.
+        Int64BinopMatcher m(value);
+        if (m.right().Is(0)) {
+          user = value;
+          value = m.left().node();
+          cont->Negate();
+          continue;
+        }
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitWord64Compare(selector, value, cont);
+      }
+      case IrOpcode::kInt64LessThan:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWord64Compare(selector, value, cont);
+      case IrOpcode::kInt64LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWord64Compare(selector, value, cont);
+      case IrOpcode::kUint64LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWord64Compare(selector, value, cont);
+      case IrOpcode::kFloat64Equal:
+        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kFloat64LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnorderedLessThan);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kFloat64LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kProjection:
+        // Check if this is the overflow output projection of an
+        // <Operation>WithOverflow node.
+        if (OpParameter<size_t>(value) == 1u) {
+          // We cannot combine the <Operation>WithOverflow with this branch
+          // unless the 0th projection (the use of the actual value of the
+          // <Operation>) is either NULL, which means there's no use of the
+          // actual value, or was already defined, which means it is
+          // scheduled *AFTER* this branch.
+          Node* node = value->InputAt(0);
+          Node* result = node->FindProjection(0);
+          if (result == NULL || selector->IsDefined(result)) {
+            switch (node->opcode()) {
+              case IrOpcode::kInt32AddWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kMips64Dadd, cont);
+              case IrOpcode::kInt32SubWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kMips64Dsub, cont);
+              default:
+                break;
+            }
+          }
+        }
+        break;
+      case IrOpcode::kWord32And:
+        return VisitWordCompare(selector, value, kMips64Tst32, cont, true);
+      case IrOpcode::kWord64And:
+        return VisitWordCompare(selector, value, kMips64Tst, cont, true);
+      default:
+        break;
+    }
+    break;
+  }
+
+  // The continuation could not be combined with a compare; emit a compare
+  // against zero.
+  EmitWordCompareZero(selector, opcode, value, cont);
+}
+
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+                                      BasicBlock* fbranch) {
+  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
+}
+
+
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+  FlagsContinuation cont(kEqual, node);
+  Int32BinopMatcher m(node);
+  if (m.right().Is(0)) {
+    return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
+  }
+
+  VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+  FlagsContinuation cont(kSignedLessThan, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThan, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+  if (Node* ovf = node->FindProjection(1)) {
+    FlagsContinuation cont(kOverflow, ovf);
+    return VisitBinop(this, node, kMips64Dadd, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop(this, node, kMips64Dadd, &cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+  if (Node* ovf = node->FindProjection(1)) {
+    FlagsContinuation cont(kOverflow, ovf);
+    return VisitBinop(this, node, kMips64Dsub, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop(this, node, kMips64Dsub, &cont);
+}
+
+
+void InstructionSelector::VisitWord64Equal(Node* const node) {
+  FlagsContinuation cont(kEqual, node);
+  Int64BinopMatcher m(node);
+  if (m.right().Is(0)) {
+    return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
+  }
+
+  VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+  FlagsContinuation cont(kSignedLessThan, node);
+  VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint64LessThan(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThan, node);
+  VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+  FlagsContinuation cont(kUnorderedEqual, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+  FlagsContinuation cont(kUnorderedLessThan, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+  return MachineOperatorBuilder::kFloat64Floor |
+         MachineOperatorBuilder::kFloat64Ceil |
+         MachineOperatorBuilder::kFloat64RoundTruncate;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
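The pattern in this instruction selector, rewriting the branch's continuation instead of materializing a boolean, is easiest to see in isolation. Below is a minimal standalone sketch of the OverwriteAndNegateIfEqual bookkeeping; Condition and ToyContinuation are illustrative names, not the V8 API. A branch starts out testing its input against zero with kNotEqual, and when the input is recognized as, say, the overflow projection of an Int32AddWithOverflow, the condition is overwritten with kOverflow (negated if the continuation was testing kEqual).

#include <iostream>

// Toy flags conditions; kNotOverflow is added so negation is total here.
enum Condition { kEqual, kNotEqual, kOverflow, kNotOverflow };

Condition Negate(Condition c) {
  switch (c) {
    case kEqual:       return kNotEqual;
    case kNotEqual:    return kEqual;
    case kOverflow:    return kNotOverflow;
    case kNotOverflow: return kOverflow;
  }
  return c;  // unreachable
}

// Models the continuation used above: the condition under which control
// transfers to the true block.
class ToyContinuation {
 public:
  explicit ToyContinuation(Condition c) : condition_(c) {}
  // Replace the condition with {c}; if the old condition was kEqual the
  // branch sense is inverted, so the new condition is negated.
  void OverwriteAndNegateIfEqual(Condition c) {
    condition_ = (condition_ == kEqual) ? Negate(c) : c;
  }
  Condition condition() const { return condition_; }

 private:
  Condition condition_;
};

int main() {
  ToyContinuation branch(kNotEqual);   // VisitBranch: "branch if input != 0".
  branch.OverwriteAndNegateIfEqual(kOverflow);
  std::cout << (branch.condition() == kOverflow) << "\n";  // prints 1

  ToyContinuation equal_test(kEqual);  // e.g. an Equal(x, 0) chain.
  equal_test.OverwriteAndNegateIfEqual(kOverflow);
  std::cout << (equal_test.condition() == kNotOverflow) << "\n";  // prints 1
}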
diff --git a/src/compiler/mips64/linkage-mips64.cc b/src/compiler/mips64/linkage-mips64.cc
new file mode 100644
index 0000000..0e1a590
--- /dev/null
+++ b/src/compiler/mips64/linkage-mips64.cc
@@ -0,0 +1,67 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct MipsLinkageHelperTraits {
+  static Register ReturnValueReg() { return v0; }
+  static Register ReturnValue2Reg() { return v1; }
+  static Register JSCallFunctionReg() { return a1; }
+  static Register ContextReg() { return cp; }
+  static Register RuntimeCallFunctionReg() { return a1; }
+  static Register RuntimeCallArgCountReg() { return a0; }
+  static RegList CCalleeSaveRegisters() {
+    return s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() |
+           s6.bit() | s7.bit();
+  }
+  static Register CRegisterParameter(int i) {
+    static Register register_parameters[] = {a0, a1, a2, a3, a4, a5, a6, a7};
+    return register_parameters[i];
+  }
+  static int CRegisterParametersLength() { return 8; }
+};
+
+
+typedef LinkageHelper<MipsLinkageHelperTraits> LH;
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
+                                             CallDescriptor::Flags flags) {
+  return LH::GetJSCallDescriptor(zone, parameter_count, flags);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+    Runtime::FunctionId function, int parameter_count,
+    Operator::Properties properties, Zone* zone) {
+  return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
+                                      properties);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+    const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
+    CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
+  return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
+                                   flags, properties);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+                                                  MachineSignature* sig) {
+  return LH::GetSimplifiedCDescriptor(zone, sig);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
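The linkage file follows the traits pattern: each architecture supplies only a small traits struct naming its ABI registers, and the shared LinkageHelper template derives the call descriptors from it. A standalone sketch of that shape, with toy Reg/traits/helper names standing in for the V8 types:

#include <iostream>

// Toy register type; the real code uses V8's Register and RegList.
struct Reg { const char* name; };

// Per-architecture traits supply the ABI registers...
struct ToyMipsTraits {
  static Reg ReturnValueReg() { return {"v0"}; }
  static Reg ContextReg() { return {"cp"}; }
  static int CRegisterParametersLength() { return 8; }
};

// ...and an architecture-neutral helper consumes them, so the descriptor
// construction logic is written once.
template <typename Traits>
struct ToyLinkageHelper {
  static void Describe() {
    std::cout << "return value in " << Traits::ReturnValueReg().name
              << ", context in " << Traits::ContextReg().name << ", "
              << Traits::CRegisterParametersLength()
              << " register parameters\n";
  }
};

int main() { ToyLinkageHelper<ToyMipsTraits>::Describe(); }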
diff --git a/src/compiler/move-optimizer.cc b/src/compiler/move-optimizer.cc
new file mode 100644
index 0000000..330f32f
--- /dev/null
+++ b/src/compiler/move-optimizer.cc
@@ -0,0 +1,205 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/move-optimizer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+MoveOptimizer::MoveOptimizer(Zone* local_zone, InstructionSequence* code)
+    : local_zone_(local_zone),
+      code_(code),
+      temp_vector_0_(local_zone),
+      temp_vector_1_(local_zone) {}
+
+
+void MoveOptimizer::Run() {
+  // First smash all consecutive moves into the leftmost move slot.
+  for (auto* block : code()->instruction_blocks()) {
+    GapInstruction* prev_gap = nullptr;
+    for (int index = block->code_start(); index < block->code_end(); ++index) {
+      auto instr = code()->instructions()[index];
+      if (!instr->IsGapMoves()) {
+        if (instr->IsSourcePosition() || instr->IsNop()) continue;
+        FinalizeMoves(&temp_vector_0_, &temp_vector_1_, prev_gap);
+        prev_gap = nullptr;
+        continue;
+      }
+      auto gap = GapInstruction::cast(instr);
+      // Find first non-empty slot.
+      int i = GapInstruction::FIRST_INNER_POSITION;
+      for (; i <= GapInstruction::LAST_INNER_POSITION; i++) {
+        auto move = gap->parallel_moves()[i];
+        if (move == nullptr) continue;
+        auto move_ops = move->move_operands();
+        auto op = move_ops->begin();
+        for (; op != move_ops->end(); ++op) {
+          if (!op->IsRedundant()) break;
+        }
+        if (op == move_ops->end()) {
+          move_ops->Rewind(0);  // Clear this redundant move.
+        } else {
+          break;  // Found index of first non-redundant move.
+        }
+      }
+      // Nothing to do here.
+      if (i == GapInstruction::LAST_INNER_POSITION + 1) {
+        if (prev_gap != nullptr) {
+          // Slide prev_gap down so we always know where to look for it.
+          std::swap(prev_gap->parallel_moves()[0], gap->parallel_moves()[0]);
+          prev_gap = gap;
+        }
+        continue;
+      }
+      // Move the first non-empty gap to position 0.
+      std::swap(gap->parallel_moves()[0], gap->parallel_moves()[i]);
+      auto left = gap->parallel_moves()[0];
+      // Compress everything into position 0.
+      for (++i; i <= GapInstruction::LAST_INNER_POSITION; ++i) {
+        auto move = gap->parallel_moves()[i];
+        if (move == nullptr) continue;
+        CompressMoves(&temp_vector_0_, left, move);
+      }
+      if (prev_gap != nullptr) {
+        // Smash left into prev_gap, killing left.
+        auto pred_moves = prev_gap->parallel_moves()[0];
+        CompressMoves(&temp_vector_0_, pred_moves, left);
+        std::swap(prev_gap->parallel_moves()[0], gap->parallel_moves()[0]);
+      }
+      prev_gap = gap;
+    }
+    FinalizeMoves(&temp_vector_0_, &temp_vector_1_, prev_gap);
+  }
+}
+
+
+static MoveOperands* PrepareInsertAfter(ParallelMove* left, MoveOperands* move,
+                                        Zone* zone) {
+  auto move_ops = left->move_operands();
+  MoveOperands* replacement = nullptr;
+  MoveOperands* to_eliminate = nullptr;
+  for (auto curr = move_ops->begin(); curr != move_ops->end(); ++curr) {
+    if (curr->IsEliminated()) continue;
+    if (curr->destination()->Equals(move->source())) {
+      DCHECK_EQ(nullptr, replacement);
+      replacement = curr;
+      if (to_eliminate != nullptr) break;
+    } else if (curr->destination()->Equals(move->destination())) {
+      DCHECK_EQ(nullptr, to_eliminate);
+      to_eliminate = curr;
+      if (replacement != nullptr) break;
+    }
+  }
+  DCHECK(!(replacement == to_eliminate && replacement != nullptr));
+  if (replacement != nullptr) {
+    auto new_source = new (zone) InstructionOperand(
+        replacement->source()->kind(), replacement->source()->index());
+    move->set_source(new_source);
+  }
+  return to_eliminate;
+}
+
+
+void MoveOptimizer::CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
+                                  ParallelMove* right) {
+  DCHECK(eliminated->empty());
+  auto move_ops = right->move_operands();
+  // Modify the right moves in place and collect moves that will be killed by
+  // merging the two gaps.
+  for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
+    if (op->IsRedundant()) continue;
+    MoveOperands* to_eliminate = PrepareInsertAfter(left, op, code_zone());
+    if (to_eliminate != nullptr) {
+      eliminated->push_back(to_eliminate);
+    }
+  }
+  // Eliminate dead moves.  Must happen before insertion of new moves as the
+  // contents of eliminated are pointers into a list.
+  for (auto to_eliminate : *eliminated) {
+    to_eliminate->Eliminate();
+  }
+  eliminated->clear();
+  // Add all possibly modified moves from right side.
+  for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
+    if (op->IsRedundant()) continue;
+    left->move_operands()->Add(*op, code_zone());
+  }
+  // Nuke right.
+  move_ops->Rewind(0);
+}
+
+
+void MoveOptimizer::FinalizeMoves(MoveOpVector* loads, MoveOpVector* new_moves,
+                                  GapInstruction* gap) {
+  DCHECK(loads->empty());
+  DCHECK(new_moves->empty());
+  if (gap == nullptr) return;
+  // Split multiple loads of the same constant or stack slot off into the
+  // second slot and keep remaining moves in the first slot.
+  auto move_ops = gap->parallel_moves()[0]->move_operands();
+  for (auto move = move_ops->begin(); move != move_ops->end(); ++move) {
+    if (move->IsRedundant()) {
+      move->Eliminate();
+      continue;
+    }
+    if (!(move->source()->IsConstant() || move->source()->IsStackSlot() ||
+          move->source()->IsDoubleStackSlot()))
+      continue;
+    // Search for existing move to this slot.
+    MoveOperands* found = nullptr;
+    for (auto load : *loads) {
+      if (load->source()->Equals(move->source())) {
+        found = load;
+        break;
+      }
+    }
+    // Not found so insert.
+    if (found == nullptr) {
+      loads->push_back(move);
+      // Replace source with copy for later use.
+      auto dest = move->destination();
+      move->set_destination(new (code_zone())
+                            InstructionOperand(dest->kind(), dest->index()));
+      continue;
+    }
+    if ((found->destination()->IsStackSlot() ||
+         found->destination()->IsDoubleStackSlot()) &&
+        !(move->destination()->IsStackSlot() ||
+          move->destination()->IsDoubleStackSlot())) {
+      // Found a better source for this load.  Smash it in place to affect other
+      // loads that have already been split.
+      InstructionOperand::Kind found_kind = found->destination()->kind();
+      int found_index = found->destination()->index();
+      auto next_dest =
+          new (code_zone()) InstructionOperand(found_kind, found_index);
+      auto dest = move->destination();
+      found->destination()->ConvertTo(dest->kind(), dest->index());
+      move->set_destination(next_dest);
+    }
+    // Move from the load's destination.
+    move->set_source(found->destination());
+    new_moves->push_back(move);
+  }
+  loads->clear();
+  if (new_moves->empty()) return;
+  // Insert all new moves into slot 1.
+  auto slot_1 = gap->GetOrCreateParallelMove(
+      static_cast<GapInstruction::InnerPosition>(1), code_zone());
+  DCHECK(slot_1->move_operands()->is_empty());
+  slot_1->move_operands()->AddBlock(MoveOperands(nullptr, nullptr),
+                                    static_cast<int>(new_moves->size()),
+                                    code_zone());
+  auto it = slot_1->move_operands()->begin();
+  for (auto new_move : *new_moves) {
+    std::swap(*new_move, *it);
+    ++it;
+  }
+  DCHECK_EQ(it, slot_1->move_operands()->end());
+  new_moves->clear();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
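The subtle part of CompressMoves is PrepareInsertAfter: a parallel move reads all sources before writing any destination, so folding a later gap's move into an earlier one must (a) kill any existing move to the same destination and (b) redirect the incoming source through any existing move that defines it. A standalone sketch of those two rules, using a toy Move struct rather than V8's MoveOperands:

#include <iostream>
#include <optional>
#include <string>
#include <vector>

struct Move { std::string src, dst; bool eliminated = false; };

// Merge {move} into {group} while preserving parallel-move semantics.
void InsertAfter(std::vector<Move>& group, Move move) {
  std::optional<std::string> new_src;
  for (auto& m : group) {
    if (m.eliminated) continue;
    if (m.dst == move.src) new_src = m.src;           // read through earlier move
    else if (m.dst == move.dst) m.eliminated = true;  // overwritten, kill it
  }
  if (new_src) move.src = *new_src;
  group.push_back(move);
}

int main() {
  std::vector<Move> group = {{"r1", "r2"}};
  InsertAfter(group, {"r2", "r3"});  // reads r2, which the group writes
  for (const auto& m : group)
    if (!m.eliminated) std::cout << m.src << " -> " << m.dst << "\n";
  // Prints: r1 -> r2, then r1 -> r3 (source redirected through r1 -> r2).
}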
diff --git a/src/compiler/move-optimizer.h b/src/compiler/move-optimizer.h
new file mode 100644
index 0000000..bbce686
--- /dev/null
+++ b/src/compiler/move-optimizer.h
@@ -0,0 +1,44 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MOVE_OPTIMIZER_
+#define V8_COMPILER_MOVE_OPTIMIZER_
+
+#include "src/compiler/instruction.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class MoveOptimizer FINAL {
+ public:
+  MoveOptimizer(Zone* local_zone, InstructionSequence* code);
+  void Run();
+
+ private:
+  typedef ZoneVector<MoveOperands*> MoveOpVector;
+
+  InstructionSequence* code() const { return code_; }
+  Zone* local_zone() const { return local_zone_; }
+  Zone* code_zone() const { return code()->zone(); }
+
+  void CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
+                     ParallelMove* right);
+  void FinalizeMoves(MoveOpVector* loads, MoveOpVector* new_moves,
+                     GapInstruction* gap);
+
+  Zone* const local_zone_;
+  InstructionSequence* const code_;
+  MoveOpVector temp_vector_0_;
+  MoveOpVector temp_vector_1_;
+
+  DISALLOW_COPY_AND_ASSIGN(MoveOptimizer);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_MOVE_OPTIMIZER_
diff --git a/src/compiler/node-aux-data-inl.h b/src/compiler/node-aux-data-inl.h
index 79f1abf..d8db4b9 100644
--- a/src/compiler/node-aux-data-inl.h
+++ b/src/compiler/node-aux-data-inl.h
@@ -29,7 +29,7 @@
 
 
 template <class T>
-T NodeAuxData<T>::Get(Node* node) {
+T NodeAuxData<T>::Get(Node* node) const {
   int id = node->id();
   if (id >= static_cast<int>(aux_data_.size())) {
     return T();
diff --git a/src/compiler/node-aux-data.h b/src/compiler/node-aux-data.h
index 7acce33..a08dc58 100644
--- a/src/compiler/node-aux-data.h
+++ b/src/compiler/node-aux-data.h
@@ -21,7 +21,7 @@
   inline explicit NodeAuxData(Zone* zone);
 
   inline void Set(Node* node, const T& data);
-  inline T Get(Node* node);
+  inline T Get(Node* node) const;
 
  private:
   ZoneVector<T> aux_data_;
diff --git a/src/compiler/node-cache.cc b/src/compiler/node-cache.cc
index 7cda167..92a3fa0 100644
--- a/src/compiler/node-cache.cc
+++ b/src/compiler/node-cache.cc
@@ -4,65 +4,51 @@
 
 #include "src/compiler/node-cache.h"
 
+#include <cstring>
+
+#include "src/zone.h"
+#include "src/zone-containers.h"
+
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-#define INITIAL_SIZE 16
-#define LINEAR_PROBE 5
+namespace {
 
-template <typename Key>
-int32_t NodeCacheHash(Key key) {
-  UNIMPLEMENTED();
-  return 0;
-}
+enum { kInitialSize = 16u, kLinearProbe = 5u };
 
-template <>
-inline int32_t NodeCacheHash(int32_t key) {
-  return ComputeIntegerHash(key, 0);
-}
+}  // namespace
 
 
-template <>
-inline int32_t NodeCacheHash(int64_t key) {
-  return ComputeLongHash(key);
-}
+template <typename Key, typename Hash, typename Pred>
+struct NodeCache<Key, Hash, Pred>::Entry {
+  Key key_;
+  Node* value_;
+};
 
 
-template <>
-inline int32_t NodeCacheHash(double key) {
-  return ComputeLongHash(bit_cast<int64_t>(key));
-}
-
-
-template <>
-inline int32_t NodeCacheHash(void* key) {
-  return ComputePointerHash(key);
-}
-
-
-template <typename Key>
-bool NodeCache<Key>::Resize(Zone* zone) {
+template <typename Key, typename Hash, typename Pred>
+bool NodeCache<Key, Hash, Pred>::Resize(Zone* zone) {
   if (size_ >= max_) return false;  // Don't grow past the maximum size.
 
   // Allocate a new block of entries 4x the size.
   Entry* old_entries = entries_;
-  int old_size = size_ + LINEAR_PROBE;
-  size_ = size_ * 4;
-  int num_entries = size_ + LINEAR_PROBE;
-  entries_ = zone->NewArray<Entry>(num_entries);
+  size_t old_size = size_ + kLinearProbe;
+  size_ *= 4;
+  size_t num_entries = size_ + kLinearProbe;
+  entries_ = zone->NewArray<Entry>(static_cast<int>(num_entries));
   memset(entries_, 0, sizeof(Entry) * num_entries);
 
   // Insert the old entries into the new block.
-  for (int i = 0; i < old_size; i++) {
+  for (size_t i = 0; i < old_size; ++i) {
     Entry* old = &old_entries[i];
-    if (old->value_ != NULL) {
-      int hash = NodeCacheHash(old->key_);
-      int start = hash & (size_ - 1);
-      int end = start + LINEAR_PROBE;
-      for (int j = start; j < end; j++) {
+    if (old->value_) {
+      size_t hash = hash_(old->key_);
+      size_t start = hash & (size_ - 1);
+      size_t end = start + kLinearProbe;
+      for (size_t j = start; j < end; ++j) {
         Entry* entry = &entries_[j];
-        if (entry->value_ == NULL) {
+        if (!entry->value_) {
           entry->key_ = old->key_;
           entry->value_ = old->value_;
           break;
@@ -74,28 +60,28 @@
 }
 
 
-template <typename Key>
-Node** NodeCache<Key>::Find(Zone* zone, Key key) {
-  int32_t hash = NodeCacheHash(key);
-  if (entries_ == NULL) {
+template <typename Key, typename Hash, typename Pred>
+Node** NodeCache<Key, Hash, Pred>::Find(Zone* zone, Key key) {
+  size_t hash = hash_(key);
+  if (!entries_) {
     // Allocate the initial entries and insert the first entry.
-    int num_entries = INITIAL_SIZE + LINEAR_PROBE;
-    entries_ = zone->NewArray<Entry>(num_entries);
-    size_ = INITIAL_SIZE;
+    size_t num_entries = kInitialSize + kLinearProbe;
+    entries_ = zone->NewArray<Entry>(static_cast<int>(num_entries));
+    size_ = kInitialSize;
     memset(entries_, 0, sizeof(Entry) * num_entries);
-    Entry* entry = &entries_[hash & (INITIAL_SIZE - 1)];
+    Entry* entry = &entries_[hash & (kInitialSize - 1)];
     entry->key_ = key;
     return &entry->value_;
   }
 
-  while (true) {
+  for (;;) {
     // Search up to N entries after (linear probing).
-    int start = hash & (size_ - 1);
-    int end = start + LINEAR_PROBE;
-    for (int i = start; i < end; i++) {
+    size_t start = hash & (size_ - 1);
+    size_t end = start + kLinearProbe;
+    for (size_t i = start; i < end; i++) {
       Entry* entry = &entries_[i];
-      if (entry->key_ == key) return &entry->value_;
-      if (entry->value_ == NULL) {
+      if (pred_(entry->key_, key)) return &entry->value_;
+      if (!entry->value_) {
         entry->key_ = key;
         return &entry->value_;
       }
@@ -107,14 +93,28 @@
   // If resized to maximum and still didn't find space, overwrite an entry.
   Entry* entry = &entries_[hash & (size_ - 1)];
   entry->key_ = key;
-  entry->value_ = NULL;
+  entry->value_ = nullptr;
   return &entry->value_;
 }
 
 
-template class NodeCache<int64_t>;
+template <typename Key, typename Hash, typename Pred>
+void NodeCache<Key, Hash, Pred>::GetCachedNodes(ZoneVector<Node*>* nodes) {
+  if (entries_) {
+    for (size_t i = 0; i < size_ + kLinearProbe; i++) {
+      if (entries_[i].value_) nodes->push_back(entries_[i].value_);
+    }
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Instantiations
+
+
 template class NodeCache<int32_t>;
-template class NodeCache<void*>;
-}
-}
-}  // namespace v8::internal::compiler
+template class NodeCache<int64_t>;
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
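The rewritten cache keeps the original structure: hash each key to a home slot, probe at most kLinearProbe slots, grow 4x when the window is full, and overwrite once the maximum size is reached. A standalone sketch of the probe loop and the slot-pointer contract; std::hash and a plain vector stand in for the Hash functor and the zone array, and the growth path is collapsed into an overwrite:

#include <cstdint>
#include <functional>
#include <iostream>
#include <vector>

constexpr size_t kToyInitialSize = 16;
constexpr size_t kToyLinearProbe = 5;

struct ToyCache {
  // value == 0 means "empty", standing in for a null Node*.
  struct Entry { int64_t key = 0; uintptr_t value = 0; };
  std::vector<Entry> entries =
      std::vector<Entry>(kToyInitialSize + kToyLinearProbe);
  size_t size = kToyInitialSize;

  uintptr_t* Find(int64_t key) {
    size_t start = std::hash<int64_t>{}(key) & (size - 1);
    for (size_t i = start; i < start + kToyLinearProbe; ++i) {
      Entry& e = entries[i];
      if (e.value && e.key == key) return &e.value;    // hit
      if (!e.value) { e.key = key; return &e.value; }  // claim empty slot
    }
    // The real cache grows 4x here (and only overwrites at max size);
    // this sketch just overwrites the home slot.
    entries[start] = {key, 0};
    return &entries[start].value;
  }
};

int main() {
  ToyCache cache;
  uintptr_t* loc = cache.Find(42);
  if (*loc == 0) *loc = 0xdead;  // miss: the caller installs the node
  std::cout << std::hex << *cache.Find(42) << "\n";  // hit: prints dead
}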
diff --git a/src/compiler/node-cache.h b/src/compiler/node-cache.h
index 35352ea..b123922 100644
--- a/src/compiler/node-cache.h
+++ b/src/compiler/node-cache.h
@@ -5,20 +5,33 @@
 #ifndef V8_COMPILER_NODE_CACHE_H_
 #define V8_COMPILER_NODE_CACHE_H_
 
-#include "src/v8.h"
-
-#include "src/compiler/node.h"
+#include "src/base/functional.h"
+#include "src/base/macros.h"
 
 namespace v8 {
 namespace internal {
+
+// Forward declarations.
+class Zone;
+template <typename>
+class ZoneVector;
+
+
 namespace compiler {
 
+// Forward declarations.
+class Node;
+
+
 // A cache for nodes based on a key. Useful for implementing canonicalization of
 // nodes such as constants, parameters, etc.
-template <typename Key>
-class NodeCache {
+template <typename Key, typename Hash = base::hash<Key>,
+          typename Pred = std::equal_to<Key> >
+class NodeCache FINAL {
  public:
-  explicit NodeCache(int max = 256) : entries_(NULL), size_(0), max_(max) {}
+  explicit NodeCache(unsigned max = 256)
+      : entries_(nullptr), size_(0), max_(max) {}
+  ~NodeCache() {}
 
   // Search for node associated with {key} and return a pointer to a memory
   // location in this cache that stores an entry for the key. If the location
@@ -29,25 +42,34 @@
   // too full or encounters too many hash collisions.
   Node** Find(Zone* zone, Key key);
 
+  // Appends all nodes from this cache to {nodes}.
+  void GetCachedNodes(ZoneVector<Node*>* nodes);
+
  private:
-  struct Entry {
-    Key key_;
-    Node* value_;
-  };
+  struct Entry;
 
   Entry* entries_;  // lazily-allocated hash entries.
-  int32_t size_;
-  int32_t max_;
+  size_t size_;
+  size_t max_;
+  Hash hash_;
+  Pred pred_;
 
   bool Resize(Zone* zone);
+
+  DISALLOW_COPY_AND_ASSIGN(NodeCache);
 };
 
 // Various default cache types.
-typedef NodeCache<int64_t> Int64NodeCache;
 typedef NodeCache<int32_t> Int32NodeCache;
-typedef NodeCache<void*> PtrNodeCache;
-}
-}
-}  // namespace v8::internal::compiler
+typedef NodeCache<int64_t> Int64NodeCache;
+#if V8_HOST_ARCH_32_BIT
+typedef Int32NodeCache IntPtrNodeCache;
+#else
+typedef Int64NodeCache IntPtrNodeCache;
+#endif
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_COMPILER_NODE_CACHE_H_
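The Find() contract documented above deserves a usage note: the caller receives a pointer to a cache slot, and a null slot means the node is not cached yet and should be created and stored through that pointer. A hedged sketch of the resulting canonicalization idiom, with a FakeNode struct and an unordered_map standing in for Node and the probe table:

#include <cstdint>
#include <iostream>
#include <unordered_map>

struct FakeNode { int64_t key; };

std::unordered_map<int64_t, FakeNode*> table;

// operator[] default-initializes the slot to nullptr on first access.
FakeNode** Find(int64_t key) { return &table[key]; }

FakeNode* GetCachedNode(int64_t key) {
  FakeNode** loc = Find(key);
  if (*loc == nullptr) *loc = new FakeNode{key};  // canonicalize on miss
  return *loc;
}

int main() {
  // Repeated lookups for the same key yield the same canonical node.
  std::cout << (GetCachedNode(7) == GetCachedNode(7)) << "\n";  // prints 1
}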
diff --git a/src/compiler/node-matchers.h b/src/compiler/node-matchers.h
index e62eaee..fc11a0a 100644
--- a/src/compiler/node-matchers.h
+++ b/src/compiler/node-matchers.h
@@ -5,8 +5,11 @@
 #ifndef V8_COMPILER_NODE_MATCHERS_H_
 #define V8_COMPILER_NODE_MATCHERS_H_
 
+#include <cmath>
+
 #include "src/compiler/node.h"
 #include "src/compiler/operator.h"
+#include "src/unique.h"
 
 namespace v8 {
 namespace internal {
@@ -38,6 +41,8 @@
 // A pattern matcher for arbitrary value constants.
 template <typename T, IrOpcode::Value kOpcode>
 struct ValueMatcher : public NodeMatcher {
+  typedef T ValueType;
+
   explicit ValueMatcher(Node* node)
       : NodeMatcher(node), value_(), has_value_(opcode() == kOpcode) {
     if (has_value_) {
@@ -65,21 +70,62 @@
 };
 
 
+template <>
+inline ValueMatcher<int64_t, IrOpcode::kInt64Constant>::ValueMatcher(Node* node)
+    : NodeMatcher(node), value_(), has_value_(false) {
+  if (opcode() == IrOpcode::kInt32Constant) {
+    value_ = OpParameter<int32_t>(node);
+    has_value_ = true;
+  } else if (opcode() == IrOpcode::kInt64Constant) {
+    value_ = OpParameter<int64_t>(node);
+    has_value_ = true;
+  }
+}
+
+
+template <>
+inline ValueMatcher<uint64_t, IrOpcode::kInt64Constant>::ValueMatcher(
+    Node* node)
+    : NodeMatcher(node), value_(), has_value_(false) {
+  if (opcode() == IrOpcode::kInt32Constant) {
+    value_ = OpParameter<uint32_t>(node);
+    has_value_ = true;
+  } else if (opcode() == IrOpcode::kInt64Constant) {
+    value_ = OpParameter<uint64_t>(node);
+    has_value_ = true;
+  }
+}
+
+
 // A pattern matcher for integer constants.
 template <typename T, IrOpcode::Value kOpcode>
 struct IntMatcher FINAL : public ValueMatcher<T, kOpcode> {
   explicit IntMatcher(Node* node) : ValueMatcher<T, kOpcode>(node) {}
 
+  bool IsMultipleOf(T n) const {
+    return this->HasValue() && (this->Value() % n) == 0;
+  }
   bool IsPowerOf2() const {
     return this->HasValue() && this->Value() > 0 &&
            (this->Value() & (this->Value() - 1)) == 0;
   }
+  bool IsNegativePowerOf2() const {
+    return this->HasValue() && this->Value() < 0 &&
+           (-this->Value() & (-this->Value() - 1)) == 0;
+  }
 };
 
 typedef IntMatcher<int32_t, IrOpcode::kInt32Constant> Int32Matcher;
 typedef IntMatcher<uint32_t, IrOpcode::kInt32Constant> Uint32Matcher;
 typedef IntMatcher<int64_t, IrOpcode::kInt64Constant> Int64Matcher;
 typedef IntMatcher<uint64_t, IrOpcode::kInt64Constant> Uint64Matcher;
+#if V8_HOST_ARCH_32_BIT
+typedef Int32Matcher IntPtrMatcher;
+typedef Uint32Matcher UintPtrMatcher;
+#else
+typedef Int64Matcher IntPtrMatcher;
+typedef Uint64Matcher UintPtrMatcher;
+#endif
 
 
 // A pattern matcher for floating point constants.
@@ -87,6 +133,9 @@
 struct FloatMatcher FINAL : public ValueMatcher<T, kOpcode> {
   explicit FloatMatcher(Node* node) : ValueMatcher<T, kOpcode>(node) {}
 
+  bool IsMinusZero() const {
+    return this->Is(0.0) && std::signbit(this->Value());
+  }
   bool IsNaN() const { return this->HasValue() && std::isnan(this->Value()); }
 };
 
@@ -108,11 +157,18 @@
 // right hand sides of a binary operation and can put constants on the right
 // if they appear on the left hand side of a commutative operation.
 template <typename Left, typename Right>
-struct BinopMatcher FINAL : public NodeMatcher {
+struct BinopMatcher : public NodeMatcher {
   explicit BinopMatcher(Node* node)
       : NodeMatcher(node), left_(InputAt(0)), right_(InputAt(1)) {
     if (HasProperty(Operator::kCommutative)) PutConstantOnRight();
   }
+  BinopMatcher(Node* node, bool allow_input_swap)
+      : NodeMatcher(node), left_(InputAt(0)), right_(InputAt(1)) {
+    if (allow_input_swap) PutConstantOnRight();
+  }
+
+  typedef Left LeftMatcher;
+  typedef Right RightMatcher;
 
   const Left& left() const { return left_; }
   const Right& right() const { return right_; }
@@ -120,12 +176,17 @@
   bool IsFoldable() const { return left().HasValue() && right().HasValue(); }
   bool LeftEqualsRight() const { return left().node() == right().node(); }
 
+ protected:
+  void SwapInputs() {
+    std::swap(left_, right_);
+    node()->ReplaceInput(0, left().node());
+    node()->ReplaceInput(1, right().node());
+  }
+
  private:
   void PutConstantOnRight() {
     if (left().HasValue() && !right().HasValue()) {
-      std::swap(left_, right_);
-      node()->ReplaceInput(0, left().node());
-      node()->ReplaceInput(1, right().node());
+      SwapInputs();
     }
   }
 
@@ -137,7 +198,318 @@
 typedef BinopMatcher<Uint32Matcher, Uint32Matcher> Uint32BinopMatcher;
 typedef BinopMatcher<Int64Matcher, Int64Matcher> Int64BinopMatcher;
 typedef BinopMatcher<Uint64Matcher, Uint64Matcher> Uint64BinopMatcher;
+typedef BinopMatcher<IntPtrMatcher, IntPtrMatcher> IntPtrBinopMatcher;
+typedef BinopMatcher<UintPtrMatcher, UintPtrMatcher> UintPtrBinopMatcher;
 typedef BinopMatcher<Float64Matcher, Float64Matcher> Float64BinopMatcher;
+typedef BinopMatcher<NumberMatcher, NumberMatcher> NumberBinopMatcher;
+
+
+template <class BinopMatcher, IrOpcode::Value kMulOpcode,
+          IrOpcode::Value kShiftOpcode>
+struct ScaleMatcher {
+  explicit ScaleMatcher(Node* node, bool allow_power_of_two_plus_one = false)
+      : scale_(-1), power_of_two_plus_one_(false) {
+    if (node->InputCount() < 2) return;
+    BinopMatcher m(node);
+    if (node->opcode() == kShiftOpcode) {
+      if (m.right().HasValue()) {
+        typename BinopMatcher::RightMatcher::ValueType value =
+            m.right().Value();
+        if (value >= 0 && value <= 3) {
+          scale_ = static_cast<int>(value);
+        }
+      }
+    } else if (node->opcode() == kMulOpcode) {
+      if (m.right().HasValue()) {
+        typename BinopMatcher::RightMatcher::ValueType value =
+            m.right().Value();
+        if (value == 1) {
+          scale_ = 0;
+        } else if (value == 2) {
+          scale_ = 1;
+        } else if (value == 4) {
+          scale_ = 2;
+        } else if (value == 8) {
+          scale_ = 3;
+        } else if (allow_power_of_two_plus_one) {
+          if (value == 3) {
+            scale_ = 1;
+            power_of_two_plus_one_ = true;
+          } else if (value == 5) {
+            scale_ = 2;
+            power_of_two_plus_one_ = true;
+          } else if (value == 9) {
+            scale_ = 3;
+            power_of_two_plus_one_ = true;
+          }
+        }
+      }
+    }
+  }
+
+  bool matches() const { return scale_ != -1; }
+  int scale() const { return scale_; }
+  bool power_of_two_plus_one() const { return power_of_two_plus_one_; }
+
+ private:
+  int scale_;
+  bool power_of_two_plus_one_;
+};
+
+typedef ScaleMatcher<Int32BinopMatcher, IrOpcode::kInt32Mul,
+                     IrOpcode::kWord32Shl> Int32ScaleMatcher;
+typedef ScaleMatcher<Int64BinopMatcher, IrOpcode::kInt64Mul,
+                     IrOpcode::kWord64Shl> Int64ScaleMatcher;
+
+
+template <class BinopMatcher, IrOpcode::Value kAddOpcode,
+          IrOpcode::Value kMulOpcode, IrOpcode::Value kShiftOpcode>
+struct AddMatcher : public BinopMatcher {
+  static const IrOpcode::Value kOpcode = kAddOpcode;
+  typedef ScaleMatcher<BinopMatcher, kMulOpcode, kShiftOpcode> Matcher;
+
+  AddMatcher(Node* node, bool allow_input_swap)
+      : BinopMatcher(node, allow_input_swap),
+        scale_(-1),
+        power_of_two_plus_one_(false) {
+    Initialize(node, allow_input_swap);
+  }
+  explicit AddMatcher(Node* node)
+      : BinopMatcher(node, node->op()->HasProperty(Operator::kCommutative)),
+        scale_(-1),
+        power_of_two_plus_one_(false) {
+    Initialize(node, node->op()->HasProperty(Operator::kCommutative));
+  }
+
+  bool HasIndexInput() const { return scale_ != -1; }
+  Node* IndexInput() const {
+    DCHECK(HasIndexInput());
+    return this->left().node()->InputAt(0);
+  }
+  int scale() const {
+    DCHECK(HasIndexInput());
+    return scale_;
+  }
+  bool power_of_two_plus_one() const { return power_of_two_plus_one_; }
+
+ private:
+  void Initialize(Node* node, bool allow_input_swap) {
+    Matcher left_matcher(this->left().node(), true);
+    if (left_matcher.matches()) {
+      scale_ = left_matcher.scale();
+      power_of_two_plus_one_ = left_matcher.power_of_two_plus_one();
+      return;
+    }
+
+    if (!allow_input_swap) {
+      return;
+    }
+
+    Matcher right_matcher(this->right().node(), true);
+    if (right_matcher.matches()) {
+      scale_ = right_matcher.scale();
+      power_of_two_plus_one_ = right_matcher.power_of_two_plus_one();
+      this->SwapInputs();
+      return;
+    }
+
+    if (this->right().opcode() == kAddOpcode &&
+        this->left().opcode() != kAddOpcode) {
+      this->SwapInputs();
+    }
+  }
+
+  int scale_;
+  bool power_of_two_plus_one_;
+};
+
+typedef AddMatcher<Int32BinopMatcher, IrOpcode::kInt32Add, IrOpcode::kInt32Mul,
+                   IrOpcode::kWord32Shl> Int32AddMatcher;
+typedef AddMatcher<Int64BinopMatcher, IrOpcode::kInt64Add, IrOpcode::kInt64Mul,
+                   IrOpcode::kWord64Shl> Int64AddMatcher;
+
+
+template <class AddMatcher>
+struct BaseWithIndexAndDisplacementMatcher {
+  BaseWithIndexAndDisplacementMatcher(Node* node, bool allow_input_swap)
+      : matches_(false),
+        index_(NULL),
+        scale_(0),
+        base_(NULL),
+        displacement_(NULL) {
+    Initialize(node, allow_input_swap);
+  }
+
+  explicit BaseWithIndexAndDisplacementMatcher(Node* node)
+      : matches_(false),
+        index_(NULL),
+        scale_(0),
+        base_(NULL),
+        displacement_(NULL) {
+    Initialize(node, node->op()->HasProperty(Operator::kCommutative));
+  }
+
+  bool matches() const { return matches_; }
+  Node* index() const { return index_; }
+  int scale() const { return scale_; }
+  Node* base() const { return base_; }
+  Node* displacement() const { return displacement_; }
+
+ private:
+  bool matches_;
+  Node* index_;
+  int scale_;
+  Node* base_;
+  Node* displacement_;
+
+  void Initialize(Node* node, bool allow_input_swap) {
+    // The BaseWithIndexAndDisplacementMatcher canonicalizes the order of
+    // displacements and scale factors that are used as inputs, so instead of
+    // enumerating all possible patterns by brute force, checking for node
+    // clusters using the following templates in the following order suffices to
+    // find all of the interesting cases (S = index * scale, B = base input, D =
+    // displacement input):
+    // (S + (B + D))
+    // (S + (B + B))
+    // (S + D)
+    // (S + B)
+    // ((S + D) + B)
+    // ((S + B) + D)
+    // ((B + D) + B)
+    // ((B + B) + D)
+    // (B + D)
+    // (B + B)
+    if (node->InputCount() < 2) return;
+    AddMatcher m(node, allow_input_swap);
+    Node* left = m.left().node();
+    Node* right = m.right().node();
+    Node* displacement = NULL;
+    Node* base = NULL;
+    Node* index = NULL;
+    Node* scale_expression = NULL;
+    bool power_of_two_plus_one = false;
+    int scale = 0;
+    if (m.HasIndexInput() && left->OwnedBy(node)) {
+      index = m.IndexInput();
+      scale = m.scale();
+      scale_expression = left;
+      power_of_two_plus_one = m.power_of_two_plus_one();
+      if (right->opcode() == AddMatcher::kOpcode && right->OwnedBy(node)) {
+        AddMatcher right_matcher(right);
+        if (right_matcher.right().HasValue()) {
+          // (S + (B + D))
+          base = right_matcher.left().node();
+          displacement = right_matcher.right().node();
+        } else {
+          // (S + (B + B))
+          base = right;
+        }
+      } else if (m.right().HasValue()) {
+        // (S + D)
+        displacement = right;
+      } else {
+        // (S + B)
+        base = right;
+      }
+    } else {
+      if (left->opcode() == AddMatcher::kOpcode && left->OwnedBy(node)) {
+        AddMatcher left_matcher(left);
+        Node* left_left = left_matcher.left().node();
+        Node* left_right = left_matcher.right().node();
+        if (left_matcher.HasIndexInput() && left_left->OwnedBy(left)) {
+          if (left_matcher.right().HasValue()) {
+            // ((S + D) + B)
+            index = left_matcher.IndexInput();
+            scale = left_matcher.scale();
+            scale_expression = left_left;
+            power_of_two_plus_one = left_matcher.power_of_two_plus_one();
+            displacement = left_right;
+            base = right;
+          } else if (m.right().HasValue()) {
+            // ((S + B) + D)
+            index = left_matcher.IndexInput();
+            scale = left_matcher.scale();
+            scale_expression = left_left;
+            power_of_two_plus_one = left_matcher.power_of_two_plus_one();
+            base = left_right;
+            displacement = right;
+          } else {
+            // (B + B)
+            index = left;
+            base = right;
+          }
+        } else {
+          if (left_matcher.right().HasValue()) {
+            // ((B + D) + B)
+            index = left_left;
+            displacement = left_right;
+            base = right;
+          } else if (m.right().HasValue()) {
+            // ((B + B) + D)
+            index = left_left;
+            base = left_right;
+            displacement = right;
+          } else {
+            // (B + B)
+            index = left;
+            base = right;
+          }
+        }
+      } else {
+        if (m.right().HasValue()) {
+          // (B + D)
+          base = left;
+          displacement = right;
+        } else {
+          // (B + B)
+          base = left;
+          index = right;
+        }
+      }
+    }
+    int64_t value = 0;
+    if (displacement != NULL) {
+      switch (displacement->opcode()) {
+        case IrOpcode::kInt32Constant: {
+          value = OpParameter<int32_t>(displacement);
+          break;
+        }
+        case IrOpcode::kInt64Constant: {
+          value = OpParameter<int64_t>(displacement);
+          break;
+        }
+        default:
+          UNREACHABLE();
+          break;
+      }
+      if (value == 0) {
+        displacement = NULL;
+      }
+    }
+    if (power_of_two_plus_one) {
+      if (base != NULL) {
+        // If the scale requires explicitly using the index as the base, but a
+        // base is already part of the match, then the (1 << N + 1) scale factor
+        // can't be folded into the match and the entire index * scale
+        // calculation must be computed separately.
+        index = scale_expression;
+        scale = 0;
+      } else {
+        base = index;
+      }
+    }
+    base_ = base;
+    displacement_ = displacement;
+    index_ = index;
+    scale_ = scale;
+    matches_ = true;
+  }
+};
+
+typedef BaseWithIndexAndDisplacementMatcher<Int32AddMatcher>
+    BaseWithIndexAndDisplacement32Matcher;
+typedef BaseWithIndexAndDisplacementMatcher<Int64AddMatcher>
+    BaseWithIndexAndDisplacement64Matcher;
 
 }  // namespace compiler
 }  // namespace internal
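The ScaleMatcher added above recognizes multiplications that addressing modes can encode: a multiplier of 1, 2, 4 or 8 becomes a shift count of 0 to 3, and 3, 5 or 9 is accepted as "power of two plus one", where the index itself supplies the extra addend (which is why BaseWithIndexAndDisplacementMatcher falls back at the end when a plus-one scale collides with an existing base). A standalone sketch of that mapping, as a toy function rather than the matcher API:

#include <iostream>
#include <utility>

// Map a multiplier to (scale, power_of_two_plus_one); scale -1 means the
// multiplier cannot be encoded, matching the matcher's "no match" state.
std::pair<int, bool> MatchScale(int value) {
  switch (value) {
    case 1: return {0, false};
    case 2: return {1, false};
    case 4: return {2, false};
    case 8: return {3, false};
    case 3: return {1, true};  // index*3 == (index << 1) + index
    case 5: return {2, true};  // index*5 == (index << 2) + index
    case 9: return {3, true};  // index*9 == (index << 3) + index
    default: return {-1, false};
  }
}

int main() {
  auto [scale, plus_one] = MatchScale(9);
  std::cout << scale << " " << plus_one << "\n";  // prints "3 1"
}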
diff --git a/src/compiler/node-properties-inl.h b/src/compiler/node-properties-inl.h
index 3f6d531..0d29614 100644
--- a/src/compiler/node-properties-inl.h
+++ b/src/compiler/node-properties-inl.h
@@ -8,11 +8,9 @@
 #include "src/v8.h"
 
 #include "src/compiler/common-operator.h"
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/opcodes.h"
 #include "src/compiler/operator.h"
-#include "src/compiler/operator-properties-inl.h"
 #include "src/compiler/operator-properties.h"
 
 namespace v8 {
@@ -44,8 +42,7 @@
 
 
 inline int NodeProperties::PastValueIndex(Node* node) {
-  return FirstValueIndex(node) +
-         OperatorProperties::GetValueInputCount(node->op());
+  return FirstValueIndex(node) + node->op()->ValueInputCount();
 }
 
 inline int NodeProperties::PastContextIndex(Node* node) {
@@ -59,13 +56,11 @@
 }
 
 inline int NodeProperties::PastEffectIndex(Node* node) {
-  return FirstEffectIndex(node) +
-         OperatorProperties::GetEffectInputCount(node->op());
+  return FirstEffectIndex(node) + node->op()->EffectInputCount();
 }
 
 inline int NodeProperties::PastControlIndex(Node* node) {
-  return FirstControlIndex(node) +
-         OperatorProperties::GetControlInputCount(node->op());
+  return FirstControlIndex(node) + node->op()->ControlInputCount();
 }
 
 
@@ -73,8 +68,7 @@
 // Input accessors.
 
 inline Node* NodeProperties::GetValueInput(Node* node, int index) {
-  DCHECK(0 <= index &&
-         index < OperatorProperties::GetValueInputCount(node->op()));
+  DCHECK(0 <= index && index < node->op()->ValueInputCount());
   return node->InputAt(FirstValueIndex(node) + index);
 }
 
@@ -89,14 +83,12 @@
 }
 
 inline Node* NodeProperties::GetEffectInput(Node* node, int index) {
-  DCHECK(0 <= index &&
-         index < OperatorProperties::GetEffectInputCount(node->op()));
+  DCHECK(0 <= index && index < node->op()->EffectInputCount());
   return node->InputAt(FirstEffectIndex(node) + index);
 }
 
 inline Node* NodeProperties::GetControlInput(Node* node, int index) {
-  DCHECK(0 <= index &&
-         index < OperatorProperties::GetControlInputCount(node->op()));
+  DCHECK(0 <= index && index < node->op()->ControlInputCount());
   return node->InputAt(FirstControlIndex(node) + index);
 }
 
@@ -108,7 +100,7 @@
 // -----------------------------------------------------------------------------
 // Edge kinds.
 
-inline bool NodeProperties::IsInputRange(Node::Edge edge, int first, int num) {
+inline bool NodeProperties::IsInputRange(Edge edge, int first, int num) {
   // TODO(titzer): edge.index() is linear time;
   // edges maybe need to be marked as value/effect/control.
   if (num == 0) return false;
@@ -116,28 +108,28 @@
   return first <= index && index < first + num;
 }
 
-inline bool NodeProperties::IsValueEdge(Node::Edge edge) {
+inline bool NodeProperties::IsValueEdge(Edge edge) {
   Node* node = edge.from();
   return IsInputRange(edge, FirstValueIndex(node),
-                      OperatorProperties::GetValueInputCount(node->op()));
+                      node->op()->ValueInputCount());
 }
 
-inline bool NodeProperties::IsContextEdge(Node::Edge edge) {
+inline bool NodeProperties::IsContextEdge(Edge edge) {
   Node* node = edge.from();
   return IsInputRange(edge, FirstContextIndex(node),
                       OperatorProperties::GetContextInputCount(node->op()));
 }
 
-inline bool NodeProperties::IsEffectEdge(Node::Edge edge) {
+inline bool NodeProperties::IsEffectEdge(Edge edge) {
   Node* node = edge.from();
   return IsInputRange(edge, FirstEffectIndex(node),
-                      OperatorProperties::GetEffectInputCount(node->op()));
+                      node->op()->EffectInputCount());
 }
 
-inline bool NodeProperties::IsControlEdge(Node::Edge edge) {
+inline bool NodeProperties::IsControlEdge(Edge edge) {
   Node* node = edge.from();
   return IsInputRange(edge, FirstControlIndex(node),
-                      OperatorProperties::GetControlInputCount(node->op()));
+                      node->op()->ControlInputCount());
 }
 
 
@@ -158,7 +150,7 @@
 
 inline void NodeProperties::ReplaceEffectInput(Node* node, Node* effect,
                                                int index) {
-  DCHECK(index < OperatorProperties::GetEffectInputCount(node->op()));
+  DCHECK(index < node->op()->EffectInputCount());
   return node->ReplaceInput(FirstEffectIndex(node) + index, effect);
 }
 
@@ -169,7 +161,7 @@
 }
 
 inline void NodeProperties::RemoveNonValueInputs(Node* node) {
-  node->TrimInputCount(OperatorProperties::GetValueInputCount(node->op()));
+  node->TrimInputCount(node->op()->ValueInputCount());
 }
 
 
@@ -177,19 +169,18 @@
 // {effect}. If {effect == NULL}, then use the effect input to {node}.
 inline void NodeProperties::ReplaceWithValue(Node* node, Node* value,
                                              Node* effect) {
-  DCHECK(!OperatorProperties::HasControlOutput(node->op()));
-  if (effect == NULL && OperatorProperties::HasEffectInput(node->op())) {
+  DCHECK(node->op()->ControlOutputCount() == 0);
+  if (effect == NULL && node->op()->EffectInputCount() > 0) {
     effect = NodeProperties::GetEffectInput(node);
   }
 
   // Requires distinguishing between value and effect edges.
-  UseIter iter = node->uses().begin();
-  while (iter != node->uses().end()) {
-    if (NodeProperties::IsEffectEdge(iter.edge())) {
+  for (Edge edge : node->use_edges()) {
+    if (NodeProperties::IsEffectEdge(edge)) {
       DCHECK_NE(NULL, effect);
-      iter = iter.UpdateToAndIncrement(effect);
+      edge.UpdateTo(effect);
     } else {
-      iter = iter.UpdateToAndIncrement(value);
+      edge.UpdateTo(value);
     }
   }
 }
@@ -198,12 +189,35 @@
 // -----------------------------------------------------------------------------
 // Type Bounds.
 
-inline Bounds NodeProperties::GetBounds(Node* node) { return node->bounds(); }
+inline bool NodeProperties::IsTyped(Node* node) {
+  Bounds bounds = node->bounds();
+  DCHECK((bounds.lower == NULL) == (bounds.upper == NULL));
+  return bounds.upper != NULL;
+}
+
+inline Bounds NodeProperties::GetBounds(Node* node) {
+  DCHECK(IsTyped(node));
+  return node->bounds();
+}
+
+inline void NodeProperties::RemoveBounds(Node* node) {
+  Bounds empty;
+  node->set_bounds(empty);
+}
 
 inline void NodeProperties::SetBounds(Node* node, Bounds b) {
+  DCHECK(b.lower != NULL && b.upper != NULL);
   node->set_bounds(b);
 }
 
+inline bool NodeProperties::AllValueInputsAreTyped(Node* node) {
+  int input_count = node->op()->ValueInputCount();
+  for (int i = 0; i < input_count; ++i) {
+    if (!IsTyped(GetValueInput(node, i))) return false;
+  }
+  return true;
+}
+
 
 }
 }
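The First*/Past* helpers above all encode one invariant: a node's inputs are laid out contiguously as value inputs, then context, frame state, effect and control inputs, so classifying an edge reduces to a range check on its index. A standalone sketch of that layout arithmetic, with a toy operator struct supplying the counts that the real code reads off the Operator:

#include <iostream>

struct ToyOp {
  int value_inputs, context_inputs, frame_state_inputs, effect_inputs,
      control_inputs;
};

// Each section starts where the previous one ends.
int FirstValueIndex(const ToyOp&) { return 0; }
int FirstContextIndex(const ToyOp& op) { return op.value_inputs; }
int FirstFrameStateIndex(const ToyOp& op) {
  return FirstContextIndex(op) + op.context_inputs;
}
int FirstEffectIndex(const ToyOp& op) {
  return FirstFrameStateIndex(op) + op.frame_state_inputs;
}
int FirstControlIndex(const ToyOp& op) {
  return FirstEffectIndex(op) + op.effect_inputs;
}

// Edge classification is a half-open range check on the input index.
bool IsEffectIndex(const ToyOp& op, int index) {
  return FirstEffectIndex(op) <= index && index < FirstControlIndex(op);
}

int main() {
  ToyOp op{2, 1, 0, 1, 1};  // e.g. a call-like node
  std::cout << IsEffectIndex(op, 3) << "\n";  // input 3 is the effect: 1
}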
diff --git a/src/compiler/node-properties.h b/src/compiler/node-properties.h
index 94bd731..025be78 100644
--- a/src/compiler/node-properties.h
+++ b/src/compiler/node-properties.h
@@ -25,10 +25,10 @@
 
   static inline int GetFrameStateIndex(Node* node);
 
-  static inline bool IsValueEdge(Node::Edge edge);
-  static inline bool IsContextEdge(Node::Edge edge);
-  static inline bool IsEffectEdge(Node::Edge edge);
-  static inline bool IsControlEdge(Node::Edge edge);
+  static inline bool IsValueEdge(Edge edge);
+  static inline bool IsContextEdge(Edge edge);
+  static inline bool IsEffectEdge(Edge edge);
+  static inline bool IsControlEdge(Edge edge);
 
   static inline bool IsControl(Node* node);
 
@@ -40,8 +40,11 @@
   static inline void ReplaceWithValue(Node* node, Node* value,
                                       Node* effect = NULL);
 
+  static inline bool IsTyped(Node* node);
   static inline Bounds GetBounds(Node* node);
   static inline void SetBounds(Node* node, Bounds bounds);
+  static inline void RemoveBounds(Node* node);
+  static inline bool AllValueInputsAreTyped(Node* node);
 
   static inline int FirstValueIndex(Node* node);
   static inline int FirstContextIndex(Node* node);
@@ -54,7 +57,7 @@
   static inline int PastEffectIndex(Node* node);
   static inline int PastControlIndex(Node* node);
 
-  static inline bool IsInputRange(Node::Edge edge, int first, int count);
+  static inline bool IsInputRange(Edge edge, int first, int count);
 };
 
 }  // namespace compiler
diff --git a/src/compiler/node.cc b/src/compiler/node.cc
index 7df736e..8f44c24 100644
--- a/src/compiler/node.cc
+++ b/src/compiler/node.cc
@@ -4,12 +4,54 @@
 
 #include "src/compiler/node.h"
 
-#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph.h"
+#include "src/zone.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
+Node::Node(NodeId id, int input_count, int reserved_input_count)
+    : id_(id),
+      bit_field_(InputCountField::encode(input_count) |
+                 ReservedInputCountField::encode(reserved_input_count) |
+                 HasAppendableInputsField::encode(false)),
+      first_use_(nullptr),
+      last_use_(nullptr) {
+  inputs_.static_ = reinterpret_cast<Input*>(this + 1);
+}
+
+
+Node* Node::New(Graph* graph, int input_count, Node** inputs,
+                bool has_extensible_inputs) {
+  size_t node_size = sizeof(Node);
+  int reserve_input_count = has_extensible_inputs ? kDefaultReservedInputs : 0;
+  size_t inputs_size = (input_count + reserve_input_count) * sizeof(Input);
+  size_t uses_size = input_count * sizeof(Use);
+  int size = static_cast<int>(node_size + inputs_size + uses_size);
+  Zone* zone = graph->zone();
+  void* buffer = zone->New(size);
+  Node* result =
+      new (buffer) Node(graph->NextNodeID(), input_count, reserve_input_count);
+  Input* input =
+      reinterpret_cast<Input*>(reinterpret_cast<char*>(buffer) + node_size);
+  Use* use =
+      reinterpret_cast<Use*>(reinterpret_cast<char*>(input) + inputs_size);
+
+  for (int current = 0; current < input_count; ++current) {
+    Node* to = *inputs++;
+    input->to = to;
+    input->use = use;
+    use->input_index = current;
+    use->from = result;
+    to->AppendUse(use);
+    ++use;
+    ++input;
+  }
+  return result;
+}
+
+
 void Node::Kill() {
   DCHECK_NOT_NULL(op());
   RemoveAllInputs();
@@ -42,14 +84,31 @@
 }
 
 
-OStream& operator<<(OStream& os, const Operator& op) { return op.PrintTo(os); }
+int Node::UseCount() const {
+  int use_count = 0;
+  for (const Use* use = first_use_; use; use = use->next) {
+    ++use_count;
+  }
+  return use_count;
+}
 
 
-OStream& operator<<(OStream& os, const Node& n) {
+Node* Node::UseAt(int index) const {
+  DCHECK_LE(0, index);
+  DCHECK_LT(index, UseCount());
+  Use* current = first_use_;
+  while (index-- != 0) {
+    current = current->next;
+  }
+  return current->from;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const Node& n) {
   os << n.id() << ": " << *n.op();
-  if (n.op()->InputCount() != 0) {
+  if (n.InputCount() > 0) {
     os << "(";
-    for (int i = 0; i < n.op()->InputCount(); ++i) {
+    for (int i = 0; i < n.InputCount(); ++i) {
       if (i != 0) os << ", ";
       os << n.InputAt(i)->id();
     }
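Node::New above allocates the node header, its input records, and its use records as a single zone block, which is why the constructor can point inputs_.static_ at `this + 1`. A standalone sketch of that single-allocation layout, with plain malloc standing in for the zone allocator and a toy node that stores only input ids:

#include <cstdlib>
#include <iostream>
#include <new>

struct ToyInput { int to; };

struct ToyNode {
  int input_count;
  // The input records live immediately after the node header.
  ToyInput* inputs() { return reinterpret_cast<ToyInput*>(this + 1); }

  static ToyNode* New(int input_count, const int* input_ids) {
    size_t size = sizeof(ToyNode) + input_count * sizeof(ToyInput);
    void* buffer = std::malloc(size);  // one block for header + inputs
    ToyNode* node = new (buffer) ToyNode{input_count};
    for (int i = 0; i < input_count; ++i) node->inputs()[i] = {input_ids[i]};
    return node;
  }
};

int main() {
  int ids[] = {7, 8, 9};
  ToyNode* n = ToyNode::New(3, ids);
  std::cout << n->inputs()[1].to << "\n";  // prints 8
  std::free(n);
}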
diff --git a/src/compiler/node.h b/src/compiler/node.h
index c3f5a53..2295b7b 100644
--- a/src/compiler/node.h
+++ b/src/compiler/node.h
@@ -9,20 +9,55 @@
 #include <set>
 #include <vector>
 
-#include "src/compiler/generic-algorithm.h"
-#include "src/compiler/generic-node.h"
+#include "src/v8.h"
+
 #include "src/compiler/opcodes.h"
 #include "src/compiler/operator.h"
 #include "src/types.h"
 #include "src/zone.h"
 #include "src/zone-allocator.h"
+#include "src/zone-containers.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-class NodeData {
+// Forward declarations.
+class Edge;
+class Graph;
+
+
+// Marks are used during traversal of the graph to distinguish states of nodes.
+// Each node has a mark which is a monotonically increasing integer, and a
+// {NodeMarker} has a range of values that indicate states of a node.
+typedef uint32_t Mark;
+
+// NodeIds are identifying numbers for nodes that can be used to index auxiliary
+// out-of-line data associated with each node.
+typedef int NodeId;
+
+// A Node is the basic primitive of graphs. Nodes are chained together by
+// input/use chains but by default otherwise contain only an identifying number
+// which specific applications of graphs and nodes can use to index auxiliary
+// out-of-line data, especially transient data.
+//
+// In addition, Nodes contain only a mutable Operator that may change during
+// compilation, e.g. during lowering passes. Other information that needs to be
+// associated with Nodes during compilation must be stored out-of-line indexed
+// by the Node's id.
+class Node FINAL {
  public:
+  void Initialize(const Operator* op) {
+    set_op(op);
+    set_mark(0);
+  }
+
+  bool IsDead() const { return InputCount() > 0 && InputAt(0) == NULL; }
+  void Kill();
+
+  void CollectProjections(ZoneVector<Node*>* projections);
+  Node* FindProjection(size_t projection_index);
+
   const Operator* op() const { return op_; }
   void set_op(const Operator* op) { op_ = op; }
 
@@ -31,44 +66,387 @@
     return static_cast<IrOpcode::Value>(op_->opcode());
   }
 
-  Bounds bounds() { return bounds_; }
+  NodeId id() const { return id_; }
+
+  int InputCount() const { return input_count(); }
+  Node* InputAt(int index) const { return GetInputRecordPtr(index)->to; }
+  inline void ReplaceInput(int index, Node* new_input);
+  inline void AppendInput(Zone* zone, Node* new_input);
+  inline void InsertInput(Zone* zone, int index, Node* new_input);
+  inline void RemoveInput(int index);
+
+  int UseCount() const;
+  Node* UseAt(int index) const;
+  inline void ReplaceUses(Node* replace_to);
+  template <class UnaryPredicate>
+  inline void ReplaceUsesIf(UnaryPredicate pred, Node* replace_to);
+  inline void RemoveAllInputs();
+
+  inline void TrimInputCount(int input_count);
+
+  class InputEdges {
+   public:
+    class iterator;
+    iterator begin() const;
+    iterator end() const;
+    bool empty() const;
+
+    explicit InputEdges(Node* node) : node_(node) {}
+
+   private:
+    Node* node_;
+  };
+
+  class Inputs {
+   public:
+    class iterator;
+    iterator begin() const;
+    iterator end() const;
+    bool empty() const;
+
+    explicit Inputs(Node* node) : node_(node) {}
+
+   private:
+    Node* node_;
+  };
+
+  Inputs inputs() { return Inputs(this); }
+  InputEdges input_edges() { return InputEdges(this); }
+
+  class UseEdges {
+   public:
+    class iterator;
+    iterator begin() const;
+    iterator end() const;
+    bool empty() const;
+
+    explicit UseEdges(Node* node) : node_(node) {}
+
+   private:
+    Node* node_;
+  };
+
+  class Uses {
+   public:
+    class iterator;
+    iterator begin() const;
+    iterator end() const;
+    bool empty() const;
+
+    explicit Uses(Node* node) : node_(node) {}
+
+   private:
+    Node* node_;
+  };
+
+  Uses uses() { return Uses(this); }
+  UseEdges use_edges() { return UseEdges(this); }
+
+  bool OwnedBy(Node* owner) const;
+
+  static Node* New(Graph* graph, int input_count, Node** inputs,
+                   bool has_extensible_inputs);
 
  protected:
-  const Operator* op_;
-  Bounds bounds_;
-  explicit NodeData(Zone* zone) : bounds_(Bounds(Type::None(zone))) {}
+  friend class Graph;
+  friend class Edge;
+
+  class Use : public ZoneObject {
+   public:
+    Node* from;
+    Use* next;
+    Use* prev;
+    int input_index;
+  };
+
+  class Input {
+   public:
+    Node* to;
+    Use* use;
+
+    void Update(Node* new_to);
+  };
+
+  void EnsureAppendableInputs(Zone* zone);
+
+  Input* GetInputRecordPtr(int index) const {
+    if (has_appendable_inputs()) {
+      return &((*inputs_.appendable_)[index]);
+    } else {
+      return &inputs_.static_[index];
+    }
+  }
+
+  inline void AppendUse(Use* use);
+  inline void RemoveUse(Use* use);
+
+  void* operator new(size_t, void* location) { return location; }
+
+ private:
+  inline Node(NodeId id, int input_count, int reserve_input_count);
+
+  typedef ZoneDeque<Input> InputDeque;
 
   friend class NodeProperties;
+  template <typename State>
+  friend class NodeMarker;
+
+  // Only NodeProperties should manipulate the bounds.
+  Bounds bounds() { return bounds_; }
   void set_bounds(Bounds b) { bounds_ = b; }
+
+  // Only NodeMarkers should manipulate the marks on nodes.
+  Mark mark() { return mark_; }
+  void set_mark(Mark mark) { mark_ = mark; }
+
+  int input_count() const { return InputCountField::decode(bit_field_); }
+  void set_input_count(int input_count) {
+    DCHECK_LE(0, input_count);
+    bit_field_ = InputCountField::update(bit_field_, input_count);
+  }
+
+  int reserved_input_count() const {
+    return ReservedInputCountField::decode(bit_field_);
+  }
+  void set_reserved_input_count(int reserved_input_count) {
+    DCHECK_LE(0, reserved_input_count);
+    bit_field_ =
+        ReservedInputCountField::update(bit_field_, reserved_input_count);
+  }
+
+  bool has_appendable_inputs() const {
+    return HasAppendableInputsField::decode(bit_field_);
+  }
+  void set_has_appendable_inputs(bool has_appendable_inputs) {
+    bit_field_ =
+        HasAppendableInputsField::update(bit_field_, has_appendable_inputs);
+  }
+
+  typedef BitField<unsigned, 0, 29> InputCountField;
+  typedef BitField<unsigned, 29, 2> ReservedInputCountField;
+  typedef BitField<unsigned, 31, 1> HasAppendableInputsField;
+  static const int kDefaultReservedInputs = ReservedInputCountField::kMax;
+
+  const Operator* op_;
+  Bounds bounds_;
+  Mark mark_;
+  NodeId id_;
+  unsigned bit_field_;
+  union {
+    // When a node is initially allocated, it uses a static buffer to hold its
+    // inputs under the assumption that the number of inputs will not increase.
+    // When the first input is appended, the static buffer is converted into a
+    // deque to allow for space-efficient growing.
+    Input* static_;
+    InputDeque* appendable_;
+  } inputs_;
+  Use* first_use_;
+  Use* last_use_;
+
+  DISALLOW_COPY_AND_ASSIGN(Node);
 };
 
-// A Node is the basic primitive of an IR graph. In addition to the members
-// inherited from Vector, Nodes only contain a mutable Operator that may change
-// during compilation, e.g. during lowering passes.  Other information that
-// needs to be associated with Nodes during compilation must be stored
-// out-of-line indexed by the Node's id.
-class Node FINAL : public GenericNode<NodeData, Node> {
+
+// An encapsulation for information associated with a single use of a node as
+// an input to another node, allowing access to both the defining node and
+// the node having the input.
+class Edge {
  public:
-  Node(GenericGraphBase* graph, int input_count)
-      : GenericNode<NodeData, Node>(graph, input_count) {}
+  Node* from() const { return input_->use->from; }
+  Node* to() const { return input_->to; }
+  int index() const {
+    int index = input_->use->input_index;
+    DCHECK(index < input_->use->from->input_count());
+    return index;
+  }
 
-  void Initialize(const Operator* op) { set_op(op); }
+  bool operator==(const Edge& other) { return input_ == other.input_; }
+  bool operator!=(const Edge& other) { return !(*this == other); }
 
-  bool IsDead() const { return InputCount() > 0 && InputAt(0) == NULL; }
-  void Kill();
+  void UpdateTo(Node* new_to) { input_->Update(new_to); }
 
-  void CollectProjections(ZoneVector<Node*>* projections);
-  Node* FindProjection(size_t projection_index);
+ private:
+  friend class Node::Uses::iterator;
+  friend class Node::Inputs::iterator;
+  friend class Node::UseEdges::iterator;
+  friend class Node::InputEdges::iterator;
+
+  explicit Edge(Node::Input* input) : input_(input) {}
+
+  Node::Input* input_;
 };
 
-OStream& operator<<(OStream& os, const Node& n);
 
-typedef GenericGraphVisit::NullNodeVisitor<NodeData, Node> NullNodeVisitor;
+// A forward iterator to visit the edges for the input dependencies of a node.
+class Node::InputEdges::iterator {
+ public:
+  typedef std::forward_iterator_tag iterator_category;
+  typedef int difference_type;
+  typedef Edge value_type;
+  typedef Edge* pointer;
+  typedef Edge& reference;
+  iterator(const Node::InputEdges::iterator& other)  // NOLINT
+      : input_(other.input_) {}
+  iterator() : input_(NULL) {}
+
+  Edge operator*() const { return Edge(input_); }
+  bool operator==(const iterator& other) const { return Equals(other); }
+  bool operator!=(const iterator& other) const { return !Equals(other); }
+  iterator& operator++() {
+    DCHECK(input_ != NULL);
+    Edge edge(input_);
+    Node* from = edge.from();
+    SetInput(from, input_->use->input_index + 1);
+    return *this;
+  }
+  iterator operator++(int) {
+    iterator result(*this);
+    ++(*this);
+    return result;
+  }
+
+ private:
+  friend class Node;
+
+  explicit iterator(Node* from, int index = 0) : input_(NULL) {
+    SetInput(from, index);
+  }
+
+  bool Equals(const iterator& other) const { return other.input_ == input_; }
+  void SetInput(Node* from, int index) {
+    DCHECK(index >= 0 && index <= from->InputCount());
+    if (index < from->InputCount()) {
+      input_ = from->GetInputRecordPtr(index);
+    } else {
+      input_ = NULL;
+    }
+  }
+
+  Input* input_;
+};
+
+
+// A forward iterator to visit the inputs of a node.
+class Node::Inputs::iterator {
+ public:
+  typedef std::forward_iterator_tag iterator_category;
+  typedef int difference_type;
+  typedef Node* value_type;
+  typedef Node** pointer;
+  typedef Node*& reference;
+
+  iterator(const Node::Inputs::iterator& other)  // NOLINT
+      : iter_(other.iter_) {}
+
+  Node* operator*() const { return (*iter_).to(); }
+  bool operator==(const iterator& other) const { return Equals(other); }
+  bool operator!=(const iterator& other) const { return !Equals(other); }
+  iterator& operator++() {
+    ++iter_;
+    return *this;
+  }
+  iterator operator++(int) {
+    iterator result(*this);
+    ++(*this);
+    return result;
+  }
+
+
+ private:
+  friend class Node::Inputs;
+
+  explicit iterator(Node* node, int index) : iter_(node, index) {}
+
+  bool Equals(const iterator& other) const { return other.iter_ == iter_; }
+
+  Node::InputEdges::iterator iter_;
+};
+
+// A forward iterator to visit the use edges of a node. The edges are
+// returned in the order in which they were added as inputs to the using
+// nodes.
+class Node::UseEdges::iterator {
+ public:
+  iterator(const Node::UseEdges::iterator& other)  // NOLINT
+      : current_(other.current_),
+        next_(other.next_) {}
+
+  Edge operator*() const { return Edge(CurrentInput()); }
+
+  bool operator==(const iterator& other) { return Equals(other); }
+  bool operator!=(const iterator& other) { return !Equals(other); }
+  iterator& operator++() {
+    DCHECK(current_ != NULL);
+    current_ = next_;
+    next_ = (current_ == NULL) ? NULL : current_->next;
+    return *this;
+  }
+  iterator operator++(int) {
+    iterator result(*this);
+    ++(*this);
+    return result;
+  }
+
+ private:
+  friend class Node::UseEdges;
+
+  iterator() : current_(NULL), next_(NULL) {}
+  explicit iterator(Node* node)
+      : current_(node->first_use_),
+        next_(current_ == NULL ? NULL : current_->next) {}
+
+  bool Equals(const iterator& other) const {
+    return other.current_ == current_;
+  }
+
+  Input* CurrentInput() const {
+    return current_->from->GetInputRecordPtr(current_->input_index);
+  }
+
+  Node::Use* current_;
+  Node::Use* next_;
+};
+
+
+// A forward iterator to visit the uses of a node. The uses are returned in
+// the order in which they were added as inputs.
+class Node::Uses::iterator {
+ public:
+  iterator(const Node::Uses::iterator& other)  // NOLINT
+      : current_(other.current_) {}
+
+  Node* operator*() { return current_->from; }
+
+  bool operator==(const iterator& other) { return other.current_ == current_; }
+  bool operator!=(const iterator& other) { return other.current_ != current_; }
+  iterator& operator++() {
+    DCHECK(current_ != NULL);
+    current_ = current_->next;
+    return *this;
+  }
+
+ private:
+  friend class Node::Uses;
+
+  iterator() : current_(NULL) {}
+  explicit iterator(Node* node) : current_(node->first_use_) {}
+
+  Input* CurrentInput() const {
+    return current_->from->GetInputRecordPtr(current_->input_index);
+  }
+
+  Node::Use* current_;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const Node& n);
 
 typedef std::set<Node*, std::less<Node*>, zone_allocator<Node*> > NodeSet;
 typedef NodeSet::iterator NodeSetIter;
 typedef NodeSet::reverse_iterator NodeSetRIter;
 
+typedef ZoneDeque<Node*> NodeDeque;
+
 typedef ZoneVector<Node*> NodeVector;
 typedef NodeVector::iterator NodeVectorIter;
 typedef NodeVector::const_iterator NodeVectorConstIter;
@@ -87,6 +465,195 @@
   return OpParameter<T>(node->op());
 }
 
+inline Node::InputEdges::iterator Node::InputEdges::begin() const {
+  return Node::InputEdges::iterator(this->node_, 0);
+}
+
+inline Node::InputEdges::iterator Node::InputEdges::end() const {
+  return Node::InputEdges::iterator(this->node_, this->node_->InputCount());
+}
+
+inline Node::Inputs::iterator Node::Inputs::begin() const {
+  return Node::Inputs::iterator(this->node_, 0);
+}
+
+inline Node::Inputs::iterator Node::Inputs::end() const {
+  return Node::Inputs::iterator(this->node_, this->node_->InputCount());
+}
+
+inline Node::UseEdges::iterator Node::UseEdges::begin() const {
+  return Node::UseEdges::iterator(this->node_);
+}
+
+inline Node::UseEdges::iterator Node::UseEdges::end() const {
+  return Node::UseEdges::iterator();
+}
+
+inline Node::Uses::iterator Node::Uses::begin() const {
+  return Node::Uses::iterator(this->node_);
+}
+
+inline Node::Uses::iterator Node::Uses::end() const {
+  return Node::Uses::iterator();
+}
+
+inline bool Node::InputEdges::empty() const { return begin() == end(); }
+inline bool Node::Uses::empty() const { return begin() == end(); }
+inline bool Node::UseEdges::empty() const { return begin() == end(); }
+inline bool Node::Inputs::empty() const { return begin() == end(); }
+
+inline void Node::ReplaceUses(Node* replace_to) {
+  for (Use* use = first_use_; use != NULL; use = use->next) {
+    use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
+  }
+  if (replace_to->last_use_ == NULL) {
+    DCHECK_EQ(NULL, replace_to->first_use_);
+    replace_to->first_use_ = first_use_;
+    replace_to->last_use_ = last_use_;
+  } else if (first_use_ != NULL) {
+    DCHECK_NE(NULL, replace_to->first_use_);
+    replace_to->last_use_->next = first_use_;
+    first_use_->prev = replace_to->last_use_;
+    replace_to->last_use_ = last_use_;
+  }
+  first_use_ = NULL;
+  last_use_ = NULL;
+}
+
+template <class UnaryPredicate>
+inline void Node::ReplaceUsesIf(UnaryPredicate pred, Node* replace_to) {
+  for (Use* use = first_use_; use != NULL;) {
+    Use* next = use->next;
+    if (pred(use->from)) {
+      RemoveUse(use);
+      replace_to->AppendUse(use);
+      use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
+    }
+    use = next;
+  }
+}
+
+inline void Node::RemoveAllInputs() {
+  for (Edge edge : input_edges()) {
+    edge.UpdateTo(NULL);
+  }
+}
+
+inline void Node::TrimInputCount(int new_input_count) {
+  if (new_input_count == input_count()) return;  // Nothing to do.
+
+  DCHECK(new_input_count < input_count());
+
+  // Update inline inputs.
+  for (int i = new_input_count; i < input_count(); i++) {
+    Node::Input* input = GetInputRecordPtr(i);
+    input->Update(NULL);
+  }
+  set_input_count(new_input_count);
+}
+
+inline void Node::ReplaceInput(int index, Node* new_to) {
+  Input* input = GetInputRecordPtr(index);
+  input->Update(new_to);
+}
+
+inline void Node::Input::Update(Node* new_to) {
+  Node* old_to = this->to;
+  if (new_to == old_to) return;  // Nothing to do.
+  // Snip out the use from where it used to be
+  if (old_to != NULL) {
+    old_to->RemoveUse(use);
+  }
+  to = new_to;
+  // And put it into the new node's use list.
+  if (new_to != NULL) {
+    new_to->AppendUse(use);
+  } else {
+    use->next = NULL;
+    use->prev = NULL;
+  }
+}
+
+inline void Node::EnsureAppendableInputs(Zone* zone) {
+  if (!has_appendable_inputs()) {
+    void* deque_buffer = zone->New(sizeof(InputDeque));
+    InputDeque* deque = new (deque_buffer) InputDeque(zone);
+    for (int i = 0; i < input_count(); ++i) {
+      deque->push_back(inputs_.static_[i]);
+    }
+    inputs_.appendable_ = deque;
+    set_has_appendable_inputs(true);
+  }
+}
+
+inline void Node::AppendInput(Zone* zone, Node* to_append) {
+  Use* new_use = new (zone) Use;
+  Input new_input;
+  new_input.to = to_append;
+  new_input.use = new_use;
+  if (reserved_input_count() > 0) {
+    DCHECK(!has_appendable_inputs());
+    set_reserved_input_count(reserved_input_count() - 1);
+    inputs_.static_[input_count()] = new_input;
+  } else {
+    EnsureAppendableInputs(zone);
+    inputs_.appendable_->push_back(new_input);
+  }
+  new_use->input_index = input_count();
+  new_use->from = this;
+  to_append->AppendUse(new_use);
+  set_input_count(input_count() + 1);
+}
+
+inline void Node::InsertInput(Zone* zone, int index, Node* to_insert) {
+  DCHECK(index >= 0 && index < InputCount());
+  // TODO(turbofan): Optimize this implementation!
+  AppendInput(zone, InputAt(InputCount() - 1));
+  for (int i = InputCount() - 1; i > index; --i) {
+    ReplaceInput(i, InputAt(i - 1));
+  }
+  ReplaceInput(index, to_insert);
+}
+
+inline void Node::RemoveInput(int index) {
+  DCHECK(index >= 0 && index < InputCount());
+  // TODO(turbofan): Optimize this implementation!
+  for (; index < InputCount() - 1; ++index) {
+    ReplaceInput(index, InputAt(index + 1));
+  }
+  TrimInputCount(InputCount() - 1);
+}
+
+inline void Node::AppendUse(Use* use) {
+  use->next = NULL;
+  use->prev = last_use_;
+  if (last_use_ == NULL) {
+    first_use_ = use;
+  } else {
+    last_use_->next = use;
+  }
+  last_use_ = use;
+}
+
+inline void Node::RemoveUse(Use* use) {
+  if (last_use_ == use) {
+    last_use_ = use->prev;
+  }
+  if (use->prev != NULL) {
+    use->prev->next = use->next;
+  } else {
+    first_use_ = use->next;
+  }
+  if (use->next != NULL) {
+    use->next->prev = use->prev;
+  }
+}
+
+inline bool Node::OwnedBy(Node* owner) const {
+  return first_use_ != NULL && first_use_->from == owner &&
+         first_use_->next == NULL;
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
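
Note on the bit_field_ packing in the Node class above: InputCountField,
ReservedInputCountField and HasAppendableInputsField carve a single 32-bit
word into a 29-bit input count, a 2-bit reservation count and a 1-bit
storage-mode flag. A minimal standalone sketch of the same
encode/update/decode pattern (not V8's actual BitField template):

    #include <cassert>
    #include <cstdint>

    // Sketch: pack a value of type T into bits [shift, shift + size) of a word.
    template <typename T, int shift, int size>
    struct BitField {
      static const uint32_t kMax = (1u << size) - 1;
      static uint32_t update(uint32_t previous, T value) {
        return (previous & ~(kMax << shift)) |
               (static_cast<uint32_t>(value) << shift);
      }
      static T decode(uint32_t word) {
        return static_cast<T>((word >> shift) & kMax);
      }
    };

    typedef BitField<unsigned, 0, 29> InputCountField;          // bits 0..28
    typedef BitField<unsigned, 29, 2> ReservedInputCountField;  // bits 29..30
    typedef BitField<bool, 31, 1> HasAppendableInputsField;     // bit 31

    int main() {
      uint32_t bits = 0;
      bits = InputCountField::update(bits, 5);
      bits = ReservedInputCountField::update(bits, 2);
      assert(InputCountField::decode(bits) == 5);
      assert(ReservedInputCountField::decode(bits) == 2);
      assert(!HasAppendableInputsField::decode(bits));
      return 0;
    }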
diff --git a/src/compiler/opcodes.cc b/src/compiler/opcodes.cc
new file mode 100644
index 0000000..044395c
--- /dev/null
+++ b/src/compiler/opcodes.cc
@@ -0,0 +1,34 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/opcodes.h"
+
+#include <algorithm>
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+char const* const kMnemonics[] = {
+#define DECLARE_MNEMONIC(x) #x,
+    ALL_OP_LIST(DECLARE_MNEMONIC)
+#undef DECLARE_MNEMONIC
+        "UnknownOpcode"};
+
+}  // namespace
+
+
+// static
+char const* IrOpcode::Mnemonic(Value value) {
+  size_t const n = std::min<size_t>(value, arraysize(kMnemonics) - 1);
+  return kMnemonics[n];
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
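
The trailing "UnknownOpcode" entry makes kMnemonics exactly one slot longer
than the opcode range, so Mnemonic() can clamp out-of-range values onto the
sentinel instead of branching. A hedged sketch of the same lookup pattern,
with hypothetical names:

    #include <algorithm>
    #include <cstddef>

    static const char* const kNames[] = {"Start", "End", "UnknownOpcode"};

    // Any index past the last real entry lands on the sentinel slot.
    const char* NameOf(size_t value) {
      size_t const n = std::min(value, sizeof(kNames) / sizeof(kNames[0]) - 1);
      return kNames[n];
    }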
diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h
index e210abd..d229b6d 100644
--- a/src/compiler/opcodes.h
+++ b/src/compiler/opcodes.h
@@ -7,13 +7,14 @@
 
 // Opcodes for control operators.
 #define INNER_CONTROL_OP_LIST(V) \
-  V(Dead)                  \
-  V(Loop)                  \
-  V(Branch)                \
-  V(IfTrue)                \
-  V(IfFalse)               \
-  V(Merge)                 \
-  V(Return)                \
+  V(Dead)                        \
+  V(Loop)                        \
+  V(Branch)                      \
+  V(IfTrue)                      \
+  V(IfFalse)                     \
+  V(Merge)                       \
+  V(Return)                      \
+  V(Terminate)                   \
   V(Throw)
 
 #define CONTROL_OP_LIST(V) \
@@ -32,9 +33,9 @@
   V(HeapConstant)
 
 #define INNER_OP_LIST(V) \
+  V(Select)              \
   V(Phi)                 \
   V(EffectPhi)           \
-  V(ControlEffect)       \
   V(ValueEffect)         \
   V(Finish)              \
   V(FrameState)          \
@@ -113,7 +114,7 @@
   V(JSCreateWithContext)      \
   V(JSCreateBlockContext)     \
   V(JSCreateModuleContext)    \
-  V(JSCreateGlobalContext)
+  V(JSCreateScriptContext)
 
 #define JS_OTHER_OP_LIST(V) \
   V(JSCallConstruct)        \
@@ -131,6 +132,7 @@
 
 // Opcodes for VirtualMachine-level operators.
 #define SIMPLIFIED_OP_LIST(V) \
+  V(AnyToBoolean)             \
   V(BooleanNot)               \
   V(BooleanToNumber)          \
   V(NumberEqual)              \
@@ -157,9 +159,13 @@
   V(ChangeBoolToBit)          \
   V(ChangeBitToBool)          \
   V(LoadField)                \
+  V(LoadBuffer)               \
   V(LoadElement)              \
   V(StoreField)               \
-  V(StoreElement)
+  V(StoreBuffer)              \
+  V(StoreElement)             \
+  V(ObjectIsSmi)              \
+  V(ObjectIsNonNegativeSmi)
 
 // Opcodes for Machine-level operators.
 #define MACHINE_OP_LIST(V)    \
@@ -186,23 +192,26 @@
   V(Int32Sub)                 \
   V(Int32SubWithOverflow)     \
   V(Int32Mul)                 \
+  V(Int32MulHigh)             \
   V(Int32Div)                 \
-  V(Int32UDiv)                \
   V(Int32Mod)                 \
-  V(Int32UMod)                \
   V(Int32LessThan)            \
   V(Int32LessThanOrEqual)     \
+  V(Uint32Div)                \
   V(Uint32LessThan)           \
   V(Uint32LessThanOrEqual)    \
+  V(Uint32Mod)                \
+  V(Uint32MulHigh)            \
   V(Int64Add)                 \
   V(Int64Sub)                 \
   V(Int64Mul)                 \
   V(Int64Div)                 \
-  V(Int64UDiv)                \
   V(Int64Mod)                 \
-  V(Int64UMod)                \
   V(Int64LessThan)            \
   V(Int64LessThanOrEqual)     \
+  V(Uint64Div)                \
+  V(Uint64LessThan)           \
+  V(Uint64Mod)                \
   V(ChangeFloat32ToFloat64)   \
   V(ChangeFloat64ToInt32)     \
   V(ChangeFloat64ToUint32)    \
@@ -221,7 +230,14 @@
   V(Float64Sqrt)              \
   V(Float64Equal)             \
   V(Float64LessThan)          \
-  V(Float64LessThanOrEqual)
+  V(Float64LessThanOrEqual)   \
+  V(Float64Floor)             \
+  V(Float64Ceil)              \
+  V(Float64RoundTruncate)     \
+  V(Float64RoundTiesAway)     \
+  V(LoadStackPointer)         \
+  V(CheckedLoad)              \
+  V(CheckedStore)
 
 #define VALUE_OP_LIST(V) \
   COMMON_OP_LIST(V)      \
@@ -253,20 +269,11 @@
   };
 
   // Returns the mnemonic name of an opcode.
-  static const char* Mnemonic(Value val) {
-    switch (val) {
-#define RETURN_NAME(x) \
-  case k##x:           \
-    return #x;
-      ALL_OP_LIST(RETURN_NAME)
-#undef RETURN_NAME
-      default:
-        return "UnknownOpcode";
-    }
-  }
+  static char const* Mnemonic(Value value);
 
   static bool IsJsOpcode(Value val) {
     switch (val) {
+// TODO(turbofan): make this a range check.
 #define RETURN_NAME(x) \
   case k##x:           \
     return true;
@@ -279,6 +286,7 @@
 
   static bool IsControlOpcode(Value val) {
     switch (val) {
+// TODO(turbofan): make this a range check.
 #define RETURN_NAME(x) \
   case k##x:           \
     return true;
@@ -289,8 +297,22 @@
     }
   }
 
+  static bool IsLeafOpcode(Value val) {
+    switch (val) {
+// TODO(turbofan): make this a table lookup.
+#define RETURN_NAME(x) \
+  case k##x:           \
+    return true;
+      LEAF_OP_LIST(RETURN_NAME)
+#undef RETURN_NAME
+      default:
+        return false;
+    }
+  }
+
   static bool IsCommonOpcode(Value val) {
     switch (val) {
+// TODO(turbofan): make this a table lookup or a range check.
 #define RETURN_NAME(x) \
   case k##x:           \
     return true;
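
The V(...) lists in opcodes.h are X-macros: each client supplies its own
definition of V and expands the list once per use site, keeping the enum and
every derived table in sync. A self-contained sketch (example opcodes only,
not the real lists):

    #include <iostream>

    #define MY_OP_LIST(V) \
      V(Start)            \
      V(End)              \
      V(Branch)

    // One expansion produces the enum...
    enum Opcode {
    #define DECLARE_OPCODE(x) k##x,
      MY_OP_LIST(DECLARE_OPCODE)
    #undef DECLARE_OPCODE
    };

    // ...and a second produces the parallel mnemonic table.
    static const char* const kOpcodeNames[] = {
    #define DECLARE_NAME(x) #x,
        MY_OP_LIST(DECLARE_NAME)
    #undef DECLARE_NAME
    };

    int main() {
      std::cout << kOpcodeNames[kBranch] << std::endl;  // Prints "Branch".
      return 0;
    }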
diff --git a/src/compiler/operator-properties-inl.h b/src/compiler/operator-properties-inl.h
deleted file mode 100644
index 9dae106..0000000
--- a/src/compiler/operator-properties-inl.h
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_OPERATOR_PROPERTIES_INL_H_
-#define V8_COMPILER_OPERATOR_PROPERTIES_INL_H_
-
-#include "src/compiler/common-operator.h"
-#include "src/compiler/js-operator.h"
-#include "src/compiler/opcodes.h"
-#include "src/compiler/operator-properties.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-inline bool OperatorProperties::HasValueInput(const Operator* op) {
-  return OperatorProperties::GetValueInputCount(op) > 0;
-}
-
-inline bool OperatorProperties::HasContextInput(const Operator* op) {
-  IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
-  return IrOpcode::IsJsOpcode(opcode);
-}
-
-inline bool OperatorProperties::HasEffectInput(const Operator* op) {
-  return OperatorProperties::GetEffectInputCount(op) > 0;
-}
-
-inline bool OperatorProperties::HasControlInput(const Operator* op) {
-  return OperatorProperties::GetControlInputCount(op) > 0;
-}
-
-inline bool OperatorProperties::HasFrameStateInput(const Operator* op) {
-  if (!FLAG_turbo_deoptimization) {
-    return false;
-  }
-
-  switch (op->opcode()) {
-    case IrOpcode::kFrameState:
-      return true;
-    case IrOpcode::kJSCallRuntime: {
-      Runtime::FunctionId function = OpParameter<Runtime::FunctionId>(op);
-      return Linkage::NeedsFrameState(function);
-    }
-
-    // Strict equality cannot lazily deoptimize.
-    case IrOpcode::kJSStrictEqual:
-    case IrOpcode::kJSStrictNotEqual:
-      return false;
-
-    // Calls
-    case IrOpcode::kJSCallFunction:
-    case IrOpcode::kJSCallConstruct:
-
-    // Compare operations
-    case IrOpcode::kJSEqual:
-    case IrOpcode::kJSNotEqual:
-    case IrOpcode::kJSLessThan:
-    case IrOpcode::kJSGreaterThan:
-    case IrOpcode::kJSLessThanOrEqual:
-    case IrOpcode::kJSGreaterThanOrEqual:
-
-    // Binary operations
-    case IrOpcode::kJSBitwiseOr:
-    case IrOpcode::kJSBitwiseXor:
-    case IrOpcode::kJSBitwiseAnd:
-    case IrOpcode::kJSShiftLeft:
-    case IrOpcode::kJSShiftRight:
-    case IrOpcode::kJSShiftRightLogical:
-    case IrOpcode::kJSAdd:
-    case IrOpcode::kJSSubtract:
-    case IrOpcode::kJSMultiply:
-    case IrOpcode::kJSDivide:
-    case IrOpcode::kJSModulus:
-    case IrOpcode::kJSLoadProperty:
-    case IrOpcode::kJSStoreProperty:
-    case IrOpcode::kJSLoadNamed:
-    case IrOpcode::kJSStoreNamed:
-      return true;
-
-    default:
-      return false;
-  }
-}
-
-inline int OperatorProperties::GetValueInputCount(const Operator* op) {
-  return op->InputCount();
-}
-
-inline int OperatorProperties::GetContextInputCount(const Operator* op) {
-  return OperatorProperties::HasContextInput(op) ? 1 : 0;
-}
-
-inline int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
-  return OperatorProperties::HasFrameStateInput(op) ? 1 : 0;
-}
-
-inline int OperatorProperties::GetEffectInputCount(const Operator* op) {
-  if (op->opcode() == IrOpcode::kEffectPhi ||
-      op->opcode() == IrOpcode::kFinish) {
-    return OpParameter<int>(op);
-  }
-  if (op->HasProperty(Operator::kNoRead) && op->HasProperty(Operator::kNoWrite))
-    return 0;  // no effects.
-  return 1;
-}
-
-inline int OperatorProperties::GetControlInputCount(const Operator* op) {
-  switch (op->opcode()) {
-    case IrOpcode::kPhi:
-    case IrOpcode::kEffectPhi:
-    case IrOpcode::kControlEffect:
-      return 1;
-#define OPCODE_CASE(x) case IrOpcode::k##x:
-      CONTROL_OP_LIST(OPCODE_CASE)
-#undef OPCODE_CASE
-      // Control operators are Operator1<int>.
-      return OpParameter<int>(op);
-    default:
-      // Operators that have write effects must have a control
-      // dependency. Effect dependencies only ensure the correct order of
-      // write/read operations without consideration of control flow. Without an
-      // explicit control dependency writes can be float in the schedule too
-      // early along a path that shouldn't generate a side-effect.
-      return op->HasProperty(Operator::kNoWrite) ? 0 : 1;
-  }
-  return 0;
-}
-
-inline int OperatorProperties::GetTotalInputCount(const Operator* op) {
-  return GetValueInputCount(op) + GetContextInputCount(op) +
-         GetFrameStateInputCount(op) + GetEffectInputCount(op) +
-         GetControlInputCount(op);
-}
-
-// -----------------------------------------------------------------------------
-// Output properties.
-
-inline bool OperatorProperties::HasValueOutput(const Operator* op) {
-  return GetValueOutputCount(op) > 0;
-}
-
-inline bool OperatorProperties::HasEffectOutput(const Operator* op) {
-  return op->opcode() == IrOpcode::kStart ||
-         op->opcode() == IrOpcode::kControlEffect ||
-         op->opcode() == IrOpcode::kValueEffect ||
-         (op->opcode() != IrOpcode::kFinish && GetEffectInputCount(op) > 0);
-}
-
-inline bool OperatorProperties::HasControlOutput(const Operator* op) {
-  IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
-  return (opcode != IrOpcode::kEnd && IrOpcode::IsControlOpcode(opcode));
-}
-
-
-inline int OperatorProperties::GetValueOutputCount(const Operator* op) {
-  return op->OutputCount();
-}
-
-inline int OperatorProperties::GetEffectOutputCount(const Operator* op) {
-  return HasEffectOutput(op) ? 1 : 0;
-}
-
-inline int OperatorProperties::GetControlOutputCount(const Operator* node) {
-  return node->opcode() == IrOpcode::kBranch ? 2 : HasControlOutput(node) ? 1
-                                                                          : 0;
-}
-
-
-inline bool OperatorProperties::IsBasicBlockBegin(const Operator* op) {
-  uint8_t opcode = op->opcode();
-  return opcode == IrOpcode::kStart || opcode == IrOpcode::kEnd ||
-         opcode == IrOpcode::kDead || opcode == IrOpcode::kLoop ||
-         opcode == IrOpcode::kMerge || opcode == IrOpcode::kIfTrue ||
-         opcode == IrOpcode::kIfFalse;
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_COMPILER_OPERATOR_PROPERTIES_INL_H_
diff --git a/src/compiler/operator-properties.cc b/src/compiler/operator-properties.cc
new file mode 100644
index 0000000..abfc5fd
--- /dev/null
+++ b/src/compiler/operator-properties.cc
@@ -0,0 +1,103 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/operator-properties.h"
+
+#include "src/compiler/js-operator.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/opcodes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// static
+bool OperatorProperties::HasContextInput(const Operator* op) {
+  IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
+  return IrOpcode::IsJsOpcode(opcode);
+}
+
+
+// static
+bool OperatorProperties::HasFrameStateInput(const Operator* op) {
+  if (!FLAG_turbo_deoptimization) {
+    return false;
+  }
+  switch (op->opcode()) {
+    case IrOpcode::kFrameState:
+      return true;
+    case IrOpcode::kJSCallRuntime: {
+      const CallRuntimeParameters& p = CallRuntimeParametersOf(op);
+      return Linkage::NeedsFrameState(p.id());
+    }
+
+    // Strict equality cannot lazily deoptimize.
+    case IrOpcode::kJSStrictEqual:
+    case IrOpcode::kJSStrictNotEqual:
+      return false;
+
+    // Calls
+    case IrOpcode::kJSCallFunction:
+    case IrOpcode::kJSCallConstruct:
+
+    // Compare operations
+    case IrOpcode::kJSEqual:
+    case IrOpcode::kJSGreaterThan:
+    case IrOpcode::kJSGreaterThanOrEqual:
+    case IrOpcode::kJSHasProperty:
+    case IrOpcode::kJSInstanceOf:
+    case IrOpcode::kJSLessThan:
+    case IrOpcode::kJSLessThanOrEqual:
+    case IrOpcode::kJSNotEqual:
+
+    // Binary operations
+    case IrOpcode::kJSAdd:
+    case IrOpcode::kJSBitwiseAnd:
+    case IrOpcode::kJSBitwiseOr:
+    case IrOpcode::kJSBitwiseXor:
+    case IrOpcode::kJSDivide:
+    case IrOpcode::kJSLoadNamed:
+    case IrOpcode::kJSLoadProperty:
+    case IrOpcode::kJSModulus:
+    case IrOpcode::kJSMultiply:
+    case IrOpcode::kJSShiftLeft:
+    case IrOpcode::kJSShiftRight:
+    case IrOpcode::kJSShiftRightLogical:
+    case IrOpcode::kJSStoreNamed:
+    case IrOpcode::kJSStoreProperty:
+    case IrOpcode::kJSSubtract:
+
+    // Conversions
+    case IrOpcode::kJSToObject:
+
+    // Other
+    case IrOpcode::kJSDeleteProperty:
+      return true;
+
+    default:
+      return false;
+  }
+}
+
+
+// static
+int OperatorProperties::GetTotalInputCount(const Operator* op) {
+  return op->ValueInputCount() + GetContextInputCount(op) +
+         GetFrameStateInputCount(op) + op->EffectInputCount() +
+         op->ControlInputCount();
+}
+
+
+// static
+bool OperatorProperties::IsBasicBlockBegin(const Operator* op) {
+  Operator::Opcode const opcode = op->opcode();
+  return opcode == IrOpcode::kStart || opcode == IrOpcode::kEnd ||
+         opcode == IrOpcode::kDead || opcode == IrOpcode::kLoop ||
+         opcode == IrOpcode::kMerge || opcode == IrOpcode::kIfTrue ||
+         opcode == IrOpcode::kIfFalse;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
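
As a concrete (hypothetical) reading of GetTotalInputCount(): a JSAdd node
with two value inputs, one effect input and one control input also receives
one context input (every JS opcode has one) and, when --turbo-deoptimization
is enabled, one frame-state input, for a total of 2 + 1 + 1 + 1 + 1 = 6.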
diff --git a/src/compiler/operator-properties.h b/src/compiler/operator-properties.h
index 718eea0..37c9755 100644
--- a/src/compiler/operator-properties.h
+++ b/src/compiler/operator-properties.h
@@ -5,36 +5,33 @@
 #ifndef V8_COMPILER_OPERATOR_PROPERTIES_H_
 #define V8_COMPILER_OPERATOR_PROPERTIES_H_
 
+#include "src/base/macros.h"
+
 namespace v8 {
 namespace internal {
 namespace compiler {
 
+// Forward declarations.
 class Operator;
 
-class OperatorProperties {
+
+class OperatorProperties FINAL {
  public:
-  static inline bool HasValueInput(const Operator* op);
-  static inline bool HasContextInput(const Operator* op);
-  static inline bool HasEffectInput(const Operator* op);
-  static inline bool HasControlInput(const Operator* op);
-  static inline bool HasFrameStateInput(const Operator* op);
+  static bool HasContextInput(const Operator* op);
+  static bool HasFrameStateInput(const Operator* op);
 
-  static inline int GetValueInputCount(const Operator* op);
-  static inline int GetContextInputCount(const Operator* op);
-  static inline int GetEffectInputCount(const Operator* op);
-  static inline int GetControlInputCount(const Operator* op);
-  static inline int GetFrameStateInputCount(const Operator* op);
-  static inline int GetTotalInputCount(const Operator* op);
+  static int GetContextInputCount(const Operator* op) {
+    return HasContextInput(op) ? 1 : 0;
+  }
+  static int GetFrameStateInputCount(const Operator* op) {
+    return HasFrameStateInput(op) ? 1 : 0;
+  }
+  static int GetTotalInputCount(const Operator* op);
 
-  static inline bool HasValueOutput(const Operator* op);
-  static inline bool HasEffectOutput(const Operator* op);
-  static inline bool HasControlOutput(const Operator* op);
+  static bool IsBasicBlockBegin(const Operator* op);
 
-  static inline int GetValueOutputCount(const Operator* op);
-  static inline int GetEffectOutputCount(const Operator* op);
-  static inline int GetControlOutputCount(const Operator* op);
-
-  static inline bool IsBasicBlockBegin(const Operator* op);
+ private:
+  DISALLOW_COPY_AND_ASSIGN(OperatorProperties);
 };
 
 }  // namespace compiler
diff --git a/src/compiler/operator.cc b/src/compiler/operator.cc
index 35f9c88..c8687f4 100644
--- a/src/compiler/operator.cc
+++ b/src/compiler/operator.cc
@@ -4,22 +4,41 @@
 
 #include "src/compiler/operator.h"
 
+#include <limits>
+
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-Operator::~Operator() {}
+
+template <typename N>
+static inline N CheckRange(size_t val) {
+  CHECK(val <= std::numeric_limits<N>::max());
+  return static_cast<N>(val);
+}
 
 
-SimpleOperator::SimpleOperator(Opcode opcode, Properties properties,
-                               int input_count, int output_count,
-                               const char* mnemonic)
-    : Operator(opcode, properties, mnemonic),
-      input_count_(input_count),
-      output_count_(output_count) {}
+Operator::Operator(Opcode opcode, Properties properties, const char* mnemonic,
+                   size_t value_in, size_t effect_in, size_t control_in,
+                   size_t value_out, size_t effect_out, size_t control_out)
+    : opcode_(opcode),
+      properties_(properties),
+      mnemonic_(mnemonic),
+      value_in_(CheckRange<uint32_t>(value_in)),
+      effect_in_(CheckRange<uint16_t>(effect_in)),
+      control_in_(CheckRange<uint16_t>(control_in)),
+      value_out_(CheckRange<uint16_t>(value_out)),
+      effect_out_(CheckRange<uint8_t>(effect_out)),
+      control_out_(CheckRange<uint8_t>(control_out)) {}
 
 
-SimpleOperator::~SimpleOperator() {}
+std::ostream& operator<<(std::ostream& os, const Operator& op) {
+  op.PrintTo(os);
+  return os;
+}
+
+
+void Operator::PrintTo(std::ostream& os) const { os << mnemonic(); }
 
 }  // namespace compiler
 }  // namespace internal
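
CheckRange() exists because operator.h narrows the input/output counts into
packed fields as small as uint8_t; a count that overflows its field must
abort rather than silently truncate. A standalone sketch of the check, using
assert() in place of V8's CHECK():

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <limits>

    // Sketch: narrow a size_t into N, aborting instead of truncating.
    template <typename N>
    N CheckRange(size_t val) {
      assert(val <= std::numeric_limits<N>::max());
      return static_cast<N>(val);
    }

    int main() {
      uint16_t ok = CheckRange<uint16_t>(65535);  // Fits exactly.
      (void)ok;
      // CheckRange<uint16_t>(70000) would fail the assert: 70000 > 65535.
      return 0;
    }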
diff --git a/src/compiler/operator.h b/src/compiler/operator.h
index 5137806..fb144ce 100644
--- a/src/compiler/operator.h
+++ b/src/compiler/operator.h
@@ -5,9 +5,11 @@
 #ifndef V8_COMPILER_OPERATOR_H_
 #define V8_COMPILER_OPERATOR_H_
 
+#include <ostream>  // NOLINT(readability/streams)
+
 #include "src/base/flags.h"
-#include "src/ostreams.h"
-#include "src/unique.h"
+#include "src/base/functional.h"
+#include "src/zone.h"
 
 namespace v8 {
 namespace internal {
@@ -47,9 +49,12 @@
   };
   typedef base::Flags<Property, uint8_t> Properties;
 
-  Operator(Opcode opcode, Properties properties, const char* mnemonic)
-      : opcode_(opcode), properties_(properties), mnemonic_(mnemonic) {}
-  virtual ~Operator();
+  // Constructor.
+  Operator(Opcode opcode, Properties properties, const char* mnemonic,
+           size_t value_in, size_t effect_in, size_t control_in,
+           size_t value_out, size_t effect_out, size_t control_out);
+
+  virtual ~Operator() {}
 
   // A small integer unique to all instances of a particular kind of operator,
   // useful for quick matching for specific kinds of operators. For fast access
@@ -63,196 +68,122 @@
   // Check if this operator equals another operator. Equivalent operators can
   // be merged, and nodes with equivalent operators and equivalent inputs
   // can be merged.
-  virtual bool Equals(const Operator* other) const = 0;
+  virtual bool Equals(const Operator* that) const {
+    return this->opcode() == that->opcode();
+  }
 
   // Compute a hashcode to speed up equivalence-set checking.
   // Equal operators should always have equal hashcodes, and unequal operators
   // should have unequal hashcodes with high probability.
-  virtual int HashCode() const = 0;
+  virtual size_t HashCode() const { return base::hash<Opcode>()(opcode()); }
 
   // Check whether this operator has the given property.
   bool HasProperty(Property property) const {
     return (properties() & property) == property;
   }
 
-  // Number of data inputs to the operator, for verifying graph structure.
-  virtual int InputCount() const = 0;
-
-  // Number of data outputs from the operator, for verifying graph structure.
-  virtual int OutputCount() const = 0;
-
   Properties properties() const { return properties_; }
 
+  // TODO(titzer): convert return values here to size_t.
+  int ValueInputCount() const { return value_in_; }
+  int EffectInputCount() const { return effect_in_; }
+  int ControlInputCount() const { return control_in_; }
+
+  int ValueOutputCount() const { return value_out_; }
+  int EffectOutputCount() const { return effect_out_; }
+  int ControlOutputCount() const { return control_out_; }
+
+  static inline size_t ZeroIfPure(Properties properties) {
+    return (properties & kPure) == kPure ? 0 : 1;
+  }
+
   // TODO(titzer): API for input and output types, for typechecking graph.
  protected:
   // Print the full operator into the given stream, including any
   // static parameters. Useful for debugging and visualizing the IR.
-  virtual OStream& PrintTo(OStream& os) const = 0;  // NOLINT
-  friend OStream& operator<<(OStream& os, const Operator& op);
+  virtual void PrintTo(std::ostream& os) const;
+  friend std::ostream& operator<<(std::ostream& os, const Operator& op);
 
  private:
   Opcode opcode_;
   Properties properties_;
   const char* mnemonic_;
+  uint32_t value_in_;
+  uint16_t effect_in_;
+  uint16_t control_in_;
+  uint16_t value_out_;
+  uint8_t effect_out_;
+  uint8_t control_out_;
 
   DISALLOW_COPY_AND_ASSIGN(Operator);
 };
 
 DEFINE_OPERATORS_FOR_FLAGS(Operator::Properties)
 
-OStream& operator<<(OStream& os, const Operator& op);
+std::ostream& operator<<(std::ostream& os, const Operator& op);
 
-// An implementation of Operator that has no static parameters. Such operators
-// have just a name, an opcode, and a fixed number of inputs and outputs.
-// They can represented by singletons and shared globally.
-class SimpleOperator : public Operator {
- public:
-  SimpleOperator(Opcode opcode, Properties properties, int input_count,
-                 int output_count, const char* mnemonic);
-  ~SimpleOperator();
-
-  virtual bool Equals(const Operator* that) const FINAL {
-    return opcode() == that->opcode();
-  }
-  virtual int HashCode() const FINAL { return opcode(); }
-  virtual int InputCount() const FINAL { return input_count_; }
-  virtual int OutputCount() const FINAL { return output_count_; }
-
- private:
-  virtual OStream& PrintTo(OStream& os) const FINAL {  // NOLINT
-    return os << mnemonic();
-  }
-
-  int input_count_;
-  int output_count_;
-
-  DISALLOW_COPY_AND_ASSIGN(SimpleOperator);
-};
-
-// Template specialization implements a kind of type class for dealing with the
-// static parameters of Operator1 automatically.
-template <typename T>
-struct StaticParameterTraits {
-  static OStream& PrintTo(OStream& os, T val) {  // NOLINT
-    return os << "??";
-  }
-  static int HashCode(T a) { return 0; }
-  static bool Equals(T a, T b) {
-    return false;  // Not every T has a ==. By default, be conservative.
-  }
-};
-
-// Specialization for static parameters of type {int}.
-template <>
-struct StaticParameterTraits<int> {
-  static OStream& PrintTo(OStream& os, int val) {  // NOLINT
-    return os << val;
-  }
-  static int HashCode(int a) { return a; }
-  static bool Equals(int a, int b) { return a == b; }
-};
-
-// Specialization for static parameters of type {double}.
-template <>
-struct StaticParameterTraits<double> {
-  static OStream& PrintTo(OStream& os, double val) {  // NOLINT
-    return os << val;
-  }
-  static int HashCode(double a) {
-    return static_cast<int>(bit_cast<int64_t>(a));
-  }
-  static bool Equals(double a, double b) {
-    return bit_cast<int64_t>(a) == bit_cast<int64_t>(b);
-  }
-};
-
-// Specialization for static parameters of type {Unique<Object>}.
-template <>
-struct StaticParameterTraits<Unique<Object> > {
-  static OStream& PrintTo(OStream& os, Unique<Object> val) {  // NOLINT
-    return os << Brief(*val.handle());
-  }
-  static int HashCode(Unique<Object> a) {
-    return static_cast<int>(a.Hashcode());
-  }
-  static bool Equals(Unique<Object> a, Unique<Object> b) { return a == b; }
-};
-
-// Specialization for static parameters of type {Unique<Name>}.
-template <>
-struct StaticParameterTraits<Unique<Name> > {
-  static OStream& PrintTo(OStream& os, Unique<Name> val) {  // NOLINT
-    return os << Brief(*val.handle());
-  }
-  static int HashCode(Unique<Name> a) { return static_cast<int>(a.Hashcode()); }
-  static bool Equals(Unique<Name> a, Unique<Name> b) { return a == b; }
-};
-
-#if DEBUG
-// Specialization for static parameters of type {Handle<Object>} to prevent any
-// direct usage of Handles in constants.
-template <>
-struct StaticParameterTraits<Handle<Object> > {
-  static OStream& PrintTo(OStream& os, Handle<Object> val) {  // NOLINT
-    UNREACHABLE();  // Should use Unique<Object> instead
-    return os;
-  }
-  static int HashCode(Handle<Object> a) {
-    UNREACHABLE();  // Should use Unique<Object> instead
-    return 0;
-  }
-  static bool Equals(Handle<Object> a, Handle<Object> b) {
-    UNREACHABLE();  // Should use Unique<Object> instead
-    return false;
-  }
-};
-#endif
 
 // A templatized implementation of Operator that has one static parameter of
-// type {T}. If a specialization of StaticParameterTraits<{T}> exists, then
-// operators of this kind can automatically be hashed, compared, and printed.
-template <typename T>
+// type {T}.
+template <typename T, typename Pred = std::equal_to<T>,
+          typename Hash = base::hash<T>>
 class Operator1 : public Operator {
  public:
-  Operator1(Opcode opcode, Properties properties, int input_count,
-            int output_count, const char* mnemonic, T parameter)
-      : Operator(opcode, properties, mnemonic),
-        input_count_(input_count),
-        output_count_(output_count),
-        parameter_(parameter) {}
+  Operator1(Opcode opcode, Properties properties, const char* mnemonic,
+            size_t value_in, size_t effect_in, size_t control_in,
+            size_t value_out, size_t effect_out, size_t control_out,
+            T parameter, Pred const& pred = Pred(), Hash const& hash = Hash())
+      : Operator(opcode, properties, mnemonic, value_in, effect_in, control_in,
+                 value_out, effect_out, control_out),
+        parameter_(parameter),
+        pred_(pred),
+        hash_(hash) {}
 
-  const T& parameter() const { return parameter_; }
+  T const& parameter() const { return parameter_; }
 
-  virtual bool Equals(const Operator* other) const OVERRIDE {
+  bool Equals(const Operator* other) const FINAL {
     if (opcode() != other->opcode()) return false;
     const Operator1<T>* that = static_cast<const Operator1<T>*>(other);
-    return StaticParameterTraits<T>::Equals(this->parameter_, that->parameter_);
+    return this->pred_(this->parameter(), that->parameter());
   }
-  virtual int HashCode() const OVERRIDE {
-    return opcode() + 33 * StaticParameterTraits<T>::HashCode(this->parameter_);
+  size_t HashCode() const FINAL {
+    return base::hash_combine(this->opcode(), this->hash_(this->parameter()));
   }
-  virtual int InputCount() const OVERRIDE { return input_count_; }
-  virtual int OutputCount() const OVERRIDE { return output_count_; }
-  virtual OStream& PrintParameter(OStream& os) const {  // NOLINT
-    return StaticParameterTraits<T>::PrintTo(os << "[", parameter_) << "]";
+  virtual void PrintParameter(std::ostream& os) const {
+    os << "[" << this->parameter() << "]";
   }
 
  protected:
-  virtual OStream& PrintTo(OStream& os) const FINAL {  // NOLINT
-    return PrintParameter(os << mnemonic());
+  void PrintTo(std::ostream& os) const FINAL {
+    os << mnemonic();
+    PrintParameter(os);
   }
 
  private:
-  int input_count_;
-  int output_count_;
-  T parameter_;
+  T const parameter_;
+  Pred const pred_;
+  Hash const hash_;
 };
 
 
 // Helper to extract parameters from Operator1<*> operator.
 template <typename T>
-static inline const T& OpParameter(const Operator* op) {
-  return reinterpret_cast<const Operator1<T>*>(op)->parameter();
+inline T const& OpParameter(const Operator* op) {
+  return static_cast<const Operator1<T>*>(op)->parameter();
+}
+
+// NOTE: We have to be careful to use the right equal/hash functions below;
+// for float/double we always use the ones operating on the bit level.
+template <>
+inline float const& OpParameter(const Operator* op) {
+  return static_cast<const Operator1<float, base::bit_equal_to<float>,
+                                     base::bit_hash<float>>*>(op)->parameter();
+}
+
+template <>
+inline double const& OpParameter(const Operator* op) {
+  return static_cast<const Operator1<double, base::bit_equal_to<double>,
+                                     base::bit_hash<double>>*>(op)->parameter();
 }
 
 }  // namespace compiler
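
The float/double specializations of OpParameter() pair with operators that
were constructed using bit-level predicates, because value semantics are the
wrong notion for operator caching: NaN != NaN would keep equal constants from
being merged, while -0.0 == +0.0 would merge constants that must stay
distinct. A standalone sketch of bit-level equality and hashing (the assumed
behavior of base::bit_equal_to/base::bit_hash, not their actual code):

    #include <cstdint>
    #include <cstring>
    #include <functional>

    // Reinterpret a double as its raw 64-bit pattern; memcpy makes the type
    // pun well-defined.
    static uint64_t BitsOf(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      return bits;
    }

    struct BitEqualTo {  // NaN == NaN here, but -0.0 != +0.0.
      bool operator()(double lhs, double rhs) const {
        return BitsOf(lhs) == BitsOf(rhs);
      }
    };

    struct BitHash {
      size_t operator()(double value) const {
        return std::hash<uint64_t>()(BitsOf(value));
      }
    };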
diff --git a/src/compiler/phi-reducer.h b/src/compiler/phi-reducer.h
deleted file mode 100644
index 5870d04..0000000
--- a/src/compiler/phi-reducer.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_PHI_REDUCER_H_
-#define V8_COMPILER_PHI_REDUCER_H_
-
-#include "src/compiler/graph-reducer.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// Replaces redundant phis if all the inputs are the same or the phi itself.
-class PhiReducer FINAL : public Reducer {
- public:
-  virtual Reduction Reduce(Node* node) OVERRIDE {
-    if (node->opcode() != IrOpcode::kPhi &&
-        node->opcode() != IrOpcode::kEffectPhi)
-      return NoChange();
-
-    int n = node->op()->InputCount();
-    if (n == 1) return Replace(node->InputAt(0));
-
-    Node* replacement = NULL;
-    Node::Inputs inputs = node->inputs();
-    for (InputIter it = inputs.begin(); n > 0; --n, ++it) {
-      Node* input = *it;
-      if (input != node && input != replacement) {
-        if (replacement != NULL) return NoChange();
-        replacement = input;
-      }
-    }
-    DCHECK_NE(node, replacement);
-    return Replace(replacement);
-  }
-};
-}
-}
-}  // namespace v8::internal::compiler
-
-#endif  // V8_COMPILER_PHI_REDUCER_H_
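
For the record, the reduction the deleted PhiReducer performed: a phi whose
inputs are all either the phi itself or one single other node x is redundant
and gets replaced by x (e.g. inputs {x, phi, x} reduce to x), whereas a phi
with two distinct non-self inputs such as {x, y} is left unchanged.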
diff --git a/src/compiler/pipeline-statistics.cc b/src/compiler/pipeline-statistics.cc
new file mode 100644
index 0000000..e58c396
--- /dev/null
+++ b/src/compiler/pipeline-statistics.cc
@@ -0,0 +1,103 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler.h"
+#include "src/compiler/pipeline-statistics.h"
+#include "src/compiler/zone-pool.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+void PipelineStatistics::CommonStats::Begin(
+    PipelineStatistics* pipeline_stats) {
+  DCHECK(scope_.is_empty());
+  scope_.Reset(new ZonePool::StatsScope(pipeline_stats->zone_pool_));
+  timer_.Start();
+  outer_zone_initial_size_ = pipeline_stats->OuterZoneSize();
+  allocated_bytes_at_start_ =
+      outer_zone_initial_size_ -
+      pipeline_stats->total_stats_.outer_zone_initial_size_ +
+      pipeline_stats->zone_pool_->GetCurrentAllocatedBytes();
+}
+
+
+void PipelineStatistics::CommonStats::End(
+    PipelineStatistics* pipeline_stats,
+    CompilationStatistics::BasicStats* diff) {
+  DCHECK(!scope_.is_empty());
+  diff->function_name_ = pipeline_stats->function_name_;
+  diff->delta_ = timer_.Elapsed();
+  size_t outer_zone_diff =
+      pipeline_stats->OuterZoneSize() - outer_zone_initial_size_;
+  diff->max_allocated_bytes_ = outer_zone_diff + scope_->GetMaxAllocatedBytes();
+  diff->absolute_max_allocated_bytes_ =
+      diff->max_allocated_bytes_ + allocated_bytes_at_start_;
+  diff->total_allocated_bytes_ =
+      outer_zone_diff + scope_->GetTotalAllocatedBytes();
+  scope_.Reset(NULL);
+  timer_.Stop();
+}
+
+
+PipelineStatistics::PipelineStatistics(CompilationInfo* info,
+                                       ZonePool* zone_pool)
+    : isolate_(info->zone()->isolate()),
+      outer_zone_(info->zone()),
+      zone_pool_(zone_pool),
+      compilation_stats_(isolate_->GetTurboStatistics()),
+      source_size_(0),
+      phase_kind_name_(NULL),
+      phase_name_(NULL) {
+  if (!info->shared_info().is_null()) {
+    source_size_ = static_cast<size_t>(info->shared_info()->SourceSize());
+    SmartArrayPointer<char> name =
+        info->shared_info()->DebugName()->ToCString();
+    function_name_ = name.get();
+  }
+  total_stats_.Begin(this);
+}
+
+
+PipelineStatistics::~PipelineStatistics() {
+  if (InPhaseKind()) EndPhaseKind();
+  CompilationStatistics::BasicStats diff;
+  total_stats_.End(this, &diff);
+  compilation_stats_->RecordTotalStats(source_size_, diff);
+}
+
+
+void PipelineStatistics::BeginPhaseKind(const char* phase_kind_name) {
+  DCHECK(!InPhase());
+  if (InPhaseKind()) EndPhaseKind();
+  phase_kind_name_ = phase_kind_name;
+  phase_kind_stats_.Begin(this);
+}
+
+
+void PipelineStatistics::EndPhaseKind() {
+  DCHECK(!InPhase());
+  CompilationStatistics::BasicStats diff;
+  phase_kind_stats_.End(this, &diff);
+  compilation_stats_->RecordPhaseKindStats(phase_kind_name_, diff);
+}
+
+
+void PipelineStatistics::BeginPhase(const char* name) {
+  DCHECK(InPhaseKind());
+  phase_name_ = name;
+  phase_stats_.Begin(this);
+}
+
+
+void PipelineStatistics::EndPhase() {
+  DCHECK(InPhaseKind());
+  CompilationStatistics::BasicStats diff;
+  phase_stats_.End(this, &diff);
+  compilation_stats_->RecordPhaseStats(phase_kind_name_, phase_name_, diff);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/pipeline-statistics.h b/src/compiler/pipeline-statistics.h
new file mode 100644
index 0000000..01cc9de
--- /dev/null
+++ b/src/compiler/pipeline-statistics.h
@@ -0,0 +1,95 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_PIPELINE_STATISTICS_H_
+#define V8_COMPILER_PIPELINE_STATISTICS_H_
+
+#include <string>
+
+#include "src/compilation-statistics.h"
+#include "src/compiler/zone-pool.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class PhaseScope;
+
+class PipelineStatistics : public Malloced {
+ public:
+  PipelineStatistics(CompilationInfo* info, ZonePool* zone_pool);
+  ~PipelineStatistics();
+
+  void BeginPhaseKind(const char* phase_kind_name);
+
+ private:
+  size_t OuterZoneSize() {
+    return static_cast<size_t>(outer_zone_->allocation_size());
+  }
+
+  class CommonStats {
+   public:
+    CommonStats() : outer_zone_initial_size_(0) {}
+
+    void Begin(PipelineStatistics* pipeline_stats);
+    void End(PipelineStatistics* pipeline_stats,
+             CompilationStatistics::BasicStats* diff);
+
+    SmartPointer<ZonePool::StatsScope> scope_;
+    base::ElapsedTimer timer_;
+    size_t outer_zone_initial_size_;
+    size_t allocated_bytes_at_start_;
+  };
+
+  bool InPhaseKind() { return !phase_kind_stats_.scope_.is_empty(); }
+  void EndPhaseKind();
+
+  friend class PhaseScope;
+  bool InPhase() { return !phase_stats_.scope_.is_empty(); }
+  void BeginPhase(const char* name);
+  void EndPhase();
+
+  Isolate* isolate_;
+  Zone* outer_zone_;
+  ZonePool* zone_pool_;
+  CompilationStatistics* compilation_stats_;
+  std::string function_name_;
+
+  // Stats for the entire compilation.
+  CommonStats total_stats_;
+  size_t source_size_;
+
+  // Stats for phase kind.
+  const char* phase_kind_name_;
+  CommonStats phase_kind_stats_;
+
+  // Stats for phase.
+  const char* phase_name_;
+  CommonStats phase_stats_;
+
+  DISALLOW_COPY_AND_ASSIGN(PipelineStatistics);
+};
+
+
+class PhaseScope {
+ public:
+  PhaseScope(PipelineStatistics* pipeline_stats, const char* name)
+      : pipeline_stats_(pipeline_stats) {
+    if (pipeline_stats_ != NULL) pipeline_stats_->BeginPhase(name);
+  }
+  ~PhaseScope() {
+    if (pipeline_stats_ != NULL) pipeline_stats_->EndPhase();
+  }
+
+ private:
+  PipelineStatistics* const pipeline_stats_;
+
+  DISALLOW_COPY_AND_ASSIGN(PhaseScope);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_PIPELINE_STATISTICS_H_
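
A minimal usage sketch for the PhaseScope helper declared above (the function
and phase name are hypothetical):

    void RunTyperPhase(PipelineStatistics* stats) {  // stats may be NULL.
      PhaseScope scope(stats, "typer");  // BeginPhase("typer") if stats set.
      // ... phase body runs here ...
    }  // ~PhaseScope calls EndPhase(), recording time for this phase.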
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index 9889b6a..c7432c6 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -4,29 +4,42 @@
 
 #include "src/compiler/pipeline.h"
 
+#include <fstream>  // NOLINT(readability/streams)
+#include <sstream>
+
 #include "src/base/platform/elapsed-timer.h"
 #include "src/compiler/ast-graph-builder.h"
+#include "src/compiler/ast-loop-assignment-analyzer.h"
+#include "src/compiler/basic-block-instrumentor.h"
 #include "src/compiler/change-lowering.h"
 #include "src/compiler/code-generator.h"
+#include "src/compiler/common-operator-reducer.h"
+#include "src/compiler/control-reducer.h"
 #include "src/compiler/graph-replay.h"
 #include "src/compiler/graph-visualizer.h"
 #include "src/compiler/instruction.h"
 #include "src/compiler/instruction-selector.h"
+#include "src/compiler/js-builtin-reducer.h"
 #include "src/compiler/js-context-specialization.h"
 #include "src/compiler/js-generic-lowering.h"
 #include "src/compiler/js-inlining.h"
 #include "src/compiler/js-typed-lowering.h"
+#include "src/compiler/jump-threading.h"
+#include "src/compiler/load-elimination.h"
 #include "src/compiler/machine-operator-reducer.h"
-#include "src/compiler/phi-reducer.h"
+#include "src/compiler/move-optimizer.h"
+#include "src/compiler/pipeline-statistics.h"
 #include "src/compiler/register-allocator.h"
+#include "src/compiler/register-allocator-verifier.h"
 #include "src/compiler/schedule.h"
 #include "src/compiler/scheduler.h"
+#include "src/compiler/select-lowering.h"
 #include "src/compiler/simplified-lowering.h"
 #include "src/compiler/simplified-operator-reducer.h"
 #include "src/compiler/typer.h"
 #include "src/compiler/value-numbering-reducer.h"
 #include "src/compiler/verifier.h"
-#include "src/hydrogen.h"
+#include "src/compiler/zone-pool.h"
 #include "src/ostreams.h"
 #include "src/utils.h"
 
@@ -34,47 +47,202 @@
 namespace internal {
 namespace compiler {
 
-class PhaseStats {
+class PipelineData {
  public:
-  enum PhaseKind { CREATE_GRAPH, OPTIMIZATION, CODEGEN };
+  explicit PipelineData(ZonePool* zone_pool, CompilationInfo* info)
+      : isolate_(info->zone()->isolate()),
+        info_(info),
+        outer_zone_(nullptr),
+        zone_pool_(zone_pool),
+        pipeline_statistics_(nullptr),
+        compilation_failed_(false),
+        code_(Handle<Code>::null()),
+        graph_zone_scope_(zone_pool_),
+        graph_zone_(nullptr),
+        graph_(nullptr),
+        loop_assignment_(nullptr),
+        machine_(nullptr),
+        common_(nullptr),
+        javascript_(nullptr),
+        jsgraph_(nullptr),
+        typer_(nullptr),
+        context_node_(nullptr),
+        schedule_(nullptr),
+        instruction_zone_scope_(zone_pool_),
+        instruction_zone_(nullptr),
+        sequence_(nullptr),
+        frame_(nullptr),
+        register_allocator_(nullptr) {}
 
-  PhaseStats(CompilationInfo* info, PhaseKind kind, const char* name)
-      : info_(info),
-        kind_(kind),
-        name_(name),
-        size_(info->zone()->allocation_size()) {
-    if (FLAG_turbo_stats) {
-      timer_.Start();
-    }
+  ~PipelineData() {
+    DeleteInstructionZone();
+    DeleteGraphZone();
   }
 
-  ~PhaseStats() {
-    if (FLAG_turbo_stats) {
-      base::TimeDelta delta = timer_.Elapsed();
-      size_t bytes = info_->zone()->allocation_size() - size_;
-      HStatistics* stats = info_->isolate()->GetTStatistics();
-      stats->SaveTiming(name_, delta, static_cast<int>(bytes));
+  // For main entry point.
+  void Initialize(PipelineStatistics* pipeline_statistics) {
+    PhaseScope scope(pipeline_statistics, "init pipeline data");
+    outer_zone_ = info()->zone();
+    pipeline_statistics_ = pipeline_statistics;
+    graph_zone_ = graph_zone_scope_.zone();
+    graph_ = new (graph_zone()) Graph(graph_zone());
+    source_positions_.Reset(new SourcePositionTable(graph()));
+    machine_ = new (graph_zone()) MachineOperatorBuilder(
+        graph_zone(), kMachPtr,
+        InstructionSelector::SupportedMachineOperatorFlags());
+    common_ = new (graph_zone()) CommonOperatorBuilder(graph_zone());
+    javascript_ = new (graph_zone()) JSOperatorBuilder(graph_zone());
+    jsgraph_ =
+        new (graph_zone()) JSGraph(graph(), common(), javascript(), machine());
+    typer_.Reset(new Typer(graph(), info()->context()));
+    instruction_zone_ = instruction_zone_scope_.zone();
+  }
 
-      switch (kind_) {
-        case CREATE_GRAPH:
-          stats->IncrementCreateGraph(delta);
-          break;
-        case OPTIMIZATION:
-          stats->IncrementOptimizeGraph(delta);
-          break;
-        case CODEGEN:
-          stats->IncrementGenerateCode(delta);
-          break;
-      }
-    }
+  // For machine graph testing entry point.
+  void InitializeTorTesting(Graph* graph, Schedule* schedule) {
+    graph_ = graph;
+    source_positions_.Reset(new SourcePositionTable(graph));
+    schedule_ = schedule;
+    instruction_zone_ = instruction_zone_scope_.zone();
+  }
+
+  // For register allocation testing entry point.
+  void InitializeTorTesting(InstructionSequence* sequence) {
+    instruction_zone_ = sequence->zone();
+    sequence_ = sequence;
+  }
+
+  Isolate* isolate() const { return isolate_; }
+  CompilationInfo* info() const { return info_; }
+  ZonePool* zone_pool() const { return zone_pool_; }
+  PipelineStatistics* pipeline_statistics() { return pipeline_statistics_; }
+  bool compilation_failed() const { return compilation_failed_; }
+  void set_compilation_failed() { compilation_failed_ = true; }
+  Handle<Code> code() { return code_; }
+  void set_code(Handle<Code> code) {
+    DCHECK(code_.is_null());
+    code_ = code;
+  }
+
+  // RawMachineAssembler generally produces graphs which cannot be verified.
+  bool MayHaveUnverifiableGraph() const { return outer_zone_ == nullptr; }
+
+  Zone* graph_zone() const { return graph_zone_; }
+  Graph* graph() const { return graph_; }
+  SourcePositionTable* source_positions() const {
+    return source_positions_.get();
+  }
+  MachineOperatorBuilder* machine() const { return machine_; }
+  CommonOperatorBuilder* common() const { return common_; }
+  JSOperatorBuilder* javascript() const { return javascript_; }
+  JSGraph* jsgraph() const { return jsgraph_; }
+  Typer* typer() const { return typer_.get(); }
+
+  LoopAssignmentAnalysis* loop_assignment() const { return loop_assignment_; }
+  void set_loop_assignment(LoopAssignmentAnalysis* loop_assignment) {
+    DCHECK_EQ(nullptr, loop_assignment_);
+    loop_assignment_ = loop_assignment;
+  }
+
+  Node* context_node() const { return context_node_; }
+  void set_context_node(Node* context_node) {
+    DCHECK_EQ(nullptr, context_node_);
+    context_node_ = context_node;
+  }
+
+  Schedule* schedule() const { return schedule_; }
+  void set_schedule(Schedule* schedule) {
+    DCHECK_EQ(nullptr, schedule_);
+    schedule_ = schedule;
+  }
+
+  Zone* instruction_zone() const { return instruction_zone_; }
+  InstructionSequence* sequence() const { return sequence_; }
+  Frame* frame() const { return frame_; }
+  RegisterAllocator* register_allocator() const { return register_allocator_; }
+
+  void DeleteGraphZone() {
+    // Destroy objects with destructors first.
+    source_positions_.Reset(nullptr);
+    typer_.Reset(nullptr);
+    if (graph_zone_ == nullptr) return;
+    // Destroy zone and clear pointers.
+    graph_zone_scope_.Destroy();
+    graph_zone_ = nullptr;
+    graph_ = nullptr;
+    loop_assignment_ = nullptr;
+    machine_ = nullptr;
+    common_ = nullptr;
+    javascript_ = nullptr;
+    jsgraph_ = nullptr;
+    context_node_ = nullptr;
+    schedule_ = nullptr;
+  }
+
+  void DeleteInstructionZone() {
+    if (instruction_zone_ == nullptr) return;
+    instruction_zone_scope_.Destroy();
+    instruction_zone_ = nullptr;
+    sequence_ = nullptr;
+    frame_ = nullptr;
+    register_allocator_ = nullptr;
+  }
+
+  void InitializeInstructionSequence() {
+    DCHECK_EQ(nullptr, sequence_);
+    InstructionBlocks* instruction_blocks =
+        InstructionSequence::InstructionBlocksFor(instruction_zone(),
+                                                  schedule());
+    sequence_ = new (instruction_zone())
+        InstructionSequence(instruction_zone(), instruction_blocks);
+  }
+
+  void InitializeRegisterAllocator(Zone* local_zone,
+                                   const RegisterConfiguration* config,
+                                   const char* debug_name) {
+    DCHECK_EQ(nullptr, register_allocator_);
+    DCHECK_EQ(nullptr, frame_);
+    frame_ = new (instruction_zone()) Frame();
+    register_allocator_ = new (instruction_zone())
+        RegisterAllocator(config, local_zone, frame(), sequence(), debug_name);
   }
 
  private:
+  Isolate* isolate_;
   CompilationInfo* info_;
-  PhaseKind kind_;
-  const char* name_;
-  size_t size_;
-  base::ElapsedTimer timer_;
+  Zone* outer_zone_;
+  ZonePool* const zone_pool_;
+  PipelineStatistics* pipeline_statistics_;
+  bool compilation_failed_;
+  Handle<Code> code_;
+
+  // All objects in the following group of fields are allocated in graph_zone_.
+  // They are all set to NULL when the graph_zone_ is destroyed.
+  ZonePool::Scope graph_zone_scope_;
+  Zone* graph_zone_;
+  Graph* graph_;
+  // TODO(dcarney): make this into a ZoneObject.
+  SmartPointer<SourcePositionTable> source_positions_;
+  LoopAssignmentAnalysis* loop_assignment_;
+  MachineOperatorBuilder* machine_;
+  CommonOperatorBuilder* common_;
+  JSOperatorBuilder* javascript_;
+  JSGraph* jsgraph_;
+  // TODO(dcarney): make this into a ZoneObject.
+  SmartPointer<Typer> typer_;
+  Node* context_node_;
+  Schedule* schedule_;
+
+  // All objects in the following group of fields are allocated in
+  // instruction_zone_.  They are all set to NULL when the instruction_zone_ is
+  // destroyed.
+  ZonePool::Scope instruction_zone_scope_;
+  Zone* instruction_zone_;
+  InstructionSequence* sequence_;
+  Frame* frame_;
+  RegisterAllocator* register_allocator_;
+
+  DISALLOW_COPY_AND_ASSIGN(PipelineData);
 };
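
The teardown order in DeleteGraphZone above is deliberate: the two objects with real destructors (source_positions_, typer_) are reset while the zone still exists, the zone is then destroyed in one shot, and the remaining zone-allocated raw pointers are only cleared, never individually deleted. A minimal standalone sketch of that ownership pattern, with a hypothetical Arena standing in for Zone:

#include <memory>
#include <vector>

// Arena stands in for v8's Zone: objects allocated in it are never deleted
// one by one; dropping the arena reclaims the whole group at once.
struct Arena {
  std::vector<std::unique_ptr<char[]>> blocks;
};

struct GraphGroup {
  GraphGroup() : arena(new Arena()), helper(new int(0)), arena_object(nullptr) {}

  void DeleteArena() {
    helper.reset();          // run destructors while the arena still exists
    arena.reset();           // then free all arena memory in one shot
    arena_object = nullptr;  // raw pointers are merely cleared
  }

  std::unique_ptr<Arena> arena;
  std::unique_ptr<int> helper;  // has a destructor to run, like typer_
  int* arena_object;            // would live inside the arena
};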
 
 
@@ -87,41 +255,46 @@
 }
 
 
-void Pipeline::VerifyAndPrintGraph(Graph* graph, const char* phase) {
-  if (FLAG_trace_turbo) {
-    char buffer[256];
-    Vector<char> filename(buffer, sizeof(buffer));
-    if (!info_->shared_info().is_null()) {
-      SmartArrayPointer<char> functionname =
-          info_->shared_info()->DebugName()->ToCString();
-      if (strlen(functionname.get()) > 0) {
-        SNPrintF(filename, "turbo-%s-%s.dot", functionname.get(), phase);
-      } else {
-        SNPrintF(filename, "turbo-%p-%s.dot", static_cast<void*>(info_), phase);
-      }
-    } else {
-      SNPrintF(filename, "turbo-none-%s.dot", phase);
-    }
-    std::replace(filename.start(), filename.start() + filename.length(), ' ',
-                 '_');
-    FILE* file = base::OS::FOpen(filename.start(), "w+");
-    OFStream of(file);
-    of << AsDOT(*graph);
-    fclose(file);
+struct TurboCfgFile : public std::ofstream {
+  explicit TurboCfgFile(Isolate* isolate)
+      : std::ofstream(isolate->GetTurboCfgFileName().c_str(),
+                      std::ios_base::app) {}
+};
 
-    OFStream os(stdout);
-    os << "-- " << phase << " graph printed to file " << filename.start()
-       << "\n";
+
+static void TraceSchedule(Schedule* schedule) {
+  if (!FLAG_trace_turbo_graph && !FLAG_trace_turbo_scheduler) return;
+  OFStream os(stdout);
+  os << "-- Schedule --------------------------------------\n" << *schedule;
+}
+
+
+static SmartArrayPointer<char> GetDebugName(CompilationInfo* info) {
+  SmartArrayPointer<char> name;
+  if (info->IsStub()) {
+    if (info->code_stub() != NULL) {
+      CodeStub::Major major_key = info->code_stub()->MajorKey();
+      const char* major_name = CodeStub::MajorName(major_key, false);
+      size_t len = strlen(major_name) + 1;  // copy includes the terminator
+      name.Reset(new char[len]);
+      memcpy(name.get(), major_name, len);
+    }
+  } else {
+    AllowHandleDereference allow_deref;
+    name = info->function()->debug_name()->ToCString();
   }
-  if (VerifyGraphs()) Verifier::Run(graph);
+  return name;
 }
 
 
 class AstGraphBuilderWithPositions : public AstGraphBuilder {
  public:
-  explicit AstGraphBuilderWithPositions(CompilationInfo* info, JSGraph* jsgraph,
-                                        SourcePositionTable* source_positions)
-      : AstGraphBuilder(info, jsgraph), source_positions_(source_positions) {}
+  AstGraphBuilderWithPositions(Zone* local_zone, CompilationInfo* info,
+                               JSGraph* jsgraph,
+                               LoopAssignmentAnalysis* loop_assignment,
+                               SourcePositionTable* source_positions)
+      : AstGraphBuilder(local_zone, info, jsgraph, loop_assignment),
+        source_positions_(source_positions) {}
 
   bool CreateGraph() {
     SourcePositionTable::Scope pos(source_positions_,
@@ -130,7 +303,7 @@
   }
 
 #define DEF_VISIT(type)                                               \
-  virtual void Visit##type(type* node) OVERRIDE {                  \
+  void Visit##type(type* node) OVERRIDE {                             \
     SourcePositionTable::Scope pos(source_positions_,                 \
                                    SourcePosition(node->position())); \
     AstGraphBuilder::Visit##type(node);                               \
@@ -138,220 +311,613 @@
   AST_NODE_LIST(DEF_VISIT)
 #undef DEF_VISIT
 
+  Node* GetFunctionContext() { return AstGraphBuilder::GetFunctionContext(); }
+
  private:
   SourcePositionTable* source_positions_;
 };
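
Each generated Visit##type above brackets the base-class visit in a SourcePositionTable::Scope, an RAII guard that installs the node's position on entry and restores the previous one on exit, so nested visits unwind correctly. A standalone sketch of that guard pattern (PositionScope is hypothetical):

class PositionScope {
 public:
  PositionScope(int* current, int position)
      : current_(current), previous_(*current) {
    *current_ = position;
  }
  ~PositionScope() { *current_ = previous_; }

 private:
  int* current_;  // slot holding the "current position"
  int previous_;  // value to restore on scope exit
};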
 
 
-static void TraceSchedule(Schedule* schedule) {
-  if (!FLAG_trace_turbo) return;
-  OFStream os(stdout);
-  os << "-- Schedule --------------------------------------\n" << *schedule;
+class PipelineRunScope {
+ public:
+  PipelineRunScope(PipelineData* data, const char* phase_name)
+      : phase_scope_(
+            phase_name == nullptr ? nullptr : data->pipeline_statistics(),
+            phase_name),
+        zone_scope_(data->zone_pool()) {}
+
+  Zone* zone() { return zone_scope_.zone(); }
+
+ private:
+  PhaseScope phase_scope_;
+  ZonePool::Scope zone_scope_;
+};
+
+
+template <typename Phase>
+void Pipeline::Run() {
+  PipelineRunScope scope(this->data_, Phase::phase_name());
+  Phase phase;
+  phase.Run(this->data_, scope.zone());
+}
+
+
+template <typename Phase, typename Arg0>
+void Pipeline::Run(Arg0 arg_0) {
+  PipelineRunScope scope(this->data_, Phase::phase_name());
+  Phase phase;
+  phase.Run(this->data_, scope.zone(), arg_0);
+}
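
The Run<Phase>() helpers above define the whole phase protocol: any struct with a static phase_name() and a Run(PipelineData*, Zone*) method can be dropped into the pipeline, and PipelineRunScope gives it a fresh temporary zone plus a statistics entry. A hypothetical extra phase following that protocol, shown only as an illustration:

struct TraceGraphSizePhase {
  static const char* phase_name() { return "trace graph size"; }

  void Run(PipelineData* data, Zone* temp_zone) {
    OFStream os(stdout);
    os << "-- Graph has " << data->graph()->NodeCount() << " nodes\n";
  }
};

It would be invoked like the real phases below, e.g. Run<TraceGraphSizePhase>() from within Pipeline::GenerateCode().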
+
+
+struct LoopAssignmentAnalysisPhase {
+  static const char* phase_name() { return "loop assignment analysis"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    AstLoopAssignmentAnalyzer analyzer(data->graph_zone(), data->info());
+    LoopAssignmentAnalysis* loop_assignment = analyzer.Analyze();
+    data->set_loop_assignment(loop_assignment);
+  }
+};
+
+
+struct GraphBuilderPhase {
+  static const char* phase_name() { return "graph builder"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    AstGraphBuilderWithPositions graph_builder(
+        temp_zone, data->info(), data->jsgraph(), data->loop_assignment(),
+        data->source_positions());
+    if (graph_builder.CreateGraph()) {
+      data->set_context_node(graph_builder.GetFunctionContext());
+    } else {
+      data->set_compilation_failed();
+    }
+  }
+};
+
+
+struct ContextSpecializerPhase {
+  static const char* phase_name() { return "context specializing"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    SourcePositionTable::Scope pos(data->source_positions(),
+                                   SourcePosition::Unknown());
+    JSContextSpecializer spec(data->info(), data->jsgraph(),
+                              data->context_node());
+    GraphReducer graph_reducer(data->graph(), temp_zone);
+    graph_reducer.AddReducer(&spec);
+    graph_reducer.ReduceGraph();
+  }
+};
+
+
+struct InliningPhase {
+  static const char* phase_name() { return "inlining"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    SourcePositionTable::Scope pos(data->source_positions(),
+                                   SourcePosition::Unknown());
+    JSInliner inliner(temp_zone, data->info(), data->jsgraph());
+    inliner.Inline();
+  }
+};
+
+
+struct TyperPhase {
+  static const char* phase_name() { return "typer"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) { data->typer()->Run(); }
+};
+
+
+struct TypedLoweringPhase {
+  static const char* phase_name() { return "typed lowering"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    SourcePositionTable::Scope pos(data->source_positions(),
+                                   SourcePosition::Unknown());
+    ValueNumberingReducer vn_reducer(temp_zone);
+    LoadElimination load_elimination;
+    JSBuiltinReducer builtin_reducer(data->jsgraph());
+    JSTypedLowering typed_lowering(data->jsgraph(), temp_zone);
+    SimplifiedOperatorReducer simple_reducer(data->jsgraph());
+    CommonOperatorReducer common_reducer;
+    GraphReducer graph_reducer(data->graph(), temp_zone);
+    graph_reducer.AddReducer(&vn_reducer);
+    graph_reducer.AddReducer(&builtin_reducer);
+    graph_reducer.AddReducer(&typed_lowering);
+    graph_reducer.AddReducer(&load_elimination);
+    graph_reducer.AddReducer(&simple_reducer);
+    graph_reducer.AddReducer(&common_reducer);
+    graph_reducer.ReduceGraph();
+  }
+};
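
TypedLoweringPhase illustrates the reducer pattern used throughout: several independent Reducer implementations are registered on one GraphReducer, which applies them to a fixpoint over the graph. A minimal sketch of a custom reducer, assuming the Reducer interface from graph-reducer.h (CountingReducer is hypothetical and rewrites nothing):

class CountingReducer FINAL : public Reducer {
 public:
  CountingReducer() : visited_(0) {}

  Reduction Reduce(Node* node) OVERRIDE {
    visited_++;         // observe every node offered to the reducer
    return NoChange();  // a real reducer would return Replace(new_node)
  }

 private:
  int visited_;
};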
+
+
+struct SimplifiedLoweringPhase {
+  static const char* phase_name() { return "simplified lowering"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    SourcePositionTable::Scope pos(data->source_positions(),
+                                   SourcePosition::Unknown());
+    SimplifiedLowering lowering(data->jsgraph(), temp_zone);
+    lowering.LowerAllNodes();
+    ValueNumberingReducer vn_reducer(temp_zone);
+    SimplifiedOperatorReducer simple_reducer(data->jsgraph());
+    MachineOperatorReducer machine_reducer(data->jsgraph());
+    CommonOperatorReducer common_reducer;
+    GraphReducer graph_reducer(data->graph(), temp_zone);
+    graph_reducer.AddReducer(&vn_reducer);
+    graph_reducer.AddReducer(&simple_reducer);
+    graph_reducer.AddReducer(&machine_reducer);
+    graph_reducer.AddReducer(&common_reducer);
+    graph_reducer.ReduceGraph();
+  }
+};
+
+
+struct ChangeLoweringPhase {
+  static const char* phase_name() { return "change lowering"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    SourcePositionTable::Scope pos(data->source_positions(),
+                                   SourcePosition::Unknown());
+    Linkage linkage(data->graph_zone(), data->info());
+    ValueNumberingReducer vn_reducer(temp_zone);
+    SimplifiedOperatorReducer simple_reducer(data->jsgraph());
+    ChangeLowering lowering(data->jsgraph(), &linkage);
+    MachineOperatorReducer machine_reducer(data->jsgraph());
+    CommonOperatorReducer common_reducer;
+    GraphReducer graph_reducer(data->graph(), temp_zone);
+    graph_reducer.AddReducer(&vn_reducer);
+    graph_reducer.AddReducer(&simple_reducer);
+    graph_reducer.AddReducer(&lowering);
+    graph_reducer.AddReducer(&machine_reducer);
+    graph_reducer.AddReducer(&common_reducer);
+    graph_reducer.ReduceGraph();
+  }
+};
+
+
+struct ControlReductionPhase {
+  void Run(PipelineData* data, Zone* temp_zone) {
+    SourcePositionTable::Scope pos(data->source_positions(),
+                                   SourcePosition::Unknown());
+    ControlReducer::ReduceGraph(temp_zone, data->jsgraph(), data->common());
+  }
+};
+
+
+struct EarlyControlReductionPhase : ControlReductionPhase {
+  static const char* phase_name() { return "early control reduction"; }
+};
+
+
+struct LateControlReductionPhase : ControlReductionPhase {
+  static const char* phase_name() { return "late control reduction"; }
+};
+
+
+struct GenericLoweringPhase {
+  static const char* phase_name() { return "generic lowering"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    SourcePositionTable::Scope pos(data->source_positions(),
+                                   SourcePosition::Unknown());
+    JSGenericLowering generic(data->info(), data->jsgraph());
+    SelectLowering select(data->jsgraph()->graph(), data->jsgraph()->common());
+    GraphReducer graph_reducer(data->graph(), temp_zone);
+    graph_reducer.AddReducer(&generic);
+    graph_reducer.AddReducer(&select);
+    graph_reducer.ReduceGraph();
+  }
+};
+
+
+struct ComputeSchedulePhase {
+  static const char* phase_name() { return "scheduling"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    Schedule* schedule = Scheduler::ComputeSchedule(temp_zone, data->graph());
+    TraceSchedule(schedule);
+    if (VerifyGraphs()) ScheduleVerifier::Run(schedule);
+    data->set_schedule(schedule);
+  }
+};
+
+
+struct InstructionSelectionPhase {
+  static const char* phase_name() { return "select instructions"; }
+
+  void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
+    InstructionSelector selector(temp_zone, data->graph(), linkage,
+                                 data->sequence(), data->schedule(),
+                                 data->source_positions());
+    selector.SelectInstructions();
+  }
+};
+
+
+struct MeetRegisterConstraintsPhase {
+  static const char* phase_name() { return "meet register constraints"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    data->register_allocator()->MeetRegisterConstraints();
+  }
+};
+
+
+struct ResolvePhisPhase {
+  static const char* phase_name() { return "resolve phis"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    data->register_allocator()->ResolvePhis();
+  }
+};
+
+
+struct BuildLiveRangesPhase {
+  static const char* phase_name() { return "build live ranges"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    data->register_allocator()->BuildLiveRanges();
+  }
+};
+
+
+struct AllocateGeneralRegistersPhase {
+  static const char* phase_name() { return "allocate general registers"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    data->register_allocator()->AllocateGeneralRegisters();
+  }
+};
+
+
+struct AllocateDoubleRegistersPhase {
+  static const char* phase_name() { return "allocate double registers"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    data->register_allocator()->AllocateDoubleRegisters();
+  }
+};
+
+
+struct ReuseSpillSlotsPhase {
+  static const char* phase_name() { return "reuse spill slots"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    data->register_allocator()->ReuseSpillSlots();
+  }
+};
+
+
+struct CommitAssignmentPhase {
+  static const char* phase_name() { return "commit assignment"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    data->register_allocator()->CommitAssignment();
+  }
+};
+
+
+struct PopulatePointerMapsPhase {
+  static const char* phase_name() { return "populate pointer maps"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    data->register_allocator()->PopulatePointerMaps();
+  }
+};
+
+
+struct ConnectRangesPhase {
+  static const char* phase_name() { return "connect ranges"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    data->register_allocator()->ConnectRanges();
+  }
+};
+
+
+struct ResolveControlFlowPhase {
+  static const char* phase_name() { return "resolve control flow"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    data->register_allocator()->ResolveControlFlow();
+  }
+};
+
+
+struct OptimizeMovesPhase {
+  static const char* phase_name() { return "optimize moves"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    MoveOptimizer move_optimizer(temp_zone, data->sequence());
+    move_optimizer.Run();
+  }
+};
+
+
+struct JumpThreadingPhase {
+  static const char* phase_name() { return "jump threading"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    ZoneVector<BasicBlock::RpoNumber> result(temp_zone);
+    if (JumpThreading::ComputeForwarding(temp_zone, result, data->sequence())) {
+      JumpThreading::ApplyForwarding(result, data->sequence());
+    }
+  }
+};
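
JumpThreading first computes, per block, the block it ultimately lands in when skipping empty forwarding blocks, and only rewrites branches if any forwarding was found. A standalone sketch of that forwarding computation, assuming no cycles of empty blocks:

#include <cassert>
#include <cstddef>
#include <vector>

// forward[i] == i means block i has real content; otherwise block i merely
// jumps to forward[i]. Collapse chains so branches target "real" blocks.
std::vector<int> ComputeForwarding(std::vector<int> forward) {
  for (std::size_t i = 0; i < forward.size(); ++i) {
    int target = forward[i];
    while (forward[target] != target) target = forward[target];
    forward[i] = target;
  }
  return forward;
}

int main() {
  // Block 1 jumps straight to block 2, which jumps straight to block 3.
  std::vector<int> result = ComputeForwarding({0, 2, 3, 3});
  assert(result[1] == 3 && result[2] == 3);
  return 0;
}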
+
+
+struct GenerateCodePhase {
+  static const char* phase_name() { return "generate code"; }
+
+  void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
+    CodeGenerator generator(data->frame(), linkage, data->sequence(),
+                            data->info());
+    data->set_code(generator.GenerateCode());
+  }
+};
+
+
+struct PrintGraphPhase {
+  static const char* phase_name() { return nullptr; }
+
+  void Run(PipelineData* data, Zone* temp_zone, const char* phase) {
+    CompilationInfo* info = data->info();
+    Graph* graph = data->graph();
+    char buffer[256];
+    Vector<char> filename(buffer, sizeof(buffer));
+    SmartArrayPointer<char> functionname;
+    if (!info->shared_info().is_null()) {
+      functionname = info->shared_info()->DebugName()->ToCString();
+      if (strlen(functionname.get()) > 0) {
+        SNPrintF(filename, "turbo-%s-%s", functionname.get(), phase);
+      } else {
+        SNPrintF(filename, "turbo-%p-%s", static_cast<void*>(info), phase);
+      }
+    } else {
+      SNPrintF(filename, "turbo-none-%s", phase);
+    }
+    std::replace(filename.start(), filename.start() + filename.length(), ' ',
+                 '_');
+
+    {  // Print dot.
+      char dot_buffer[256];
+      Vector<char> dot_filename(dot_buffer, sizeof(dot_buffer));
+      SNPrintF(dot_filename, "%s.dot", filename.start());
+      FILE* dot_file = base::OS::FOpen(dot_filename.start(), "w+");
+      if (dot_file == nullptr) return;
+      OFStream dot_of(dot_file);
+      dot_of << AsDOT(*graph);
+      fclose(dot_file);
+    }
+
+    {  // Print JSON.
+      char json_buffer[256];
+      Vector<char> json_filename(json_buffer, sizeof(json_buffer));
+      SNPrintF(json_filename, "%s.json", filename.start());
+      FILE* json_file = base::OS::FOpen(json_filename.start(), "w+");
+      if (json_file == nullptr) return;
+      OFStream json_of(json_file);
+      json_of << AsJSON(*graph);
+      fclose(json_file);
+    }
+
+    OFStream os(stdout);
+    if (FLAG_trace_turbo_graph) {  // Simple textual RPO.
+      os << "-- Graph after " << phase << " -- " << std::endl;
+      os << AsRPO(*graph);
+    }
+
+    os << "-- " << phase << " graph printed to file " << filename.start()
+       << std::endl;
+  }
+};
+
+
+struct VerifyGraphPhase {
+  static const char* phase_name() { return nullptr; }
+
+  void Run(PipelineData* data, Zone* temp_zone, const bool untyped) {
+    Verifier::Run(data->graph(), FLAG_turbo_types && !untyped
+                                     ? Verifier::TYPED
+                                     : Verifier::UNTYPED);
+  }
+};
+
+
+void Pipeline::BeginPhaseKind(const char* phase_kind_name) {
+  if (data_->pipeline_statistics() != NULL) {
+    data_->pipeline_statistics()->BeginPhaseKind(phase_kind_name);
+  }
+}
+
+
+void Pipeline::RunPrintAndVerify(const char* phase, bool untyped) {
+  if (FLAG_trace_turbo) {
+    Run<PrintGraphPhase>(phase);
+  }
+  if (VerifyGraphs()) {
+    Run<VerifyGraphPhase>(untyped);
+  }
 }
 
 
 Handle<Code> Pipeline::GenerateCode() {
+  // This list must be kept in sync with DONT_TURBOFAN_NODE in ast.cc.
   if (info()->function()->dont_optimize_reason() == kTryCatchStatement ||
       info()->function()->dont_optimize_reason() == kTryFinallyStatement ||
       // TODO(turbofan): Make ES6 for-of work and remove this bailout.
       info()->function()->dont_optimize_reason() == kForOfStatement ||
       // TODO(turbofan): Make super work and remove this bailout.
       info()->function()->dont_optimize_reason() == kSuperReference ||
+      // TODO(turbofan): Make class literals work and remove this bailout.
+      info()->function()->dont_optimize_reason() == kClassLiteral ||
       // TODO(turbofan): Make OSR work and remove this bailout.
       info()->is_osr()) {
     return Handle<Code>::null();
   }
 
-  if (FLAG_turbo_stats) isolate()->GetTStatistics()->Initialize(info_);
+  ZonePool zone_pool(isolate());
+  SmartPointer<PipelineStatistics> pipeline_statistics;
+
+  if (FLAG_turbo_stats) {
+    pipeline_statistics.Reset(new PipelineStatistics(info(), &zone_pool));
+    pipeline_statistics->BeginPhaseKind("initializing");
+  }
+
+  PipelineData data(&zone_pool, info());
+  this->data_ = &data;
+  data.Initialize(pipeline_statistics.get());
+
+  BeginPhaseKind("graph creation");
 
   if (FLAG_trace_turbo) {
     OFStream os(stdout);
     os << "---------------------------------------------------\n"
-       << "Begin compiling method "
-       << info()->function()->debug_name()->ToCString().get()
-       << " using Turbofan" << endl;
+       << "Begin compiling method " << GetDebugName(info()).get()
+       << " using Turbofan" << std::endl;
+    TurboCfgFile tcf(isolate());
+    tcf << AsC1VCompilation(info());
   }
 
-  // Build the graph.
-  Graph graph(zone());
-  SourcePositionTable source_positions(&graph);
-  source_positions.AddDecorator();
-  // TODO(turbofan): there is no need to type anything during initial graph
-  // construction.  This is currently only needed for the node cache, which the
-  // typer could sweep over later.
-  Typer typer(zone());
-  MachineOperatorBuilder machine;
-  CommonOperatorBuilder common(zone());
-  JSOperatorBuilder javascript(zone());
-  JSGraph jsgraph(&graph, &common, &javascript, &typer, &machine);
-  Node* context_node;
-  {
-    PhaseStats graph_builder_stats(info(), PhaseStats::CREATE_GRAPH,
-                                   "graph builder");
-    AstGraphBuilderWithPositions graph_builder(info(), &jsgraph,
-                                               &source_positions);
-    graph_builder.CreateGraph();
-    context_node = graph_builder.GetFunctionContext();
-  }
-  {
-    PhaseStats phi_reducer_stats(info(), PhaseStats::CREATE_GRAPH,
-                                 "phi reduction");
-    PhiReducer phi_reducer;
-    GraphReducer graph_reducer(&graph);
-    graph_reducer.AddReducer(&phi_reducer);
-    graph_reducer.ReduceGraph();
-    // TODO(mstarzinger): Running reducer once ought to be enough for everyone.
-    graph_reducer.ReduceGraph();
-    graph_reducer.ReduceGraph();
+  data.source_positions()->AddDecorator();
+
+  if (FLAG_loop_assignment_analysis) {
+    Run<LoopAssignmentAnalysisPhase>();
   }
 
-  VerifyAndPrintGraph(&graph, "Initial untyped");
+  Run<GraphBuilderPhase>();
+  if (data.compilation_failed()) return Handle<Code>::null();
+  RunPrintAndVerify("Initial untyped", true);
+
+  Run<EarlyControlReductionPhase>();
+  RunPrintAndVerify("Early Control reduced", true);
 
   if (info()->is_context_specializing()) {
-    SourcePositionTable::Scope pos(&source_positions,
-                                   SourcePosition::Unknown());
     // Specialize the code to the context as aggressively as possible.
-    JSContextSpecializer spec(info(), &jsgraph, context_node);
-    spec.SpecializeToContext();
-    VerifyAndPrintGraph(&graph, "Context specialized");
+    Run<ContextSpecializerPhase>();
+    RunPrintAndVerify("Context specialized", true);
   }
 
   if (info()->is_inlining_enabled()) {
-    SourcePositionTable::Scope pos(&source_positions,
-                                   SourcePosition::Unknown());
-    JSInliner inliner(info(), &jsgraph);
-    inliner.Inline();
-    VerifyAndPrintGraph(&graph, "Inlined");
+    Run<InliningPhase>();
+    RunPrintAndVerify("Inlined", true);
   }
 
-  // Print a replay of the initial graph.
   if (FLAG_print_turbo_replay) {
-    GraphReplayPrinter::PrintReplay(&graph);
+    // Print a replay of the initial graph.
+    GraphReplayPrinter::PrintReplay(data.graph());
   }
 
+  // Bail out here in case the target architecture is not supported.
+  if (!SupportedTarget()) return Handle<Code>::null();
+
   if (info()->is_typing_enabled()) {
-    {
-      // Type the graph.
-      PhaseStats typer_stats(info(), PhaseStats::CREATE_GRAPH, "typer");
-      typer.Run(&graph, info()->context());
-      VerifyAndPrintGraph(&graph, "Typed");
-    }
-    // All new nodes must be typed.
-    typer.DecorateGraph(&graph);
-    {
-      // Lower JSOperators where we can determine types.
-      PhaseStats lowering_stats(info(), PhaseStats::CREATE_GRAPH,
-                                "typed lowering");
-      SourcePositionTable::Scope pos(&source_positions,
-                                     SourcePosition::Unknown());
-      JSTypedLowering lowering(&jsgraph);
-      GraphReducer graph_reducer(&graph);
-      graph_reducer.AddReducer(&lowering);
-      graph_reducer.ReduceGraph();
-
-      VerifyAndPrintGraph(&graph, "Lowered typed");
-    }
-    {
-      // Lower simplified operators and insert changes.
-      PhaseStats lowering_stats(info(), PhaseStats::CREATE_GRAPH,
-                                "simplified lowering");
-      SourcePositionTable::Scope pos(&source_positions,
-                                     SourcePosition::Unknown());
-      SimplifiedLowering lowering(&jsgraph);
-      lowering.LowerAllNodes();
-
-      VerifyAndPrintGraph(&graph, "Lowered simplified");
-    }
-    {
-      // Lower changes that have been inserted before.
-      PhaseStats lowering_stats(info(), PhaseStats::OPTIMIZATION,
-                                "change lowering");
-      SourcePositionTable::Scope pos(&source_positions,
-                                     SourcePosition::Unknown());
-      Linkage linkage(info());
-      // TODO(turbofan): Value numbering disabled for now.
-      // ValueNumberingReducer vn_reducer(zone());
-      SimplifiedOperatorReducer simple_reducer(&jsgraph);
-      ChangeLowering lowering(&jsgraph, &linkage);
-      MachineOperatorReducer mach_reducer(&jsgraph);
-      GraphReducer graph_reducer(&graph);
-      // TODO(titzer): Figure out if we should run all reducers at once here.
-      // graph_reducer.AddReducer(&vn_reducer);
-      graph_reducer.AddReducer(&simple_reducer);
-      graph_reducer.AddReducer(&lowering);
-      graph_reducer.AddReducer(&mach_reducer);
-      graph_reducer.ReduceGraph();
-
-      VerifyAndPrintGraph(&graph, "Lowered changes");
-    }
+    // Type the graph.
+    Run<TyperPhase>();
+    RunPrintAndVerify("Typed");
   }
 
-  Handle<Code> code = Handle<Code>::null();
-  if (SupportedTarget()) {
-    {
-      // Lower any remaining generic JSOperators.
-      PhaseStats lowering_stats(info(), PhaseStats::CREATE_GRAPH,
-                                "generic lowering");
-      SourcePositionTable::Scope pos(&source_positions,
-                                     SourcePosition::Unknown());
-      JSGenericLowering lowering(info(), &jsgraph);
-      GraphReducer graph_reducer(&graph);
-      graph_reducer.AddReducer(&lowering);
-      graph_reducer.ReduceGraph();
+  BeginPhaseKind("lowering");
 
-      VerifyAndPrintGraph(&graph, "Lowered generic");
-    }
+  if (info()->is_typing_enabled()) {
+    // Lower JSOperators where we can determine types.
+    Run<TypedLoweringPhase>();
+    RunPrintAndVerify("Lowered typed");
 
-    {
-      // Compute a schedule.
-      Schedule* schedule = ComputeSchedule(&graph);
-      // Generate optimized code.
-      PhaseStats codegen_stats(info(), PhaseStats::CODEGEN, "codegen");
-      Linkage linkage(info());
-      code = GenerateCode(&linkage, &graph, schedule, &source_positions);
-      info()->SetCode(code);
-    }
+    // Lower simplified operators and insert changes.
+    Run<SimplifiedLoweringPhase>();
+    RunPrintAndVerify("Lowered simplified");
 
-    // Print optimized code.
-    v8::internal::CodeGenerator::PrintCode(code, info());
+    // Lower changes that have been inserted before.
+    Run<ChangeLoweringPhase>();
+    // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
+    RunPrintAndVerify("Lowered changes", true);
+
+    Run<LateControlReductionPhase>();
+    RunPrintAndVerify("Late Control reduced");
   }
 
+  // Lower any remaining generic JSOperators.
+  Run<GenericLoweringPhase>();
+  // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
+  RunPrintAndVerify("Lowered generic", true);
+
+  BeginPhaseKind("block building");
+
+  data.source_positions()->RemoveDecorator();
+
+  // Compute a schedule.
+  Run<ComputeSchedulePhase>();
+
+  {
+    // Generate optimized code.
+    Linkage linkage(data.instruction_zone(), info());
+    GenerateCode(&linkage);
+  }
+  Handle<Code> code = data.code();
+  info()->SetCode(code);
+
+  // Print optimized code.
+  v8::internal::CodeGenerator::PrintCode(code, info());
+
   if (FLAG_trace_turbo) {
     OFStream os(stdout);
-    os << "--------------------------------------------------\n"
-       << "Finished compiling method "
-       << info()->function()->debug_name()->ToCString().get()
-       << " using Turbofan" << endl;
+    os << "---------------------------------------------------\n"
+       << "Finished compiling method " << GetDebugName(info()).get()
+       << " using Turbofan" << std::endl;
   }
 
   return code;
 }
 
 
-Schedule* Pipeline::ComputeSchedule(Graph* graph) {
-  PhaseStats schedule_stats(info(), PhaseStats::CODEGEN, "scheduling");
-  Schedule* schedule = Scheduler::ComputeSchedule(graph);
-  TraceSchedule(schedule);
-  if (VerifyGraphs()) ScheduleVerifier::Run(schedule);
-  return schedule;
+Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
+                                              Graph* graph,
+                                              Schedule* schedule) {
+  CallDescriptor* call_descriptor =
+      Linkage::ComputeIncoming(info->zone(), info);
+  return GenerateCodeForTesting(info, call_descriptor, graph, schedule);
 }
 
 
-Handle<Code> Pipeline::GenerateCodeForMachineGraph(Linkage* linkage,
-                                                   Graph* graph,
-                                                   Schedule* schedule) {
-  CHECK(SupportedBackend());
-  if (schedule == NULL) {
-    VerifyAndPrintGraph(graph, "Machine");
-    schedule = ComputeSchedule(graph);
-  }
-  TraceSchedule(schedule);
+Handle<Code> Pipeline::GenerateCodeForTesting(CallDescriptor* call_descriptor,
+                                              Graph* graph,
+                                              Schedule* schedule) {
+  CompilationInfo info(graph->zone()->isolate(), graph->zone());
+  return GenerateCodeForTesting(&info, call_descriptor, graph, schedule);
+}
 
-  SourcePositionTable source_positions(graph);
-  Handle<Code> code = GenerateCode(linkage, graph, schedule, &source_positions);
+
+Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
+                                              CallDescriptor* call_descriptor,
+                                              Graph* graph,
+                                              Schedule* schedule) {
+  CHECK(SupportedBackend());
+  ZonePool zone_pool(info->isolate());
+  Pipeline pipeline(info);
+  PipelineData data(&zone_pool, info);
+  pipeline.data_ = &data;
+  data.InitializeTorTesting(graph, schedule);
+  if (schedule == NULL) {
+    // TODO(rossberg): Should this really be untyped?
+    pipeline.RunPrintAndVerify("Machine", true);
+    pipeline.Run<ComputeSchedulePhase>();
+  } else {
+    TraceSchedule(schedule);
+  }
+
+  Linkage linkage(info->zone(), call_descriptor);
+  pipeline.GenerateCode(&linkage);
+  Handle<Code> code = data.code();
+
 #if ENABLE_DISASSEMBLER
   if (!code.is_null() && FLAG_print_opt_code) {
-    CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+    CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
     OFStream os(tracing_scope.file());
     code->Disassemble("test code", os);
   }
@@ -360,51 +926,157 @@
 }
 
 
-Handle<Code> Pipeline::GenerateCode(Linkage* linkage, Graph* graph,
-                                    Schedule* schedule,
-                                    SourcePositionTable* source_positions) {
-  DCHECK_NOT_NULL(graph);
+bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
+                                           InstructionSequence* sequence,
+                                           bool run_verifier) {
+  CompilationInfo info(sequence->zone()->isolate(), sequence->zone());
+  ZonePool zone_pool(sequence->zone()->isolate());
+  PipelineData data(&zone_pool, &info);
+  data.InitializeTorTesting(sequence);
+  Pipeline pipeline(&info);
+  pipeline.data_ = &data;
+  pipeline.AllocateRegisters(config, run_verifier);
+  return !data.compilation_failed();
+}
+
+
+void Pipeline::GenerateCode(Linkage* linkage) {
+  PipelineData* data = this->data_;
+
   DCHECK_NOT_NULL(linkage);
-  DCHECK_NOT_NULL(schedule);
+  DCHECK_NOT_NULL(data->graph());
+  DCHECK_NOT_NULL(data->schedule());
   CHECK(SupportedBackend());
 
-  InstructionSequence sequence(linkage, graph, schedule);
+  BasicBlockProfiler::Data* profiler_data = NULL;
+  if (FLAG_turbo_profiling) {
+    profiler_data = BasicBlockInstrumentor::Instrument(info(), data->graph(),
+                                                       data->schedule());
+  }
+
+  data->InitializeInstructionSequence();
 
   // Select and schedule instructions covering the scheduled graph.
-  {
-    InstructionSelector selector(&sequence, source_positions);
-    selector.SelectInstructions();
+  Run<InstructionSelectionPhase>(linkage);
+
+  if (FLAG_trace_turbo && !data->MayHaveUnverifiableGraph()) {
+    TurboCfgFile tcf(isolate());
+    tcf << AsC1V("CodeGen", data->schedule(), data->source_positions(),
+                 data->sequence());
   }
 
-  if (FLAG_trace_turbo) {
-    OFStream os(stdout);
-    os << "----- Instruction sequence before register allocation -----\n"
-       << sequence;
-  }
+  data->DeleteGraphZone();
 
+  BeginPhaseKind("register allocation");
+
+  bool run_verifier = false;
+#ifdef DEBUG
+  run_verifier = true;
+#endif
   // Allocate registers.
-  {
-    int node_count = graph->NodeCount();
-    if (node_count > UnallocatedOperand::kMaxVirtualRegisters) {
-      linkage->info()->AbortOptimization(kNotEnoughVirtualRegistersForValues);
-      return Handle<Code>::null();
-    }
-    RegisterAllocator allocator(&sequence);
-    if (!allocator.Allocate()) {
-      linkage->info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
-      return Handle<Code>::null();
-    }
+  AllocateRegisters(RegisterConfiguration::ArchDefault(), run_verifier);
+  if (data->compilation_failed()) {
+    info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
+    return;
   }
 
-  if (FLAG_trace_turbo) {
+  BeginPhaseKind("code generation");
+
+  // Optimize jumps.
+  if (FLAG_turbo_jt) {
+    Run<JumpThreadingPhase>();
+  }
+
+  // Generate final machine code.
+  Run<GenerateCodePhase>(linkage);
+
+  if (profiler_data != NULL) {
+#if ENABLE_DISASSEMBLER
+    std::ostringstream os;
+    data->code()->Disassemble(NULL, os);
+    profiler_data->SetCode(&os);
+#endif
+  }
+}
+
+
+void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
+                                 bool run_verifier) {
+  PipelineData* data = this->data_;
+
+  int node_count = data->sequence()->VirtualRegisterCount();
+  if (node_count > UnallocatedOperand::kMaxVirtualRegisters) {
+    data->set_compilation_failed();
+    return;
+  }
+
+  // Don't track usage for this zone in compiler stats.
+  SmartPointer<Zone> verifier_zone;
+  RegisterAllocatorVerifier* verifier = nullptr;
+  if (run_verifier) {
+    verifier_zone.Reset(new Zone(info()->isolate()));
+    verifier = new (verifier_zone.get()) RegisterAllocatorVerifier(
+        verifier_zone.get(), config, data->sequence());
+  }
+
+  SmartArrayPointer<char> debug_name;
+#ifdef DEBUG
+  debug_name = GetDebugName(data->info());
+#endif
+
+  ZonePool::Scope zone_scope(data->zone_pool());
+  data->InitializeRegisterAllocator(zone_scope.zone(), config,
+                                    debug_name.get());
+
+  Run<MeetRegisterConstraintsPhase>();
+  Run<ResolvePhisPhase>();
+  Run<BuildLiveRangesPhase>();
+  if (FLAG_trace_turbo_graph) {
     OFStream os(stdout);
-    os << "----- Instruction sequence after register allocation -----\n"
-       << sequence;
+    PrintableInstructionSequence printable = {config, data->sequence()};
+    os << "----- Instruction sequence before register allocation -----\n"
+       << printable;
+  }
+  if (verifier != nullptr) {
+    CHECK(!data->register_allocator()->ExistsUseWithoutDefinition());
+  }
+  Run<AllocateGeneralRegistersPhase>();
+  if (!data->register_allocator()->AllocationOk()) {
+    data->set_compilation_failed();
+    return;
+  }
+  Run<AllocateDoubleRegistersPhase>();
+  if (!data->register_allocator()->AllocationOk()) {
+    data->set_compilation_failed();
+    return;
+  }
+  if (FLAG_turbo_reuse_spill_slots) {
+    Run<ReuseSpillSlotsPhase>();
+  }
+  Run<CommitAssignmentPhase>();
+  Run<PopulatePointerMapsPhase>();
+  Run<ConnectRangesPhase>();
+  Run<ResolveControlFlowPhase>();
+  if (FLAG_turbo_move_optimization) {
+    Run<OptimizeMovesPhase>();
   }
 
-  // Generate native sequence.
-  CodeGenerator generator(&sequence);
-  return generator.GenerateCode();
+  if (FLAG_trace_turbo_graph) {
+    OFStream os(stdout);
+    PrintableInstructionSequence printable = {config, data->sequence()};
+    os << "----- Instruction sequence after register allocation -----\n"
+       << printable;
+  }
+
+  if (verifier != nullptr) {
+    verifier->VerifyAssignment();
+    verifier->VerifyGapMoves();
+  }
+
+  if (FLAG_trace_turbo && !data->MayHaveUnverifiableGraph()) {
+    TurboCfgFile tcf(data->isolate());
+    tcf << AsC1VAllocator("CodeGen", data->register_allocator());
+  }
 }
 
 
diff --git a/src/compiler/pipeline.h b/src/compiler/pipeline.h
index 9f8241a..73053dc 100644
--- a/src/compiler/pipeline.h
+++ b/src/compiler/pipeline.h
@@ -17,10 +17,13 @@
 namespace compiler {
 
 // Clients of this interface shouldn't depend on lots of compiler internals.
+class CallDescriptor;
 class Graph;
-class Schedule;
-class SourcePositionTable;
+class InstructionSequence;
 class Linkage;
+class PipelineData;
+class RegisterConfiguration;
+class Schedule;
 
 class Pipeline {
  public:
@@ -29,10 +32,22 @@
   // Run the entire pipeline and generate a handle to a code object.
   Handle<Code> GenerateCode();
 
-  // Run the pipeline on a machine graph and generate code. If {schedule}
-  // is {NULL}, then compute a new schedule for code generation.
-  Handle<Code> GenerateCodeForMachineGraph(Linkage* linkage, Graph* graph,
-                                           Schedule* schedule = NULL);
+  // Run the pipeline on a machine graph and generate code. If {schedule} is
+  // {nullptr}, then compute a new schedule for code generation.
+  static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
+                                             Graph* graph,
+                                             Schedule* schedule = nullptr);
+
+  // Run the pipeline on a machine graph and generate code. If {schedule} is
+  // {nullptr}, then compute a new schedule for code generation.
+  static Handle<Code> GenerateCodeForTesting(CallDescriptor* call_descriptor,
+                                             Graph* graph,
+                                             Schedule* schedule = nullptr);
+
+  // Run just the register allocator phases.
+  static bool AllocateRegistersForTesting(const RegisterConfiguration* config,
+                                          InstructionSequence* sequence,
+                                          bool run_verifier);
 
   static inline bool SupportedBackend() { return V8_TURBOFAN_BACKEND != 0; }
   static inline bool SupportedTarget() { return V8_TURBOFAN_TARGET != 0; }
@@ -41,19 +56,31 @@
   static void TearDown();
 
  private:
+  static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
+                                             CallDescriptor* call_descriptor,
+                                             Graph* graph, Schedule* schedule);
+
   CompilationInfo* info_;
+  PipelineData* data_;
+
+  // Helpers for executing pipeline phases.
+  template <typename Phase>
+  void Run();
+  template <typename Phase, typename Arg0>
+  void Run(Arg0 arg_0);
 
   CompilationInfo* info() const { return info_; }
   Isolate* isolate() { return info_->isolate(); }
-  Zone* zone() { return info_->zone(); }
 
-  Schedule* ComputeSchedule(Graph* graph);
-  void VerifyAndPrintGraph(Graph* graph, const char* phase);
-  Handle<Code> GenerateCode(Linkage* linkage, Graph* graph, Schedule* schedule,
-                            SourcePositionTable* source_positions);
+  void BeginPhaseKind(const char* phase_kind);
+  void RunPrintAndVerify(const char* phase, bool untyped = false);
+  void GenerateCode(Linkage* linkage);
+  void AllocateRegisters(const RegisterConfiguration* config,
+                         bool run_verifier);
 };
-}
-}
-}  // namespace v8::internal::compiler
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_COMPILER_PIPELINE_H_
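
Usage sketch for the first testing entry point declared above (a hypothetical test fragment; constructing info and graph is assumed to happen elsewhere). Passing no schedule exercises the path in pipeline.cc that runs ComputeSchedulePhase internally:

Handle<Code> code = Pipeline::GenerateCodeForTesting(info, graph);
CHECK(!code.is_null());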
diff --git a/src/compiler/raw-machine-assembler.cc b/src/compiler/raw-machine-assembler.cc
index 7f45eb9..b93ec66 100644
--- a/src/compiler/raw-machine-assembler.cc
+++ b/src/compiler/raw-machine-assembler.cc
@@ -13,10 +13,11 @@
 
 RawMachineAssembler::RawMachineAssembler(Graph* graph,
                                          MachineSignature* machine_sig,
-                                         MachineType word)
+                                         MachineType word,
+                                         MachineOperatorBuilder::Flags flags)
     : GraphBuilder(graph),
       schedule_(new (zone()) Schedule(zone())),
-      machine_(word),
+      machine_(zone(), word, flags),
       common_(zone()),
       machine_sig_(machine_sig),
       call_descriptor_(
@@ -39,7 +40,7 @@
 Schedule* RawMachineAssembler::Export() {
   // Compute the correct codegen order.
   DCHECK(schedule_->rpo_order()->empty());
-  Scheduler::ComputeSpecialRPO(schedule_);
+  Scheduler::ComputeSpecialRPO(zone(), schedule_);
   // Invalidate MachineAssembler.
   Schedule* schedule = schedule_;
   schedule_ = NULL;
@@ -86,7 +87,8 @@
                                              CallFunctionFlags flags) {
   Callable callable = CodeFactory::CallFunction(isolate(), 0, flags);
   CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-      callable.descriptor(), 1, CallDescriptor::kNeedsFrameState, zone());
+      callable.descriptor(), 1, CallDescriptor::kNeedsFrameState,
+      Operator::kNoProperties, zone());
   Node* stub_code = HeapConstant(callable.code());
   Node* call = graph()->NewNode(common()->Call(desc), stub_code, function,
                                 receiver, context, frame_state);
@@ -97,7 +99,8 @@
 
 Node* RawMachineAssembler::CallJS0(Node* function, Node* receiver,
                                    Node* context, Node* frame_state) {
-  CallDescriptor* descriptor = Linkage::GetJSCallDescriptor(1, zone());
+  CallDescriptor* descriptor =
+      Linkage::GetJSCallDescriptor(1, zone(), CallDescriptor::kNeedsFrameState);
   Node* call = graph()->NewNode(common()->Call(descriptor), function, receiver,
                                 context, frame_state);
   schedule()->AddNode(CurrentBlock(), call);
@@ -150,10 +153,10 @@
 
 
 Node* RawMachineAssembler::MakeNode(const Operator* op, int input_count,
-                                    Node** inputs) {
+                                    Node** inputs, bool incomplete) {
   DCHECK(ScheduleValid());
   DCHECK(current_block_ != NULL);
-  Node* node = graph()->NewNode(op, input_count, inputs);
+  Node* node = graph()->NewNode(op, input_count, inputs, incomplete);
   BasicBlock* block = op->opcode() == IrOpcode::kParameter ? schedule()->start()
                                                            : CurrentBlock();
   schedule()->AddNode(block, node);
diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h
index a4af55a..5455814 100644
--- a/src/compiler/raw-machine-assembler.h
+++ b/src/compiler/raw-machine-assembler.h
@@ -45,8 +45,10 @@
   };
 
   RawMachineAssembler(Graph* graph, MachineSignature* machine_sig,
-                      MachineType word = kMachPtr);
-  virtual ~RawMachineAssembler() {}
+                      MachineType word = kMachPtr,
+                      MachineOperatorBuilder::Flags flags =
+                          MachineOperatorBuilder::Flag::kNoFlags);
+  ~RawMachineAssembler() OVERRIDE {}
 
   Isolate* isolate() const { return zone()->isolate(); }
   Zone* zone() const { return graph()->zone(); }
@@ -57,7 +59,7 @@
   MachineSignature* machine_sig() const { return machine_sig_; }
 
   Node* UndefinedConstant() {
-    Unique<Object> unique = Unique<Object>::CreateImmovable(
+    Unique<HeapObject> unique = Unique<HeapObject>::CreateImmovable(
         isolate()->factory()->undefined_value());
     return NewNode(common()->HeapConstant(unique));
   }
@@ -80,11 +82,14 @@
   Node* NumberConstant(double value) {
     return NewNode(common()->NumberConstant(value));
   }
+  Node* Float32Constant(float value) {
+    return NewNode(common()->Float32Constant(value));
+  }
   Node* Float64Constant(double value) {
     return NewNode(common()->Float64Constant(value));
   }
-  Node* HeapConstant(Handle<Object> object) {
-    Unique<Object> val = Unique<Object>::CreateUninitialized(object);
+  Node* HeapConstant(Handle<HeapObject> object) {
+    Unique<HeapObject> val = Unique<HeapObject>::CreateUninitialized(object);
     return NewNode(common()->HeapConstant(val));
   }
 
@@ -97,14 +102,15 @@
     return Load(rep, base, Int32Constant(0));
   }
   Node* Load(MachineType rep, Node* base, Node* index) {
-    return NewNode(machine()->Load(rep), base, index);
+    return NewNode(machine()->Load(rep), base, index, graph()->start(),
+                   graph()->start());
   }
   void Store(MachineType rep, Node* base, Node* value) {
     Store(rep, base, Int32Constant(0), value);
   }
   void Store(MachineType rep, Node* base, Node* index, Node* value) {
     NewNode(machine()->Store(StoreRepresentation(rep, kNoWriteBarrier)), base,
-            index, value);
+            index, value, graph()->start(), graph()->start());
   }
   // Arithmetic Operations.
   Node* WordAnd(Node* a, Node* b) {
@@ -222,17 +228,14 @@
   Node* Int32Mul(Node* a, Node* b) {
     return NewNode(machine()->Int32Mul(), a, b);
   }
-  Node* Int32Div(Node* a, Node* b) {
-    return NewNode(machine()->Int32Div(), a, b);
+  Node* Int32MulHigh(Node* a, Node* b) {
+    return NewNode(machine()->Int32MulHigh(), a, b);
   }
-  Node* Int32UDiv(Node* a, Node* b) {
-    return NewNode(machine()->Int32UDiv(), a, b);
+  Node* Int32Div(Node* a, Node* b) {
+    return NewNode(machine()->Int32Div(), a, b, graph()->start());
   }
   Node* Int32Mod(Node* a, Node* b) {
-    return NewNode(machine()->Int32Mod(), a, b);
-  }
-  Node* Int32UMod(Node* a, Node* b) {
-    return NewNode(machine()->Int32UMod(), a, b);
+    return NewNode(machine()->Int32Mod(), a, b, graph()->start());
   }
   Node* Int32LessThan(Node* a, Node* b) {
     return NewNode(machine()->Int32LessThan(), a, b);
@@ -240,12 +243,21 @@
   Node* Int32LessThanOrEqual(Node* a, Node* b) {
     return NewNode(machine()->Int32LessThanOrEqual(), a, b);
   }
+  Node* Uint32Div(Node* a, Node* b) {
+    return NewNode(machine()->Uint32Div(), a, b, graph()->start());
+  }
   Node* Uint32LessThan(Node* a, Node* b) {
     return NewNode(machine()->Uint32LessThan(), a, b);
   }
   Node* Uint32LessThanOrEqual(Node* a, Node* b) {
     return NewNode(machine()->Uint32LessThanOrEqual(), a, b);
   }
+  Node* Uint32Mod(Node* a, Node* b) {
+    return NewNode(machine()->Uint32Mod(), a, b, graph()->start());
+  }
+  Node* Uint32MulHigh(Node* a, Node* b) {
+    return NewNode(machine()->Uint32MulHigh(), a, b);
+  }
   Node* Int32GreaterThan(Node* a, Node* b) { return Int32LessThan(b, a); }
   Node* Int32GreaterThanOrEqual(Node* a, Node* b) {
     return Int32LessThanOrEqual(b, a);
@@ -264,15 +276,9 @@
   Node* Int64Div(Node* a, Node* b) {
     return NewNode(machine()->Int64Div(), a, b);
   }
-  Node* Int64UDiv(Node* a, Node* b) {
-    return NewNode(machine()->Int64UDiv(), a, b);
-  }
   Node* Int64Mod(Node* a, Node* b) {
     return NewNode(machine()->Int64Mod(), a, b);
   }
-  Node* Int64UMod(Node* a, Node* b) {
-    return NewNode(machine()->Int64UMod(), a, b);
-  }
   Node* Int64Neg(Node* a) { return Int64Sub(Int64Constant(0), a); }
   Node* Int64LessThan(Node* a, Node* b) {
     return NewNode(machine()->Int64LessThan(), a, b);
@@ -284,6 +290,12 @@
   Node* Int64GreaterThanOrEqual(Node* a, Node* b) {
     return Int64LessThanOrEqual(b, a);
   }
+  Node* Uint64Div(Node* a, Node* b) {
+    return NewNode(machine()->Uint64Div(), a, b);
+  }
+  Node* Uint64Mod(Node* a, Node* b) {
+    return NewNode(machine()->Uint64Mod(), a, b);
+  }
 
   // TODO(turbofan): What is this used for?
   Node* ConvertIntPtrToInt32(Node* a) {
@@ -344,6 +356,9 @@
   }
 
   // Conversions.
+  Node* ChangeFloat32ToFloat64(Node* a) {
+    return NewNode(machine()->ChangeFloat32ToFloat64(), a);
+  }
   Node* ChangeInt32ToFloat64(Node* a) {
     return NewNode(machine()->ChangeInt32ToFloat64(), a);
   }
@@ -362,12 +377,23 @@
   Node* ChangeUint32ToUint64(Node* a) {
     return NewNode(machine()->ChangeUint32ToUint64(), a);
   }
+  Node* TruncateFloat64ToFloat32(Node* a) {
+    return NewNode(machine()->TruncateFloat64ToFloat32(), a);
+  }
   Node* TruncateFloat64ToInt32(Node* a) {
     return NewNode(machine()->TruncateFloat64ToInt32(), a);
   }
   Node* TruncateInt64ToInt32(Node* a) {
     return NewNode(machine()->TruncateInt64ToInt32(), a);
   }
+  Node* Float64Floor(Node* a) { return NewNode(machine()->Float64Floor(), a); }
+  Node* Float64Ceil(Node* a) { return NewNode(machine()->Float64Ceil(), a); }
+  Node* Float64RoundTruncate(Node* a) {
+    return NewNode(machine()->Float64RoundTruncate(), a);
+  }
+  Node* Float64RoundTiesAway(Node* a) {
+    return NewNode(machine()->Float64RoundTiesAway(), a);
+  }
 
   // Parameters.
   Node* Parameter(size_t index);
@@ -404,8 +430,8 @@
   Schedule* Export();
 
  protected:
-  virtual Node* MakeNode(const Operator* op, int input_count,
-                         Node** inputs) FINAL;
+  Node* MakeNode(const Operator* op, int input_count, Node** inputs,
+                 bool incomplete) FINAL;
 
   bool ScheduleValid() { return schedule_ != NULL; }
 
diff --git a/src/compiler/register-allocator-verifier.cc b/src/compiler/register-allocator-verifier.cc
new file mode 100644
index 0000000..dabfd59
--- /dev/null
+++ b/src/compiler/register-allocator-verifier.cc
@@ -0,0 +1,460 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction.h"
+#include "src/compiler/register-allocator-verifier.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+static size_t OperandCount(const Instruction* instr) {
+  return instr->InputCount() + instr->OutputCount() + instr->TempCount();
+}
+
+
+static void VerifyGapEmpty(const GapInstruction* gap) {
+  for (int i = GapInstruction::FIRST_INNER_POSITION;
+       i <= GapInstruction::LAST_INNER_POSITION; i++) {
+    GapInstruction::InnerPosition inner_pos =
+        static_cast<GapInstruction::InnerPosition>(i);
+    CHECK_EQ(NULL, gap->GetParallelMove(inner_pos));
+  }
+}
+
+
+void RegisterAllocatorVerifier::VerifyInput(
+    const OperandConstraint& constraint) {
+  CHECK_NE(kSameAsFirst, constraint.type_);
+  if (constraint.type_ != kImmediate) {
+    CHECK_NE(UnallocatedOperand::kInvalidVirtualRegister,
+             constraint.virtual_register_);
+  }
+}
+
+
+void RegisterAllocatorVerifier::VerifyTemp(
+    const OperandConstraint& constraint) {
+  CHECK_NE(kSameAsFirst, constraint.type_);
+  CHECK_NE(kImmediate, constraint.type_);
+  CHECK_NE(kConstant, constraint.type_);
+  CHECK_EQ(UnallocatedOperand::kInvalidVirtualRegister,
+           constraint.virtual_register_);
+}
+
+
+void RegisterAllocatorVerifier::VerifyOutput(
+    const OperandConstraint& constraint) {
+  CHECK_NE(kImmediate, constraint.type_);
+  CHECK_NE(UnallocatedOperand::kInvalidVirtualRegister,
+           constraint.virtual_register_);
+}
+
+
+RegisterAllocatorVerifier::RegisterAllocatorVerifier(
+    Zone* zone, const RegisterConfiguration* config,
+    const InstructionSequence* sequence)
+    : zone_(zone), config_(config), sequence_(sequence), constraints_(zone) {
+  constraints_.reserve(sequence->instructions().size());
+  // TODO(dcarney): model unique constraints.
+  // Construct OperandConstraints for all InstructionOperands, eliminating
+  // kSameAsFirst along the way.
+  for (const auto* instr : sequence->instructions()) {
+    const size_t operand_count = OperandCount(instr);
+    auto* op_constraints =
+        zone->NewArray<OperandConstraint>(static_cast<int>(operand_count));
+    size_t count = 0;
+    for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
+      BuildConstraint(instr->InputAt(i), &op_constraints[count]);
+      VerifyInput(op_constraints[count]);
+    }
+    for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
+      BuildConstraint(instr->TempAt(i), &op_constraints[count]);
+      VerifyTemp(op_constraints[count]);
+    }
+    for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
+      BuildConstraint(instr->OutputAt(i), &op_constraints[count]);
+      if (op_constraints[count].type_ == kSameAsFirst) {
+        CHECK(instr->InputCount() > 0);
+        op_constraints[count].type_ = op_constraints[0].type_;
+        op_constraints[count].value_ = op_constraints[0].value_;
+      }
+      VerifyOutput(op_constraints[count]);
+    }
+    // All gaps should be totally unallocated at this point.
+    if (instr->IsGapMoves()) {
+      CHECK(operand_count == 0);
+      VerifyGapEmpty(GapInstruction::cast(instr));
+    }
+    InstructionConstraint instr_constraint = {instr, operand_count,
+                                              op_constraints};
+    constraints()->push_back(instr_constraint);
+  }
+}
+
+
+void RegisterAllocatorVerifier::VerifyAssignment() {
+  CHECK(sequence()->instructions().size() == constraints()->size());
+  auto instr_it = sequence()->begin();
+  for (const auto& instr_constraint : *constraints()) {
+    const auto* instr = instr_constraint.instruction_;
+    const size_t operand_count = instr_constraint.operand_constaints_size_;
+    const auto* op_constraints = instr_constraint.operand_constraints_;
+    CHECK_EQ(instr, *instr_it);
+    CHECK(operand_count == OperandCount(instr));
+    size_t count = 0;
+    for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
+      CheckConstraint(instr->InputAt(i), &op_constraints[count]);
+    }
+    for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
+      CheckConstraint(instr->TempAt(i), &op_constraints[count]);
+    }
+    for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
+      CheckConstraint(instr->OutputAt(i), &op_constraints[count]);
+    }
+    ++instr_it;
+  }
+}
+
+
+void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
+                                                OperandConstraint* constraint) {
+  constraint->value_ = kMinInt;
+  constraint->virtual_register_ = UnallocatedOperand::kInvalidVirtualRegister;
+  if (op->IsConstant()) {
+    constraint->type_ = kConstant;
+    constraint->value_ = ConstantOperand::cast(op)->index();
+    constraint->virtual_register_ = constraint->value_;
+  } else if (op->IsImmediate()) {
+    constraint->type_ = kImmediate;
+    constraint->value_ = ImmediateOperand::cast(op)->index();
+  } else {
+    CHECK(op->IsUnallocated());
+    const auto* unallocated = UnallocatedOperand::cast(op);
+    int vreg = unallocated->virtual_register();
+    constraint->virtual_register_ = vreg;
+    if (unallocated->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
+      constraint->type_ = kFixedSlot;
+      constraint->value_ = unallocated->fixed_slot_index();
+    } else {
+      switch (unallocated->extended_policy()) {
+        case UnallocatedOperand::ANY:
+          CHECK(false);
+          break;
+        case UnallocatedOperand::NONE:
+          if (sequence()->IsDouble(vreg)) {
+            constraint->type_ = kNoneDouble;
+          } else {
+            constraint->type_ = kNone;
+          }
+          break;
+        case UnallocatedOperand::FIXED_REGISTER:
+          constraint->type_ = kFixedRegister;
+          constraint->value_ = unallocated->fixed_register_index();
+          break;
+        case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
+          constraint->type_ = kFixedDoubleRegister;
+          constraint->value_ = unallocated->fixed_register_index();
+          break;
+        case UnallocatedOperand::MUST_HAVE_REGISTER:
+          if (sequence()->IsDouble(vreg)) {
+            constraint->type_ = kDoubleRegister;
+          } else {
+            constraint->type_ = kRegister;
+          }
+          break;
+        case UnallocatedOperand::SAME_AS_FIRST_INPUT:
+          constraint->type_ = kSameAsFirst;
+          break;
+      }
+    }
+  }
+}
+
+
+void RegisterAllocatorVerifier::CheckConstraint(
+    const InstructionOperand* op, const OperandConstraint* constraint) {
+  switch (constraint->type_) {
+    case kConstant:
+      CHECK(op->IsConstant());
+      CHECK_EQ(op->index(), constraint->value_);
+      return;
+    case kImmediate:
+      CHECK(op->IsImmediate());
+      CHECK_EQ(op->index(), constraint->value_);
+      return;
+    case kRegister:
+      CHECK(op->IsRegister());
+      return;
+    case kFixedRegister:
+      CHECK(op->IsRegister());
+      CHECK_EQ(op->index(), constraint->value_);
+      return;
+    case kDoubleRegister:
+      CHECK(op->IsDoubleRegister());
+      return;
+    case kFixedDoubleRegister:
+      CHECK(op->IsDoubleRegister());
+      CHECK_EQ(op->index(), constraint->value_);
+      return;
+    case kFixedSlot:
+      CHECK(op->IsStackSlot());
+      CHECK_EQ(op->index(), constraint->value_);
+      return;
+    case kNone:
+      CHECK(op->IsRegister() || op->IsStackSlot());
+      return;
+    case kNoneDouble:
+      CHECK(op->IsDoubleRegister() || op->IsDoubleStackSlot());
+      return;
+    case kSameAsFirst:
+      CHECK(false);
+      return;
+  }
+}
+
+
+class RegisterAllocatorVerifier::OutgoingMapping : public ZoneObject {
+ public:
+  struct OperandLess {
+    bool operator()(const InstructionOperand* a,
+                    const InstructionOperand* b) const {
+      if (a->kind() == b->kind()) return a->index() < b->index();
+      return a->kind() < b->kind();
+    }
+  };
+
+  typedef std::map<
+      const InstructionOperand*, int, OperandLess,
+      zone_allocator<std::pair<const InstructionOperand*, const int>>>
+      LocationMap;
+
+  explicit OutgoingMapping(Zone* zone)
+      : locations_(LocationMap::key_compare(),
+                   LocationMap::allocator_type(zone)),
+        predecessor_intersection_(LocationMap::key_compare(),
+                                  LocationMap::allocator_type(zone)) {}
+
+  LocationMap* locations() { return &locations_; }
+
+  void RunPhis(const InstructionSequence* sequence,
+               const InstructionBlock* block, size_t phi_index) {
+    // This operation is only valid in edge split form.
+    size_t predecessor_index = block->predecessors()[phi_index].ToSize();
+    CHECK(sequence->instruction_blocks()[predecessor_index]->SuccessorCount() ==
+          1);
+    for (const auto* phi : block->phis()) {
+      auto input = phi->inputs()[phi_index];
+      CHECK(locations()->find(input) != locations()->end());
+      auto it = locations()->find(phi->output());
+      CHECK(it != locations()->end());
+      if (input->IsConstant()) {
+        CHECK_EQ(it->second, input->index());
+      } else {
+        CHECK_EQ(it->second, phi->operands()[phi_index]);
+      }
+      it->second = phi->virtual_register();
+    }
+  }
+
+  void RunGapInstruction(Zone* zone, const GapInstruction* gap) {
+    for (int i = GapInstruction::FIRST_INNER_POSITION;
+         i <= GapInstruction::LAST_INNER_POSITION; i++) {
+      GapInstruction::InnerPosition inner_pos =
+          static_cast<GapInstruction::InnerPosition>(i);
+      const ParallelMove* move = gap->GetParallelMove(inner_pos);
+      if (move == nullptr) continue;
+      RunParallelMoves(zone, move);
+    }
+  }
+
+  void RunParallelMoves(Zone* zone, const ParallelMove* move) {
+    // Compute outgoing mappings.
+    LocationMap to_insert((LocationMap::key_compare()),
+                          LocationMap::allocator_type(zone));
+    auto* moves = move->move_operands();
+    for (auto i = moves->begin(); i != moves->end(); ++i) {
+      if (i->IsEliminated()) continue;
+      auto cur = locations()->find(i->source());
+      CHECK(cur != locations()->end());
+      to_insert.insert(std::make_pair(i->destination(), cur->second));
+    }
+    // Drop current mappings.
+    for (auto i = moves->begin(); i != moves->end(); ++i) {
+      if (i->IsEliminated()) continue;
+      auto cur = locations()->find(i->destination());
+      if (cur != locations()->end()) locations()->erase(cur);
+    }
+    // Insert new values.
+    locations()->insert(to_insert.begin(), to_insert.end());
+  }
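
RunParallelMoves applies the moves as if simultaneous: every source is read
into to_insert before any destination entry is dropped, so a cycle such as a
swap never observes a clobbered value. A minimal sketch of the same discipline
on an int-to-int location map (all names below are illustrative, not part of
the patch):

    #include <cassert>
    #include <map>
    #include <utility>
    #include <vector>

    // Apply (src, dst) moves in parallel: read all sources first, then drop
    // the stale destination entries, then install the new values, mirroring
    // the three phases of RunParallelMoves.
    void ApplyParallel(std::map<int, int>* loc,
                       const std::vector<std::pair<int, int>>& moves) {
      std::map<int, int> to_insert;
      for (const auto& m : moves) to_insert[m.second] = loc->at(m.first);
      for (const auto& m : moves) loc->erase(m.second);
      loc->insert(to_insert.begin(), to_insert.end());
    }

    int main() {
      std::map<int, int> loc{{0, 7}, {1, 9}};
      ApplyParallel(&loc, {{0, 1}, {1, 0}});  // swap locations 0 and 1
      assert(loc.at(0) == 9 && loc.at(1) == 7);
      return 0;
    }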
+
+  void Map(const InstructionOperand* op, int virtual_register) {
+    locations()->insert(std::make_pair(op, virtual_register));
+  }
+
+  void Drop(const InstructionOperand* op) {
+    auto it = locations()->find(op);
+    if (it != locations()->end()) locations()->erase(it);
+  }
+
+  void DropRegisters(const RegisterConfiguration* config) {
+    for (int i = 0; i < config->num_general_registers(); ++i) {
+      InstructionOperand op(InstructionOperand::REGISTER, i);
+      Drop(&op);
+    }
+    for (int i = 0; i < config->num_double_registers(); ++i) {
+      InstructionOperand op(InstructionOperand::DOUBLE_REGISTER, i);
+      Drop(&op);
+    }
+  }
+
+  void InitializeFromFirstPredecessor(const InstructionSequence* sequence,
+                                      const OutgoingMappings* outgoing_mappings,
+                                      const InstructionBlock* block) {
+    if (block->predecessors().empty()) return;
+    size_t predecessor_index = block->predecessors()[0].ToSize();
+    CHECK(predecessor_index < block->rpo_number().ToSize());
+    auto* incoming = outgoing_mappings->at(predecessor_index);
+    if (block->PredecessorCount() > 1) {
+      // Update incoming map with phis. The remaining phis will be checked later
+      // as their mappings are not guaranteed to exist yet.
+      incoming->RunPhis(sequence, block, 0);
+    }
+    // Now initialize outgoing mapping for this block with incoming mapping.
+    CHECK(locations_.empty());
+    locations_ = incoming->locations_;
+  }
+
+  void InitializeFromIntersection() { locations_ = predecessor_intersection_; }
+
+  void InitializeIntersection(const OutgoingMapping* incoming) {
+    CHECK(predecessor_intersection_.empty());
+    predecessor_intersection_ = incoming->locations_;
+  }
+
+  void Intersect(const OutgoingMapping* other) {
+    if (predecessor_intersection_.empty()) return;
+    auto it = predecessor_intersection_.begin();
+    OperandLess less;
+    for (const auto& o : other->locations_) {
+      while (less(it->first, o.first)) {
+        ++it;
+        if (it == predecessor_intersection_.end()) return;
+      }
+      if (it->first->Equals(o.first)) {
+        if (o.second != it->second) {
+          predecessor_intersection_.erase(it++);
+        } else {
+          ++it;
+        }
+        if (it == predecessor_intersection_.end()) return;
+      }
+    }
+  }
+
+ private:
+  LocationMap locations_;
+  LocationMap predecessor_intersection_;
+
+  DISALLOW_COPY_AND_ASSIGN(OutgoingMapping);
+};
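
OutgoingMapping::Intersect prunes predecessor_intersection_ with a single
forward walk that relies on both maps sharing the OperandLess order. The same
pruning, in a simpler but equivalent form over plain integer keys (names
illustrative, not from the patch):

    #include <cassert>
    #include <map>

    // Keep only the (location, vreg) pairs on which both predecessors agree;
    // disagreements and missing keys are dropped, as in Intersect above.
    std::map<int, int> Intersect(const std::map<int, int>& a,
                                 const std::map<int, int>& b) {
      std::map<int, int> result;
      for (const auto& kv : a) {
        auto it = b.find(kv.first);
        if (it != b.end() && it->second == kv.second) result.insert(kv);
      }
      return result;
    }

    int main() {
      std::map<int, int> pred0{{0, 7}, {1, 9}};  // location -> virtual register
      std::map<int, int> pred1{{0, 7}, {1, 4}};
      auto merged = Intersect(pred0, pred1);
      assert(merged.size() == 1 && merged.at(0) == 7);
      return 0;
    }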
+
+
+// Verify that all gap moves move the operands for a virtual register into the
+// correct location for every instruction.
+void RegisterAllocatorVerifier::VerifyGapMoves() {
+  typedef ZoneVector<OutgoingMapping*> OutgoingMappings;
+  OutgoingMappings outgoing_mappings(
+      static_cast<int>(sequence()->instruction_blocks().size()), nullptr,
+      zone());
+  // Construct all mappings, ignoring back edges and multiple entries.
+  ConstructOutgoingMappings(&outgoing_mappings, true);
+  // Run all remaining phis and compute the intersection of all predecessor
+  // mappings.
+  for (const auto* block : sequence()->instruction_blocks()) {
+    if (block->PredecessorCount() == 0) continue;
+    const size_t block_index = block->rpo_number().ToSize();
+    auto* mapping = outgoing_mappings[block_index];
+    bool initialized = false;
+    // Walk the predecessors in reverse to ensure Intersect works correctly;
+    // if it did nothing, the second pass would do exactly what the first
+    // pass did.
+    for (size_t phi_input = block->PredecessorCount() - 1; true; --phi_input) {
+      const size_t pred_block_index = block->predecessors()[phi_input].ToSize();
+      auto* incoming = outgoing_mappings[pred_block_index];
+      if (phi_input != 0) incoming->RunPhis(sequence(), block, phi_input);
+      if (!initialized) {
+        mapping->InitializeIntersection(incoming);
+        initialized = true;
+      } else {
+        mapping->Intersect(incoming);
+      }
+      if (phi_input == 0) break;
+    }
+  }
+  // Construct all mappings again, this time using the intersection mapping
+  // above as the incoming mapping instead of the result from the first
+  // predecessor.
+  ConstructOutgoingMappings(&outgoing_mappings, false);
+}
+
+
+void RegisterAllocatorVerifier::ConstructOutgoingMappings(
+    OutgoingMappings* outgoing_mappings, bool initial_pass) {
+  // Compute the locations of all virtual registers leaving every block, using
+  // only the first predecessor as the source for the input mapping.
+  for (const auto* block : sequence()->instruction_blocks()) {
+    const size_t block_index = block->rpo_number().ToSize();
+    auto* current = outgoing_mappings->at(block_index);
+    CHECK(initial_pass == (current == nullptr));
+    // Initialize current.
+    if (!initial_pass) {
+      // Skip the check the second time around for blocks without multiple
+      // predecessors, as we already handled them in the initial run.
+      if (block->PredecessorCount() <= 1) continue;
+      current->InitializeFromIntersection();
+    } else {
+      current = new (zone()) OutgoingMapping(zone());
+      outgoing_mappings->at(block_index) = current;
+      // Copy outgoing values from predecessor block.
+      current->InitializeFromFirstPredecessor(sequence(), outgoing_mappings,
+                                              block);
+    }
+    // Update current with gaps and operands for all instructions in block.
+    for (int instr_index = block->code_start(); instr_index < block->code_end();
+         ++instr_index) {
+      const auto& instr_constraint = constraints_[instr_index];
+      const auto* instr = instr_constraint.instruction_;
+      const auto* op_constraints = instr_constraint.operand_constraints_;
+      size_t count = 0;
+      for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
+        if (op_constraints[count].type_ == kImmediate) continue;
+        auto it = current->locations()->find(instr->InputAt(i));
+        int virtual_register = op_constraints[count].virtual_register_;
+        CHECK(it != current->locations()->end());
+        CHECK_EQ(it->second, virtual_register);
+      }
+      for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
+        current->Drop(instr->TempAt(i));
+      }
+      if (instr->IsCall()) {
+        current->DropRegisters(config());
+      }
+      for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
+        current->Drop(instr->OutputAt(i));
+        int virtual_register = op_constraints[count].virtual_register_;
+        current->Map(instr->OutputAt(i), virtual_register);
+      }
+      if (instr->IsGapMoves()) {
+        const auto* gap = GapInstruction::cast(instr);
+        current->RunGapInstruction(zone(), gap);
+      }
+    }
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/register-allocator-verifier.h b/src/compiler/register-allocator-verifier.h
new file mode 100644
index 0000000..4e35dc2
--- /dev/null
+++ b/src/compiler/register-allocator-verifier.h
@@ -0,0 +1,86 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGISTER_ALLOCATOR_VERIFIER_H_
+#define V8_REGISTER_ALLOCATOR_VERIFIER_H_
+
+#include "src/v8.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class InstructionOperand;
+class InstructionSequence;
+
+class RegisterAllocatorVerifier FINAL : public ZoneObject {
+ public:
+  RegisterAllocatorVerifier(Zone* zone, const RegisterConfiguration* config,
+                            const InstructionSequence* sequence);
+
+  void VerifyAssignment();
+  void VerifyGapMoves();
+
+ private:
+  enum ConstraintType {
+    kConstant,
+    kImmediate,
+    kRegister,
+    kFixedRegister,
+    kDoubleRegister,
+    kFixedDoubleRegister,
+    kFixedSlot,
+    kNone,
+    kNoneDouble,
+    kSameAsFirst
+  };
+
+  struct OperandConstraint {
+    ConstraintType type_;
+    int value_;  // subkind index when relevant
+    int virtual_register_;
+  };
+
+  struct InstructionConstraint {
+    const Instruction* instruction_;
+    size_t operand_constaints_size_;
+    OperandConstraint* operand_constraints_;
+  };
+
+  class OutgoingMapping;
+
+  typedef ZoneVector<InstructionConstraint> Constraints;
+  typedef ZoneVector<OutgoingMapping*> OutgoingMappings;
+
+  Zone* zone() const { return zone_; }
+  const RegisterConfiguration* config() { return config_; }
+  const InstructionSequence* sequence() const { return sequence_; }
+  Constraints* constraints() { return &constraints_; }
+
+  static void VerifyInput(const OperandConstraint& constraint);
+  static void VerifyTemp(const OperandConstraint& constraint);
+  static void VerifyOutput(const OperandConstraint& constraint);
+
+  void BuildConstraint(const InstructionOperand* op,
+                       OperandConstraint* constraint);
+  void CheckConstraint(const InstructionOperand* op,
+                       const OperandConstraint* constraint);
+
+  void ConstructOutgoingMappings(OutgoingMappings* outgoing_mappings,
+                                 bool initial_pass);
+
+  Zone* const zone_;
+  const RegisterConfiguration* config_;
+  const InstructionSequence* const sequence_;
+  Constraints constraints_;
+
+  DISALLOW_COPY_AND_ASSIGN(RegisterAllocatorVerifier);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_REGISTER_ALLOCATOR_VERIFIER_H_
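
The verifier is a two-call protocol: BuildConstraint CHECKs that non-constant
operands are still unallocated, so the object must be constructed before
register allocation runs, and the Verify methods are then called on the
allocated sequence. A hedged sketch of that wiring, inside the same
namespaces; RunRegisterAllocator is a hypothetical stand-in for the pipeline's
real allocation phase:

    // Sketch only: the verifier API is from this patch; the allocation call
    // below is a hypothetical placeholder.
    void RunRegisterAllocator(InstructionSequence* sequence);

    void AllocateAndVerify(Zone* zone, const RegisterConfiguration* config,
                           InstructionSequence* sequence) {
      RegisterAllocatorVerifier verifier(zone, config, sequence);
      RunRegisterAllocator(sequence);  // hypothetical allocation step
      verifier.VerifyAssignment();     // operands satisfy recorded constraints
      verifier.VerifyGapMoves();       // gap moves route each vreg correctly
    }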
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 972a904..9eb4a47 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -2,10 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/compiler/register-allocator.h"
-
 #include "src/compiler/linkage.h"
-#include "src/hydrogen.h"
+#include "src/compiler/register-allocator.h"
 #include "src/string-stream.h"
 
 namespace v8 {
@@ -22,15 +20,32 @@
 }
 
 
+static void TraceAlloc(const char* msg, ...) {
+  if (FLAG_trace_alloc) {
+    va_list arguments;
+    va_start(arguments, msg);
+    base::OS::VPrint(msg, arguments);
+    va_end(arguments);
+  }
+}
+
+
+static void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
+  auto it = std::find(v->begin(), v->end(), range);
+  DCHECK(it != v->end());
+  v->erase(it);
+}
+
+
 UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
                          InstructionOperand* hint)
     : operand_(operand),
       hint_(hint),
       pos_(pos),
-      next_(NULL),
+      next_(nullptr),
       requires_reg_(false),
       register_beneficial_(true) {
-  if (operand_ != NULL && operand_->IsUnallocated()) {
+  if (operand_ != nullptr && operand_->IsUnallocated()) {
     const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
     requires_reg_ = unalloc->HasRegisterPolicy();
     register_beneficial_ = !unalloc->HasAnyPolicy();
@@ -40,7 +55,7 @@
 
 
 bool UsePosition::HasHint() const {
-  return hint_ != NULL && !hint_->IsUnallocated();
+  return hint_ != nullptr && !hint_->IsUnallocated();
 }
 
 
@@ -52,19 +67,29 @@
 
 void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
   DCHECK(Contains(pos) && pos.Value() != start().Value());
-  UseInterval* after = new (zone) UseInterval(pos, end_);
+  auto after = new (zone) UseInterval(pos, end_);
   after->next_ = next_;
   next_ = after;
   end_ = pos;
 }
 
 
+struct LiveRange::SpillAtDefinitionList : ZoneObject {
+  SpillAtDefinitionList(int gap_index, InstructionOperand* operand,
+                        SpillAtDefinitionList* next)
+      : gap_index(gap_index), operand(operand), next(next) {}
+  const int gap_index;
+  InstructionOperand* const operand;
+  SpillAtDefinitionList* const next;
+};
+
+
 #ifdef DEBUG
 
 
 void LiveRange::Verify() const {
   UsePosition* cur = first_pos_;
-  while (cur != NULL) {
+  while (cur != nullptr) {
     DCHECK(Start().Value() <= cur->pos().Value() &&
            cur->pos().Value() <= End().Value());
     cur = cur->next();
@@ -74,7 +99,7 @@
 
 bool LiveRange::HasOverlap(UseInterval* target) const {
   UseInterval* current_interval = first_interval_;
-  while (current_interval != NULL) {
+  while (current_interval != nullptr) {
     // Intervals overlap if the start of one is contained in the other.
     if (current_interval->Contains(target->start()) ||
         target->Contains(current_interval->start())) {
@@ -96,52 +121,87 @@
       is_non_loop_phi_(false),
       kind_(UNALLOCATED_REGISTERS),
       assigned_register_(kInvalidAssignment),
-      last_interval_(NULL),
-      first_interval_(NULL),
-      first_pos_(NULL),
-      parent_(NULL),
-      next_(NULL),
-      current_interval_(NULL),
-      last_processed_use_(NULL),
-      current_hint_operand_(NULL),
-      spill_operand_(new (zone) InstructionOperand()),
-      spill_start_index_(kMaxInt) {}
+      last_interval_(nullptr),
+      first_interval_(nullptr),
+      first_pos_(nullptr),
+      parent_(nullptr),
+      next_(nullptr),
+      current_interval_(nullptr),
+      last_processed_use_(nullptr),
+      current_hint_operand_(nullptr),
+      spill_start_index_(kMaxInt),
+      spill_type_(SpillType::kNoSpillType),
+      spill_operand_(nullptr),
+      spills_at_definition_(nullptr) {}
 
 
 void LiveRange::set_assigned_register(int reg, Zone* zone) {
   DCHECK(!HasRegisterAssigned() && !IsSpilled());
   assigned_register_ = reg;
-  ConvertOperands(zone);
+  // TODO(dcarney): stop aliasing hint operands.
+  ConvertUsesToOperand(CreateAssignedOperand(zone));
 }
 
 
-void LiveRange::MakeSpilled(Zone* zone) {
+void LiveRange::MakeSpilled() {
   DCHECK(!IsSpilled());
-  DCHECK(TopLevel()->HasAllocatedSpillOperand());
+  DCHECK(!TopLevel()->HasNoSpillType());
   spilled_ = true;
   assigned_register_ = kInvalidAssignment;
-  ConvertOperands(zone);
 }
 
 
-bool LiveRange::HasAllocatedSpillOperand() const {
-  DCHECK(spill_operand_ != NULL);
-  return !spill_operand_->IsIgnored();
+void LiveRange::SpillAtDefinition(Zone* zone, int gap_index,
+                                  InstructionOperand* operand) {
+  DCHECK(HasNoSpillType());
+  spills_at_definition_ = new (zone)
+      SpillAtDefinitionList(gap_index, operand, spills_at_definition_);
+}
+
+
+void LiveRange::CommitSpillsAtDefinition(InstructionSequence* sequence,
+                                         InstructionOperand* op) {
+  auto to_spill = TopLevel()->spills_at_definition_;
+  if (to_spill == nullptr) return;
+  auto zone = sequence->zone();
+  for (; to_spill != nullptr; to_spill = to_spill->next) {
+    auto gap = sequence->GapAt(to_spill->gap_index);
+    auto move = gap->GetOrCreateParallelMove(GapInstruction::START, zone);
+    move->AddMove(to_spill->operand, op, zone);
+  }
+  TopLevel()->spills_at_definition_ = nullptr;
 }
 
 
 void LiveRange::SetSpillOperand(InstructionOperand* operand) {
+  DCHECK(HasNoSpillType());
   DCHECK(!operand->IsUnallocated());
-  DCHECK(spill_operand_ != NULL);
-  DCHECK(spill_operand_->IsIgnored());
-  spill_operand_->ConvertTo(operand->kind(), operand->index());
+  spill_type_ = SpillType::kSpillOperand;
+  spill_operand_ = operand;
+}
+
+
+void LiveRange::SetSpillRange(SpillRange* spill_range) {
+  DCHECK(HasNoSpillType() || HasSpillRange());
+  DCHECK_NE(spill_range, nullptr);
+  spill_type_ = SpillType::kSpillRange;
+  spill_range_ = spill_range;
+}
+
+
+void LiveRange::CommitSpillOperand(InstructionOperand* operand) {
+  DCHECK(HasSpillRange());
+  DCHECK(!operand->IsUnallocated());
+  DCHECK(!IsChild());
+  spill_type_ = SpillType::kSpillOperand;
+  spill_operand_ = operand;
 }
 
 
 UsePosition* LiveRange::NextUsePosition(LifetimePosition start) {
   UsePosition* use_pos = last_processed_use_;
-  if (use_pos == NULL) use_pos = first_pos();
-  while (use_pos != NULL && use_pos->pos().Value() < start.Value()) {
+  if (use_pos == nullptr) use_pos = first_pos();
+  while (use_pos != nullptr && use_pos->pos().Value() < start.Value()) {
     use_pos = use_pos->next();
   }
   last_processed_use_ = use_pos;
@@ -152,7 +212,7 @@
 UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
     LifetimePosition start) {
   UsePosition* pos = NextUsePosition(start);
-  while (pos != NULL && !pos->RegisterIsBeneficial()) {
+  while (pos != nullptr && !pos->RegisterIsBeneficial()) {
     pos = pos->next();
   }
   return pos;
@@ -161,9 +221,9 @@
 
 UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
     LifetimePosition start) {
-  UsePosition* pos = first_pos();
-  UsePosition* prev = NULL;
-  while (pos != NULL && pos->pos().Value() < start.Value()) {
+  auto pos = first_pos();
+  UsePosition* prev = nullptr;
+  while (pos != nullptr && pos->pos().Value() < start.Value()) {
     if (pos->RegisterIsBeneficial()) prev = pos;
     pos = pos->next();
   }
@@ -173,7 +233,7 @@
 
 UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) {
   UsePosition* pos = NextUsePosition(start);
-  while (pos != NULL && !pos->RequiresRegister()) {
+  while (pos != nullptr && !pos->RequiresRegister()) {
     pos = pos->next();
   }
   return pos;
@@ -183,15 +243,15 @@
 bool LiveRange::CanBeSpilled(LifetimePosition pos) {
   // We cannot spill a live range that has a use requiring a register
   // at the current or the immediate next position.
-  UsePosition* use_pos = NextRegisterPosition(pos);
-  if (use_pos == NULL) return true;
+  auto use_pos = NextRegisterPosition(pos);
+  if (use_pos == nullptr) return true;
   return use_pos->pos().Value() >
          pos.NextInstruction().InstructionEnd().Value();
 }
 
 
-InstructionOperand* LiveRange::CreateAssignedOperand(Zone* zone) {
-  InstructionOperand* op = NULL;
+InstructionOperand* LiveRange::CreateAssignedOperand(Zone* zone) const {
+  InstructionOperand* op = nullptr;
   if (HasRegisterAssigned()) {
     DCHECK(!IsSpilled());
     switch (Kind()) {
@@ -204,15 +264,11 @@
       default:
         UNREACHABLE();
     }
-  } else if (IsSpilled()) {
+  } else {
+    DCHECK(IsSpilled());
     DCHECK(!HasRegisterAssigned());
     op = TopLevel()->GetSpillOperand();
     DCHECK(!op->IsUnallocated());
-  } else {
-    UnallocatedOperand* unalloc =
-        new (zone) UnallocatedOperand(UnallocatedOperand::NONE);
-    unalloc->set_virtual_register(id_);
-    op = unalloc;
   }
   return op;
 }
@@ -220,9 +276,9 @@
 
 UseInterval* LiveRange::FirstSearchIntervalForPosition(
     LifetimePosition position) const {
-  if (current_interval_ == NULL) return first_interval_;
+  if (current_interval_ == nullptr) return first_interval_;
   if (current_interval_->start().Value() > position.Value()) {
-    current_interval_ = NULL;
+    current_interval_ = nullptr;
     return first_interval_;
   }
   return current_interval_;
@@ -231,11 +287,10 @@
 
 void LiveRange::AdvanceLastProcessedMarker(
     UseInterval* to_start_of, LifetimePosition but_not_past) const {
-  if (to_start_of == NULL) return;
+  if (to_start_of == nullptr) return;
   if (to_start_of->start().Value() > but_not_past.Value()) return;
-  LifetimePosition start = current_interval_ == NULL
-                               ? LifetimePosition::Invalid()
-                               : current_interval_->start();
+  auto start = current_interval_ == nullptr ? LifetimePosition::Invalid()
+                                            : current_interval_->start();
   if (to_start_of->start().Value() > start.Value()) {
     current_interval_ = to_start_of;
   }
@@ -249,7 +304,7 @@
   // Find the last interval that ends before the position. If the
   // position is contained in one of the intervals in the chain, we
   // split that interval and use the first part.
-  UseInterval* current = FirstSearchIntervalForPosition(position);
+  auto current = FirstSearchIntervalForPosition(position);
 
   // If the split position coincides with the beginning of a use interval
   // we need to split use positons in a special way.
@@ -260,12 +315,12 @@
     current = first_interval_;
   }
 
-  while (current != NULL) {
+  while (current != nullptr) {
     if (current->Contains(position)) {
       current->SplitAt(position, zone);
       break;
     }
-    UseInterval* next = current->next();
+    auto next = current->next();
     if (next->start().Value() >= position.Value()) {
       split_at_start = (next->start().Value() == position.Value());
       break;
@@ -274,8 +329,8 @@
   }
 
   // Partition original use intervals to the two live ranges.
-  UseInterval* before = current;
-  UseInterval* after = before->next();
+  auto before = current;
+  auto after = before->next();
   result->last_interval_ =
       (last_interval_ == before)
           ? after            // Only interval in the range after split.
@@ -285,39 +340,41 @@
 
   // Find the last use position before the split and the first use
   // position after it.
-  UsePosition* use_after = first_pos_;
-  UsePosition* use_before = NULL;
+  auto use_after = first_pos_;
+  UsePosition* use_before = nullptr;
   if (split_at_start) {
     // The split position coincides with the beginning of a use interval (the
     // end of a lifetime hole). Use at this position should be attributed to
     // the split child because split child owns use interval covering it.
-    while (use_after != NULL && use_after->pos().Value() < position.Value()) {
+    while (use_after != nullptr &&
+           use_after->pos().Value() < position.Value()) {
       use_before = use_after;
       use_after = use_after->next();
     }
   } else {
-    while (use_after != NULL && use_after->pos().Value() <= position.Value()) {
+    while (use_after != nullptr &&
+           use_after->pos().Value() <= position.Value()) {
       use_before = use_after;
       use_after = use_after->next();
     }
   }
 
   // Partition original use positions to the two live ranges.
-  if (use_before != NULL) {
-    use_before->next_ = NULL;
+  if (use_before != nullptr) {
+    use_before->next_ = nullptr;
   } else {
-    first_pos_ = NULL;
+    first_pos_ = nullptr;
   }
   result->first_pos_ = use_after;
 
   // Discard cached iteration state. It might be pointing
   // to the use that no longer belongs to this live range.
-  last_processed_use_ = NULL;
-  current_interval_ = NULL;
+  last_processed_use_ = nullptr;
+  current_interval_ = nullptr;
 
   // Link the new live range in the chain before any of the other
   // ranges linked from the range before the split.
-  result->parent_ = (parent_ == NULL) ? this : parent_;
+  result->parent_ = (parent_ == nullptr) ? this : parent_;
   result->kind_ = result->parent_->kind_;
   result->next_ = next_;
   next_ = result;
@@ -339,9 +396,9 @@
   LifetimePosition other_start = other->Start();
   if (start.Value() == other_start.Value()) {
     UsePosition* pos = first_pos();
-    if (pos == NULL) return false;
+    if (pos == nullptr) return false;
     UsePosition* other_pos = other->first_pos();
-    if (other_pos == NULL) return true;
+    if (other_pos == nullptr) return true;
     return pos->pos().Value() < other_pos->pos().Value();
   }
   return start.Value() < other_start.Value();
@@ -349,9 +406,8 @@
 
 
 void LiveRange::ShortenTo(LifetimePosition start) {
-  RegisterAllocator::TraceAlloc("Shorten live range %d to [%d\n", id_,
-                                start.Value());
-  DCHECK(first_interval_ != NULL);
+  TraceAlloc("Shorten live range %d to [%d\n", id_, start.Value());
+  DCHECK(first_interval_ != nullptr);
   DCHECK(first_interval_->start().Value() <= start.Value());
   DCHECK(start.Value() < first_interval_->end().Value());
   first_interval_->set_start(start);
@@ -360,10 +416,10 @@
 
 void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end,
                                Zone* zone) {
-  RegisterAllocator::TraceAlloc("Ensure live range %d in interval [%d %d[\n",
-                                id_, start.Value(), end.Value());
-  LifetimePosition new_end = end;
-  while (first_interval_ != NULL &&
+  TraceAlloc("Ensure live range %d in interval [%d %d[\n", id_, start.Value(),
+             end.Value());
+  auto new_end = end;
+  while (first_interval_ != nullptr &&
          first_interval_->start().Value() <= end.Value()) {
     if (first_interval_->end().Value() > end.Value()) {
       new_end = first_interval_->end();
@@ -371,10 +427,10 @@
     first_interval_ = first_interval_->next();
   }
 
-  UseInterval* new_interval = new (zone) UseInterval(start, new_end);
+  auto new_interval = new (zone) UseInterval(start, new_end);
   new_interval->next_ = first_interval_;
   first_interval_ = new_interval;
-  if (new_interval->next() == NULL) {
+  if (new_interval->next() == nullptr) {
     last_interval_ = new_interval;
   }
 }
@@ -382,17 +438,17 @@
 
 void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end,
                                Zone* zone) {
-  RegisterAllocator::TraceAlloc("Add to live range %d interval [%d %d[\n", id_,
-                                start.Value(), end.Value());
-  if (first_interval_ == NULL) {
-    UseInterval* interval = new (zone) UseInterval(start, end);
+  TraceAlloc("Add to live range %d interval [%d %d[\n", id_, start.Value(),
+             end.Value());
+  if (first_interval_ == nullptr) {
+    auto interval = new (zone) UseInterval(start, end);
     first_interval_ = interval;
     last_interval_ = interval;
   } else {
     if (end.Value() == first_interval_->start().Value()) {
       first_interval_->set_start(start);
     } else if (end.Value() < first_interval_->start().Value()) {
-      UseInterval* interval = new (zone) UseInterval(start, end);
+      auto interval = new (zone) UseInterval(start, end);
       interval->set_next(first_interval_);
       first_interval_ = interval;
     } else {
@@ -410,19 +466,18 @@
 void LiveRange::AddUsePosition(LifetimePosition pos,
                                InstructionOperand* operand,
                                InstructionOperand* hint, Zone* zone) {
-  RegisterAllocator::TraceAlloc("Add to live range %d use position %d\n", id_,
-                                pos.Value());
-  UsePosition* use_pos = new (zone) UsePosition(pos, operand, hint);
-  UsePosition* prev_hint = NULL;
-  UsePosition* prev = NULL;
-  UsePosition* current = first_pos_;
-  while (current != NULL && current->pos().Value() < pos.Value()) {
+  TraceAlloc("Add to live range %d use position %d\n", id_, pos.Value());
+  auto use_pos = new (zone) UsePosition(pos, operand, hint);
+  UsePosition* prev_hint = nullptr;
+  UsePosition* prev = nullptr;
+  auto current = first_pos_;
+  while (current != nullptr && current->pos().Value() < pos.Value()) {
     prev_hint = current->HasHint() ? current : prev_hint;
     prev = current;
     current = current->next();
   }
 
-  if (prev == NULL) {
+  if (prev == nullptr) {
     use_pos->set_next(first_pos_);
     first_pos_ = use_pos;
   } else {
@@ -430,16 +485,15 @@
     prev->next_ = use_pos;
   }
 
-  if (prev_hint == NULL && use_pos->HasHint()) {
+  if (prev_hint == nullptr && use_pos->HasHint()) {
     current_hint_operand_ = hint;
   }
 }
 
 
-void LiveRange::ConvertOperands(Zone* zone) {
-  InstructionOperand* op = CreateAssignedOperand(zone);
-  UsePosition* use_pos = first_pos();
-  while (use_pos != NULL) {
+void LiveRange::ConvertUsesToOperand(InstructionOperand* op) {
+  auto use_pos = first_pos();
+  while (use_pos != nullptr) {
     DCHECK(Start().Value() <= use_pos->pos().Value() &&
            use_pos->pos().Value() <= End().Value());
 
@@ -462,10 +516,10 @@
 
 bool LiveRange::Covers(LifetimePosition position) {
   if (!CanCover(position)) return false;
-  UseInterval* start_search = FirstSearchIntervalForPosition(position);
-  for (UseInterval* interval = start_search; interval != NULL;
+  auto start_search = FirstSearchIntervalForPosition(position);
+  for (auto interval = start_search; interval != nullptr;
        interval = interval->next()) {
-    DCHECK(interval->next() == NULL ||
+    DCHECK(interval->next() == nullptr ||
            interval->next()->start().Value() >= interval->start().Value());
     AdvanceLastProcessedMarker(interval, position);
     if (interval->Contains(position)) return true;
@@ -476,20 +530,20 @@
 
 
 LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
-  UseInterval* b = other->first_interval();
-  if (b == NULL) return LifetimePosition::Invalid();
-  LifetimePosition advance_last_processed_up_to = b->start();
-  UseInterval* a = FirstSearchIntervalForPosition(b->start());
-  while (a != NULL && b != NULL) {
+  auto b = other->first_interval();
+  if (b == nullptr) return LifetimePosition::Invalid();
+  auto advance_last_processed_up_to = b->start();
+  auto a = FirstSearchIntervalForPosition(b->start());
+  while (a != nullptr && b != nullptr) {
     if (a->start().Value() > other->End().Value()) break;
     if (b->start().Value() > End().Value()) break;
-    LifetimePosition cur_intersection = a->Intersect(b);
+    auto cur_intersection = a->Intersect(b);
     if (cur_intersection.IsValid()) {
       return cur_intersection;
     }
     if (a->start().Value() < b->start().Value()) {
       a = a->next();
-      if (a == NULL || a->start().Value() > other->End().Value()) break;
+      if (a == nullptr || a->start().Value() > other->End().Value()) break;
       AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
     } else {
       b = b->next();
@@ -499,84 +553,99 @@
 }
 
 
-RegisterAllocator::RegisterAllocator(InstructionSequence* code)
-    : zone_(code->isolate()),
+RegisterAllocator::RegisterAllocator(const RegisterConfiguration* config,
+                                     Zone* zone, Frame* frame,
+                                     InstructionSequence* code,
+                                     const char* debug_name)
+    : local_zone_(zone),
+      frame_(frame),
       code_(code),
-      live_in_sets_(code->BasicBlockCount(), zone()),
-      live_ranges_(code->VirtualRegisterCount() * 2, zone()),
-      fixed_live_ranges_(NULL),
-      fixed_double_live_ranges_(NULL),
-      unhandled_live_ranges_(code->VirtualRegisterCount() * 2, zone()),
-      active_live_ranges_(8, zone()),
-      inactive_live_ranges_(8, zone()),
-      reusable_slots_(8, zone()),
+      debug_name_(debug_name),
+      config_(config),
+      phi_map_(PhiMap::key_compare(), PhiMap::allocator_type(local_zone())),
+      live_in_sets_(code->InstructionBlockCount(), nullptr, local_zone()),
+      live_ranges_(code->VirtualRegisterCount() * 2, nullptr, local_zone()),
+      fixed_live_ranges_(this->config()->num_general_registers(), nullptr,
+                         local_zone()),
+      fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr,
+                                local_zone()),
+      unhandled_live_ranges_(local_zone()),
+      active_live_ranges_(local_zone()),
+      inactive_live_ranges_(local_zone()),
+      reusable_slots_(local_zone()),
+      spill_ranges_(local_zone()),
       mode_(UNALLOCATED_REGISTERS),
       num_registers_(-1),
-      allocation_ok_(true) {}
-
-
-void RegisterAllocator::InitializeLivenessAnalysis() {
-  // Initialize the live_in sets for each block to NULL.
-  int block_count = code()->BasicBlockCount();
-  live_in_sets_.Initialize(block_count, zone());
-  live_in_sets_.AddBlock(NULL, block_count, zone());
+      allocation_ok_(true) {
+  DCHECK(this->config()->num_general_registers() <=
+         RegisterConfiguration::kMaxGeneralRegisters);
+  DCHECK(this->config()->num_double_registers() <=
+         RegisterConfiguration::kMaxDoubleRegisters);
+  // TryAllocateFreeReg and AllocateBlockedReg assume this
+  // when allocating local arrays.
+  DCHECK(RegisterConfiguration::kMaxDoubleRegisters >=
+         this->config()->num_general_registers());
+  unhandled_live_ranges().reserve(
+      static_cast<size_t>(code->VirtualRegisterCount() * 2));
+  active_live_ranges().reserve(8);
+  inactive_live_ranges().reserve(8);
+  reusable_slots().reserve(8);
+  spill_ranges().reserve(8);
+  assigned_registers_ =
+      new (code_zone()) BitVector(config->num_general_registers(), code_zone());
+  assigned_double_registers_ = new (code_zone())
+      BitVector(config->num_aliased_double_registers(), code_zone());
+  frame->SetAllocatedRegisters(assigned_registers_);
+  frame->SetAllocatedDoubleRegisters(assigned_double_registers_);
 }
 
 
-BitVector* RegisterAllocator::ComputeLiveOut(BasicBlock* block) {
+BitVector* RegisterAllocator::ComputeLiveOut(const InstructionBlock* block) {
   // Compute live out for the given block, except not including backward
   // successor edges.
-  BitVector* live_out =
-      new (zone()) BitVector(code()->VirtualRegisterCount(), zone());
+  auto live_out = new (local_zone())
+      BitVector(code()->VirtualRegisterCount(), local_zone());
 
   // Process all successor blocks.
-  BasicBlock::Successors successors = block->successors();
-  for (BasicBlock::Successors::iterator i = successors.begin();
-       i != successors.end(); ++i) {
+  for (auto succ : block->successors()) {
     // Add values live on entry to the successor. Note the successor's
     // live_in will not be computed yet for backwards edges.
-    BasicBlock* successor = *i;
-    BitVector* live_in = live_in_sets_[successor->rpo_number_];
-    if (live_in != NULL) live_out->Union(*live_in);
+    auto live_in = live_in_sets_[succ.ToSize()];
+    if (live_in != nullptr) live_out->Union(*live_in);
 
     // All phi input operands corresponding to this successor edge are live
     // out from this block.
-    int index = successor->PredecessorIndexOf(block);
-    DCHECK(index >= 0);
-    DCHECK(index < static_cast<int>(successor->PredecessorCount()));
-    for (BasicBlock::const_iterator j = successor->begin();
-         j != successor->end(); ++j) {
-      Node* phi = *j;
-      if (phi->opcode() != IrOpcode::kPhi) continue;
-      Node* input = phi->InputAt(index);
-      live_out->Add(input->id());
+    auto successor = code()->InstructionBlockAt(succ);
+    size_t index = successor->PredecessorIndexOf(block->rpo_number());
+    DCHECK(index < successor->PredecessorCount());
+    for (auto phi : successor->phis()) {
+      live_out->Add(phi->operands()[index]);
     }
   }
-
   return live_out;
 }
 
 
-void RegisterAllocator::AddInitialIntervals(BasicBlock* block,
+void RegisterAllocator::AddInitialIntervals(const InstructionBlock* block,
                                             BitVector* live_out) {
   // Add an interval that includes the entire block to the live range for
   // each live_out value.
-  LifetimePosition start =
+  auto start =
       LifetimePosition::FromInstructionIndex(block->first_instruction_index());
-  LifetimePosition end = LifetimePosition::FromInstructionIndex(
-                             block->last_instruction_index()).NextInstruction();
+  auto end = LifetimePosition::FromInstructionIndex(
+                 block->last_instruction_index()).NextInstruction();
   BitVector::Iterator iterator(live_out);
   while (!iterator.Done()) {
     int operand_index = iterator.Current();
-    LiveRange* range = LiveRangeFor(operand_index);
-    range->AddUseInterval(start, end, zone());
+    auto range = LiveRangeFor(operand_index);
+    range->AddUseInterval(start, end, local_zone());
     iterator.Advance();
   }
 }
 
 
 int RegisterAllocator::FixedDoubleLiveRangeID(int index) {
-  return -index - 1 - Register::kMaxNumAllocatableRegisters;
+  return -index - 1 - config()->num_general_registers();
 }
 
 
@@ -598,7 +667,7 @@
   }
   if (is_tagged) {
     TraceAlloc("Fixed reg is tagged at %d\n", pos);
-    Instruction* instr = InstructionAt(pos);
+    auto instr = InstructionAt(pos);
     if (instr->HasPointerMap()) {
       instr->pointer_map()->RecordPointer(operand, code_zone());
     }
@@ -608,51 +677,52 @@
 
 
 LiveRange* RegisterAllocator::FixedLiveRangeFor(int index) {
-  DCHECK(index < Register::kMaxNumAllocatableRegisters);
-  LiveRange* result = fixed_live_ranges_[index];
-  if (result == NULL) {
+  DCHECK(index < config()->num_general_registers());
+  auto result = fixed_live_ranges()[index];
+  if (result == nullptr) {
     // TODO(titzer): add a utility method to allocate a new LiveRange:
     // The LiveRange object itself can go in this zone, but the
     // InstructionOperand needs
     // to go in the code zone, since it may survive register allocation.
-    result = new (zone()) LiveRange(FixedLiveRangeID(index), code_zone());
+    result = new (local_zone()) LiveRange(FixedLiveRangeID(index), code_zone());
     DCHECK(result->IsFixed());
     result->kind_ = GENERAL_REGISTERS;
     SetLiveRangeAssignedRegister(result, index);
-    fixed_live_ranges_[index] = result;
+    fixed_live_ranges()[index] = result;
   }
   return result;
 }
 
 
 LiveRange* RegisterAllocator::FixedDoubleLiveRangeFor(int index) {
-  DCHECK(index < DoubleRegister::NumAllocatableRegisters());
-  LiveRange* result = fixed_double_live_ranges_[index];
-  if (result == NULL) {
-    result = new (zone()) LiveRange(FixedDoubleLiveRangeID(index), code_zone());
+  DCHECK(index < config()->num_aliased_double_registers());
+  auto result = fixed_double_live_ranges()[index];
+  if (result == nullptr) {
+    result = new (local_zone())
+        LiveRange(FixedDoubleLiveRangeID(index), code_zone());
     DCHECK(result->IsFixed());
     result->kind_ = DOUBLE_REGISTERS;
     SetLiveRangeAssignedRegister(result, index);
-    fixed_double_live_ranges_[index] = result;
+    fixed_double_live_ranges()[index] = result;
   }
   return result;
 }
 
 
 LiveRange* RegisterAllocator::LiveRangeFor(int index) {
-  if (index >= live_ranges_.length()) {
-    live_ranges_.AddBlock(NULL, index - live_ranges_.length() + 1, zone());
+  if (index >= static_cast<int>(live_ranges().size())) {
+    live_ranges().resize(index + 1, nullptr);
   }
-  LiveRange* result = live_ranges_[index];
-  if (result == NULL) {
-    result = new (zone()) LiveRange(index, code_zone());
-    live_ranges_[index] = result;
+  auto result = live_ranges()[index];
+  if (result == nullptr) {
+    result = new (local_zone()) LiveRange(index, code_zone());
+    live_ranges()[index] = result;
   }
   return result;
 }
 
 
-GapInstruction* RegisterAllocator::GetLastGap(BasicBlock* block) {
+GapInstruction* RegisterAllocator::GetLastGap(const InstructionBlock* block) {
   int last_instruction = block->last_instruction_index();
   return code()->GapAt(last_instruction - 1);
 }
@@ -666,7 +736,7 @@
   } else if (operand->IsDoubleRegister()) {
     return FixedDoubleLiveRangeFor(operand->index());
   } else {
-    return NULL;
+    return nullptr;
   }
 }
 
@@ -674,20 +744,21 @@
 void RegisterAllocator::Define(LifetimePosition position,
                                InstructionOperand* operand,
                                InstructionOperand* hint) {
-  LiveRange* range = LiveRangeFor(operand);
-  if (range == NULL) return;
+  auto range = LiveRangeFor(operand);
+  if (range == nullptr) return;
 
   if (range->IsEmpty() || range->Start().Value() > position.Value()) {
     // Can happen if there is a definition without use.
-    range->AddUseInterval(position, position.NextInstruction(), zone());
-    range->AddUsePosition(position.NextInstruction(), NULL, NULL, zone());
+    range->AddUseInterval(position, position.NextInstruction(), local_zone());
+    range->AddUsePosition(position.NextInstruction(), nullptr, nullptr,
+                          local_zone());
   } else {
     range->ShortenTo(position);
   }
 
   if (operand->IsUnallocated()) {
-    UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
-    range->AddUsePosition(position, unalloc_operand, hint, zone());
+    auto unalloc_operand = UnallocatedOperand::cast(operand);
+    range->AddUsePosition(position, unalloc_operand, hint, local_zone());
   }
 }
 
@@ -696,48 +767,279 @@
                             LifetimePosition position,
                             InstructionOperand* operand,
                             InstructionOperand* hint) {
-  LiveRange* range = LiveRangeFor(operand);
-  if (range == NULL) return;
+  auto range = LiveRangeFor(operand);
+  if (range == nullptr) return;
   if (operand->IsUnallocated()) {
     UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
-    range->AddUsePosition(position, unalloc_operand, hint, zone());
+    range->AddUsePosition(position, unalloc_operand, hint, local_zone());
   }
-  range->AddUseInterval(block_start, position, zone());
+  range->AddUseInterval(block_start, position, local_zone());
 }
 
 
-void RegisterAllocator::AddConstraintsGapMove(int index,
-                                              InstructionOperand* from,
-                                              InstructionOperand* to) {
-  GapInstruction* gap = code()->GapAt(index);
-  ParallelMove* move =
-      gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
-  if (from->IsUnallocated()) {
-    const ZoneList<MoveOperands>* move_operands = move->move_operands();
-    for (int i = 0; i < move_operands->length(); ++i) {
-      MoveOperands cur = move_operands->at(i);
-      InstructionOperand* cur_to = cur.destination();
-      if (cur_to->IsUnallocated()) {
-        if (UnallocatedOperand::cast(cur_to)->virtual_register() ==
-            UnallocatedOperand::cast(from)->virtual_register()) {
-          move->AddMove(cur.source(), to, code_zone());
-          return;
-        }
-      }
-    }
-  }
+void RegisterAllocator::AddGapMove(int index,
+                                   GapInstruction::InnerPosition position,
+                                   InstructionOperand* from,
+                                   InstructionOperand* to) {
+  auto gap = code()->GapAt(index);
+  auto move = gap->GetOrCreateParallelMove(position, code_zone());
   move->AddMove(from, to, code_zone());
 }
 
 
-void RegisterAllocator::MeetRegisterConstraints(BasicBlock* block) {
+static bool AreUseIntervalsIntersecting(UseInterval* interval1,
+                                        UseInterval* interval2) {
+  while (interval1 != nullptr && interval2 != nullptr) {
+    if (interval1->start().Value() < interval2->start().Value()) {
+      if (interval1->end().Value() > interval2->start().Value()) {
+        return true;
+      }
+      interval1 = interval1->next();
+    } else {
+      if (interval2->end().Value() > interval1->start().Value()) {
+        return true;
+      }
+      interval2 = interval2->next();
+    }
+  }
+  return false;
+}
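
AreUseIntervalsIntersecting sweeps both sorted chains at once, always
advancing whichever interval starts earlier. The same test on half-open
(start, end) pairs, as a self-contained sketch (names illustrative):

    #include <cassert>
    #include <utility>
    #include <vector>

    using Interval = std::pair<int, int>;  // half-open [start, end)

    // True when any interval in a overlaps any interval in b; both lists must
    // be sorted by start, matching AreUseIntervalsIntersecting above.
    bool Intersecting(const std::vector<Interval>& a,
                      const std::vector<Interval>& b) {
      size_t i = 0, j = 0;
      while (i < a.size() && j < b.size()) {
        if (a[i].first < b[j].first) {
          if (a[i].second > b[j].first) return true;
          ++i;
        } else {
          if (b[j].second > a[i].first) return true;
          ++j;
        }
      }
      return false;
    }

    int main() {
      assert(!Intersecting({{0, 2}, {4, 6}}, {{2, 4}}));  // touching ends only
      assert(Intersecting({{0, 3}}, {{2, 4}}));
      return 0;
    }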
+
+
+SpillRange::SpillRange(LiveRange* range, Zone* zone) : live_ranges_(zone) {
+  auto src = range->first_interval();
+  UseInterval* result = nullptr;
+  UseInterval* node = nullptr;
+  // Copy the nodes
+  while (src != nullptr) {
+    auto new_node = new (zone) UseInterval(src->start(), src->end());
+    if (result == nullptr) {
+      result = new_node;
+    } else {
+      node->set_next(new_node);
+    }
+    node = new_node;
+    src = src->next();
+  }
+  use_interval_ = result;
+  live_ranges().push_back(range);
+  end_position_ = node->end();
+  DCHECK(!range->HasSpillRange());
+  range->SetSpillRange(this);
+}
+
+
+bool SpillRange::IsIntersectingWith(SpillRange* other) const {
+  if (this->use_interval_ == nullptr || other->use_interval_ == nullptr ||
+      this->End().Value() <= other->use_interval_->start().Value() ||
+      other->End().Value() <= this->use_interval_->start().Value()) {
+    return false;
+  }
+  return AreUseIntervalsIntersecting(use_interval_, other->use_interval_);
+}
+
+
+bool SpillRange::TryMerge(SpillRange* other) {
+  if (Kind() != other->Kind() || IsIntersectingWith(other)) return false;
+
+  auto max = LifetimePosition::MaxPosition();
+  if (End().Value() < other->End().Value() &&
+      other->End().Value() != max.Value()) {
+    end_position_ = other->End();
+  }
+  other->end_position_ = max;
+
+  MergeDisjointIntervals(other->use_interval_);
+  other->use_interval_ = nullptr;
+
+  for (auto range : other->live_ranges()) {
+    DCHECK(range->GetSpillRange() == other);
+    range->SetSpillRange(this);
+  }
+
+  live_ranges().insert(live_ranges().end(), other->live_ranges().begin(),
+                       other->live_ranges().end());
+  other->live_ranges().clear();
+
+  return true;
+}
+
+
+void SpillRange::SetOperand(InstructionOperand* op) {
+  for (auto range : live_ranges()) {
+    DCHECK(range->GetSpillRange() == this);
+    range->CommitSpillOperand(op);
+  }
+}
+
+
+void SpillRange::MergeDisjointIntervals(UseInterval* other) {
+  UseInterval* tail = nullptr;
+  auto current = use_interval_;
+  while (other != nullptr) {
+    // Make sure the 'current' list starts first
+    if (current == nullptr ||
+        current->start().Value() > other->start().Value()) {
+      std::swap(current, other);
+    }
+    // Check disjointness
+    DCHECK(other == nullptr ||
+           current->end().Value() <= other->start().Value());
+    // Append the 'current' node to the result accumulator and move forward
+    if (tail == nullptr) {
+      use_interval_ = current;
+    } else {
+      tail->set_next(current);
+    }
+    tail = current;
+    current = current->next();
+  }
+  // Other list is empty => we are done
+}
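
MergeDisjointIntervals is a plain sorted-list merge whose only extra demand is
the disjointness the DCHECK enforces. A compact vector-based equivalent
(illustrative only):

    #include <cassert>
    #include <utility>
    #include <vector>

    using Interval = std::pair<int, int>;  // half-open [start, end)

    // Merge two sorted, mutually disjoint interval lists into one sorted
    // list, mirroring the pointer-splicing loop above.
    std::vector<Interval> MergeDisjoint(const std::vector<Interval>& a,
                                        const std::vector<Interval>& b) {
      std::vector<Interval> out;
      size_t i = 0, j = 0;
      while (i < a.size() || j < b.size()) {
        bool take_a =
            j == b.size() || (i < a.size() && a[i].first < b[j].first);
        const Interval& next = take_a ? a[i++] : b[j++];
        assert(out.empty() || out.back().second <= next.first);  // disjoint
        out.push_back(next);
      }
      return out;
    }

    int main() {
      auto merged = MergeDisjoint({{0, 2}, {6, 8}}, {{3, 5}});
      assert(merged.size() == 3 && merged[1] == std::make_pair(3, 5));
      return 0;
    }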
+
+
+void RegisterAllocator::ReuseSpillSlots() {
+  DCHECK(FLAG_turbo_reuse_spill_slots);
+
+  // Merge disjoint spill ranges
+  for (size_t i = 0; i < spill_ranges().size(); i++) {
+    auto range = spill_ranges()[i];
+    if (range->IsEmpty()) continue;
+    for (size_t j = i + 1; j < spill_ranges().size(); j++) {
+      auto other = spill_ranges()[j];
+      if (!other->IsEmpty()) {
+        range->TryMerge(other);
+      }
+    }
+  }
+
+  // Allocate slots for the merged spill ranges.
+  for (auto range : spill_ranges()) {
+    if (range->IsEmpty()) continue;
+    // Allocate a new operand referring to the spill slot.
+    auto kind = range->Kind();
+    int index = frame()->AllocateSpillSlot(kind == DOUBLE_REGISTERS);
+    auto op_kind = kind == DOUBLE_REGISTERS
+                       ? InstructionOperand::DOUBLE_STACK_SLOT
+                       : InstructionOperand::STACK_SLOT;
+    auto op = new (code_zone()) InstructionOperand(op_kind, index);
+    range->SetOperand(op);
+  }
+}
+
+
+void RegisterAllocator::CommitAssignment() {
+  for (auto range : live_ranges()) {
+    if (range == nullptr || range->IsEmpty()) continue;
+    // Register assignments were committed in set_assigned_register.
+    if (range->HasRegisterAssigned()) continue;
+    auto assigned = range->CreateAssignedOperand(code_zone());
+    range->ConvertUsesToOperand(assigned);
+    if (range->IsSpilled()) {
+      range->CommitSpillsAtDefinition(code(), assigned);
+    }
+  }
+}
+
+
+SpillRange* RegisterAllocator::AssignSpillRangeToLiveRange(LiveRange* range) {
+  DCHECK(FLAG_turbo_reuse_spill_slots);
+  auto spill_range = new (local_zone()) SpillRange(range, local_zone());
+  spill_ranges().push_back(spill_range);
+  return spill_range;
+}
+
+
+bool RegisterAllocator::TryReuseSpillForPhi(LiveRange* range) {
+  DCHECK(FLAG_turbo_reuse_spill_slots);
+  if (range->IsChild() || !range->is_phi()) return false;
+  DCHECK(range->HasNoSpillType());
+
+  auto lookup = phi_map_.find(range->id());
+  DCHECK(lookup != phi_map_.end());
+  auto phi = lookup->second.phi;
+  auto block = lookup->second.block;
+  // Count the number of spilled operands.
+  size_t spilled_count = 0;
+  LiveRange* first_op = nullptr;
+  for (size_t i = 0; i < phi->operands().size(); i++) {
+    int op = phi->operands()[i];
+    LiveRange* op_range = LiveRangeFor(op);
+    if (op_range->GetSpillRange() == nullptr) continue;
+    auto pred = code()->InstructionBlockAt(block->predecessors()[i]);
+    auto pred_end =
+        LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
+    while (op_range != nullptr && !op_range->CanCover(pred_end)) {
+      op_range = op_range->next();
+    }
+    if (op_range != nullptr && op_range->IsSpilled()) {
+      spilled_count++;
+      if (first_op == nullptr) {
+        first_op = op_range->TopLevel();
+      }
+    }
+  }
+
+  // Only continue if more than half of the operands are spilled.
+  if (spilled_count * 2 <= phi->operands().size()) {
+    return false;
+  }
+
+  // Try to merge the spilled operands and count the number of merged spilled
+  // operands.
+  DCHECK(first_op != nullptr);
+  auto first_op_spill = first_op->GetSpillRange();
+  size_t num_merged = 1;
+  for (size_t i = 1; i < phi->operands().size(); i++) {
+    int op = phi->operands()[i];
+    auto op_range = LiveRangeFor(op);
+    auto op_spill = op_range->GetSpillRange();
+    if (op_spill != nullptr &&
+        (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill))) {
+      num_merged++;
+    }
+  }
+
+  // Only continue if enough operands could be merged to the
+  // same spill slot.
+  if (num_merged * 2 <= phi->operands().size() ||
+      AreUseIntervalsIntersecting(first_op_spill->interval(),
+                                  range->first_interval())) {
+    return false;
+  }
+
+  // If the range does not need a register soon, spill it to the merged
+  // spill range.
+  auto next_pos = range->Start();
+  if (code()->IsGapAt(next_pos.InstructionIndex())) {
+    next_pos = next_pos.NextInstruction();
+  }
+  auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
+  if (pos == nullptr) {
+    auto spill_range = AssignSpillRangeToLiveRange(range->TopLevel());
+    CHECK(first_op_spill->TryMerge(spill_range));
+    Spill(range);
+    return true;
+  } else if (pos->pos().Value() > range->Start().NextInstruction().Value()) {
+    auto spill_range = AssignSpillRangeToLiveRange(range->TopLevel());
+    CHECK(first_op_spill->TryMerge(spill_range));
+    SpillBetween(range, range->Start(), pos->pos());
+    if (!AllocationOk()) return false;
+    DCHECK(UnhandledIsSorted());
+    return true;
+  }
+  return false;
+}
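
Both early-outs in TryReuseSpillForPhi are strict-majority tests written as
count * 2 <= total. Isolated, with hypothetical counts:

    #include <cassert>
    #include <cstddef>

    // TryReuseSpillForPhi proceeds only when strictly more than half of the
    // phi's operands are spilled, and later only when strictly more than half
    // could be merged into one spill range.
    bool StrictMajority(size_t count, size_t total) {
      return count * 2 > total;
    }

    int main() {
      assert(!StrictMajority(2, 4));  // exactly half is not enough
      assert(StrictMajority(3, 4));
      return 0;
    }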
+
+
+void RegisterAllocator::MeetRegisterConstraints(const InstructionBlock* block) {
   int start = block->first_instruction_index();
   int end = block->last_instruction_index();
   DCHECK_NE(-1, start);
   for (int i = start; i <= end; ++i) {
     if (code()->IsGapAt(i)) {
-      Instruction* instr = NULL;
-      Instruction* prev_instr = NULL;
+      Instruction* instr = nullptr;
+      Instruction* prev_instr = nullptr;
       if (i < end) instr = InstructionAt(i + 1);
       if (i > start) prev_instr = InstructionAt(i - 1);
       MeetConstraintsBetween(prev_instr, instr, i);
@@ -753,30 +1055,30 @@
 
 
 void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
-    BasicBlock* block) {
+    const InstructionBlock* block) {
   int end = block->last_instruction_index();
-  Instruction* last_instruction = InstructionAt(end);
+  auto last_instruction = InstructionAt(end);
   for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
-    InstructionOperand* output_operand = last_instruction->OutputAt(i);
+    auto output_operand = last_instruction->OutputAt(i);
     DCHECK(!output_operand->IsConstant());
-    UnallocatedOperand* output = UnallocatedOperand::cast(output_operand);
+    auto output = UnallocatedOperand::cast(output_operand);
     int output_vreg = output->virtual_register();
-    LiveRange* range = LiveRangeFor(output_vreg);
+    auto range = LiveRangeFor(output_vreg);
     bool assigned = false;
     if (output->HasFixedPolicy()) {
       AllocateFixed(output, -1, false);
        // This value is produced on the stack; we never need to spill it.
       if (output->IsStackSlot()) {
+        DCHECK(output->index() < 0);
         range->SetSpillOperand(output);
         range->SetSpillStartIndex(end);
         assigned = true;
       }
 
-      BasicBlock::Successors successors = block->successors();
-      for (BasicBlock::Successors::iterator succ = successors.begin();
-           succ != successors.end(); ++succ) {
-        DCHECK((*succ)->PredecessorCount() == 1);
-        int gap_index = (*succ)->first_instruction_index() + 1;
+      for (auto succ : block->successors()) {
+        const InstructionBlock* successor = code()->InstructionBlockAt(succ);
+        DCHECK(successor->PredecessorCount() == 1);
+        int gap_index = successor->first_instruction_index() + 1;
         DCHECK(code()->IsGapAt(gap_index));
 
         // Create an unconstrained operand for the same virtual register
@@ -785,26 +1087,17 @@
             new (code_zone()) UnallocatedOperand(UnallocatedOperand::ANY);
         output_copy->set_virtual_register(output_vreg);
 
-        code()->AddGapMove(gap_index, output, output_copy);
+        AddGapMove(gap_index, GapInstruction::START, output, output_copy);
       }
     }
 
     if (!assigned) {
-      BasicBlock::Successors successors = block->successors();
-      for (BasicBlock::Successors::iterator succ = successors.begin();
-           succ != successors.end(); ++succ) {
-        DCHECK((*succ)->PredecessorCount() == 1);
-        int gap_index = (*succ)->first_instruction_index() + 1;
+      for (auto succ : block->successors()) {
+        const InstructionBlock* successor = code()->InstructionBlockAt(succ);
+        DCHECK(successor->PredecessorCount() == 1);
+        int gap_index = successor->first_instruction_index() + 1;
+        range->SpillAtDefinition(local_zone(), gap_index, output);
         range->SetSpillStartIndex(gap_index);
-
-        // This move to spill operand is not a real use. Liveness analysis
-        // and splitting of live ranges do not account for it.
-        // Thus it should be inserted to a lifetime position corresponding to
-        // the instruction end.
-        GapInstruction* gap = code()->GapAt(gap_index);
-        ParallelMove* move =
-            gap->GetOrCreateParallelMove(GapInstruction::BEFORE, code_zone());
-        move->AddMove(output, range->GetSpillOperand(), code_zone());
       }
     }
   }
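
Where the deleted code built a BEFORE-position spill move into the gap immediately, the new code calls SpillAtDefinition, which evidently records the definition site so the actual move can be emitted later, once the final spill operand is known. A hedged sketch of such a deferral list (PendingSpill and DeferredSpills are hypothetical names, not the real LiveRange layout):

    #include <vector>

    struct InstructionOperand;  // opaque in this sketch

    // Hypothetical record of a spill move to be inserted at a gap later.
    struct PendingSpill {
      int gap_index;
      InstructionOperand* operand;
    };

    class DeferredSpills {
     public:
      // Called at definition sites instead of emitting a gap move eagerly.
      void SpillAtDefinition(int gap_index, InstructionOperand* op) {
        pending_.push_back({gap_index, op});
      }
      // Once the spill slot is chosen, every recorded site gets its move.
      template <typename EmitMove>
      void Commit(InstructionOperand* spill_slot, EmitMove emit) {
        for (const PendingSpill& p : pending_) {
          emit(p.gap_index, p.operand, spill_slot);
        }
      }
     private:
      std::vector<PendingSpill> pending_;
    };
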
@@ -814,10 +1107,10 @@
 void RegisterAllocator::MeetConstraintsBetween(Instruction* first,
                                                Instruction* second,
                                                int gap_index) {
-  if (first != NULL) {
+  if (first != nullptr) {
     // Handle fixed temporaries.
     for (size_t i = 0; i < first->TempCount(); i++) {
-      UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i));
+      auto temp = UnallocatedOperand::cast(first->TempAt(i));
       if (temp->HasFixedPolicy()) {
         AllocateFixed(temp, gap_index - 1, false);
       }
@@ -828,66 +1121,58 @@
       InstructionOperand* output = first->OutputAt(i);
       if (output->IsConstant()) {
         int output_vreg = output->index();
-        LiveRange* range = LiveRangeFor(output_vreg);
+        auto range = LiveRangeFor(output_vreg);
         range->SetSpillStartIndex(gap_index - 1);
         range->SetSpillOperand(output);
       } else {
-        UnallocatedOperand* first_output = UnallocatedOperand::cast(output);
-        LiveRange* range = LiveRangeFor(first_output->virtual_register());
+        auto first_output = UnallocatedOperand::cast(output);
+        auto range = LiveRangeFor(first_output->virtual_register());
         bool assigned = false;
         if (first_output->HasFixedPolicy()) {
-          UnallocatedOperand* output_copy =
-              first_output->CopyUnconstrained(code_zone());
+          auto output_copy = first_output->CopyUnconstrained(code_zone());
           bool is_tagged = HasTaggedValue(first_output->virtual_register());
           AllocateFixed(first_output, gap_index, is_tagged);
 
           // This value is produced on the stack; we never need to spill it.
           if (first_output->IsStackSlot()) {
+            DCHECK(first_output->index() < 0);
             range->SetSpillOperand(first_output);
             range->SetSpillStartIndex(gap_index - 1);
             assigned = true;
           }
-          code()->AddGapMove(gap_index, first_output, output_copy);
+          AddGapMove(gap_index, GapInstruction::START, first_output,
+                     output_copy);
         }
 
         // Make sure we add a gap move for spilling (if we have not done
         // so already).
         if (!assigned) {
+          range->SpillAtDefinition(local_zone(), gap_index, first_output);
           range->SetSpillStartIndex(gap_index);
-
-          // This move to spill operand is not a real use. Liveness analysis
-          // and splitting of live ranges do not account for it.
-          // Thus it should be inserted to a lifetime position corresponding to
-          // the instruction end.
-          GapInstruction* gap = code()->GapAt(gap_index);
-          ParallelMove* move =
-              gap->GetOrCreateParallelMove(GapInstruction::BEFORE, code_zone());
-          move->AddMove(first_output, range->GetSpillOperand(), code_zone());
         }
       }
     }
   }
 
-  if (second != NULL) {
+  if (second != nullptr) {
     // Handle fixed input operands of second instruction.
     for (size_t i = 0; i < second->InputCount(); i++) {
-      InstructionOperand* input = second->InputAt(i);
+      auto input = second->InputAt(i);
       if (input->IsImmediate()) continue;  // Ignore immediates.
-      UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
+      auto cur_input = UnallocatedOperand::cast(input);
       if (cur_input->HasFixedPolicy()) {
-        UnallocatedOperand* input_copy =
-            cur_input->CopyUnconstrained(code_zone());
+        auto input_copy = cur_input->CopyUnconstrained(code_zone());
         bool is_tagged = HasTaggedValue(cur_input->virtual_register());
         AllocateFixed(cur_input, gap_index + 1, is_tagged);
-        AddConstraintsGapMove(gap_index, input_copy, cur_input);
+        AddGapMove(gap_index, GapInstruction::END, input_copy, cur_input);
       }
     }
 
     // Handle "output same as input" for second instruction.
     for (size_t i = 0; i < second->OutputCount(); i++) {
-      InstructionOperand* output = second->OutputAt(i);
+      auto output = second->OutputAt(i);
       if (!output->IsUnallocated()) continue;
-      UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
+      auto second_output = UnallocatedOperand::cast(output);
       if (second_output->HasSameAsInputPolicy()) {
         DCHECK(i == 0);  // Only valid for first output.
         UnallocatedOperand* cur_input =
@@ -895,10 +1180,9 @@
         int output_vreg = second_output->virtual_register();
         int input_vreg = cur_input->virtual_register();
 
-        UnallocatedOperand* input_copy =
-            cur_input->CopyUnconstrained(code_zone());
+        auto input_copy = cur_input->CopyUnconstrained(code_zone());
         cur_input->set_virtual_register(second_output->virtual_register());
-        AddConstraintsGapMove(gap_index, input_copy, cur_input);
+        AddGapMove(gap_index, GapInstruction::END, input_copy, cur_input);
 
         if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
           int index = gap_index + 1;
@@ -922,7 +1206,7 @@
 
 bool RegisterAllocator::IsOutputRegisterOf(Instruction* instr, int index) {
   for (size_t i = 0; i < instr->OutputCount(); i++) {
-    InstructionOperand* output = instr->OutputAt(i);
+    auto output = instr->OutputAt(i);
     if (output->IsRegister() && output->index() == index) return true;
   }
   return false;
@@ -932,69 +1216,72 @@
 bool RegisterAllocator::IsOutputDoubleRegisterOf(Instruction* instr,
                                                  int index) {
   for (size_t i = 0; i < instr->OutputCount(); i++) {
-    InstructionOperand* output = instr->OutputAt(i);
+    auto output = instr->OutputAt(i);
     if (output->IsDoubleRegister() && output->index() == index) return true;
   }
   return false;
 }
 
 
-void RegisterAllocator::ProcessInstructions(BasicBlock* block,
+void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
                                             BitVector* live) {
   int block_start = block->first_instruction_index();
-
-  LifetimePosition block_start_position =
+  auto block_start_position =
       LifetimePosition::FromInstructionIndex(block_start);
 
   for (int index = block->last_instruction_index(); index >= block_start;
        index--) {
-    LifetimePosition curr_position =
-        LifetimePosition::FromInstructionIndex(index);
-
-    Instruction* instr = InstructionAt(index);
-    DCHECK(instr != NULL);
+    auto curr_position = LifetimePosition::FromInstructionIndex(index);
+    auto instr = InstructionAt(index);
+    DCHECK(instr != nullptr);
     if (instr->IsGapMoves()) {
       // Process the moves of the gap instruction, making their sources live.
-      GapInstruction* gap = code()->GapAt(index);
-
-      // TODO(titzer): no need to create the parallel move if it doesn't exist.
-      ParallelMove* move =
-          gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
-      const ZoneList<MoveOperands>* move_operands = move->move_operands();
-      for (int i = 0; i < move_operands->length(); ++i) {
-        MoveOperands* cur = &move_operands->at(i);
-        if (cur->IsIgnored()) continue;
-        InstructionOperand* from = cur->source();
-        InstructionOperand* to = cur->destination();
-        InstructionOperand* hint = to;
-        if (to->IsUnallocated()) {
-          int to_vreg = UnallocatedOperand::cast(to)->virtual_register();
-          LiveRange* to_range = LiveRangeFor(to_vreg);
-          if (to_range->is_phi()) {
-            if (to_range->is_non_loop_phi()) {
-              hint = to_range->current_hint_operand();
+      auto gap = code()->GapAt(index);
+      const GapInstruction::InnerPosition kPositions[] = {
+          GapInstruction::END, GapInstruction::START};
+      for (auto position : kPositions) {
+        auto move = gap->GetParallelMove(position);
+        if (move == nullptr) continue;
+        if (position == GapInstruction::END) {
+          curr_position = curr_position.InstructionEnd();
+        } else {
+          curr_position = curr_position.InstructionStart();
+        }
+        auto move_ops = move->move_operands();
+        for (auto cur = move_ops->begin(); cur != move_ops->end(); ++cur) {
+          auto from = cur->source();
+          auto to = cur->destination();
+          auto hint = to;
+          if (to->IsUnallocated()) {
+            int to_vreg = UnallocatedOperand::cast(to)->virtual_register();
+            auto to_range = LiveRangeFor(to_vreg);
+            if (to_range->is_phi()) {
+              DCHECK(!FLAG_turbo_delay_ssa_decon);
+              if (to_range->is_non_loop_phi()) {
+                hint = to_range->current_hint_operand();
+              }
+            } else {
+              if (live->Contains(to_vreg)) {
+                Define(curr_position, to, from);
+                live->Remove(to_vreg);
+              } else {
+                cur->Eliminate();
+                continue;
+              }
             }
           } else {
-            if (live->Contains(to_vreg)) {
-              Define(curr_position, to, from);
-              live->Remove(to_vreg);
-            } else {
-              cur->Eliminate();
-              continue;
-            }
+            Define(curr_position, to, from);
           }
-        } else {
-          Define(curr_position, to, from);
-        }
-        Use(block_start_position, curr_position, from, hint);
-        if (from->IsUnallocated()) {
-          live->Add(UnallocatedOperand::cast(from)->virtual_register());
+          Use(block_start_position, curr_position, from, hint);
+          if (from->IsUnallocated()) {
+            live->Add(UnallocatedOperand::cast(from)->virtual_register());
+          }
         }
       }
     } else {
       // Process output, inputs, and temps of this non-gap instruction.
       for (size_t i = 0; i < instr->OutputCount(); i++) {
-        InstructionOperand* output = instr->OutputAt(i);
+        auto output = instr->OutputAt(i);
         if (output->IsUnallocated()) {
           int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
           live->Remove(out_vreg);
@@ -1002,31 +1289,31 @@
           int out_vreg = output->index();
           live->Remove(out_vreg);
         }
-        Define(curr_position, output, NULL);
+        Define(curr_position, output, nullptr);
       }
 
       if (instr->ClobbersRegisters()) {
-        for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
+        for (int i = 0; i < config()->num_general_registers(); ++i) {
           if (!IsOutputRegisterOf(instr, i)) {
-            LiveRange* range = FixedLiveRangeFor(i);
+            auto range = FixedLiveRangeFor(i);
             range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
-                                  zone());
+                                  local_zone());
           }
         }
       }
 
       if (instr->ClobbersDoubleRegisters()) {
-        for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
+        for (int i = 0; i < config()->num_aliased_double_registers(); ++i) {
           if (!IsOutputDoubleRegisterOf(instr, i)) {
-            LiveRange* range = FixedDoubleLiveRangeFor(i);
+            auto range = FixedDoubleLiveRangeFor(i);
             range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
-                                  zone());
+                                  local_zone());
           }
         }
       }
 
       for (size_t i = 0; i < instr->InputCount(); i++) {
-        InstructionOperand* input = instr->InputAt(i);
+        auto input = instr->InputAt(i);
         if (input->IsImmediate()) continue;  // Ignore immediates.
         LifetimePosition use_pos;
         if (input->IsUnallocated() &&
@@ -1036,14 +1323,14 @@
           use_pos = curr_position.InstructionEnd();
         }
 
-        Use(block_start_position, use_pos, input, NULL);
+        Use(block_start_position, use_pos, input, nullptr);
         if (input->IsUnallocated()) {
           live->Add(UnallocatedOperand::cast(input)->virtual_register());
         }
       }
 
       for (size_t i = 0; i < instr->TempCount(); i++) {
-        InstructionOperand* temp = instr->TempAt(i);
+        auto temp = instr->TempAt(i);
         if (instr->ClobbersTemps()) {
           if (temp->IsRegister()) continue;
           if (temp->IsUnallocated()) {
@@ -1053,142 +1340,58 @@
             }
           }
         }
-        Use(block_start_position, curr_position.InstructionEnd(), temp, NULL);
-        Define(curr_position, temp, NULL);
+        Use(block_start_position, curr_position.InstructionEnd(), temp,
+            nullptr);
+        Define(curr_position, temp, nullptr);
       }
     }
   }
 }
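
ProcessInstructions scans each block backwards, so a value's interval is opened at its last use and closed at its definition; that is why outputs call Define (killing the vreg in `live`) while inputs call Use (reviving it). The underlying liveness recurrence, reduced to plain sets:

    #include <set>
    #include <vector>

    struct SimpleInstr {
      std::vector<int> defs;  // virtual registers written
      std::vector<int> uses;  // virtual registers read
    };

    // Returns the set of vregs live on entry to the block, given live-out.
    std::set<int> BackwardLiveness(const std::vector<SimpleInstr>& block,
                                   std::set<int> live) {
      for (auto it = block.rbegin(); it != block.rend(); ++it) {
        for (int d : it->defs) live.erase(d);   // Define(): kill at def.
        for (int u : it->uses) live.insert(u);  // Use(): live upwards.
      }
      return live;
    }
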
 
 
-void RegisterAllocator::ResolvePhis(BasicBlock* block) {
-  for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
-    Node* phi = *i;
-    if (phi->opcode() != IrOpcode::kPhi) continue;
-
-    UnallocatedOperand* phi_operand =
-        new (code_zone()) UnallocatedOperand(UnallocatedOperand::NONE);
-    phi_operand->set_virtual_register(phi->id());
-
-    int j = 0;
-    Node::Inputs inputs = phi->inputs();
-    for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
-         ++iter, ++j) {
-      Node* op = *iter;
-      // TODO(mstarzinger): Use a ValueInputIterator instead.
-      if (j >= block->PredecessorCount()) continue;
-      UnallocatedOperand* operand =
-          new (code_zone()) UnallocatedOperand(UnallocatedOperand::ANY);
-      operand->set_virtual_register(op->id());
-      BasicBlock* cur_block = block->PredecessorAt(j);
-      // The gap move must be added without any special processing as in
-      // the AddConstraintsGapMove.
-      code()->AddGapMove(cur_block->last_instruction_index() - 1, operand,
-                         phi_operand);
-
-      Instruction* branch = InstructionAt(cur_block->last_instruction_index());
-      DCHECK(!branch->HasPointerMap());
-      USE(branch);
+void RegisterAllocator::ResolvePhis(const InstructionBlock* block) {
+  for (auto phi : block->phis()) {
+    if (FLAG_turbo_reuse_spill_slots) {
+      auto res = phi_map_.insert(
+          std::make_pair(phi->virtual_register(), PhiMapValue(phi, block)));
+      DCHECK(res.second);
+      USE(res);
     }
-
-    LiveRange* live_range = LiveRangeFor(phi->id());
-    BlockStartInstruction* block_start = code()->GetBlockStart(block);
-    block_start->GetOrCreateParallelMove(GapInstruction::START, code_zone())
-        ->AddMove(phi_operand, live_range->GetSpillOperand(), code_zone());
-    live_range->SetSpillStartIndex(block->first_instruction_index());
-
+    auto output = phi->output();
+    int phi_vreg = phi->virtual_register();
+    if (!FLAG_turbo_delay_ssa_decon) {
+      for (size_t i = 0; i < phi->operands().size(); ++i) {
+        InstructionBlock* cur_block =
+            code()->InstructionBlockAt(block->predecessors()[i]);
+        AddGapMove(cur_block->last_instruction_index() - 1, GapInstruction::END,
+                   phi->inputs()[i], output);
+        DCHECK(!InstructionAt(cur_block->last_instruction_index())
+                    ->HasPointerMap());
+      }
+    }
+    auto live_range = LiveRangeFor(phi_vreg);
+    int gap_index = block->first_instruction_index();
+    live_range->SpillAtDefinition(local_zone(), gap_index, output);
+    live_range->SetSpillStartIndex(gap_index);
     // We use the phi-ness of some nodes in some later heuristics.
     live_range->set_is_phi(true);
-    if (!block->IsLoopHeader()) {
-      live_range->set_is_non_loop_phi(true);
-    }
+    live_range->set_is_non_loop_phi(!block->IsLoopHeader());
   }
 }
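
Unless FLAG_turbo_delay_ssa_decon is set, each phi is deconstructed on the spot: for the i-th predecessor, a gap move from the i-th phi input to the phi output is placed in the gap before that predecessor's final (branching) instruction, hence the last_instruction_index() - 1. Schematically (simplified down to bare vregs and move records):

    #include <cstddef>
    #include <vector>

    struct Move { int from_vreg; int to_vreg; int gap_index; };

    // Lower one phi: its output takes a different value along each edge.
    void LowerPhi(int phi_output_vreg, const std::vector<int>& inputs,
                  const std::vector<int>& pred_last_instruction_index,
                  std::vector<Move>* moves) {
      for (size_t i = 0; i < inputs.size(); ++i) {
        // The move lives in the gap just before the predecessor's branch.
        moves->push_back({inputs[i], phi_output_vreg,
                          pred_last_instruction_index[i] - 1});
      }
    }
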
 
 
-bool RegisterAllocator::Allocate() {
-  assigned_registers_ = new (code_zone())
-      BitVector(Register::NumAllocatableRegisters(), code_zone());
-  assigned_double_registers_ = new (code_zone())
-      BitVector(DoubleRegister::NumAllocatableRegisters(), code_zone());
-  MeetRegisterConstraints();
-  if (!AllocationOk()) return false;
-  ResolvePhis();
-  BuildLiveRanges();
-  AllocateGeneralRegisters();
-  if (!AllocationOk()) return false;
-  AllocateDoubleRegisters();
-  if (!AllocationOk()) return false;
-  PopulatePointerMaps();
-  ConnectRanges();
-  ResolveControlFlow();
-  code()->frame()->SetAllocatedRegisters(assigned_registers_);
-  code()->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
-  return true;
-}
-
-
 void RegisterAllocator::MeetRegisterConstraints() {
-  RegisterAllocatorPhase phase("L_Register constraints", this);
-  for (int i = 0; i < code()->BasicBlockCount(); ++i) {
-    MeetRegisterConstraints(code()->BlockAt(i));
-    if (!AllocationOk()) return;
+  for (auto block : code()->instruction_blocks()) {
+    MeetRegisterConstraints(block);
   }
 }
 
 
 void RegisterAllocator::ResolvePhis() {
-  RegisterAllocatorPhase phase("L_Resolve phis", this);
-
   // Process the blocks in reverse order.
-  for (int i = code()->BasicBlockCount() - 1; i >= 0; --i) {
-    ResolvePhis(code()->BlockAt(i));
-  }
-}
-
-
-void RegisterAllocator::ResolveControlFlow(LiveRange* range, BasicBlock* block,
-                                           BasicBlock* pred) {
-  LifetimePosition pred_end =
-      LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
-  LifetimePosition cur_start =
-      LifetimePosition::FromInstructionIndex(block->first_instruction_index());
-  LiveRange* pred_cover = NULL;
-  LiveRange* cur_cover = NULL;
-  LiveRange* cur_range = range;
-  while (cur_range != NULL && (cur_cover == NULL || pred_cover == NULL)) {
-    if (cur_range->CanCover(cur_start)) {
-      DCHECK(cur_cover == NULL);
-      cur_cover = cur_range;
-    }
-    if (cur_range->CanCover(pred_end)) {
-      DCHECK(pred_cover == NULL);
-      pred_cover = cur_range;
-    }
-    cur_range = cur_range->next();
-  }
-
-  if (cur_cover->IsSpilled()) return;
-  DCHECK(pred_cover != NULL && cur_cover != NULL);
-  if (pred_cover != cur_cover) {
-    InstructionOperand* pred_op =
-        pred_cover->CreateAssignedOperand(code_zone());
-    InstructionOperand* cur_op = cur_cover->CreateAssignedOperand(code_zone());
-    if (!pred_op->Equals(cur_op)) {
-      GapInstruction* gap = NULL;
-      if (block->PredecessorCount() == 1) {
-        gap = code()->GapAt(block->first_instruction_index());
-      } else {
-        DCHECK(pred->SuccessorCount() == 1);
-        gap = GetLastGap(pred);
-
-        Instruction* branch = InstructionAt(pred->last_instruction_index());
-        DCHECK(!branch->HasPointerMap());
-        USE(branch);
-      }
-      gap->GetOrCreateParallelMove(GapInstruction::START, code_zone())
-          ->AddMove(pred_op, cur_op, code_zone());
-    }
+  for (auto i = code()->instruction_blocks().rbegin();
+       i != code()->instruction_blocks().rend(); ++i) {
+    ResolvePhis(*i);
   }
 }
 
@@ -1197,7 +1400,7 @@
     LifetimePosition pos) {
   int index = pos.InstructionIndex();
   if (code()->IsGapAt(index)) {
-    GapInstruction* gap = code()->GapAt(index);
+    auto gap = code()->GapAt(index);
     return gap->GetOrCreateParallelMove(
         pos.IsInstructionStart() ? GapInstruction::START : GapInstruction::END,
         code_zone());
@@ -1209,40 +1412,35 @@
 }
 
 
-BasicBlock* RegisterAllocator::GetBlock(LifetimePosition pos) {
-  return code()->GetBasicBlock(pos.InstructionIndex());
+const InstructionBlock* RegisterAllocator::GetInstructionBlock(
+    LifetimePosition pos) {
+  return code()->GetInstructionBlock(pos.InstructionIndex());
 }
 
 
 void RegisterAllocator::ConnectRanges() {
-  RegisterAllocatorPhase phase("L_Connect ranges", this);
-  for (int i = 0; i < live_ranges()->length(); ++i) {
-    LiveRange* first_range = live_ranges()->at(i);
-    if (first_range == NULL || first_range->parent() != NULL) continue;
-
-    LiveRange* second_range = first_range->next();
-    while (second_range != NULL) {
-      LifetimePosition pos = second_range->Start();
-
+  for (auto first_range : live_ranges()) {
+    if (first_range == nullptr || first_range->IsChild()) continue;
+    auto second_range = first_range->next();
+    while (second_range != nullptr) {
+      auto pos = second_range->Start();
       if (!second_range->IsSpilled()) {
         // Add a gap move if the two live ranges touch and there is no block
         // boundary.
         if (first_range->End().Value() == pos.Value()) {
           bool should_insert = true;
           if (IsBlockBoundary(pos)) {
-            should_insert = CanEagerlyResolveControlFlow(GetBlock(pos));
+            should_insert =
+                CanEagerlyResolveControlFlow(GetInstructionBlock(pos));
           }
           if (should_insert) {
-            ParallelMove* move = GetConnectingParallelMove(pos);
-            InstructionOperand* prev_operand =
-                first_range->CreateAssignedOperand(code_zone());
-            InstructionOperand* cur_operand =
-                second_range->CreateAssignedOperand(code_zone());
+            auto move = GetConnectingParallelMove(pos);
+            auto prev_operand = first_range->CreateAssignedOperand(code_zone());
+            auto cur_operand = second_range->CreateAssignedOperand(code_zone());
             move->AddMove(prev_operand, cur_operand, code_zone());
           }
         }
       }
-
       first_range = second_range;
       second_range = second_range->next();
     }
@@ -1250,27 +1448,186 @@
 }
 
 
-bool RegisterAllocator::CanEagerlyResolveControlFlow(BasicBlock* block) const {
+bool RegisterAllocator::CanEagerlyResolveControlFlow(
+    const InstructionBlock* block) const {
   if (block->PredecessorCount() != 1) return false;
-  return block->PredecessorAt(0)->rpo_number_ == block->rpo_number_ - 1;
+  return block->predecessors()[0].IsNext(block->rpo_number());
 }
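
The rewritten predicate replaces direct rpo_number_ arithmetic with RpoNumber::IsNext: an edge can be resolved eagerly only when the lone predecessor immediately precedes the block in reverse post-order, i.e. the edge is fall-through. The check is presumably just:

    // Presumed shape of the check: the predecessor's RPO index is exactly
    // one less than this block's index.
    struct Rpo {
      int index;
      bool IsNext(const Rpo& other) const { return index + 1 == other.index; }
    };
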
 
 
+namespace {
+
+class LiveRangeBound {
+ public:
+  explicit LiveRangeBound(const LiveRange* range)
+      : range_(range), start_(range->Start()), end_(range->End()) {
+    DCHECK(!range->IsEmpty());
+  }
+
+  bool CanCover(LifetimePosition position) {
+    return start_.Value() <= position.Value() &&
+           position.Value() < end_.Value();
+  }
+
+  const LiveRange* const range_;
+  const LifetimePosition start_;
+  const LifetimePosition end_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
+};
+
+
+struct FindResult {
+  const LiveRange* cur_cover_;
+  const LiveRange* pred_cover_;
+};
+
+
+class LiveRangeBoundArray {
+ public:
+  LiveRangeBoundArray() : length_(0), start_(nullptr) {}
+
+  bool ShouldInitialize() { return start_ == nullptr; }
+
+  void Initialize(Zone* zone, const LiveRange* const range) {
+    size_t length = 0;
+    for (auto i = range; i != nullptr; i = i->next()) length++;
+    start_ = zone->NewArray<LiveRangeBound>(static_cast<int>(length));
+    length_ = length;
+    auto curr = start_;
+    for (auto i = range; i != nullptr; i = i->next(), ++curr) {
+      new (curr) LiveRangeBound(i);
+    }
+  }
+
+  LiveRangeBound* Find(const LifetimePosition position) const {
+    size_t left_index = 0;
+    size_t right_index = length_;
+    while (true) {
+      size_t current_index = left_index + (right_index - left_index) / 2;
+      DCHECK(right_index > current_index);
+      auto bound = &start_[current_index];
+      if (bound->start_.Value() <= position.Value()) {
+        if (position.Value() < bound->end_.Value()) return bound;
+        DCHECK(left_index < current_index);
+        left_index = current_index;
+      } else {
+        right_index = current_index;
+      }
+    }
+  }
+
+  LiveRangeBound* FindPred(const InstructionBlock* pred) {
+    auto pred_end =
+        LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
+    return Find(pred_end);
+  }
+
+  LiveRangeBound* FindSucc(const InstructionBlock* succ) {
+    auto succ_start =
+        LifetimePosition::FromInstructionIndex(succ->first_instruction_index());
+    return Find(succ_start);
+  }
+
+  void Find(const InstructionBlock* block, const InstructionBlock* pred,
+            FindResult* result) const {
+    auto pred_end =
+        LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
+    auto bound = Find(pred_end);
+    result->pred_cover_ = bound->range_;
+    auto cur_start = LifetimePosition::FromInstructionIndex(
+        block->first_instruction_index());
+    // Common case.
+    if (bound->CanCover(cur_start)) {
+      result->cur_cover_ = bound->range_;
+      return;
+    }
+    result->cur_cover_ = Find(cur_start)->range_;
+    DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
+  }
+
+ private:
+  size_t length_;
+  LiveRangeBound* start_;
+
+  DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
+};
+
+
+class LiveRangeFinder {
+ public:
+  explicit LiveRangeFinder(const RegisterAllocator& allocator)
+      : allocator_(allocator),
+        bounds_length_(static_cast<int>(allocator.live_ranges().size())),
+        bounds_(allocator.local_zone()->NewArray<LiveRangeBoundArray>(
+            bounds_length_)) {
+    for (int i = 0; i < bounds_length_; ++i) {
+      new (&bounds_[i]) LiveRangeBoundArray();
+    }
+  }
+
+  LiveRangeBoundArray* ArrayFor(int operand_index) {
+    DCHECK(operand_index < bounds_length_);
+    auto range = allocator_.live_ranges()[operand_index];
+    DCHECK(range != nullptr && !range->IsEmpty());
+    auto array = &bounds_[operand_index];
+    if (array->ShouldInitialize()) {
+      array->Initialize(allocator_.local_zone(), range);
+    }
+    return array;
+  }
+
+ private:
+  const RegisterAllocator& allocator_;
+  const int bounds_length_;
+  LiveRangeBoundArray* const bounds_;
+
+  DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
+};
+
+}  // namespace
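
This helper trio replaces the linked-list walk the deleted ResolveControlFlow used: LiveRangeBoundArray flattens a range and its children into a sorted array of [start, end) bounds, built lazily by LiveRangeFinder the first time a vreg is queried, so each lookup is a binary search. A standalone version of that search, which like the original assumes the queried position is covered by some piece:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct Bound { int start; int end; };  // half-open [start, end)

    // Binary search for the piece covering pos, mirroring
    // LiveRangeBoundArray::Find: the caller guarantees a covering piece
    // exists, so the loop always terminates by returning.
    const Bound* FindCovering(const std::vector<Bound>& bounds, int pos) {
      size_t left = 0, right = bounds.size();
      while (true) {
        size_t mid = left + (right - left) / 2;
        const Bound& b = bounds[mid];
        if (b.start <= pos) {
          if (pos < b.end) return &b;
          assert(left < mid);  // mirrors the DCHECK above
          left = mid;
        } else {
          right = mid;
        }
      }
    }

Since Find runs once per live-in value per predecessor edge, the array turns control-flow resolution from repeated linear walks into O(log n) probes.
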
+
+
 void RegisterAllocator::ResolveControlFlow() {
-  RegisterAllocatorPhase phase("L_Resolve control flow", this);
-  for (int block_id = 1; block_id < code()->BasicBlockCount(); ++block_id) {
-    BasicBlock* block = code()->BlockAt(block_id);
+  // Lazily linearize live ranges in memory for fast lookup.
+  LiveRangeFinder finder(*this);
+  for (auto block : code()->instruction_blocks()) {
     if (CanEagerlyResolveControlFlow(block)) continue;
-    BitVector* live = live_in_sets_[block->rpo_number_];
+    if (FLAG_turbo_delay_ssa_decon) {
+      // Resolve phis.
+      for (auto phi : block->phis()) {
+        auto* block_bound =
+            finder.ArrayFor(phi->virtual_register())->FindSucc(block);
+        auto phi_output =
+            block_bound->range_->CreateAssignedOperand(code_zone());
+        phi->output()->ConvertTo(phi_output->kind(), phi_output->index());
+        size_t pred_index = 0;
+        for (auto pred : block->predecessors()) {
+          const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
+          auto* pred_bound = finder.ArrayFor(phi->operands()[pred_index])
+                                 ->FindPred(pred_block);
+          auto pred_op = pred_bound->range_->CreateAssignedOperand(code_zone());
+          phi->inputs()[pred_index] = pred_op;
+          ResolveControlFlow(block, phi_output, pred_block, pred_op);
+          pred_index++;
+        }
+      }
+    }
+    auto live = live_in_sets_[block->rpo_number().ToInt()];
     BitVector::Iterator iterator(live);
     while (!iterator.Done()) {
-      int operand_index = iterator.Current();
-      BasicBlock::Predecessors predecessors = block->predecessors();
-      for (BasicBlock::Predecessors::iterator i = predecessors.begin();
-           i != predecessors.end(); ++i) {
-        BasicBlock* cur = *i;
-        LiveRange* cur_range = LiveRangeFor(operand_index);
-        ResolveControlFlow(cur_range, block, cur);
+      auto* array = finder.ArrayFor(iterator.Current());
+      for (auto pred : block->predecessors()) {
+        FindResult result;
+        const auto* pred_block = code()->InstructionBlockAt(pred);
+        array->Find(block, pred_block, &result);
+        if (result.cur_cover_ == result.pred_cover_ ||
+            result.cur_cover_->IsSpilled())
+          continue;
+        auto pred_op = result.pred_cover_->CreateAssignedOperand(code_zone());
+        auto cur_op = result.cur_cover_->CreateAssignedOperand(code_zone());
+        ResolveControlFlow(block, cur_op, pred_block, pred_op);
       }
       iterator.Advance();
     }
@@ -1278,14 +1635,33 @@
 }
 
 
+void RegisterAllocator::ResolveControlFlow(const InstructionBlock* block,
+                                           InstructionOperand* cur_op,
+                                           const InstructionBlock* pred,
+                                           InstructionOperand* pred_op) {
+  if (pred_op->Equals(cur_op)) return;
+  int gap_index;
+  GapInstruction::InnerPosition position;
+  if (block->PredecessorCount() == 1) {
+    gap_index = block->first_instruction_index();
+    position = GapInstruction::START;
+  } else {
+    DCHECK(pred->SuccessorCount() == 1);
+    DCHECK(!InstructionAt(pred->last_instruction_index())->HasPointerMap());
+    gap_index = pred->last_instruction_index() - 1;
+    position = GapInstruction::END;
+  }
+  AddGapMove(gap_index, position, pred_op, cur_op);
+}
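
The placement rule ensures the connecting move executes exactly once on the pred -> block edge: at the successor's START gap when the block has a single predecessor, otherwise in the predecessor's END gap (safe because, with critical edges already split, that predecessor can have only one successor, as the DCHECK asserts). In schematic form:

    struct EdgeMovePos { int gap_index; bool at_block_start; };

    // Decide where a pred->block resolution move can be inserted safely.
    // Assumes critical edges were split earlier, as the DCHECKs imply.
    EdgeMovePos ChooseEdgeMovePos(int block_first_index, int block_pred_count,
                                  int pred_last_index, int pred_succ_count) {
      if (block_pred_count == 1) {
        return {block_first_index, true};   // GapInstruction::START
      }
      // With multiple predecessors, the edge's only safe slot is in the
      // predecessor, which must then have a single successor.
      (void)pred_succ_count;  // DCHECK'd to be 1 in the real code.
      return {pred_last_index - 1, false};  // GapInstruction::END
    }
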
+
+
 void RegisterAllocator::BuildLiveRanges() {
-  RegisterAllocatorPhase phase("L_Build live ranges", this);
-  InitializeLivenessAnalysis();
   // Process the blocks in reverse order.
-  for (int block_id = code()->BasicBlockCount() - 1; block_id >= 0;
+  for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
        --block_id) {
-    BasicBlock* block = code()->BlockAt(block_id);
-    BitVector* live = ComputeLiveOut(block);
+    auto block =
+        code()->InstructionBlockAt(BasicBlock::RpoNumber::FromInt(block_id));
+    auto live = ComputeLiveOut(block);
     // Initially consider all live_out values live for the entire block. We
     // will shorten these intervals if necessary.
     AddInitialIntervals(block, live);
@@ -1294,36 +1670,32 @@
     // live values.
     ProcessInstructions(block, live);
     // All phi output operands are killed by this block.
-    for (BasicBlock::const_iterator i = block->begin(); i != block->end();
-         ++i) {
-      Node* phi = *i;
-      if (phi->opcode() != IrOpcode::kPhi) continue;
-
+    for (auto phi : block->phis()) {
       // The live range interval already ends at the first instruction of the
       // block.
-      live->Remove(phi->id());
-
-      InstructionOperand* hint = NULL;
-      InstructionOperand* phi_operand = NULL;
-      GapInstruction* gap = GetLastGap(block->PredecessorAt(0));
-
-      // TODO(titzer): no need to create the parallel move if it doesn't exist.
-      ParallelMove* move =
-          gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
-      for (int j = 0; j < move->move_operands()->length(); ++j) {
-        InstructionOperand* to = move->move_operands()->at(j).destination();
-        if (to->IsUnallocated() &&
-            UnallocatedOperand::cast(to)->virtual_register() == phi->id()) {
-          hint = move->move_operands()->at(j).source();
-          phi_operand = to;
-          break;
+      int phi_vreg = phi->virtual_register();
+      live->Remove(phi_vreg);
+      if (!FLAG_turbo_delay_ssa_decon) {
+        InstructionOperand* hint = nullptr;
+        InstructionOperand* phi_operand = nullptr;
+        auto gap =
+            GetLastGap(code()->InstructionBlockAt(block->predecessors()[0]));
+        auto move =
+            gap->GetOrCreateParallelMove(GapInstruction::END, code_zone());
+        for (int j = 0; j < move->move_operands()->length(); ++j) {
+          auto to = move->move_operands()->at(j).destination();
+          if (to->IsUnallocated() &&
+              UnallocatedOperand::cast(to)->virtual_register() == phi_vreg) {
+            hint = move->move_operands()->at(j).source();
+            phi_operand = to;
+            break;
+          }
         }
+        DCHECK(hint != nullptr);
+        auto block_start = LifetimePosition::FromInstructionIndex(
+            block->first_instruction_index());
+        Define(block_start, phi_operand, hint);
       }
-      DCHECK(hint != NULL);
-
-      LifetimePosition block_start = LifetimePosition::FromInstructionIndex(
-          block->first_instruction_index());
-      Define(block_start, phi_operand, hint);
     }
 
     // Now live is live_in for this block except not including values live
@@ -1334,71 +1706,39 @@
       // Add a live range stretching from the first loop instruction to the last
       // for each value live on entry to the header.
       BitVector::Iterator iterator(live);
-      LifetimePosition start = LifetimePosition::FromInstructionIndex(
+      auto start = LifetimePosition::FromInstructionIndex(
           block->first_instruction_index());
-      int end_index =
-          code()->BlockAt(block->loop_end_)->last_instruction_index();
-      LifetimePosition end =
-          LifetimePosition::FromInstructionIndex(end_index).NextInstruction();
+      auto end = LifetimePosition::FromInstructionIndex(
+                     code()->LastLoopInstructionIndex(block)).NextInstruction();
       while (!iterator.Done()) {
         int operand_index = iterator.Current();
-        LiveRange* range = LiveRangeFor(operand_index);
-        range->EnsureInterval(start, end, zone());
+        auto range = LiveRangeFor(operand_index);
+        range->EnsureInterval(start, end, local_zone());
         iterator.Advance();
       }
-
       // Insert all values into the live in sets of all blocks in the loop.
-      for (int i = block->rpo_number_ + 1; i < block->loop_end_; ++i) {
+      for (int i = block->rpo_number().ToInt() + 1;
+           i < block->loop_end().ToInt(); ++i) {
         live_in_sets_[i]->Union(*live);
       }
     }
-
-#ifdef DEBUG
-    if (block_id == 0) {
-      BitVector::Iterator iterator(live);
-      bool found = false;
-      while (!iterator.Done()) {
-        found = true;
-        int operand_index = iterator.Current();
-        PrintF("Register allocator error: live v%d reached first block.\n",
-               operand_index);
-        LiveRange* range = LiveRangeFor(operand_index);
-        PrintF("  (first use is at %d)\n", range->first_pos()->pos().Value());
-        CompilationInfo* info = code()->linkage()->info();
-        if (info->IsStub()) {
-          if (info->code_stub() == NULL) {
-            PrintF("\n");
-          } else {
-            CodeStub::Major major_key = info->code_stub()->MajorKey();
-            PrintF("  (function: %s)\n", CodeStub::MajorName(major_key, false));
-          }
-        } else {
-          DCHECK(info->IsOptimizing());
-          AllowHandleDereference allow_deref;
-          PrintF("  (function: %s)\n",
-                 info->function()->debug_name()->ToCString().get());
-        }
-        iterator.Advance();
-      }
-      DCHECK(!found);
-    }
-#endif
   }
 
-  for (int i = 0; i < live_ranges_.length(); ++i) {
-    if (live_ranges_[i] != NULL) {
-      live_ranges_[i]->kind_ = RequiredRegisterKind(live_ranges_[i]->id());
-
-      // TODO(bmeurer): This is a horrible hack to make sure that for constant
-      // live ranges, every use requires the constant to be in a register.
-      // Without this hack, all uses with "any" policy would get the constant
-      // operand assigned.
-      LiveRange* range = live_ranges_[i];
-      if (range->HasAllocatedSpillOperand() &&
-          range->GetSpillOperand()->IsConstant()) {
-        for (UsePosition* pos = range->first_pos(); pos != NULL;
-             pos = pos->next_) {
-          pos->register_beneficial_ = true;
+  for (auto range : live_ranges()) {
+    if (range == nullptr) continue;
+    range->kind_ = RequiredRegisterKind(range->id());
+    // TODO(bmeurer): This is a horrible hack to make sure that for constant
+    // live ranges, every use requires the constant to be in a register.
+    // Without this hack, all uses with "any" policy would get the constant
+    // operand assigned.
+    if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
+      for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next_) {
+        pos->register_beneficial_ = true;
+        // TODO(dcarney): should the else case assert requires_reg_ == false?
+        // Can't mark phis as needing a register.
+        if (!code()
+                 ->InstructionAt(pos->pos().InstructionIndex())
+                 ->IsGapMoves()) {
           pos->requires_reg_ = true;
         }
       }
@@ -1407,12 +1747,30 @@
 }
 
 
+bool RegisterAllocator::ExistsUseWithoutDefinition() {
+  bool found = false;
+  BitVector::Iterator iterator(live_in_sets_[0]);
+  while (!iterator.Done()) {
+    found = true;
+    int operand_index = iterator.Current();
+    PrintF("Register allocator error: live v%d reached first block.\n",
+           operand_index);
+    LiveRange* range = LiveRangeFor(operand_index);
+    PrintF("  (first use is at %d)\n", range->first_pos()->pos().Value());
+    if (debug_name() == nullptr) {
+      PrintF("\n");
+    } else {
+      PrintF("  (function: %s)\n", debug_name());
+    }
+    iterator.Advance();
+  }
+  return found;
+}
+
+
 bool RegisterAllocator::SafePointsAreInOrder() const {
   int safe_point = 0;
-  const PointerMapDeque* pointer_maps = code()->pointer_maps();
-  for (PointerMapDeque::const_iterator it = pointer_maps->begin();
-       it != pointer_maps->end(); ++it) {
-    PointerMap* map = *it;
+  for (auto map : *code()->pointer_maps()) {
     if (safe_point > map->instruction_position()) return false;
     safe_point = map->instruction_position();
   }
@@ -1421,20 +1779,17 @@
 
 
 void RegisterAllocator::PopulatePointerMaps() {
-  RegisterAllocatorPhase phase("L_Populate pointer maps", this);
-
   DCHECK(SafePointsAreInOrder());
 
   // Iterate over all safe point positions and record a pointer
   // for all spilled live ranges at this point.
   int last_range_start = 0;
-  const PointerMapDeque* pointer_maps = code()->pointer_maps();
+  auto pointer_maps = code()->pointer_maps();
   PointerMapDeque::const_iterator first_it = pointer_maps->begin();
-  for (int range_idx = 0; range_idx < live_ranges()->length(); ++range_idx) {
-    LiveRange* range = live_ranges()->at(range_idx);
-    if (range == NULL) continue;
+  for (LiveRange* range : live_ranges()) {
+    if (range == nullptr) continue;
     // Iterate over the first parts of multi-part live ranges.
-    if (range->parent() != NULL) continue;
+    if (range->IsChild()) continue;
     // Skip non-reference values.
     if (!HasTaggedValue(range->id())) continue;
     // Skip empty live ranges.
@@ -1443,8 +1798,8 @@
     // Find the extent of the range and its children.
     int start = range->Start().InstructionIndex();
     int end = 0;
-    for (LiveRange* cur = range; cur != NULL; cur = cur->next()) {
-      LifetimePosition this_end = cur->End();
+    for (auto cur = range; cur != nullptr; cur = cur->next()) {
+      auto this_end = cur->End();
       if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex();
       DCHECK(cur->Start().InstructionIndex() >= start);
     }
@@ -1457,14 +1812,13 @@
     // Step across all the safe points that are before the start of this range,
     // recording how far we step in order to save doing this for the next range.
     for (; first_it != pointer_maps->end(); ++first_it) {
-      PointerMap* map = *first_it;
+      auto map = *first_it;
       if (map->instruction_position() >= start) break;
     }
 
     // Step through the safe points to see whether they are in the range.
-    for (PointerMapDeque::const_iterator it = first_it;
-         it != pointer_maps->end(); ++it) {
-      PointerMap* map = *it;
+    for (auto it = first_it; it != pointer_maps->end(); ++it) {
+      auto map = *it;
       int safe_point = map->instruction_position();
 
       // The safe points are sorted so we can stop searching here.
@@ -1472,17 +1826,16 @@
 
       // Advance to the next active range that covers the current
       // safe point position.
-      LifetimePosition safe_point_pos =
-          LifetimePosition::FromInstructionIndex(safe_point);
-      LiveRange* cur = range;
-      while (cur != NULL && !cur->Covers(safe_point_pos)) {
+      auto safe_point_pos = LifetimePosition::FromInstructionIndex(safe_point);
+      auto cur = range;
+      while (cur != nullptr && !cur->Covers(safe_point_pos)) {
         cur = cur->next();
       }
-      if (cur == NULL) continue;
+      if (cur == nullptr) continue;
 
       // Check if the live range is spilled and the safe point is after
       // the spill position.
-      if (range->HasAllocatedSpillOperand() &&
+      if (range->HasSpillOperand() &&
           safe_point >= range->spill_start_index() &&
           !range->GetSpillOperand()->IsConstant()) {
         TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
@@ -1505,76 +1858,73 @@
 
 
 void RegisterAllocator::AllocateGeneralRegisters() {
-  RegisterAllocatorPhase phase("L_Allocate general registers", this);
-  num_registers_ = Register::NumAllocatableRegisters();
+  num_registers_ = config()->num_general_registers();
   mode_ = GENERAL_REGISTERS;
   AllocateRegisters();
 }
 
 
 void RegisterAllocator::AllocateDoubleRegisters() {
-  RegisterAllocatorPhase phase("L_Allocate double registers", this);
-  num_registers_ = DoubleRegister::NumAllocatableRegisters();
+  num_registers_ = config()->num_aliased_double_registers();
   mode_ = DOUBLE_REGISTERS;
   AllocateRegisters();
 }
 
 
 void RegisterAllocator::AllocateRegisters() {
-  DCHECK(unhandled_live_ranges_.is_empty());
+  DCHECK(unhandled_live_ranges().empty());
 
-  for (int i = 0; i < live_ranges_.length(); ++i) {
-    if (live_ranges_[i] != NULL) {
-      if (live_ranges_[i]->Kind() == mode_) {
-        AddToUnhandledUnsorted(live_ranges_[i]);
-      }
+  for (auto range : live_ranges()) {
+    if (range == nullptr) continue;
+    if (range->Kind() == mode_) {
+      AddToUnhandledUnsorted(range);
     }
   }
   SortUnhandled();
   DCHECK(UnhandledIsSorted());
 
-  DCHECK(reusable_slots_.is_empty());
-  DCHECK(active_live_ranges_.is_empty());
-  DCHECK(inactive_live_ranges_.is_empty());
+  DCHECK(reusable_slots().empty());
+  DCHECK(active_live_ranges().empty());
+  DCHECK(inactive_live_ranges().empty());
 
   if (mode_ == DOUBLE_REGISTERS) {
-    for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
-      LiveRange* current = fixed_double_live_ranges_.at(i);
-      if (current != NULL) {
+    for (int i = 0; i < config()->num_aliased_double_registers(); ++i) {
+      auto current = fixed_double_live_ranges()[i];
+      if (current != nullptr) {
         AddToInactive(current);
       }
     }
   } else {
     DCHECK(mode_ == GENERAL_REGISTERS);
-    for (int i = 0; i < fixed_live_ranges_.length(); ++i) {
-      LiveRange* current = fixed_live_ranges_.at(i);
-      if (current != NULL) {
+    for (auto current : fixed_live_ranges()) {
+      if (current != nullptr) {
         AddToInactive(current);
       }
     }
   }
 
-  while (!unhandled_live_ranges_.is_empty()) {
+  while (!unhandled_live_ranges().empty()) {
     DCHECK(UnhandledIsSorted());
-    LiveRange* current = unhandled_live_ranges_.RemoveLast();
+    auto current = unhandled_live_ranges().back();
+    unhandled_live_ranges().pop_back();
     DCHECK(UnhandledIsSorted());
-    LifetimePosition position = current->Start();
+    auto position = current->Start();
 #ifdef DEBUG
     allocation_finger_ = position;
 #endif
     TraceAlloc("Processing interval %d start=%d\n", current->id(),
                position.Value());
 
-    if (current->HasAllocatedSpillOperand()) {
+    if (!current->HasNoSpillType()) {
       TraceAlloc("Live range %d already has a spill operand\n", current->id());
-      LifetimePosition next_pos = position;
+      auto next_pos = position;
       if (code()->IsGapAt(next_pos.InstructionIndex())) {
         next_pos = next_pos.NextInstruction();
       }
-      UsePosition* pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
+      auto pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
       // If the range already has a spill operand and it doesn't need a
       // register immediately, split it and spill the first part of the range.
-      if (pos == NULL) {
+      if (pos == nullptr) {
         Spill(current);
         continue;
       } else if (pos->pos().Value() >
@@ -1588,8 +1938,15 @@
       }
     }
 
-    for (int i = 0; i < active_live_ranges_.length(); ++i) {
-      LiveRange* cur_active = active_live_ranges_.at(i);
+    if (FLAG_turbo_reuse_spill_slots) {
+      if (TryReuseSpillForPhi(current)) {
+        continue;
+      }
+      if (!AllocationOk()) return;
+    }
+
+    for (size_t i = 0; i < active_live_ranges().size(); ++i) {
+      auto cur_active = active_live_ranges()[i];
       if (cur_active->End().Value() <= position.Value()) {
         ActiveToHandled(cur_active);
         --i;  // The live range was removed from the list of active live ranges.
@@ -1599,8 +1956,8 @@
       }
     }
 
-    for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
-      LiveRange* cur_inactive = inactive_live_ranges_.at(i);
+    for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
+      auto cur_inactive = inactive_live_ranges()[i];
       if (cur_inactive->End().Value() <= position.Value()) {
         InactiveToHandled(cur_inactive);
         --i;  // Live range was removed from the list of inactive live ranges.
@@ -1623,27 +1980,17 @@
     }
   }
 
-  reusable_slots_.Rewind(0);
-  active_live_ranges_.Rewind(0);
-  inactive_live_ranges_.Rewind(0);
+  reusable_slots().clear();
+  active_live_ranges().clear();
+  inactive_live_ranges().clear();
 }
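
Taken together this is the standard linear-scan driver: unhandled ranges sorted by start position, a sweep that retires finished active ranges and parks or reactivates inactive ones, then an attempt to allocate the current range. A stripped-down skeleton of the same control structure (no fixed ranges, splitting, or spilling):

    #include <algorithm>
    #include <vector>

    struct Range { int start; int end; int reg = -1; };

    // Skeleton of linear scan: give each range the first register free
    // over its whole extent; no splitting or spilling in this sketch.
    void LinearScan(std::vector<Range*>& ranges, int num_registers) {
      std::sort(ranges.begin(), ranges.end(),
                [](const Range* a, const Range* b) {
                  return a->start < b->start;
                });
      std::vector<Range*> active;
      for (Range* cur : ranges) {
        // Retire ranges that ended before the sweep position.
        active.erase(
            std::remove_if(active.begin(), active.end(),
                           [&](Range* r) { return r->end <= cur->start; }),
            active.end());
        std::vector<bool> taken(num_registers, false);
        for (Range* r : active) taken[r->reg] = true;
        for (int reg = 0; reg < num_registers; ++reg) {
          if (!taken[reg]) { cur->reg = reg; break; }
        }
        if (cur->reg != -1) active.push_back(cur);  // else: would spill.
      }
    }
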
 
 
 const char* RegisterAllocator::RegisterName(int allocation_index) {
   if (mode_ == GENERAL_REGISTERS) {
-    return Register::AllocationIndexToString(allocation_index);
+    return config()->general_register_name(allocation_index);
   } else {
-    return DoubleRegister::AllocationIndexToString(allocation_index);
-  }
-}
-
-
-void RegisterAllocator::TraceAlloc(const char* msg, ...) {
-  if (FLAG_trace_alloc) {
-    va_list arguments;
-    va_start(arguments, msg);
-    base::OS::VPrint(msg, arguments);
-    va_end(arguments);
+    return config()->double_register_name(allocation_index);
   }
 }
 
@@ -1662,49 +2009,49 @@
 
 void RegisterAllocator::AddToActive(LiveRange* range) {
   TraceAlloc("Add live range %d to active\n", range->id());
-  active_live_ranges_.Add(range, zone());
+  active_live_ranges().push_back(range);
 }
 
 
 void RegisterAllocator::AddToInactive(LiveRange* range) {
   TraceAlloc("Add live range %d to inactive\n", range->id());
-  inactive_live_ranges_.Add(range, zone());
+  inactive_live_ranges().push_back(range);
 }
 
 
 void RegisterAllocator::AddToUnhandledSorted(LiveRange* range) {
-  if (range == NULL || range->IsEmpty()) return;
+  if (range == nullptr || range->IsEmpty()) return;
   DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
   DCHECK(allocation_finger_.Value() <= range->Start().Value());
-  for (int i = unhandled_live_ranges_.length() - 1; i >= 0; --i) {
-    LiveRange* cur_range = unhandled_live_ranges_.at(i);
-    if (range->ShouldBeAllocatedBefore(cur_range)) {
-      TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
-      unhandled_live_ranges_.InsertAt(i + 1, range, zone());
-      DCHECK(UnhandledIsSorted());
-      return;
-    }
+  for (int i = static_cast<int>(unhandled_live_ranges().size() - 1); i >= 0;
+       --i) {
+    auto cur_range = unhandled_live_ranges().at(i);
+    if (!range->ShouldBeAllocatedBefore(cur_range)) continue;
+    TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
+    auto it = unhandled_live_ranges().begin() + (i + 1);
+    unhandled_live_ranges().insert(it, range);
+    DCHECK(UnhandledIsSorted());
+    return;
   }
   TraceAlloc("Add live range %d to unhandled at start\n", range->id());
-  unhandled_live_ranges_.InsertAt(0, range, zone());
+  unhandled_live_ranges().insert(unhandled_live_ranges().begin(), range);
   DCHECK(UnhandledIsSorted());
 }
 
 
 void RegisterAllocator::AddToUnhandledUnsorted(LiveRange* range) {
-  if (range == NULL || range->IsEmpty()) return;
+  if (range == nullptr || range->IsEmpty()) return;
   DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
   TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id());
-  unhandled_live_ranges_.Add(range, zone());
+  unhandled_live_ranges().push_back(range);
 }
 
 
-static int UnhandledSortHelper(LiveRange* const* a, LiveRange* const* b) {
-  DCHECK(!(*a)->ShouldBeAllocatedBefore(*b) ||
-         !(*b)->ShouldBeAllocatedBefore(*a));
-  if ((*a)->ShouldBeAllocatedBefore(*b)) return 1;
-  if ((*b)->ShouldBeAllocatedBefore(*a)) return -1;
-  return (*a)->id() - (*b)->id();
+static bool UnhandledSortHelper(LiveRange* a, LiveRange* b) {
+  DCHECK(!a->ShouldBeAllocatedBefore(b) || !b->ShouldBeAllocatedBefore(a));
+  if (a->ShouldBeAllocatedBefore(b)) return false;
+  if (b->ShouldBeAllocatedBefore(a)) return true;
+  return a->id() < b->id();
 }
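
The old ZoneList sort took a 3-way comparator; std::sort needs a strict weak ordering, and the predicate is deliberately inverted so that ranges that should be allocated first compare as "later" and land at the back of the vector, where the allocation loop pops them cheaply with pop_back(). A small demonstration of that ordering, using start position as a stand-in for ShouldBeAllocatedBefore:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct R { int start; int id; };

    // "Allocate before" here just means an earlier start; the sort
    // places such ranges at the back of the vector.
    bool SortsLater(const R* a, const R* b) {
      if (a->start < b->start) return false;  // a allocated first
      if (b->start < a->start) return true;
      return a->id < b->id;
    }

    int main() {
      R r1{2, 0}, r2{5, 1}, r3{8, 2};
      std::vector<R*> v{&r2, &r3, &r1};
      std::sort(v.begin(), v.end(), SortsLater);
      assert(v.back() == &r1);  // earliest start is popped first
      return 0;
    }
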
 
 
@@ -1713,15 +2060,16 @@
 // algorithm because it is efficient to remove elements from the end.
 void RegisterAllocator::SortUnhandled() {
   TraceAlloc("Sort unhandled\n");
-  unhandled_live_ranges_.Sort(&UnhandledSortHelper);
+  std::sort(unhandled_live_ranges().begin(), unhandled_live_ranges().end(),
+            &UnhandledSortHelper);
 }
 
 
 bool RegisterAllocator::UnhandledIsSorted() {
-  int len = unhandled_live_ranges_.length();
-  for (int i = 1; i < len; i++) {
-    LiveRange* a = unhandled_live_ranges_.at(i - 1);
-    LiveRange* b = unhandled_live_ranges_.at(i);
+  size_t len = unhandled_live_ranges().size();
+  for (size_t i = 1; i < len; i++) {
+    auto a = unhandled_live_ranges().at(i - 1);
+    auto b = unhandled_live_ranges().at(i);
     if (a->Start().Value() < b->Start().Value()) return false;
   }
   return true;
@@ -1729,95 +2077,81 @@
 
 
 void RegisterAllocator::FreeSpillSlot(LiveRange* range) {
+  DCHECK(!FLAG_turbo_reuse_spill_slots);
   // Check that we are the last range.
-  if (range->next() != NULL) return;
-
-  if (!range->TopLevel()->HasAllocatedSpillOperand()) return;
-
-  InstructionOperand* spill_operand = range->TopLevel()->GetSpillOperand();
+  if (range->next() != nullptr) return;
+  if (!range->TopLevel()->HasSpillOperand()) return;
+  auto spill_operand = range->TopLevel()->GetSpillOperand();
   if (spill_operand->IsConstant()) return;
   if (spill_operand->index() >= 0) {
-    reusable_slots_.Add(range, zone());
+    reusable_slots().push_back(range);
   }
 }
 
 
 InstructionOperand* RegisterAllocator::TryReuseSpillSlot(LiveRange* range) {
-  if (reusable_slots_.is_empty()) return NULL;
-  if (reusable_slots_.first()->End().Value() >
+  DCHECK(!FLAG_turbo_reuse_spill_slots);
+  if (reusable_slots().empty()) return nullptr;
+  if (reusable_slots().front()->End().Value() >
       range->TopLevel()->Start().Value()) {
-    return NULL;
+    return nullptr;
   }
-  InstructionOperand* result =
-      reusable_slots_.first()->TopLevel()->GetSpillOperand();
-  reusable_slots_.Remove(0);
+  auto result = reusable_slots().front()->TopLevel()->GetSpillOperand();
+  reusable_slots().erase(reusable_slots().begin());
   return result;
 }
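
The legacy reuse scheme (active when FLAG_turbo_reuse_spill_slots is off) is a FIFO free list: a slot is released once the final piece of its owning range dies, and only the oldest released slot is ever considered, and only if its owner ended before the requesting range begins. A self-contained rendering:

    #include <deque>

    struct Slot { int index; };
    struct DeadRange { int end; Slot slot; };

    class SlotRecycler {
     public:
      // FreeSpillSlot: record the slot once its owner's last piece dies.
      void Release(int range_end, Slot slot) {
        free_.push_back({range_end, slot});
      }
      // TryReuseSpillSlot: only the oldest entry is considered, and only
      // if its owner ended before the new range starts.
      bool TryReuse(int range_start, Slot* out) {
        if (free_.empty() || free_.front().end > range_start) return false;
        *out = free_.front().slot;
        free_.pop_front();
        return true;
      }
     private:
      std::deque<DeadRange> free_;
    };
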
 
 
 void RegisterAllocator::ActiveToHandled(LiveRange* range) {
-  DCHECK(active_live_ranges_.Contains(range));
-  active_live_ranges_.RemoveElement(range);
+  RemoveElement(&active_live_ranges(), range);
   TraceAlloc("Moving live range %d from active to handled\n", range->id());
-  FreeSpillSlot(range);
+  if (!FLAG_turbo_reuse_spill_slots) FreeSpillSlot(range);
 }
 
 
 void RegisterAllocator::ActiveToInactive(LiveRange* range) {
-  DCHECK(active_live_ranges_.Contains(range));
-  active_live_ranges_.RemoveElement(range);
-  inactive_live_ranges_.Add(range, zone());
+  RemoveElement(&active_live_ranges(), range);
+  inactive_live_ranges().push_back(range);
   TraceAlloc("Moving live range %d from active to inactive\n", range->id());
 }
 
 
 void RegisterAllocator::InactiveToHandled(LiveRange* range) {
-  DCHECK(inactive_live_ranges_.Contains(range));
-  inactive_live_ranges_.RemoveElement(range);
+  RemoveElement(&inactive_live_ranges(), range);
   TraceAlloc("Moving live range %d from inactive to handled\n", range->id());
-  FreeSpillSlot(range);
+  if (!FLAG_turbo_reuse_spill_slots) FreeSpillSlot(range);
 }
 
 
 void RegisterAllocator::InactiveToActive(LiveRange* range) {
-  DCHECK(inactive_live_ranges_.Contains(range));
-  inactive_live_ranges_.RemoveElement(range);
-  active_live_ranges_.Add(range, zone());
+  RemoveElement(&inactive_live_ranges(), range);
+  active_live_ranges().push_back(range);
   TraceAlloc("Moving live range %d from inactive to active\n", range->id());
 }
 
 
-// TryAllocateFreeReg and AllocateBlockedReg assume this
-// when allocating local arrays.
-STATIC_ASSERT(DoubleRegister::kMaxNumAllocatableRegisters >=
-              Register::kMaxNumAllocatableRegisters);
-
-
 bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) {
-  LifetimePosition free_until_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+  LifetimePosition free_until_pos[RegisterConfiguration::kMaxDoubleRegisters];
 
   for (int i = 0; i < num_registers_; i++) {
     free_until_pos[i] = LifetimePosition::MaxPosition();
   }
 
-  for (int i = 0; i < active_live_ranges_.length(); ++i) {
-    LiveRange* cur_active = active_live_ranges_.at(i);
+  for (auto cur_active : active_live_ranges()) {
     free_until_pos[cur_active->assigned_register()] =
         LifetimePosition::FromInstructionIndex(0);
   }
 
-  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
-    LiveRange* cur_inactive = inactive_live_ranges_.at(i);
+  for (auto cur_inactive : inactive_live_ranges()) {
     DCHECK(cur_inactive->End().Value() > current->Start().Value());
-    LifetimePosition next_intersection =
-        cur_inactive->FirstIntersection(current);
+    auto next_intersection = cur_inactive->FirstIntersection(current);
     if (!next_intersection.IsValid()) continue;
     int cur_reg = cur_inactive->assigned_register();
     free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
   }
 
-  InstructionOperand* hint = current->FirstHint();
-  if (hint != NULL && (hint->IsRegister() || hint->IsDoubleRegister())) {
+  auto hint = current->FirstHint();
+  if (hint != nullptr && (hint->IsRegister() || hint->IsDoubleRegister())) {
     int register_index = hint->index();
     TraceAlloc(
         "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
@@ -1841,7 +2175,7 @@
     }
   }
 
-  LifetimePosition pos = free_until_pos[reg];
+  auto pos = free_until_pos[reg];
 
   if (pos.Value() <= current->Start().Value()) {
     // All registers are blocked.
@@ -1851,12 +2185,11 @@
   if (pos.Value() < current->End().Value()) {
     // Register reg is available at the range start but becomes blocked before
     // the range end. Split current at the position where it becomes blocked.
-    LiveRange* tail = SplitRangeAt(current, pos);
+    auto tail = SplitRangeAt(current, pos);
     if (!AllocationOk()) return false;
     AddToUnhandledSorted(tail);
   }
 
-
   // Register reg is available at the range start and is free until
   // the range end.
   DCHECK(pos.Value() >= current->End().Value());
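
The surrounding function computes, per register, the first position at which the register ceases to be free: position 0 for registers pinned by active ranges, and the first intersection with the current range for inactive ones. It then picks the register that stays free longest; if that outlasts the current range the assignment is final, otherwise the range is split at the blocking position, as just above. The core computation, reduced:

    #include <vector>

    struct ActiveInfo { int reg; };
    struct InactiveInfo { int reg; int first_intersection; };  // -1 if none

    // Returns the best register and how long it stays free; kMax stands
    // in for LifetimePosition::MaxPosition().
    void ComputeFreeUntil(const std::vector<ActiveInfo>& active,
                          const std::vector<InactiveInfo>& inactive,
                          int num_registers, int* best_reg, int* free_until) {
      const int kMax = 1 << 30;
      std::vector<int> pos(num_registers, kMax);
      for (const ActiveInfo& a : active) pos[a.reg] = 0;
      for (const InactiveInfo& i : inactive) {
        if (i.first_intersection < 0) continue;  // never intersects current
        if (i.first_intersection < pos[i.reg]) pos[i.reg] = i.first_intersection;
      }
      *best_reg = 0;
      for (int r = 1; r < num_registers; ++r) {
        if (pos[r] > pos[*best_reg]) *best_reg = r;
      }
      *free_until = pos[*best_reg];
    }
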
@@ -1869,32 +2202,30 @@
 
 
 void RegisterAllocator::AllocateBlockedReg(LiveRange* current) {
-  UsePosition* register_use = current->NextRegisterPosition(current->Start());
-  if (register_use == NULL) {
+  auto register_use = current->NextRegisterPosition(current->Start());
+  if (register_use == nullptr) {
     // There is no use in the current live range that requires a register.
     // We can just spill it.
     Spill(current);
     return;
   }
 
-
-  LifetimePosition use_pos[DoubleRegister::kMaxNumAllocatableRegisters];
-  LifetimePosition block_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+  LifetimePosition use_pos[RegisterConfiguration::kMaxDoubleRegisters];
+  LifetimePosition block_pos[RegisterConfiguration::kMaxDoubleRegisters];
 
   for (int i = 0; i < num_registers_; i++) {
     use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
   }
 
-  for (int i = 0; i < active_live_ranges_.length(); ++i) {
-    LiveRange* range = active_live_ranges_[i];
+  for (auto range : active_live_ranges()) {
     int cur_reg = range->assigned_register();
     if (range->IsFixed() || !range->CanBeSpilled(current->Start())) {
       block_pos[cur_reg] = use_pos[cur_reg] =
           LifetimePosition::FromInstructionIndex(0);
     } else {
-      UsePosition* next_use =
+      auto next_use =
           range->NextUsePositionRegisterIsBeneficial(current->Start());
-      if (next_use == NULL) {
+      if (next_use == nullptr) {
         use_pos[cur_reg] = range->End();
       } else {
         use_pos[cur_reg] = next_use->pos();
@@ -1902,10 +2233,9 @@
     }
   }
 
-  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
-    LiveRange* range = inactive_live_ranges_.at(i);
+  for (auto range : inactive_live_ranges()) {
     DCHECK(range->End().Value() > current->Start().Value());
-    LifetimePosition next_intersection = range->FirstIntersection(current);
+    auto next_intersection = range->FirstIntersection(current);
     if (!next_intersection.IsValid()) continue;
     int cur_reg = range->assigned_register();
     if (range->IsFixed()) {
@@ -1923,7 +2253,7 @@
     }
   }
 
-  LifetimePosition pos = use_pos[reg];
+  auto pos = use_pos[reg];
 
   if (pos.Value() < register_use->pos().Value()) {
     // All registers are blocked before the first use that requires a register.
@@ -1954,32 +2284,40 @@
 }
 
 
+static const InstructionBlock* GetContainingLoop(
+    const InstructionSequence* sequence, const InstructionBlock* block) {
+  auto index = block->loop_header();
+  if (!index.IsValid()) return nullptr;
+  return sequence->InstructionBlockAt(index);
+}
+
+
 LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
     LiveRange* range, LifetimePosition pos) {
-  BasicBlock* block = GetBlock(pos.InstructionStart());
-  BasicBlock* loop_header =
-      block->IsLoopHeader() ? block : code()->GetContainingLoop(block);
+  auto block = GetInstructionBlock(pos.InstructionStart());
+  auto loop_header =
+      block->IsLoopHeader() ? block : GetContainingLoop(code(), block);
 
-  if (loop_header == NULL) return pos;
+  if (loop_header == nullptr) return pos;
 
-  UsePosition* prev_use = range->PreviousUsePositionRegisterIsBeneficial(pos);
+  auto prev_use = range->PreviousUsePositionRegisterIsBeneficial(pos);
 
-  while (loop_header != NULL) {
+  while (loop_header != nullptr) {
     // We are going to spill the live range inside the loop.
     // If possible, try to move the spilling position backwards to the loop
     // header. This will reduce the number of memory moves on the back edge.
-    LifetimePosition loop_start = LifetimePosition::FromInstructionIndex(
+    auto loop_start = LifetimePosition::FromInstructionIndex(
         loop_header->first_instruction_index());
 
     if (range->Covers(loop_start)) {
-      if (prev_use == NULL || prev_use->pos().Value() < loop_start.Value()) {
+      if (prev_use == nullptr || prev_use->pos().Value() < loop_start.Value()) {
         // No register beneficial use inside the loop before the pos.
         pos = loop_start;
       }
     }
 
     // Try hoisting out to an outer loop.
-    loop_header = code()->GetContainingLoop(loop_header);
+    loop_header = GetContainingLoop(code(), loop_header);
   }
 
   return pos;
@@ -1989,13 +2327,13 @@
 void RegisterAllocator::SplitAndSpillIntersecting(LiveRange* current) {
   DCHECK(current->HasRegisterAssigned());
   int reg = current->assigned_register();
-  LifetimePosition split_pos = current->Start();
-  for (int i = 0; i < active_live_ranges_.length(); ++i) {
-    LiveRange* range = active_live_ranges_[i];
+  auto split_pos = current->Start();
+  for (size_t i = 0; i < active_live_ranges().size(); ++i) {
+    auto range = active_live_ranges()[i];
     if (range->assigned_register() == reg) {
-      UsePosition* next_pos = range->NextRegisterPosition(current->Start());
-      LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
-      if (next_pos == NULL) {
+      auto next_pos = range->NextRegisterPosition(current->Start());
+      auto spill_pos = FindOptimalSpillingPos(range, split_pos);
+      if (next_pos == nullptr) {
         SpillAfter(range, spill_pos);
       } else {
         // When spilling between spill_pos and next_pos ensure that the range
@@ -2014,14 +2352,14 @@
     }
   }
 
-  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
-    LiveRange* range = inactive_live_ranges_[i];
+  for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
+    auto range = inactive_live_ranges()[i];
     DCHECK(range->End().Value() > current->Start().Value());
     if (range->assigned_register() == reg && !range->IsFixed()) {
       LifetimePosition next_intersection = range->FirstIntersection(current);
       if (next_intersection.IsValid()) {
         UsePosition* next_pos = range->NextRegisterPosition(current->Start());
-        if (next_pos == NULL) {
+        if (next_pos == nullptr) {
           SpillAfter(range, split_pos);
         } else {
           next_intersection = Min(next_intersection, next_pos->pos());
@@ -2055,9 +2393,9 @@
          !InstructionAt(pos.InstructionIndex())->IsControl());
 
   int vreg = GetVirtualRegister();
-  if (!AllocationOk()) return NULL;
-  LiveRange* result = LiveRangeFor(vreg);
-  range->SplitAt(pos, result, zone());
+  if (!AllocationOk()) return nullptr;
+  auto result = LiveRangeFor(vreg);
+  range->SplitAt(pos, result, local_zone());
   return result;
 }
 
@@ -2069,7 +2407,7 @@
   TraceAlloc("Splitting live range %d in position between [%d, %d]\n",
              range->id(), start.Value(), end.Value());
 
-  LifetimePosition split_pos = FindOptimalSplitPos(start, end);
+  auto split_pos = FindOptimalSplitPos(start, end);
   DCHECK(split_pos.Value() >= start.Value());
   return SplitRangeAt(range, split_pos);
 }
@@ -2084,8 +2422,8 @@
   // We have no choice.
   if (start_instr == end_instr) return end;
 
-  BasicBlock* start_block = GetBlock(start);
-  BasicBlock* end_block = GetBlock(end);
+  auto start_block = GetInstructionBlock(start);
+  auto end_block = GetInstructionBlock(end);
 
   if (end_block == start_block) {
     // The interval is split in the same basic block. Split at the latest
@@ -2093,13 +2431,13 @@
     return end;
   }
 
-  BasicBlock* block = end_block;
+  auto block = end_block;
   // Find header of outermost loop.
   // TODO(titzer): fix redundancy below.
-  while (code()->GetContainingLoop(block) != NULL &&
-         code()->GetContainingLoop(block)->rpo_number_ >
-             start_block->rpo_number_) {
-    block = code()->GetContainingLoop(block);
+  while (GetContainingLoop(code(), block) != nullptr &&
+         GetContainingLoop(code(), block)->rpo_number().ToInt() >
+             start_block->rpo_number().ToInt()) {
+    block = GetContainingLoop(code(), block);
   }
 
   // We did not find any suitable outer loop. Split at the latest possible
@@ -2112,7 +2450,7 @@
 
 
 void RegisterAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
-  LiveRange* second_part = SplitRangeAt(range, pos);
+  auto second_part = SplitRangeAt(range, pos);
   if (!AllocationOk()) return;
   Spill(second_part);
 }
@@ -2129,14 +2467,14 @@
                                           LifetimePosition until,
                                           LifetimePosition end) {
   CHECK(start.Value() < end.Value());
-  LiveRange* second_part = SplitRangeAt(range, start);
+  auto second_part = SplitRangeAt(range, start);
   if (!AllocationOk()) return;
 
   if (second_part->Start().Value() < end.Value()) {
     // The split result intersects with [start, end[.
     // Split it at a position between ]start+1, end[, spill the middle part,
     // and put the rest back on the unhandled list.
-    LiveRange* third_part = SplitBetween(
+    auto third_part = SplitBetween(
         second_part, Max(second_part->Start().InstructionEnd(), until),
         end.PrevInstruction().InstructionEnd());
     if (!AllocationOk()) return;
@@ -2156,24 +2494,25 @@
 void RegisterAllocator::Spill(LiveRange* range) {
   DCHECK(!range->IsSpilled());
   TraceAlloc("Spilling live range %d\n", range->id());
-  LiveRange* first = range->TopLevel();
-
-  if (!first->HasAllocatedSpillOperand()) {
-    InstructionOperand* op = TryReuseSpillSlot(range);
-    if (op == NULL) {
-      // Allocate a new operand referring to the spill slot.
-      RegisterKind kind = range->Kind();
-      int index = code()->frame()->AllocateSpillSlot(kind == DOUBLE_REGISTERS);
-      if (kind == DOUBLE_REGISTERS) {
-        op = DoubleStackSlotOperand::Create(index, zone());
-      } else {
-        DCHECK(kind == GENERAL_REGISTERS);
-        op = StackSlotOperand::Create(index, zone());
+  auto first = range->TopLevel();
+  if (first->HasNoSpillType()) {
+    if (FLAG_turbo_reuse_spill_slots) {
+      AssignSpillRangeToLiveRange(first);
+    } else {
+      auto op = TryReuseSpillSlot(range);
+      if (op == nullptr) {
+        // Allocate a new operand referring to the spill slot.
+        RegisterKind kind = range->Kind();
+        int index = frame()->AllocateSpillSlot(kind == DOUBLE_REGISTERS);
+        auto op_kind = kind == DOUBLE_REGISTERS
+                           ? InstructionOperand::DOUBLE_STACK_SLOT
+                           : InstructionOperand::STACK_SLOT;
+        op = new (code_zone()) InstructionOperand(op_kind, index);
       }
+      first->SetSpillOperand(op);
     }
-    first->SetSpillOperand(op);
   }
-  range->MakeSpilled(code_zone());
+  range->MakeSpilled();
 }
 
 
@@ -2184,9 +2523,8 @@
 
 
 void RegisterAllocator::Verify() const {
-  for (int i = 0; i < live_ranges()->length(); ++i) {
-    LiveRange* current = live_ranges()->at(i);
-    if (current != NULL) current->Verify();
+  for (auto current : live_ranges()) {
+    if (current != nullptr) current->Verify();
   }
 }
 
@@ -2205,28 +2543,6 @@
   range->set_assigned_register(reg, code_zone());
 }
 
-
-RegisterAllocatorPhase::RegisterAllocatorPhase(const char* name,
-                                               RegisterAllocator* allocator)
-    : CompilationPhase(name, allocator->code()->linkage()->info()),
-      allocator_(allocator) {
-  if (FLAG_turbo_stats) {
-    allocator_zone_start_allocation_size_ =
-        allocator->zone()->allocation_size();
-  }
-}
-
-
-RegisterAllocatorPhase::~RegisterAllocatorPhase() {
-  if (FLAG_turbo_stats) {
-    unsigned size = allocator_->zone()->allocation_size() -
-                    allocator_zone_start_allocation_size_;
-    isolate()->GetTStatistics()->SaveTiming(name(), base::TimeDelta(), size);
-  }
-#ifdef DEBUG
-  if (allocator_ != NULL) allocator_->Verify();
-#endif
-}
-}
-}
-}  // namespace v8::internal::compiler
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
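
TryAllocateFreeReg above is the core of the linear-scan scheme: compute, for
every register, the position up to which it stays free given the active and
inactive ranges, then either take the best register outright, split the
current range where that register becomes blocked, or fall through to
AllocateBlockedReg. A standalone sketch of that selection step, using plain
ints in place of LifetimePosition and invented positions (not V8 code):

    #include <algorithm>
    #include <cstdio>

    int main() {
      const int kNumRegisters = 4;
      const int kMaxPosition = 1 << 30;
      int free_until_pos[kNumRegisters];
      for (int i = 0; i < kNumRegisters; ++i) free_until_pos[i] = kMaxPosition;

      // A register held by an active range is not free at all.
      free_until_pos[1] = 0;
      // An inactive range using r2 first intersects the current range at 14.
      free_until_pos[2] = std::min(free_until_pos[2], 14);

      // Pick the register that stays free the longest.
      int reg = 0;
      for (int i = 1; i < kNumRegisters; ++i) {
        if (free_until_pos[i] > free_until_pos[reg]) reg = i;
      }

      const int current_start = 2, current_end = 10;
      if (free_until_pos[reg] <= current_start) {
        std::printf("all registers blocked; fall back to AllocateBlockedReg\n");
      } else if (free_until_pos[reg] < current_end) {
        std::printf("assign r%d and split at position %d\n", reg,
                    free_until_pos[reg]);
      } else {
        std::printf("assign r%d for the whole range\n", reg);
      }
      return 0;
    }
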
diff --git a/src/compiler/register-allocator.h b/src/compiler/register-allocator.h
index 881ce37..b17837b 100644
--- a/src/compiler/register-allocator.h
+++ b/src/compiler/register-allocator.h
@@ -5,23 +5,11 @@
 #ifndef V8_REGISTER_ALLOCATOR_H_
 #define V8_REGISTER_ALLOCATOR_H_
 
-#include "src/allocation.h"
 #include "src/compiler/instruction.h"
-#include "src/compiler/node.h"
-#include "src/compiler/schedule.h"
-#include "src/macro-assembler.h"
-#include "src/zone.h"
+#include "src/zone-containers.h"
 
 namespace v8 {
 namespace internal {
-
-// Forward declarations.
-class BitVector;
-class InstructionOperand;
-class UnallocatedOperand;
-class ParallelMove;
-class PointerMap;
-
 namespace compiler {
 
 enum RegisterKind {
@@ -35,7 +23,7 @@
 // each instruction there are exactly two lifetime positions: the beginning and
 // the end of the instruction. Lifetime positions for different instructions are
 // disjoint.
-class LifetimePosition {
+class LifetimePosition FINAL {
  public:
   // Return the lifetime position that corresponds to the beginning of
   // the instruction with the given index.
@@ -114,10 +102,10 @@
 
 
 // Representation of the non-empty interval [start,end[.
-class UseInterval : public ZoneObject {
+class UseInterval FINAL : public ZoneObject {
  public:
   UseInterval(LifetimePosition start, LifetimePosition end)
-      : start_(start), end_(end), next_(NULL) {
+      : start_(start), end_(end), next_(nullptr) {
     DCHECK(start.Value() < end.Value());
   }
 
@@ -147,16 +135,20 @@
   LifetimePosition start_;
   LifetimePosition end_;
   UseInterval* next_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(UseInterval);
 };
 
+
 // Representation of a use position.
-class UsePosition : public ZoneObject {
+class UsePosition FINAL : public ZoneObject {
  public:
   UsePosition(LifetimePosition pos, InstructionOperand* operand,
               InstructionOperand* hint);
 
   InstructionOperand* operand() const { return operand_; }
-  bool HasOperand() const { return operand_ != NULL; }
+  bool HasOperand() const { return operand_ != nullptr; }
 
   InstructionOperand* hint() const { return hint_; }
   bool HasHint() const;
@@ -172,13 +164,18 @@
   InstructionOperand* const hint_;
   LifetimePosition const pos_;
   UsePosition* next_;
-  bool requires_reg_;
-  bool register_beneficial_;
+  bool requires_reg_ : 1;
+  bool register_beneficial_ : 1;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(UsePosition);
 };
 
+class SpillRange;
+
 // Representation of SSA values' live ranges as a collection of (continuous)
 // intervals over the instruction ordering.
-class LiveRange : public ZoneObject {
+class LiveRange FINAL : public ZoneObject {
  public:
   static const int kInvalidAssignment = 0x7fffffff;
 
@@ -187,17 +184,20 @@
   UseInterval* first_interval() const { return first_interval_; }
   UsePosition* first_pos() const { return first_pos_; }
   LiveRange* parent() const { return parent_; }
-  LiveRange* TopLevel() { return (parent_ == NULL) ? this : parent_; }
+  LiveRange* TopLevel() { return (parent_ == nullptr) ? this : parent_; }
+  const LiveRange* TopLevel() const {
+    return (parent_ == nullptr) ? this : parent_;
+  }
   LiveRange* next() const { return next_; }
-  bool IsChild() const { return parent() != NULL; }
+  bool IsChild() const { return parent() != nullptr; }
   int id() const { return id_; }
   bool IsFixed() const { return id_ < 0; }
-  bool IsEmpty() const { return first_interval() == NULL; }
-  InstructionOperand* CreateAssignedOperand(Zone* zone);
+  bool IsEmpty() const { return first_interval() == nullptr; }
+  InstructionOperand* CreateAssignedOperand(Zone* zone) const;
   int assigned_register() const { return assigned_register_; }
   int spill_start_index() const { return spill_start_index_; }
   void set_assigned_register(int reg, Zone* zone);
-  void MakeSpilled(Zone* zone);
+  void MakeSpilled();
   bool is_phi() const { return is_phi_; }
   void set_is_phi(bool is_phi) { is_phi_ = is_phi; }
   bool is_non_loop_phi() const { return is_non_loop_phi_; }
@@ -245,9 +245,9 @@
   }
   InstructionOperand* FirstHint() const {
     UsePosition* pos = first_pos_;
-    while (pos != NULL && !pos->HasHint()) pos = pos->next();
-    if (pos != NULL) return pos->hint();
-    return NULL;
+    while (pos != nullptr && !pos->HasHint()) pos = pos->next();
+    if (pos != nullptr) return pos->hint();
+    return nullptr;
   }
 
   LifetimePosition Start() const {
@@ -260,9 +260,27 @@
     return last_interval_->end();
   }
 
-  bool HasAllocatedSpillOperand() const;
-  InstructionOperand* GetSpillOperand() const { return spill_operand_; }
+  enum class SpillType { kNoSpillType, kSpillOperand, kSpillRange };
+  SpillType spill_type() const { return spill_type_; }
+  InstructionOperand* GetSpillOperand() const {
+    return spill_type_ == SpillType::kSpillOperand ? spill_operand_ : nullptr;
+  }
+  SpillRange* GetSpillRange() const {
+    return spill_type_ == SpillType::kSpillRange ? spill_range_ : nullptr;
+  }
+  bool HasNoSpillType() const { return spill_type_ == SpillType::kNoSpillType; }
+  bool HasSpillOperand() const {
+    return spill_type_ == SpillType::kSpillOperand;
+  }
+  bool HasSpillRange() const { return spill_type_ == SpillType::kSpillRange; }
+
+  void SpillAtDefinition(Zone* zone, int gap_index,
+                         InstructionOperand* operand);
   void SetSpillOperand(InstructionOperand* operand);
+  void SetSpillRange(SpillRange* spill_range);
+  void CommitSpillOperand(InstructionOperand* operand);
+  void CommitSpillsAtDefinition(InstructionSequence* sequence,
+                                InstructionOperand* operand);
 
   void SetSpillStartIndex(int start) {
     spill_start_index_ = Min(start, spill_start_index_);
@@ -289,11 +307,14 @@
 #endif
 
  private:
-  void ConvertOperands(Zone* zone);
+  struct SpillAtDefinitionList;
+
+  void ConvertUsesToOperand(InstructionOperand* op);
   UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
   void AdvanceLastProcessedMarker(UseInterval* to_start_of,
                                   LifetimePosition but_not_past) const;
 
+  // TODO(dcarney): pack this structure better.
   int id_;
   bool spilled_;
   bool is_phi_;
@@ -310,45 +331,96 @@
   UsePosition* last_processed_use_;
   // This is used as a cache; it is invalid outside of BuildLiveRanges.
   InstructionOperand* current_hint_operand_;
-  InstructionOperand* spill_operand_;
   int spill_start_index_;
+  SpillType spill_type_;
+  union {
+    InstructionOperand* spill_operand_;
+    SpillRange* spill_range_;
+  };
+  SpillAtDefinitionList* spills_at_definition_;
 
   friend class RegisterAllocator;  // Assigns to kind_.
+
+  DISALLOW_COPY_AND_ASSIGN(LiveRange);
 };
 
 
-class RegisterAllocator BASE_EMBEDDED {
+class SpillRange FINAL : public ZoneObject {
  public:
-  explicit RegisterAllocator(InstructionSequence* code);
+  SpillRange(LiveRange* range, Zone* zone);
 
-  static void TraceAlloc(const char* msg, ...);
+  UseInterval* interval() const { return use_interval_; }
+  RegisterKind Kind() const { return live_ranges_[0]->Kind(); }
+  bool IsEmpty() const { return live_ranges_.empty(); }
+  bool TryMerge(SpillRange* other);
+  void SetOperand(InstructionOperand* op);
 
-  // Checks whether the value of a given virtual register is a reference.
-  // TODO(titzer): rename this to IsReference.
-  bool HasTaggedValue(int virtual_register) const;
+ private:
+  LifetimePosition End() const { return end_position_; }
+  ZoneVector<LiveRange*>& live_ranges() { return live_ranges_; }
+  bool IsIntersectingWith(SpillRange* other) const;
+  // Merge intervals, making sure the use intervals are sorted.
+  void MergeDisjointIntervals(UseInterval* other);
 
-  // Returns the register kind required by the given virtual register.
-  RegisterKind RequiredRegisterKind(int virtual_register) const;
+  ZoneVector<LiveRange*> live_ranges_;
+  UseInterval* use_interval_;
+  LifetimePosition end_position_;
 
-  bool Allocate();
+  DISALLOW_COPY_AND_ASSIGN(SpillRange);
+};
 
-  const ZoneList<LiveRange*>* live_ranges() const { return &live_ranges_; }
-  const Vector<LiveRange*>* fixed_live_ranges() const {
-    return &fixed_live_ranges_;
+
+class RegisterAllocator FINAL : public ZoneObject {
+ public:
+  explicit RegisterAllocator(const RegisterConfiguration* config,
+                             Zone* local_zone, Frame* frame,
+                             InstructionSequence* code,
+                             const char* debug_name = nullptr);
+
+  bool AllocationOk() { return allocation_ok_; }
+
+  const ZoneVector<LiveRange*>& live_ranges() const { return live_ranges_; }
+  const ZoneVector<LiveRange*>& fixed_live_ranges() const {
+    return fixed_live_ranges_;
   }
-  const Vector<LiveRange*>* fixed_double_live_ranges() const {
-    return &fixed_double_live_ranges_;
+  const ZoneVector<LiveRange*>& fixed_double_live_ranges() const {
+    return fixed_double_live_ranges_;
   }
-
-  inline InstructionSequence* code() const { return code_; }
-
+  InstructionSequence* code() const { return code_; }
   // This zone is for data structures only needed during register allocation.
-  inline Zone* zone() { return &zone_; }
+  Zone* local_zone() const { return local_zone_; }
 
-  // This zone is for InstructionOperands and moves that live beyond register
-  // allocation.
-  inline Zone* code_zone() { return code()->zone(); }
+  // Phase 1: insert moves to account for fixed register operands.
+  void MeetRegisterConstraints();
 
+  // Phase 2: deconstruct SSA by inserting moves in successors and the headers
+  // of blocks containing phis.
+  void ResolvePhis();
+
+  // Phase 3: compute liveness of all virtual registers.
+  void BuildLiveRanges();
+  bool ExistsUseWithoutDefinition();
+
+  // Phase 4: compute register assignments.
+  void AllocateGeneralRegisters();
+  void AllocateDoubleRegisters();
+
+  // Phase 5: reassign spill slots for maximal reuse.
+  void ReuseSpillSlots();
+
+  // Phase 6: commit assignment.
+  void CommitAssignment();
+
+  // Phase 7: compute values for pointer maps.
+  void PopulatePointerMaps();  // TODO(titzer): rename to PopulateReferenceMaps.
+
+  // Phase 8: reconnect split ranges with moves.
+  void ConnectRanges();
+
+  // Phase 9: insert moves to connect ranges across basic blocks.
+  void ResolveControlFlow();
+
+ private:
   int GetVirtualRegister() {
     int vreg = code()->NextVirtualRegister();
     if (vreg >= UnallocatedOperand::kMaxVirtualRegisters) {
@@ -359,40 +431,40 @@
     return vreg;
   }
 
-  bool AllocationOk() { return allocation_ok_; }
+  // Checks whether the value of a given virtual register is a reference.
+  // TODO(titzer): rename this to IsReference.
+  bool HasTaggedValue(int virtual_register) const;
+
+  // Returns the register kind required by the given virtual register.
+  RegisterKind RequiredRegisterKind(int virtual_register) const;
+
+  // This zone is for InstructionOperands and moves that live beyond register
+  // allocation.
+  Zone* code_zone() const { return code()->zone(); }
+
+  BitVector* assigned_registers() { return assigned_registers_; }
+  BitVector* assigned_double_registers() { return assigned_double_registers_; }
 
 #ifdef DEBUG
   void Verify() const;
 #endif
 
-  BitVector* assigned_registers() { return assigned_registers_; }
-  BitVector* assigned_double_registers() { return assigned_double_registers_; }
-
- private:
-  void MeetRegisterConstraints();
-  void ResolvePhis();
-  void BuildLiveRanges();
-  void AllocateGeneralRegisters();
-  void AllocateDoubleRegisters();
-  void ConnectRanges();
-  void ResolveControlFlow();
-  void PopulatePointerMaps();  // TODO(titzer): rename to PopulateReferenceMaps.
   void AllocateRegisters();
-  bool CanEagerlyResolveControlFlow(BasicBlock* block) const;
-  inline bool SafePointsAreInOrder() const;
+  bool CanEagerlyResolveControlFlow(const InstructionBlock* block) const;
+  bool SafePointsAreInOrder() const;
 
   // Liveness analysis support.
-  void InitializeLivenessAnalysis();
-  BitVector* ComputeLiveOut(BasicBlock* block);
-  void AddInitialIntervals(BasicBlock* block, BitVector* live_out);
+  BitVector* ComputeLiveOut(const InstructionBlock* block);
+  void AddInitialIntervals(const InstructionBlock* block, BitVector* live_out);
   bool IsOutputRegisterOf(Instruction* instr, int index);
   bool IsOutputDoubleRegisterOf(Instruction* instr, int index);
-  void ProcessInstructions(BasicBlock* block, BitVector* live);
-  void MeetRegisterConstraints(BasicBlock* block);
+  void ProcessInstructions(const InstructionBlock* block, BitVector* live);
+  void MeetRegisterConstraints(const InstructionBlock* block);
   void MeetConstraintsBetween(Instruction* first, Instruction* second,
                               int gap_index);
-  void MeetRegisterConstraintsForLastInstructionInBlock(BasicBlock* block);
-  void ResolvePhis(BasicBlock* block);
+  void MeetRegisterConstraintsForLastInstructionInBlock(
+      const InstructionBlock* block);
+  void ResolvePhis(const InstructionBlock* block);
 
   // Helper methods for building intervals.
   InstructionOperand* AllocateFixed(UnallocatedOperand* operand, int pos,
@@ -402,8 +474,8 @@
               InstructionOperand* hint);
   void Use(LifetimePosition block_start, LifetimePosition position,
            InstructionOperand* operand, InstructionOperand* hint);
-  void AddConstraintsGapMove(int index, InstructionOperand* from,
-                             InstructionOperand* to);
+  void AddGapMove(int index, GapInstruction::InnerPosition position,
+                  InstructionOperand* from, InstructionOperand* to);
 
   // Helper methods for updating the life range lists.
   void AddToActive(LiveRange* range);
@@ -416,12 +488,14 @@
   void ActiveToInactive(LiveRange* range);
   void InactiveToHandled(LiveRange* range);
   void InactiveToActive(LiveRange* range);
-  void FreeSpillSlot(LiveRange* range);
-  InstructionOperand* TryReuseSpillSlot(LiveRange* range);
 
   // Helper methods for allocating registers.
+  bool TryReuseSpillForPhi(LiveRange* range);
   bool TryAllocateFreeReg(LiveRange* range);
   void AllocateBlockedReg(LiveRange* range);
+  SpillRange* AssignSpillRangeToLiveRange(LiveRange* range);
+  void FreeSpillSlot(LiveRange* range);
+  InstructionOperand* TryReuseSpillSlot(LiveRange* range);
 
   // Live range splitting helpers.
 
@@ -466,52 +540,84 @@
   bool IsBlockBoundary(LifetimePosition pos);
 
   // Helper methods for resolving control flow.
-  void ResolveControlFlow(LiveRange* range, BasicBlock* block,
-                          BasicBlock* pred);
+  void ResolveControlFlow(const InstructionBlock* block,
+                          InstructionOperand* cur_op,
+                          const InstructionBlock* pred,
+                          InstructionOperand* pred_op);
 
-  inline void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
+  void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
 
   // Return parallel move that should be used to connect ranges split at the
   // given position.
   ParallelMove* GetConnectingParallelMove(LifetimePosition pos);
 
   // Return the block that contains the given lifetime position.
-  BasicBlock* GetBlock(LifetimePosition pos);
+  const InstructionBlock* GetInstructionBlock(LifetimePosition pos);
 
   // Helper methods for the fixed registers.
   int RegisterCount() const;
   static int FixedLiveRangeID(int index) { return -index - 1; }
-  static int FixedDoubleLiveRangeID(int index);
+  int FixedDoubleLiveRangeID(int index);
   LiveRange* FixedLiveRangeFor(int index);
   LiveRange* FixedDoubleLiveRangeFor(int index);
   LiveRange* LiveRangeFor(int index);
-  GapInstruction* GetLastGap(BasicBlock* block);
+  GapInstruction* GetLastGap(const InstructionBlock* block);
 
   const char* RegisterName(int allocation_index);
 
-  inline Instruction* InstructionAt(int index) {
-    return code()->InstructionAt(index);
-  }
+  Instruction* InstructionAt(int index) { return code()->InstructionAt(index); }
 
-  Zone zone_;
-  InstructionSequence* code_;
+  Frame* frame() const { return frame_; }
+  const char* debug_name() const { return debug_name_; }
+  const RegisterConfiguration* config() const { return config_; }
+  ZoneVector<LiveRange*>& live_ranges() { return live_ranges_; }
+  ZoneVector<LiveRange*>& fixed_live_ranges() { return fixed_live_ranges_; }
+  ZoneVector<LiveRange*>& fixed_double_live_ranges() {
+    return fixed_double_live_ranges_;
+  }
+  ZoneVector<LiveRange*>& unhandled_live_ranges() {
+    return unhandled_live_ranges_;
+  }
+  ZoneVector<LiveRange*>& active_live_ranges() { return active_live_ranges_; }
+  ZoneVector<LiveRange*>& inactive_live_ranges() {
+    return inactive_live_ranges_;
+  }
+  ZoneVector<LiveRange*>& reusable_slots() { return reusable_slots_; }
+  ZoneVector<SpillRange*>& spill_ranges() { return spill_ranges_; }
+
+  struct PhiMapValue {
+    PhiMapValue(PhiInstruction* phi, const InstructionBlock* block)
+        : phi(phi), block(block) {}
+    PhiInstruction* const phi;
+    const InstructionBlock* const block;
+  };
+  typedef std::map<int, PhiMapValue, std::less<int>,
+                   zone_allocator<std::pair<int, PhiMapValue>>> PhiMap;
+
+  Zone* const local_zone_;
+  Frame* const frame_;
+  InstructionSequence* const code_;
+  const char* const debug_name_;
+
+  const RegisterConfiguration* config_;
+
+  PhiMap phi_map_;
 
   // During liveness analysis keep a mapping from block id to live_in sets
   // for blocks already analyzed.
-  ZoneList<BitVector*> live_in_sets_;
+  ZoneVector<BitVector*> live_in_sets_;
 
   // Liveness analysis results.
-  ZoneList<LiveRange*> live_ranges_;
+  ZoneVector<LiveRange*> live_ranges_;
 
   // Lists of live ranges
-  EmbeddedVector<LiveRange*, Register::kMaxNumAllocatableRegisters>
-      fixed_live_ranges_;
-  EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumAllocatableRegisters>
-      fixed_double_live_ranges_;
-  ZoneList<LiveRange*> unhandled_live_ranges_;
-  ZoneList<LiveRange*> active_live_ranges_;
-  ZoneList<LiveRange*> inactive_live_ranges_;
-  ZoneList<LiveRange*> reusable_slots_;
+  ZoneVector<LiveRange*> fixed_live_ranges_;
+  ZoneVector<LiveRange*> fixed_double_live_ranges_;
+  ZoneVector<LiveRange*> unhandled_live_ranges_;
+  ZoneVector<LiveRange*> active_live_ranges_;
+  ZoneVector<LiveRange*> inactive_live_ranges_;
+  ZoneVector<LiveRange*> reusable_slots_;
+  ZoneVector<SpillRange*> spill_ranges_;
 
   RegisterKind mode_;
   int num_registers_;
@@ -529,20 +635,8 @@
   DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
 };
 
-
-class RegisterAllocatorPhase : public CompilationPhase {
- public:
-  RegisterAllocatorPhase(const char* name, RegisterAllocator* allocator);
-  ~RegisterAllocatorPhase();
-
- private:
-  RegisterAllocator* allocator_;
-  unsigned allocator_zone_start_allocation_size_;
-
-  DISALLOW_COPY_AND_ASSIGN(RegisterAllocatorPhase);
-};
-}
-}
-}  // namespace v8::internal::compiler
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_REGISTER_ALLOCATOR_H_
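
The header's comment that every instruction has exactly two lifetime
positions suggests the usual even/odd encoding: position 2*i is the start of
instruction i and 2*i+1 its end. A minimal sketch of that encoding, assuming
it matches what LifetimePosition does internally:

    #include <cassert>

    // Sketch only: an assumed even/odd encoding of lifetime positions.
    class Position {
     public:
      static Position FromInstructionIndex(int index) {
        return Position(index * 2);
      }
      int InstructionIndex() const { return value_ / 2; }
      bool IsInstructionStart() const { return (value_ & 1) == 0; }
      Position InstructionEnd() const { return Position(value_ | 1); }
      int Value() const { return value_; }

     private:
      explicit Position(int value) : value_(value) {}
      int value_;
    };

    int main() {
      Position pos = Position::FromInstructionIndex(3);
      assert(pos.Value() == 6);                   // start of instruction 3
      assert(pos.IsInstructionStart());
      assert(pos.InstructionEnd().Value() == 7);  // end of instruction 3
      return 0;
    }
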
diff --git a/src/compiler/register-configuration.cc b/src/compiler/register-configuration.cc
new file mode 100644
index 0000000..e7d8bbd
--- /dev/null
+++ b/src/compiler/register-configuration.cc
@@ -0,0 +1,68 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/register-configuration.h"
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+STATIC_ASSERT(RegisterConfiguration::kMaxGeneralRegisters >=
+              Register::kNumRegisters);
+STATIC_ASSERT(RegisterConfiguration::kMaxDoubleRegisters >=
+              DoubleRegister::kMaxNumRegisters);
+
+class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
+ public:
+  ArchDefaultRegisterConfiguration()
+      : RegisterConfiguration(Register::kMaxNumAllocatableRegisters,
+                              DoubleRegister::kMaxNumAllocatableRegisters,
+                              DoubleRegister::NumAllocatableAliasedRegisters(),
+                              general_register_name_table_,
+                              double_register_name_table_) {
+    DCHECK_EQ(Register::kMaxNumAllocatableRegisters,
+              Register::NumAllocatableRegisters());
+    for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
+      general_register_name_table_[i] = Register::AllocationIndexToString(i);
+    }
+    for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; ++i) {
+      double_register_name_table_[i] =
+          DoubleRegister::AllocationIndexToString(i);
+    }
+  }
+
+  const char*
+      general_register_name_table_[Register::kMaxNumAllocatableRegisters];
+  const char*
+      double_register_name_table_[DoubleRegister::kMaxNumAllocatableRegisters];
+};
+
+
+static base::LazyInstance<ArchDefaultRegisterConfiguration>::type
+    kDefaultRegisterConfiguration = LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+
+const RegisterConfiguration* RegisterConfiguration::ArchDefault() {
+  return &kDefaultRegisterConfiguration.Get();
+}
+
+RegisterConfiguration::RegisterConfiguration(
+    int num_general_registers, int num_double_registers,
+    int num_aliased_double_registers, const char* const* general_register_names,
+    const char* const* double_register_names)
+    : num_general_registers_(num_general_registers),
+      num_double_registers_(num_double_registers),
+      num_aliased_double_registers_(num_aliased_double_registers),
+      general_register_names_(general_register_names),
+      double_register_names_(double_register_names) {}
+
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
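
Callers are expected to go through ArchDefault(), which hands out the lazily
created per-architecture instance above. A minimal usage sketch; the
surrounding function and the printf output are illustrative only:

    #include <cstdio>
    #include "src/compiler/register-configuration.h"

    void PrintAllocatableRegisters() {
      const v8::internal::compiler::RegisterConfiguration* config =
          v8::internal::compiler::RegisterConfiguration::ArchDefault();
      for (int i = 0; i < config->num_general_registers(); ++i) {
        std::printf("general: %s\n", config->general_register_name(i));
      }
      for (int i = 0; i < config->num_double_registers(); ++i) {
        std::printf("double: %s\n", config->double_register_name(i));
      }
    }
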
diff --git a/src/compiler/register-configuration.h b/src/compiler/register-configuration.h
new file mode 100644
index 0000000..8178ba2
--- /dev/null
+++ b/src/compiler/register-configuration.h
@@ -0,0 +1,56 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_REGISTER_CONFIGURATION_H_
+#define V8_COMPILER_REGISTER_CONFIGURATION_H_
+
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// An architecture-independent representation of the sets of registers
+// available for instruction creation.
+class RegisterConfiguration {
+ public:
+  // Architecture independent maxes.
+  static const int kMaxGeneralRegisters = 32;
+  static const int kMaxDoubleRegisters = 32;
+
+  static const RegisterConfiguration* ArchDefault();
+
+  RegisterConfiguration(int num_general_registers, int num_double_registers,
+                        int num_aliased_double_registers,
+                        const char* const* general_register_name,
+                        const char* const* double_register_name);
+
+  int num_general_registers() const { return num_general_registers_; }
+  int num_double_registers() const { return num_double_registers_; }
+  int num_aliased_double_registers() const {
+    return num_aliased_double_registers_;
+  }
+
+  const char* general_register_name(int offset) const {
+    DCHECK(offset >= 0 && offset < kMaxGeneralRegisters);
+    return general_register_names_[offset];
+  }
+  const char* double_register_name(int offset) const {
+    DCHECK(offset >= 0 && offset < kMaxDoubleRegisters);
+    return double_register_names_[offset];
+  }
+
+ private:
+  const int num_general_registers_;
+  const int num_double_registers_;
+  const int num_aliased_double_registers_;
+  const char* const* general_register_names_;
+  const char* const* double_register_names_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_REGISTER_CONFIGURATION_H_
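
Since the constructor is public, a configuration for a hypothetical target
can also be built directly, which is handy for tests that want
architecture-independent behavior; every name and count below is invented:

    #include "src/compiler/register-configuration.h"

    namespace {

    const char* kGeneralNames[] = {"r0", "r1", "r2", "r3"};
    const char* kDoubleNames[] = {"d0", "d1"};

    // 4 general registers, 2 double registers, both doubles aliased.
    const v8::internal::compiler::RegisterConfiguration kToyConfig(
        4, 2, 2, kGeneralNames, kDoubleNames);

    }  // namespace
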
diff --git a/src/compiler/representation-change.h b/src/compiler/representation-change.h
index aaa248e..8720afd 100644
--- a/src/compiler/representation-change.h
+++ b/src/compiler/representation-change.h
@@ -5,6 +5,8 @@
 #ifndef V8_COMPILER_REPRESENTATION_CHANGE_H_
 #define V8_COMPILER_REPRESENTATION_CHANGE_H_
 
+#include <sstream>
+
 #include "src/base/bits.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/machine-operator.h"
@@ -51,10 +53,10 @@
     }
     if (use_type & kRepTagged) {
       return GetTaggedRepresentationFor(node, output_type);
+    } else if (use_type & kRepFloat32) {
+      return GetFloat32RepresentationFor(node, output_type);
     } else if (use_type & kRepFloat64) {
       return GetFloat64RepresentationFor(node, output_type);
-    } else if (use_type & kRepFloat32) {
-      return TypeError(node, output_type, use_type);  // TODO(titzer): handle
     } else if (use_type & kRepBit) {
       return GetBitRepresentationFor(node, output_type);
     } else if (use_type & rWord) {
@@ -88,6 +90,8 @@
         }
       case IrOpcode::kFloat64Constant:
         return jsgraph()->Constant(OpParameter<double>(node));
+      case IrOpcode::kFloat32Constant:
+        return jsgraph()->Constant(OpParameter<float>(node));
       default:
         break;
     }
@@ -103,6 +107,9 @@
       } else {
         return TypeError(node, output_type, kRepTagged);
       }
+    } else if (output_type & kRepFloat32) {  // float32 -> float64 -> tagged
+      node = InsertChangeFloat32ToFloat64(node);
+      op = simplified()->ChangeFloat64ToTagged();
     } else if (output_type & kRepFloat64) {
       op = simplified()->ChangeFloat64ToTagged();
     } else {
@@ -111,6 +118,52 @@
     return jsgraph()->graph()->NewNode(op, node);
   }
 
+  Node* GetFloat32RepresentationFor(Node* node, MachineTypeUnion output_type) {
+    // Eagerly fold representation changes for constants.
+    switch (node->opcode()) {
+      case IrOpcode::kFloat64Constant:
+      case IrOpcode::kNumberConstant:
+        return jsgraph()->Float32Constant(
+            DoubleToFloat32(OpParameter<double>(node)));
+      case IrOpcode::kInt32Constant:
+        if (output_type & kTypeUint32) {
+          uint32_t value = OpParameter<uint32_t>(node);
+          return jsgraph()->Float32Constant(static_cast<float>(value));
+        } else {
+          int32_t value = OpParameter<int32_t>(node);
+          return jsgraph()->Float32Constant(static_cast<float>(value));
+        }
+      case IrOpcode::kFloat32Constant:
+        return node;  // No change necessary.
+      default:
+        break;
+    }
+    // Select the correct X -> Float32 operator.
+    const Operator* op;
+    if (output_type & kRepBit) {
+      return TypeError(node, output_type, kRepFloat32);
+    } else if (output_type & rWord) {
+      if (output_type & kTypeUint32) {
+        op = machine()->ChangeUint32ToFloat64();
+      } else {
+        op = machine()->ChangeInt32ToFloat64();
+      }
+      // int32 -> float64 -> float32
+      node = jsgraph()->graph()->NewNode(op, node);
+      op = machine()->TruncateFloat64ToFloat32();
+    } else if (output_type & kRepTagged) {
+      op = simplified()
+               ->ChangeTaggedToFloat64();  // tagged -> float64 -> float32
+      node = jsgraph()->graph()->NewNode(op, node);
+      op = machine()->TruncateFloat64ToFloat32();
+    } else if (output_type & kRepFloat64) {
+      op = machine()->TruncateFloat64ToFloat32();
+    } else {
+      return TypeError(node, output_type, kRepFloat32);
+    }
+    return jsgraph()->graph()->NewNode(op, node);
+  }
+
   Node* GetFloat64RepresentationFor(Node* node, MachineTypeUnion output_type) {
     // Eagerly fold representation changes for constants.
     switch (node->opcode()) {
@@ -126,6 +179,8 @@
         }
       case IrOpcode::kFloat64Constant:
         return node;  // No change necessary.
+      case IrOpcode::kFloat32Constant:
+        return jsgraph()->Float64Constant(OpParameter<float>(node));
       default:
         break;
     }
@@ -141,31 +196,68 @@
       }
     } else if (output_type & kRepTagged) {
       op = simplified()->ChangeTaggedToFloat64();
+    } else if (output_type & kRepFloat32) {
+      op = machine()->ChangeFloat32ToFloat64();
     } else {
       return TypeError(node, output_type, kRepFloat64);
     }
     return jsgraph()->graph()->NewNode(op, node);
   }
 
+  Node* MakeInt32Constant(double value) {
+    if (value < 0) {
+      DCHECK(IsInt32Double(value));
+      int32_t iv = static_cast<int32_t>(value);
+      return jsgraph()->Int32Constant(iv);
+    } else {
+      DCHECK(IsUint32Double(value));
+      int32_t iv = static_cast<int32_t>(static_cast<uint32_t>(value));
+      return jsgraph()->Int32Constant(iv);
+    }
+  }
+
+  Node* GetTruncatedWord32For(Node* node, MachineTypeUnion output_type) {
+    // Eagerly fold truncations for constants.
+    switch (node->opcode()) {
+      case IrOpcode::kInt32Constant:
+        return node;  // No change necessary.
+      case IrOpcode::kFloat32Constant:
+        return jsgraph()->Int32Constant(
+            DoubleToInt32(OpParameter<float>(node)));
+      case IrOpcode::kNumberConstant:
+      case IrOpcode::kFloat64Constant:
+        return jsgraph()->Int32Constant(
+            DoubleToInt32(OpParameter<double>(node)));
+      default:
+        break;
+    }
+    // Select the correct X -> Word32 truncation operator.
+    const Operator* op = NULL;
+    if (output_type & kRepFloat64) {
+      op = machine()->TruncateFloat64ToInt32();
+    } else if (output_type & kRepFloat32) {
+      node = InsertChangeFloat32ToFloat64(node);
+      op = machine()->TruncateFloat64ToInt32();
+    } else if (output_type & kRepTagged) {
+      node = InsertChangeTaggedToFloat64(node);
+      op = machine()->TruncateFloat64ToInt32();
+    } else {
+      return TypeError(node, output_type, kRepWord32);
+    }
+    return jsgraph()->graph()->NewNode(op, node);
+  }
+
   Node* GetWord32RepresentationFor(Node* node, MachineTypeUnion output_type,
                                    bool use_unsigned) {
     // Eagerly fold representation changes for constants.
     switch (node->opcode()) {
       case IrOpcode::kInt32Constant:
         return node;  // No change necessary.
+      case IrOpcode::kFloat32Constant:
+        return MakeInt32Constant(OpParameter<float>(node));
       case IrOpcode::kNumberConstant:
-      case IrOpcode::kFloat64Constant: {
-        double value = OpParameter<double>(node);
-        if (value < 0) {
-          DCHECK(IsInt32Double(value));
-          int32_t iv = static_cast<int32_t>(value);
-          return jsgraph()->Int32Constant(iv);
-        } else {
-          DCHECK(IsUint32Double(value));
-          int32_t iv = static_cast<int32_t>(static_cast<uint32_t>(value));
-          return jsgraph()->Int32Constant(iv);
-        }
-      }
+      case IrOpcode::kFloat64Constant:
+        return MakeInt32Constant(OpParameter<double>(node));
       default:
         break;
     }
@@ -177,6 +269,13 @@
       } else {
         op = machine()->ChangeFloat64ToInt32();
       }
+    } else if (output_type & kRepFloat32) {
+      node = InsertChangeFloat32ToFloat64(node);  // float32 -> float64 -> int32
+      if (output_type & kTypeUint32 || use_unsigned) {
+        op = machine()->ChangeFloat64ToUint32();
+      } else {
+        op = machine()->ChangeFloat64ToInt32();
+      }
     } else if (output_type & kRepTagged) {
       if (output_type & kTypeUint32 || use_unsigned) {
         op = simplified()->ChangeTaggedToUint32();
@@ -195,7 +294,14 @@
       case IrOpcode::kInt32Constant: {
         int32_t value = OpParameter<int32_t>(node);
         if (value == 0 || value == 1) return node;
-        return jsgraph()->OneConstant();  // value != 0
+        return jsgraph()->Int32Constant(1);  // value != 0
+      }
+      case IrOpcode::kNumberConstant: {
+        double value = OpParameter<double>(node);
+        if (std::isnan(value) || value == 0.0) {
+          return jsgraph()->Int32Constant(0);
+        }
+        return jsgraph()->Int32Constant(1);
       }
       case IrOpcode::kHeapConstant: {
         Handle<Object> handle = OpParameter<Unique<Object> >(node).handle();
@@ -262,9 +368,9 @@
       case IrOpcode::kNumberMultiply:
         return machine()->Int32Mul();
       case IrOpcode::kNumberDivide:
-        return machine()->Int32UDiv();
+        return machine()->Uint32Div();
       case IrOpcode::kNumberModulus:
-        return machine()->Int32UMod();
+        return machine()->Uint32Mod();
       case IrOpcode::kNumberEqual:
         return machine()->Word32Equal();
       case IrOpcode::kNumberLessThan:
@@ -333,28 +439,39 @@
                   MachineTypeUnion use) {
     type_error_ = true;
     if (!testing_type_errors_) {
-      OStringStream out_str;
+      std::ostringstream out_str;
       out_str << static_cast<MachineType>(output_type);
 
-      OStringStream use_str;
+      std::ostringstream use_str;
       use_str << static_cast<MachineType>(use);
 
       V8_Fatal(__FILE__, __LINE__,
                "RepresentationChangerError: node #%d:%s of "
                "%s cannot be changed to %s",
-               node->id(), node->op()->mnemonic(), out_str.c_str(),
-               use_str.c_str());
+               node->id(), node->op()->mnemonic(), out_str.str().c_str(),
+               use_str.str().c_str());
     }
     return node;
   }
 
+  Node* InsertChangeFloat32ToFloat64(Node* node) {
+    return jsgraph()->graph()->NewNode(machine()->ChangeFloat32ToFloat64(),
+                                       node);
+  }
+
+  Node* InsertChangeTaggedToFloat64(Node* node) {
+    return jsgraph()->graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
+                                       node);
+  }
+
   JSGraph* jsgraph() { return jsgraph_; }
   Isolate* isolate() { return isolate_; }
   SimplifiedOperatorBuilder* simplified() { return simplified_; }
   MachineOperatorBuilder* machine() { return jsgraph()->machine(); }
 };
-}
-}
-}  // namespace v8::internal::compiler
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_COMPILER_REPRESENTATION_CHANGE_H_
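
The changer dispatches on bit flags rather than exact types: the first
representation bit that matches the use wins, which is why the diff moves the
kRepFloat32 test ahead of kRepFloat64 once float32 is actually handled. A
standalone sketch of that dispatch, with invented flag values:

    #include <cstdio>

    // Invented flag values; only the dispatch order mirrors the code above.
    enum RepFlag : unsigned {
      kBit = 1u << 0,
      kWord32 = 1u << 1,
      kFloat32 = 1u << 2,
      kFloat64 = 1u << 3,
      kTagged = 1u << 4,
    };

    const char* SelectTarget(unsigned use_type) {
      if (use_type & kTagged) return "tagged";
      if (use_type & kFloat32) return "float32";  // checked before float64
      if (use_type & kFloat64) return "float64";
      if (use_type & kBit) return "bit";
      if (use_type & kWord32) return "word32";
      return "none";
    }

    int main() {
      // A use that accepts float32 or float64 now converts to float32 first.
      std::printf("%s\n", SelectTarget(kFloat32 | kFloat64));  // float32
      return 0;
    }
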
diff --git a/src/compiler/schedule.cc b/src/compiler/schedule.cc
index a3b5ed3..30bfbc8 100644
--- a/src/compiler/schedule.cc
+++ b/src/compiler/schedule.cc
@@ -12,17 +12,86 @@
 namespace internal {
 namespace compiler {
 
-OStream& operator<<(OStream& os, const BasicBlockData::Control& c) {
+BasicBlock::BasicBlock(Zone* zone, Id id)
+    : loop_number_(-1),
+      rpo_number_(-1),
+      deferred_(false),
+      dominator_depth_(-1),
+      dominator_(NULL),
+      rpo_next_(NULL),
+      loop_header_(NULL),
+      loop_end_(NULL),
+      loop_depth_(0),
+      control_(kNone),
+      control_input_(NULL),
+      nodes_(zone),
+      successors_(zone),
+      predecessors_(zone),
+      id_(id) {}
+
+
+bool BasicBlock::LoopContains(BasicBlock* block) const {
+  // RPO numbers must be initialized.
+  DCHECK(rpo_number_ >= 0);
+  DCHECK(block->rpo_number_ >= 0);
+  if (loop_end_ == NULL) return false;  // This is not a loop.
+  return block->rpo_number_ >= rpo_number_ &&
+         block->rpo_number_ < loop_end_->rpo_number_;
+}
+
+
+void BasicBlock::AddSuccessor(BasicBlock* successor) {
+  successors_.push_back(successor);
+}
+
+
+void BasicBlock::AddPredecessor(BasicBlock* predecessor) {
+  predecessors_.push_back(predecessor);
+}
+
+
+void BasicBlock::AddNode(Node* node) { nodes_.push_back(node); }
+
+
+void BasicBlock::set_control(Control control) {
+  control_ = control;
+}
+
+
+void BasicBlock::set_control_input(Node* control_input) {
+  control_input_ = control_input;
+}
+
+
+void BasicBlock::set_loop_depth(int32_t loop_depth) {
+  loop_depth_ = loop_depth;
+}
+
+
+void BasicBlock::set_rpo_number(int32_t rpo_number) {
+  rpo_number_ = rpo_number;
+}
+
+
+void BasicBlock::set_loop_end(BasicBlock* loop_end) { loop_end_ = loop_end; }
+
+
+void BasicBlock::set_loop_header(BasicBlock* loop_header) {
+  loop_header_ = loop_header;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const BasicBlock::Control& c) {
   switch (c) {
-    case BasicBlockData::kNone:
+    case BasicBlock::kNone:
       return os << "none";
-    case BasicBlockData::kGoto:
+    case BasicBlock::kGoto:
       return os << "goto";
-    case BasicBlockData::kBranch:
+    case BasicBlock::kBranch:
       return os << "branch";
-    case BasicBlockData::kReturn:
+    case BasicBlock::kReturn:
       return os << "return";
-    case BasicBlockData::kThrow:
+    case BasicBlock::kThrow:
       return os << "throw";
   }
   UNREACHABLE();
@@ -30,17 +99,181 @@
 }
 
 
-OStream& operator<<(OStream& os, const Schedule& s) {
+std::ostream& operator<<(std::ostream& os, const BasicBlock::Id& id) {
+  return os << id.ToSize();
+}
+
+
+std::ostream& operator<<(std::ostream& os, const BasicBlock::RpoNumber& rpo) {
+  return os << rpo.ToSize();
+}
+
+
+Schedule::Schedule(Zone* zone, size_t node_count_hint)
+    : zone_(zone),
+      all_blocks_(zone),
+      nodeid_to_block_(zone),
+      rpo_order_(zone),
+      start_(NewBasicBlock()),
+      end_(NewBasicBlock()) {
+  nodeid_to_block_.reserve(node_count_hint);
+}
+
+
+BasicBlock* Schedule::block(Node* node) const {
+  if (node->id() < static_cast<NodeId>(nodeid_to_block_.size())) {
+    return nodeid_to_block_[node->id()];
+  }
+  return NULL;
+}
+
+
+bool Schedule::IsScheduled(Node* node) {
+  int length = static_cast<int>(nodeid_to_block_.size());
+  if (node->id() >= length) return false;
+  return nodeid_to_block_[node->id()] != NULL;
+}
+
+
+BasicBlock* Schedule::GetBlockById(BasicBlock::Id block_id) {
+  DCHECK(block_id.ToSize() < all_blocks_.size());
+  return all_blocks_[block_id.ToSize()];
+}
+
+
+bool Schedule::SameBasicBlock(Node* a, Node* b) const {
+  BasicBlock* block = this->block(a);
+  return block != NULL && block == this->block(b);
+}
+
+
+BasicBlock* Schedule::NewBasicBlock() {
+  BasicBlock* block = new (zone_)
+      BasicBlock(zone_, BasicBlock::Id::FromSize(all_blocks_.size()));
+  all_blocks_.push_back(block);
+  return block;
+}
+
+
+void Schedule::PlanNode(BasicBlock* block, Node* node) {
+  if (FLAG_trace_turbo_scheduler) {
+    OFStream os(stdout);
+    os << "Planning #" << node->id() << ":" << node->op()->mnemonic()
+       << " for future add to B" << block->id() << "\n";
+  }
+  DCHECK(this->block(node) == NULL);
+  SetBlockForNode(block, node);
+}
+
+
+void Schedule::AddNode(BasicBlock* block, Node* node) {
+  if (FLAG_trace_turbo_scheduler) {
+    OFStream os(stdout);
+    os << "Adding #" << node->id() << ":" << node->op()->mnemonic() << " to B"
+       << block->id() << "\n";
+  }
+  DCHECK(this->block(node) == NULL || this->block(node) == block);
+  block->AddNode(node);
+  SetBlockForNode(block, node);
+}
+
+
+void Schedule::AddGoto(BasicBlock* block, BasicBlock* succ) {
+  DCHECK(block->control() == BasicBlock::kNone);
+  block->set_control(BasicBlock::kGoto);
+  AddSuccessor(block, succ);
+}
+
+
+void Schedule::AddBranch(BasicBlock* block, Node* branch, BasicBlock* tblock,
+                         BasicBlock* fblock) {
+  DCHECK(block->control() == BasicBlock::kNone);
+  DCHECK(branch->opcode() == IrOpcode::kBranch);
+  block->set_control(BasicBlock::kBranch);
+  AddSuccessor(block, tblock);
+  AddSuccessor(block, fblock);
+  SetControlInput(block, branch);
+}
+
+
+void Schedule::AddReturn(BasicBlock* block, Node* input) {
+  DCHECK(block->control() == BasicBlock::kNone);
+  block->set_control(BasicBlock::kReturn);
+  SetControlInput(block, input);
+  if (block != end()) AddSuccessor(block, end());
+}
+
+
+void Schedule::AddThrow(BasicBlock* block, Node* input) {
+  DCHECK(block->control() == BasicBlock::kNone);
+  block->set_control(BasicBlock::kThrow);
+  SetControlInput(block, input);
+  if (block != end()) AddSuccessor(block, end());
+}
+
+
+void Schedule::InsertBranch(BasicBlock* block, BasicBlock* end, Node* branch,
+                            BasicBlock* tblock, BasicBlock* fblock) {
+  DCHECK(block->control() != BasicBlock::kNone);
+  DCHECK(end->control() == BasicBlock::kNone);
+  end->set_control(block->control());
+  block->set_control(BasicBlock::kBranch);
+  MoveSuccessors(block, end);
+  AddSuccessor(block, tblock);
+  AddSuccessor(block, fblock);
+  if (block->control_input() != NULL) {
+    SetControlInput(end, block->control_input());
+  }
+  SetControlInput(block, branch);
+}
+
+
+void Schedule::AddSuccessor(BasicBlock* block, BasicBlock* succ) {
+  block->AddSuccessor(succ);
+  succ->AddPredecessor(block);
+}
+
+
+void Schedule::MoveSuccessors(BasicBlock* from, BasicBlock* to) {
+  for (BasicBlock::Predecessors::iterator i = from->successors_begin();
+       i != from->successors_end(); ++i) {
+    BasicBlock* succ = *i;
+    to->AddSuccessor(succ);
+    for (BasicBlock::Predecessors::iterator j = succ->predecessors_begin();
+         j != succ->predecessors_end(); ++j) {
+      if (*j == from) *j = to;
+    }
+  }
+  from->ClearSuccessors();
+}
+
+
+void Schedule::SetControlInput(BasicBlock* block, Node* node) {
+  block->set_control_input(node);
+  SetBlockForNode(block, node);
+}
+
+
+void Schedule::SetBlockForNode(BasicBlock* block, Node* node) {
+  int length = static_cast<int>(nodeid_to_block_.size());
+  if (node->id() >= length) {
+    nodeid_to_block_.resize(node->id() + 1);
+  }
+  nodeid_to_block_[node->id()] = block;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const Schedule& s) {
   // TODO(svenpanne) Const-correct the RPO stuff/iterators.
   BasicBlockVector* rpo = const_cast<Schedule*>(&s)->rpo_order();
   for (BasicBlockVectorIter i = rpo->begin(); i != rpo->end(); ++i) {
     BasicBlock* block = *i;
     os << "--- BLOCK B" << block->id();
+    if (block->deferred()) os << " (deferred)";
     if (block->PredecessorCount() != 0) os << " <- ";
-    BasicBlock::Predecessors predecessors = block->predecessors();
     bool comma = false;
-    for (BasicBlock::Predecessors::iterator j = predecessors.begin();
-         j != predecessors.end(); ++j) {
+    for (BasicBlock::Predecessors::iterator j = block->predecessors_begin();
+         j != block->predecessors_end(); ++j) {
       if (comma) os << ", ";
       comma = true;
       os << "B" << (*j)->id();
@@ -50,7 +283,7 @@
          ++j) {
       Node* node = *j;
       os << "  " << *node;
-      if (!NodeProperties::IsControl(node)) {
+      if (NodeProperties::IsTyped(node)) {
         Bounds bounds = NodeProperties::GetBounds(node);
         os << " : ";
         bounds.lower->PrintTo(os);
@@ -61,19 +294,18 @@
       }
       os << "\n";
     }
-    BasicBlock::Control control = block->control_;
+    BasicBlock::Control control = block->control();
     if (control != BasicBlock::kNone) {
       os << "  ";
-      if (block->control_input_ != NULL) {
-        os << *block->control_input_;
+      if (block->control_input() != NULL) {
+        os << *block->control_input();
       } else {
         os << "Goto";
       }
       os << " -> ";
-      BasicBlock::Successors successors = block->successors();
       comma = false;
-      for (BasicBlock::Successors::iterator j = successors.begin();
-           j != successors.end(); ++j) {
+      for (BasicBlock::Successors::iterator j = block->successors_begin();
+           j != block->successors_end(); ++j) {
         if (comma) os << ", ";
         comma = true;
         os << "B" << (*j)->id();
@@ -83,6 +315,7 @@
   }
   return os;
 }
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
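
Successor edges are mirrored on both endpoints, so MoveSuccessors has to
re-point the predecessor back edges as well; InsertBranch relies on this when
it splices a new block in front of an existing control exit. A standalone
sketch of that bookkeeping (not V8 code):

    #include <cstdio>
    #include <vector>

    struct Block {
      int id;
      std::vector<Block*> successors;
      std::vector<Block*> predecessors;
    };

    void AddSuccessor(Block* block, Block* succ) {
      block->successors.push_back(succ);
      succ->predecessors.push_back(block);
    }

    void MoveSuccessors(Block* from, Block* to) {
      for (Block* succ : from->successors) {
        to->successors.push_back(succ);
        // Patch the mirrored back edge so it points at the new block.
        for (Block*& pred : succ->predecessors) {
          if (pred == from) pred = to;
        }
      }
      from->successors.clear();
    }

    int main() {
      Block a{0}, b{1}, c{2}, d{3};
      AddSuccessor(&a, &c);
      AddSuccessor(&a, &d);
      MoveSuccessors(&a, &b);  // a's edges to c and d now originate from b.
      std::printf("b -> %zu blocks, c's pred is B%d\n", b.successors.size(),
                  c.predecessors[0]->id);  // b -> 2 blocks, c's pred is B1
      return 0;
    }
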
diff --git a/src/compiler/schedule.h b/src/compiler/schedule.h
index 070691e..0bba689 100644
--- a/src/compiler/schedule.h
+++ b/src/compiler/schedule.h
@@ -5,14 +5,11 @@
 #ifndef V8_COMPILER_SCHEDULE_H_
 #define V8_COMPILER_SCHEDULE_H_
 
+#include <iosfwd>
 #include <vector>
 
 #include "src/v8.h"
 
-#include "src/compiler/generic-algorithm.h"
-#include "src/compiler/generic-graph.h"
-#include "src/compiler/generic-node.h"
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/node.h"
 #include "src/compiler/opcodes.h"
 #include "src/zone.h"
@@ -22,11 +19,15 @@
 namespace compiler {
 
 class BasicBlock;
+class BasicBlockInstrumentor;
 class Graph;
 class ConstructScheduleData;
 class CodeGenerator;  // Because of a namespace bug in clang.
 
-class BasicBlockData {
+// A basic block contains an ordered list of nodes and ends with a control
+// node. Note that if a basic block has phis, then all phis must appear as the
+// first nodes in the block.
+class BasicBlock FINAL : public ZoneObject {
  public:
   // Possible control nodes that can end a block.
   enum Control {
@@ -37,94 +38,83 @@
     kThrow    // Throw an exception.
   };
 
-  int32_t rpo_number_;       // special RPO number of the block.
-  BasicBlock* dominator_;    // Immediate dominator of the block.
-  BasicBlock* loop_header_;  // Pointer to dominating loop header basic block,
-                             // NULL if none. For loop headers, this points to
-                             // enclosing loop header.
-  int32_t loop_depth_;       // loop nesting, 0 is top-level
-  int32_t loop_end_;         // end of the loop, if this block is a loop header.
-  int32_t code_start_;       // start index of arch-specific code.
-  int32_t code_end_;         // end index of arch-specific code.
-  bool deferred_;            // {true} if this block is considered the slow
-                             // path.
-  Control control_;          // Control at the end of the block.
-  Node* control_input_;      // Input value for control.
-  NodeVector nodes_;         // nodes of this block in forward order.
+  class Id {
+   public:
+    int ToInt() const { return static_cast<int>(index_); }
+    size_t ToSize() const { return index_; }
+    static Id FromSize(size_t index) { return Id(index); }
+    static Id FromInt(int index) { return Id(static_cast<size_t>(index)); }
 
-  explicit BasicBlockData(Zone* zone)
-      : rpo_number_(-1),
-        dominator_(NULL),
-        loop_header_(NULL),
-        loop_depth_(0),
-        loop_end_(-1),
-        code_start_(-1),
-        code_end_(-1),
-        deferred_(false),
-        control_(kNone),
-        control_input_(NULL),
-        nodes_(zone) {}
+   private:
+    explicit Id(size_t index) : index_(index) {}
+    size_t index_;
+  };
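+  // For illustration: Id::FromInt(2).ToInt() == 2; printing code in this
+  // compiler conventionally renders a block id as "B" followed by this
+  // index (e.g. "B2").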
 
-  inline bool IsLoopHeader() const { return loop_end_ >= 0; }
-  inline bool LoopContains(BasicBlockData* block) const {
-    // RPO numbers must be initialized.
-    DCHECK(rpo_number_ >= 0);
-    DCHECK(block->rpo_number_ >= 0);
-    if (loop_end_ < 0) return false;  // This is not a loop.
-    return block->rpo_number_ >= rpo_number_ && block->rpo_number_ < loop_end_;
-  }
-  int first_instruction_index() {
-    DCHECK(code_start_ >= 0);
-    DCHECK(code_end_ > 0);
-    DCHECK(code_end_ >= code_start_);
-    return code_start_;
-  }
-  int last_instruction_index() {
-    DCHECK(code_start_ >= 0);
-    DCHECK(code_end_ > 0);
-    DCHECK(code_end_ >= code_start_);
-    return code_end_ - 1;
-  }
-};
-
-OStream& operator<<(OStream& os, const BasicBlockData::Control& c);
-
-// A basic block contains an ordered list of nodes and ends with a control
-// node. Note that if a basic block has phis, then all phis must appear as the
-// first nodes in the block.
-class BasicBlock FINAL : public GenericNode<BasicBlockData, BasicBlock> {
- public:
-  BasicBlock(GenericGraphBase* graph, int input_count)
-      : GenericNode<BasicBlockData, BasicBlock>(graph, input_count) {}
-
-  typedef Uses Successors;
-  typedef Inputs Predecessors;
-
-  Successors successors() { return static_cast<Successors>(uses()); }
-  Predecessors predecessors() { return static_cast<Predecessors>(inputs()); }
-
-  int PredecessorCount() { return InputCount(); }
-  BasicBlock* PredecessorAt(int index) { return InputAt(index); }
-
-  int SuccessorCount() { return UseCount(); }
-  BasicBlock* SuccessorAt(int index) { return UseAt(index); }
-
-  int PredecessorIndexOf(BasicBlock* predecessor) {
-    BasicBlock::Predecessors predecessors = this->predecessors();
-    for (BasicBlock::Predecessors::iterator i = predecessors.begin();
-         i != predecessors.end(); ++i) {
-      if (*i == predecessor) return i.index();
+  static const int kInvalidRpoNumber = -1;
+  class RpoNumber FINAL {
+   public:
+    int ToInt() const {
+      DCHECK(IsValid());
+      return index_;
     }
-    return -1;
-  }
+    size_t ToSize() const {
+      DCHECK(IsValid());
+      return static_cast<size_t>(index_);
+    }
+    bool IsValid() const { return index_ >= 0; }
+    static RpoNumber FromInt(int index) { return RpoNumber(index); }
+    static RpoNumber Invalid() { return RpoNumber(kInvalidRpoNumber); }
 
-  inline BasicBlock* loop_header() {
-    return static_cast<BasicBlock*>(loop_header_);
+    bool IsNext(const RpoNumber other) const {
+      DCHECK(IsValid());
+      return other.index_ == this->index_ + 1;
+    }
+
+    bool operator==(RpoNumber other) const {
+      return this->index_ == other.index_;
+    }
+
+   private:
+    explicit RpoNumber(int32_t index) : index_(index) {}
+    int32_t index_;
+  };
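+  // For illustration: RpoNumber::FromInt(3).IsNext(RpoNumber::FromInt(4))
+  // holds, while RpoNumber::Invalid().IsValid() does not.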
+
+  BasicBlock(Zone* zone, Id id);
+
+  Id id() const { return id_; }
+
+  // Predecessors and successors.
+  typedef ZoneVector<BasicBlock*> Predecessors;
+  Predecessors::iterator predecessors_begin() { return predecessors_.begin(); }
+  Predecessors::iterator predecessors_end() { return predecessors_.end(); }
+  Predecessors::const_iterator predecessors_begin() const {
+    return predecessors_.begin();
   }
-  inline BasicBlock* ContainingLoop() {
-    if (IsLoopHeader()) return this;
-    return static_cast<BasicBlock*>(loop_header_);
+  Predecessors::const_iterator predecessors_end() const {
+    return predecessors_.end();
   }
+  size_t PredecessorCount() const { return predecessors_.size(); }
+  BasicBlock* PredecessorAt(size_t index) { return predecessors_[index]; }
+  void ClearPredecessors() { predecessors_.clear(); }
+  void AddPredecessor(BasicBlock* predecessor);
+
+  typedef ZoneVector<BasicBlock*> Successors;
+  Successors::iterator successors_begin() { return successors_.begin(); }
+  Successors::iterator successors_end() { return successors_.end(); }
+  Successors::const_iterator successors_begin() const {
+    return successors_.begin();
+  }
+  Successors::const_iterator successors_end() const {
+    return successors_.end();
+  }
+  size_t SuccessorCount() const { return successors_.size(); }
+  BasicBlock* SuccessorAt(size_t index) { return successors_[index]; }
+  void ClearSuccessors() { successors_.clear(); }
+  void AddSuccessor(BasicBlock* successor);
+
+  // Nodes in the basic block.
+  Node* NodeAt(size_t index) { return nodes_[index]; }
+  size_t NodeCount() const { return nodes_.size(); }
 
   typedef NodeVector::iterator iterator;
   iterator begin() { return nodes_.begin(); }
@@ -138,12 +128,79 @@
   reverse_iterator rbegin() { return nodes_.rbegin(); }
   reverse_iterator rend() { return nodes_.rend(); }
 
+  void AddNode(Node* node);
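+  // Inserts a range of nodes, mirroring std::vector::insert: the nodes in
+  // [insertion_start, insertion_end) are placed in front of
+  // {insertion_point}.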
+  template <class InputIterator>
+  void InsertNodes(iterator insertion_point, InputIterator insertion_start,
+                   InputIterator insertion_end) {
+    nodes_.insert(insertion_point, insertion_start, insertion_end);
+  }
+
+  // Accessors.
+  Control control() const { return control_; }
+  void set_control(Control control);
+
+  Node* control_input() const { return control_input_; }
+  void set_control_input(Node* control_input);
+
+  bool deferred() const { return deferred_; }
+  void set_deferred(bool deferred) { deferred_ = deferred; }
+
+  int32_t dominator_depth() const { return dominator_depth_; }
+  void set_dominator_depth(int32_t depth) { dominator_depth_ = depth; }
+
+  BasicBlock* dominator() const { return dominator_; }
+  void set_dominator(BasicBlock* dominator) { dominator_ = dominator; }
+
+  BasicBlock* rpo_next() const { return rpo_next_; }
+  void set_rpo_next(BasicBlock* rpo_next) { rpo_next_ = rpo_next; }
+
+  BasicBlock* loop_header() const { return loop_header_; }
+  void set_loop_header(BasicBlock* loop_header);
+
+  BasicBlock* loop_end() const { return loop_end_; }
+  void set_loop_end(BasicBlock* loop_end);
+
+  int32_t loop_depth() const { return loop_depth_; }
+  void set_loop_depth(int32_t loop_depth);
+
+  int32_t loop_number() const { return loop_number_; }
+  void set_loop_number(int32_t loop_number) { loop_number_ = loop_number; }
+
+  RpoNumber GetRpoNumber() const { return RpoNumber::FromInt(rpo_number_); }
+  int32_t rpo_number() const { return rpo_number_; }
+  void set_rpo_number(int32_t rpo_number);
+
+  // Loop membership helpers.
+  inline bool IsLoopHeader() const { return loop_end_ != NULL; }
+  bool LoopContains(BasicBlock* block) const;
+
  private:
+  int32_t loop_number_;      // loop number of the block.
+  int32_t rpo_number_;       // special RPO number of the block.
+  bool deferred_;            // true if the block contains deferred code.
+  int32_t dominator_depth_;  // Depth within the dominator tree.
+  BasicBlock* dominator_;    // Immediate dominator of the block.
+  BasicBlock* rpo_next_;     // Link to next block in special RPO order.
+  BasicBlock* loop_header_;  // Pointer to dominating loop header basic block,
+                             // NULL if none. For loop headers, this points to
+                             // enclosing loop header.
+  BasicBlock* loop_end_;     // end of the loop, if this block is a loop header.
+  int32_t loop_depth_;       // loop nesting, 0 is top-level
+
+  Control control_;          // Control at the end of the block.
+  Node* control_input_;      // Input value for control.
+  NodeVector nodes_;         // nodes of this block in forward order.
+
+  Successors successors_;
+  Predecessors predecessors_;
+  Id id_;
+
   DISALLOW_COPY_AND_ASSIGN(BasicBlock);
 };
 
-typedef GenericGraphVisit::NullNodeVisitor<BasicBlockData, BasicBlock>
-    NullBasicBlockVisitor;
+std::ostream& operator<<(std::ostream& os, const BasicBlock::Control& c);
+std::ostream& operator<<(std::ostream& os, const BasicBlock::Id& id);
+std::ostream& operator<<(std::ostream& os, const BasicBlock::RpoNumber& rpo);
 
 typedef ZoneVector<BasicBlock*> BasicBlockVector;
 typedef BasicBlockVector::iterator BasicBlockVectorIter;
@@ -153,154 +210,86 @@
 // and ordering them within basic blocks. Prior to computing a schedule,
 // a graph has no notion of control flow ordering other than that induced
 // by the graph's dependencies. A schedule is required to generate code.
-class Schedule : public GenericGraph<BasicBlock> {
+class Schedule FINAL : public ZoneObject {
  public:
-  explicit Schedule(Zone* zone)
-      : GenericGraph<BasicBlock>(zone),
-        zone_(zone),
-        all_blocks_(zone),
-        nodeid_to_block_(zone),
-        rpo_order_(zone) {
-    SetStart(NewBasicBlock());  // entry.
-    SetEnd(NewBasicBlock());    // exit.
-  }
+  explicit Schedule(Zone* zone, size_t node_count_hint = 0);
 
   // Return the block which contains {node}, if any.
-  BasicBlock* block(Node* node) const {
-    if (node->id() < static_cast<NodeId>(nodeid_to_block_.size())) {
-      return nodeid_to_block_[node->id()];
-    }
-    return NULL;
-  }
+  BasicBlock* block(Node* node) const;
 
-  bool IsScheduled(Node* node) {
-    int length = static_cast<int>(nodeid_to_block_.size());
-    if (node->id() >= length) return false;
-    return nodeid_to_block_[node->id()] != NULL;
-  }
+  bool IsScheduled(Node* node);
+  BasicBlock* GetBlockById(BasicBlock::Id block_id);
 
-  BasicBlock* GetBlockById(int block_id) { return all_blocks_[block_id]; }
-
-  int BasicBlockCount() const { return NodeCount(); }
-  int RpoBlockCount() const { return static_cast<int>(rpo_order_.size()); }
-
-  typedef ContainerPointerWrapper<BasicBlockVector> BasicBlocks;
-
-  // Return a list of all the blocks in the schedule, in arbitrary order.
-  BasicBlocks all_blocks() { return BasicBlocks(&all_blocks_); }
+  size_t BasicBlockCount() const { return all_blocks_.size(); }
+  size_t RpoBlockCount() const { return rpo_order_.size(); }
 
   // Check if nodes {a} and {b} are in the same block.
-  inline bool SameBasicBlock(Node* a, Node* b) const {
-    BasicBlock* block = this->block(a);
-    return block != NULL && block == this->block(b);
-  }
+  bool SameBasicBlock(Node* a, Node* b) const;
 
   // BasicBlock building: create a new block.
-  inline BasicBlock* NewBasicBlock() {
-    BasicBlock* block =
-        BasicBlock::New(this, 0, static_cast<BasicBlock**>(NULL));
-    all_blocks_.push_back(block);
-    return block;
-  }
+  BasicBlock* NewBasicBlock();
 
   // BasicBlock building: records that a node will later be added to a block but
   // doesn't actually add the node to the block.
-  inline void PlanNode(BasicBlock* block, Node* node) {
-    if (FLAG_trace_turbo_scheduler) {
-      PrintF("Planning #%d:%s for future add to B%d\n", node->id(),
-             node->op()->mnemonic(), block->id());
-    }
-    DCHECK(this->block(node) == NULL);
-    SetBlockForNode(block, node);
-  }
+  void PlanNode(BasicBlock* block, Node* node);
 
   // BasicBlock building: add a node to the end of the block.
-  inline void AddNode(BasicBlock* block, Node* node) {
-    if (FLAG_trace_turbo_scheduler) {
-      PrintF("Adding #%d:%s to B%d\n", node->id(), node->op()->mnemonic(),
-             block->id());
-    }
-    DCHECK(this->block(node) == NULL || this->block(node) == block);
-    block->nodes_.push_back(node);
-    SetBlockForNode(block, node);
-  }
+  void AddNode(BasicBlock* block, Node* node);
 
   // BasicBlock building: add a goto to the end of {block}.
-  void AddGoto(BasicBlock* block, BasicBlock* succ) {
-    DCHECK(block->control_ == BasicBlock::kNone);
-    block->control_ = BasicBlock::kGoto;
-    AddSuccessor(block, succ);
-  }
+  void AddGoto(BasicBlock* block, BasicBlock* succ);
 
   // BasicBlock building: add a branch at the end of {block}.
   void AddBranch(BasicBlock* block, Node* branch, BasicBlock* tblock,
-                 BasicBlock* fblock) {
-    DCHECK(block->control_ == BasicBlock::kNone);
-    DCHECK(branch->opcode() == IrOpcode::kBranch);
-    block->control_ = BasicBlock::kBranch;
-    AddSuccessor(block, tblock);
-    AddSuccessor(block, fblock);
-    SetControlInput(block, branch);
-    if (branch->opcode() == IrOpcode::kBranch) {
-      // TODO(titzer): require a Branch node here. (sloppy tests).
-      SetBlockForNode(block, branch);
-    }
-  }
+                 BasicBlock* fblock);
 
   // BasicBlock building: add a return at the end of {block}.
-  void AddReturn(BasicBlock* block, Node* input) {
-    DCHECK(block->control_ == BasicBlock::kNone);
-    block->control_ = BasicBlock::kReturn;
-    SetControlInput(block, input);
-    if (block != end()) AddSuccessor(block, end());
-    if (input->opcode() == IrOpcode::kReturn) {
-      // TODO(titzer): require a Return node here. (sloppy tests).
-      SetBlockForNode(block, input);
-    }
-  }
+  void AddReturn(BasicBlock* block, Node* input);
 
   // BasicBlock building: add a throw at the end of {block}.
-  void AddThrow(BasicBlock* block, Node* input) {
-    DCHECK(block->control_ == BasicBlock::kNone);
-    block->control_ = BasicBlock::kThrow;
-    SetControlInput(block, input);
-    if (block != end()) AddSuccessor(block, end());
-  }
+  void AddThrow(BasicBlock* block, Node* input);
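+
+  // Taken together, the building methods above let a simple diamond be
+  // assembled roughly as follows (illustrative sketch only; the local
+  // names are not part of this API):
+  //
+  //   BasicBlock* tblock = schedule->NewBasicBlock();
+  //   BasicBlock* fblock = schedule->NewBasicBlock();
+  //   schedule->AddBranch(entry, branch, tblock, fblock);
+  //   schedule->AddGoto(tblock, merge);
+  //   schedule->AddGoto(fblock, merge);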
 
-  friend class Scheduler;
-  friend class CodeGenerator;
+  // BasicBlock mutation: insert a branch into the end of {block}.
+  void InsertBranch(BasicBlock* block, BasicBlock* end, Node* branch,
+                    BasicBlock* tblock, BasicBlock* fblock);
 
-  void AddSuccessor(BasicBlock* block, BasicBlock* succ) {
-    succ->AppendInput(zone_, block);
+  // Exposed publicly for testing only.
+  void AddSuccessorForTesting(BasicBlock* block, BasicBlock* succ) {
+    return AddSuccessor(block, succ);
   }
 
   BasicBlockVector* rpo_order() { return &rpo_order_; }
+  const BasicBlockVector* rpo_order() const { return &rpo_order_; }
+
+  BasicBlock* start() { return start_; }
+  BasicBlock* end() { return end_; }
+
+  Zone* zone() const { return zone_; }
 
  private:
-  friend class ScheduleVisualizer;
+  friend class Scheduler;
+  friend class BasicBlockInstrumentor;
 
-  void SetControlInput(BasicBlock* block, Node* node) {
-    block->control_input_ = node;
-    SetBlockForNode(block, node);
-  }
+  void AddSuccessor(BasicBlock* block, BasicBlock* succ);
+  void MoveSuccessors(BasicBlock* from, BasicBlock* to);
 
-  void SetBlockForNode(BasicBlock* block, Node* node) {
-    int length = static_cast<int>(nodeid_to_block_.size());
-    if (node->id() >= length) {
-      nodeid_to_block_.resize(node->id() + 1);
-    }
-    nodeid_to_block_[node->id()] = block;
-  }
+  void SetControlInput(BasicBlock* block, Node* node);
+  void SetBlockForNode(BasicBlock* block, Node* node);
 
   Zone* zone_;
   BasicBlockVector all_blocks_;           // All basic blocks in the schedule.
   BasicBlockVector nodeid_to_block_;      // Map from node to containing block.
   BasicBlockVector rpo_order_;            // Reverse-post-order block list.
+  BasicBlock* start_;
+  BasicBlock* end_;
+
+  DISALLOW_COPY_AND_ASSIGN(Schedule);
 };
 
-OStream& operator<<(OStream& os, const Schedule& s);
-}
-}
-}  // namespace v8::internal::compiler
+std::ostream& operator<<(std::ostream& os, const Schedule& s);
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_COMPILER_SCHEDULE_H_
diff --git a/src/compiler/scheduler.cc b/src/compiler/scheduler.cc
index 4029950..f12c631 100644
--- a/src/compiler/scheduler.cc
+++ b/src/compiler/scheduler.cc
@@ -7,12 +7,13 @@
 
 #include "src/compiler/scheduler.h"
 
+#include "src/bit-vector.h"
+#include "src/compiler/control-equivalence.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/graph-inl.h"
 #include "src/compiler/node.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/node-properties-inl.h"
-#include "src/data-flow.h"
 
 namespace v8 {
 namespace internal {
@@ -28,30 +29,225 @@
 }
 
 
-// Internal class to build a control flow graph (i.e the basic blocks and edges
-// between them within a Schedule) from the node graph.
-// Visits the control edges of the graph backwards from end in order to find
-// the connected control subgraph, needed for scheduling.
-class CFGBuilder {
- public:
-  Scheduler* scheduler_;
-  Schedule* schedule_;
-  ZoneQueue<Node*> queue_;
-  NodeVector control_;
+Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
+    : zone_(zone),
+      graph_(graph),
+      schedule_(schedule),
+      scheduled_nodes_(zone),
+      schedule_root_nodes_(zone),
+      schedule_queue_(zone),
+      node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone) {}
 
+
+Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph) {
+  Schedule* schedule = new (graph->zone())
+      Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
+  Scheduler scheduler(zone, graph, schedule);
+
+  scheduler.BuildCFG();
+  scheduler.ComputeSpecialRPONumbering();
+  scheduler.GenerateImmediateDominatorTree();
+
+  scheduler.PrepareUses();
+  scheduler.ScheduleEarly();
+  scheduler.ScheduleLate();
+
+  scheduler.SealFinalSchedule();
+
+  return schedule;
+}
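+
+
+// Note on ordering: the phases above build the control-flow graph and its
+// special RPO plus dominator tree first, then count uses, then place nodes
+// (ScheduleEarly before ScheduleLate), and finally seal the schedule.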
+
+
+Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
+  SchedulerData def = {schedule_->start(), 0, kUnknown};
+  return def;
+}
+
+
+Scheduler::SchedulerData* Scheduler::GetData(Node* node) {
+  DCHECK(node->id() < static_cast<int>(node_data_.size()));
+  return &node_data_[node->id()];
+}
+
+
+Scheduler::Placement Scheduler::GetPlacement(Node* node) {
+  SchedulerData* data = GetData(node);
+  if (data->placement_ == kUnknown) {  // Compute placement, once, on demand.
+    switch (node->opcode()) {
+      case IrOpcode::kParameter:
+        // Parameters are always fixed to the start node.
+        data->placement_ = kFixed;
+        break;
+      case IrOpcode::kPhi:
+      case IrOpcode::kEffectPhi: {
+        // Phis and effect phis are fixed if their control inputs are;
+        // otherwise they are coupled to a floating control node.

+        Placement p = GetPlacement(NodeProperties::GetControlInput(node));
+        data->placement_ = (p == kFixed ? kFixed : kCoupled);
+        break;
+      }
+#define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
+      CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
+#undef DEFINE_CONTROL_CASE
+      {
+        // Control nodes that were not control-reachable from end may float.
+        data->placement_ = kSchedulable;
+        break;
+      }
+      default:
+        data->placement_ = kSchedulable;
+        break;
+    }
+  }
+  return data->placement_;
+}
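+
+
+// For orientation, the placement states above form a small lattice; the
+// transitions performed by GetPlacement and UpdatePlacement are roughly:
+//
+//   kUnknown ----+-----------------------------> kFixed
+//                |                                  ^
+//                +----> kCoupled -------------------+
+//                |
+//                +----> kSchedulable ------------> kScheduled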
+
+
+void Scheduler::UpdatePlacement(Node* node, Placement placement) {
+  SchedulerData* data = GetData(node);
+  if (data->placement_ != kUnknown) {  // Trap on mutation, not initialization.
+    switch (node->opcode()) {
+      case IrOpcode::kParameter:
+        // Parameters are fixed once and for all.
+        UNREACHABLE();
+        break;
+      case IrOpcode::kPhi:
+      case IrOpcode::kEffectPhi: {
+        // Phis and effect phis are coupled to their respective blocks.
+        DCHECK_EQ(Scheduler::kCoupled, data->placement_);
+        DCHECK_EQ(Scheduler::kFixed, placement);
+        Node* control = NodeProperties::GetControlInput(node);
+        BasicBlock* block = schedule_->block(control);
+        schedule_->AddNode(block, node);
+        break;
+      }
+#define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
+      CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
+#undef DEFINE_CONTROL_CASE
+      {
+        // Control nodes force coupled uses to be placed.
+        Node::Uses uses = node->uses();
+        for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) {
+          if (GetPlacement(*i) == Scheduler::kCoupled) {
+            DCHECK_EQ(node, NodeProperties::GetControlInput(*i));
+            UpdatePlacement(*i, placement);
+          }
+        }
+        break;
+      }
+      default:
+        DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
+        DCHECK_EQ(Scheduler::kScheduled, placement);
+        break;
+    }
+    // Reduce the use count of the node's inputs to potentially make them
+    // schedulable. If all the uses of a node have been scheduled, then the node
+    // itself can be scheduled.
+    for (Edge const edge : node->input_edges()) {
+      DecrementUnscheduledUseCount(edge.to(), edge.index(), edge.from());
+    }
+  }
+  data->placement_ = placement;
+}
+
+
+bool Scheduler::IsCoupledControlEdge(Node* node, int index) {
+  return GetPlacement(node) == kCoupled &&
+         NodeProperties::FirstControlIndex(node) == index;
+}
+
+
+void Scheduler::IncrementUnscheduledUseCount(Node* node, int index,
+                                             Node* from) {
+  // Make sure that control edges from coupled nodes are not counted.
+  if (IsCoupledControlEdge(from, index)) return;
+
+  // Tracking use counts for fixed nodes is useless.
+  if (GetPlacement(node) == kFixed) return;
+
+  // Use count for coupled nodes is summed up on their control.
+  if (GetPlacement(node) == kCoupled) {
+    Node* control = NodeProperties::GetControlInput(node);
+    return IncrementUnscheduledUseCount(control, index, from);
+  }
+
+  ++(GetData(node)->unscheduled_count_);
+  if (FLAG_trace_turbo_scheduler) {
+    Trace("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(),
+          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
+          GetData(node)->unscheduled_count_);
+  }
+}
+
+
+void Scheduler::DecrementUnscheduledUseCount(Node* node, int index,
+                                             Node* from) {
+  // Make sure that control edges from coupled nodes are not counted.
+  if (IsCoupledControlEdge(from, index)) return;
+
+  // Tracking use counts for fixed nodes is useless.
+  if (GetPlacement(node) == kFixed) return;
+
+  // Use count for coupled nodes is summed up on their control.
+  if (GetPlacement(node) == kCoupled) {
+    Node* control = NodeProperties::GetControlInput(node);
+    return DecrementUnscheduledUseCount(control, index, from);
+  }
+
+  DCHECK(GetData(node)->unscheduled_count_ > 0);
+  --(GetData(node)->unscheduled_count_);
+  if (FLAG_trace_turbo_scheduler) {
+    Trace("  Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
+          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
+          GetData(node)->unscheduled_count_);
+  }
+  if (GetData(node)->unscheduled_count_ == 0) {
+    Trace("    newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
+    schedule_queue_.push(node);
+  }
+}
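+
+
+// The pair of counters above implements a simple dependence count: a node
+// becomes eligible for late scheduling exactly when its last unscheduled
+// use goes away, at which point it is pushed onto schedule_queue_.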
+
+
+BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
+  while (b1 != b2) {
+    int32_t b1_depth = b1->dominator_depth();
+    int32_t b2_depth = b2->dominator_depth();
+    if (b1_depth < b2_depth) {
+      b2 = b2->dominator();
+    } else {
+      b1 = b1->dominator();
+    }
+  }
+  return b1;
+}
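+
+
+// For example, with b1 at dominator depth 5 and b2 at depth 3, the loop
+// above first walks b1 two levels up its dominator chain, then ascends
+// both chains alternately until the common dominator is reached.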
+
+
+// -----------------------------------------------------------------------------
+// Phase 1: Build control-flow graph.
+
+
+// Internal class to build a control flow graph (i.e. the basic blocks and
+// edges between them within a Schedule) from the node graph. Visits control
+// edges of the graph backwards from an end node in order to find the
+// connected control subgraph, needed for scheduling.
+class CFGBuilder : public ZoneObject {
+ public:
   CFGBuilder(Zone* zone, Scheduler* scheduler)
       : scheduler_(scheduler),
         schedule_(scheduler->schedule_),
+        queued_(scheduler->graph_, 2),
         queue_(zone),
-        control_(zone) {}
+        control_(zone),
+        component_entry_(NULL),
+        component_start_(NULL),
+        component_end_(NULL) {}
 
   // Run the control flow graph construction algorithm by walking the graph
   // backwards from end through control edges, building and connecting the
   // basic blocks for control nodes.
   void Run() {
-    Graph* graph = scheduler_->graph_;
-    FixNode(schedule_->start(), graph->start());
-    Queue(graph->end());
+    ResetDataStructures();
+    Queue(scheduler_->graph_->end());
 
     while (!queue_.empty()) {  // Breadth-first backwards traversal.
       Node* node = queue_.front();
@@ -65,33 +261,82 @@
     for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
       ConnectBlocks(*i);  // Connect block to its predecessor/successors.
     }
-
-    FixNode(schedule_->end(), graph->end());
   }
 
+  // Run the control flow graph construction for a minimal control-connected
+  // component ending in {exit} and merge that component into an existing
+  // control flow graph at the bottom of {block}.
+  void Run(BasicBlock* block, Node* exit) {
+    ResetDataStructures();
+    Queue(exit);
+
+    component_entry_ = NULL;
+    component_start_ = block;
+    component_end_ = schedule_->block(exit);
+    scheduler_->equivalence_->Run(exit);
+    while (!queue_.empty()) {  // Breadth-first backwards traversal.
+      Node* node = queue_.front();
+      queue_.pop();
+
+      // Use control dependence equivalence to find a canonical single-entry
+      // single-exit region that makes up a minimal component to be scheduled.
+      if (IsSingleEntrySingleExitRegion(node, exit)) {
+        Trace("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic());
+        DCHECK_EQ(NULL, component_entry_);
+        component_entry_ = node;
+        continue;
+      }
+
+      int max = NodeProperties::PastControlIndex(node);
+      for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
+        Queue(node->InputAt(i));
+      }
+    }
+    DCHECK_NE(NULL, component_entry_);
+
+    for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
+      ConnectBlocks(*i);  // Connect block to its predecessor/successors.
+    }
+  }
+
+ private:
+  // TODO(mstarzinger): Only for Scheduler::FuseFloatingControl.
+  friend class Scheduler;
+
   void FixNode(BasicBlock* block, Node* node) {
     schedule_->AddNode(block, node);
-    scheduler_->GetData(node)->is_connected_control_ = true;
-    scheduler_->GetData(node)->placement_ = Scheduler::kFixed;
+    scheduler_->UpdatePlacement(node, Scheduler::kFixed);
   }
 
   void Queue(Node* node) {
-    // Mark the connected control nodes as they queued.
-    Scheduler::SchedulerData* data = scheduler_->GetData(node);
-    if (!data->is_connected_control_) {
+    // Mark the connected control nodes as they are queued.
+    if (!queued_.Get(node)) {
       BuildBlocks(node);
       queue_.push(node);
+      queued_.Set(node, true);
       control_.push_back(node);
-      data->is_connected_control_ = true;
     }
   }
 
   void BuildBlocks(Node* node) {
     switch (node->opcode()) {
+      case IrOpcode::kEnd:
+        FixNode(schedule_->end(), node);
+        break;
+      case IrOpcode::kStart:
+        FixNode(schedule_->start(), node);
+        break;
       case IrOpcode::kLoop:
       case IrOpcode::kMerge:
         BuildBlockForNode(node);
         break;
+      case IrOpcode::kTerminate: {
+        // Put Terminate in the loop to which it refers.
+        Node* loop = NodeProperties::GetControlInput(node);
+        BasicBlock* block = BuildBlockForNode(loop);
+        FixNode(block, node);
+        break;
+      }
       case IrOpcode::kBranch:
         BuildBlocksForSuccessors(node, IrOpcode::kIfTrue, IrOpcode::kIfFalse);
         break;
@@ -107,11 +352,11 @@
         ConnectMerge(node);
         break;
       case IrOpcode::kBranch:
-        scheduler_->schedule_root_nodes_.push_back(node);
+        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
         ConnectBranch(node);
         break;
       case IrOpcode::kReturn:
-        scheduler_->schedule_root_nodes_.push_back(node);
+        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
         ConnectReturn(node);
         break;
       default:
@@ -119,13 +364,15 @@
     }
   }
 
-  void BuildBlockForNode(Node* node) {
-    if (schedule_->block(node) == NULL) {
-      BasicBlock* block = schedule_->NewBasicBlock();
-      Trace("Create block B%d for #%d:%s\n", block->id(), node->id(),
+  BasicBlock* BuildBlockForNode(Node* node) {
+    BasicBlock* block = schedule_->block(node);
+    if (block == NULL) {
+      block = schedule_->NewBasicBlock();
+      Trace("Create block B%d for #%d:%s\n", block->id().ToInt(), node->id(),
             node->op()->mnemonic());
       FixNode(block, node);
     }
+    return block;
   }
 
   void BuildBlocksForSuccessors(Node* node, IrOpcode::Value a,
@@ -144,14 +391,14 @@
                                    IrOpcode::Value false_opcode) {
     buffer[0] = NULL;
     buffer[1] = NULL;
-    for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
-      if ((*i)->opcode() == true_opcode) {
+    for (Node* use : node->uses()) {
+      if (use->opcode() == true_opcode) {
         DCHECK_EQ(NULL, buffer[0]);
-        buffer[0] = *i;
+        buffer[0] = use;
       }
-      if ((*i)->opcode() == false_opcode) {
+      if (use->opcode() == false_opcode) {
         DCHECK_EQ(NULL, buffer[1]);
-        buffer[1] = *i;
+        buffer[1] = use;
       }
     }
     DCHECK_NE(NULL, buffer[0]);
@@ -168,33 +415,51 @@
   }
 
   void ConnectBranch(Node* branch) {
-    Node* branch_block_node = NodeProperties::GetControlInput(branch);
-    BasicBlock* branch_block = schedule_->block(branch_block_node);
-    DCHECK(branch_block != NULL);
-
     BasicBlock* successor_blocks[2];
     CollectSuccessorBlocks(branch, successor_blocks, IrOpcode::kIfTrue,
                            IrOpcode::kIfFalse);
 
-    TraceConnect(branch, branch_block, successor_blocks[0]);
-    TraceConnect(branch, branch_block, successor_blocks[1]);
+    // Consider branch hints.
+    switch (BranchHintOf(branch->op())) {
+      case BranchHint::kNone:
+        break;
+      case BranchHint::kTrue:
+        successor_blocks[1]->set_deferred(true);
+        break;
+      case BranchHint::kFalse:
+        successor_blocks[0]->set_deferred(true);
+        break;
+    }
 
-    schedule_->AddBranch(branch_block, branch, successor_blocks[0],
-                         successor_blocks[1]);
+    if (branch == component_entry_) {
+      TraceConnect(branch, component_start_, successor_blocks[0]);
+      TraceConnect(branch, component_start_, successor_blocks[1]);
+      schedule_->InsertBranch(component_start_, component_end_, branch,
+                              successor_blocks[0], successor_blocks[1]);
+    } else {
+      Node* branch_block_node = NodeProperties::GetControlInput(branch);
+      BasicBlock* branch_block = schedule_->block(branch_block_node);
+      DCHECK(branch_block != NULL);
+
+      TraceConnect(branch, branch_block, successor_blocks[0]);
+      TraceConnect(branch, branch_block, successor_blocks[1]);
+      schedule_->AddBranch(branch_block, branch, successor_blocks[0],
+                           successor_blocks[1]);
+    }
   }
 
   void ConnectMerge(Node* merge) {
+    // Don't connect the special merge at the end to its predecessors.
+    if (IsFinalMerge(merge)) return;
+
     BasicBlock* block = schedule_->block(merge);
     DCHECK(block != NULL);
     // For all of the merge's control inputs, add a goto at the end to the
     // merge's basic block.
-    for (InputIter j = merge->inputs().begin(); j != merge->inputs().end();
-         ++j) {
-      BasicBlock* predecessor_block = schedule_->block(*j);
-      if ((*j)->opcode() != IrOpcode::kReturn) {
-        TraceConnect(merge, predecessor_block, block);
-        schedule_->AddGoto(predecessor_block, block);
-      }
+    for (Node* const input : merge->inputs()) {
+      BasicBlock* predecessor_block = schedule_->block(input);
+      TraceConnect(merge, predecessor_block, block);
+      schedule_->AddGoto(predecessor_block, block);
     }
   }
 
@@ -209,713 +474,59 @@
     DCHECK_NE(NULL, block);
     if (succ == NULL) {
       Trace("Connect #%d:%s, B%d -> end\n", node->id(), node->op()->mnemonic(),
-            block->id());
+            block->id().ToInt());
     } else {
       Trace("Connect #%d:%s, B%d -> B%d\n", node->id(), node->op()->mnemonic(),
-            block->id(), succ->id());
+            block->id().ToInt(), succ->id().ToInt());
     }
   }
+
+  bool IsFinalMerge(Node* node) {
+    return (node->opcode() == IrOpcode::kMerge &&
+            node == scheduler_->graph_->end()->InputAt(0));
+  }
+
+  bool IsSingleEntrySingleExitRegion(Node* entry, Node* exit) const {
+    size_t entry_class = scheduler_->equivalence_->ClassOf(entry);
+    size_t exit_class = scheduler_->equivalence_->ClassOf(exit);
+    return entry != exit && entry_class == exit_class;
+  }
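+
+  // For example, the branch opening a diamond and the merge closing it
+  // typically share an equivalence class, so everything between them forms
+  // one minimal schedulable component.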
+
+  void ResetDataStructures() {
+    control_.clear();
+    DCHECK(queue_.empty());
+    DCHECK(control_.empty());
+  }
+
+  Scheduler* scheduler_;
+  Schedule* schedule_;
+  NodeMarker<bool> queued_;      // Mark indicating whether a node is queued.
+  ZoneQueue<Node*> queue_;       // Queue used for breadth-first traversal.
+  NodeVector control_;           // List of encountered control nodes.
+  Node* component_entry_;        // Component single-entry node.
+  BasicBlock* component_start_;  // Component single-entry block.
+  BasicBlock* component_end_;    // Component single-exit block.
 };
 
 
-Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
-  SchedulerData def = {0, 0, false, false, kUnknown};
-  return def;
-}
-
-
-Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
-    : zone_(zone),
-      graph_(graph),
-      schedule_(schedule),
-      scheduled_nodes_(zone),
-      schedule_root_nodes_(zone),
-      node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone),
-      has_floating_control_(false) {}
-
-
-Schedule* Scheduler::ComputeSchedule(Graph* graph) {
-  Schedule* schedule;
-  bool had_floating_control = false;
-  do {
-    Zone tmp_zone(graph->zone()->isolate());
-    schedule = new (graph->zone()) Schedule(graph->zone());
-    Scheduler scheduler(&tmp_zone, graph, schedule);
-
-    scheduler.BuildCFG();
-
-    Scheduler::ComputeSpecialRPO(schedule);
-    scheduler.GenerateImmediateDominatorTree();
-
-    scheduler.PrepareUses();
-    scheduler.ScheduleEarly();
-    scheduler.ScheduleLate();
-
-    had_floating_control = scheduler.ConnectFloatingControl();
-  } while (had_floating_control);
-
-  return schedule;
-}
-
-
-Scheduler::Placement Scheduler::GetPlacement(Node* node) {
-  SchedulerData* data = GetData(node);
-  if (data->placement_ == kUnknown) {  // Compute placement, once, on demand.
-    switch (node->opcode()) {
-      case IrOpcode::kParameter:
-        // Parameters are always fixed to the start node.
-        data->placement_ = kFixed;
-        break;
-      case IrOpcode::kPhi:
-      case IrOpcode::kEffectPhi: {
-        // Phis and effect phis are fixed if their control inputs are.
-        data->placement_ = GetPlacement(NodeProperties::GetControlInput(node));
-        break;
-      }
-#define DEFINE_FLOATING_CONTROL_CASE(V) case IrOpcode::k##V:
-        CONTROL_OP_LIST(DEFINE_FLOATING_CONTROL_CASE)
-#undef DEFINE_FLOATING_CONTROL_CASE
-        {
-          // Control nodes that were not control-reachable from end may float.
-          data->placement_ = kSchedulable;
-          if (!data->is_connected_control_) {
-            data->is_floating_control_ = true;
-            has_floating_control_ = true;
-            Trace("Floating control found: #%d:%s\n", node->id(),
-                  node->op()->mnemonic());
-          }
-          break;
-        }
-      default:
-        data->placement_ = kSchedulable;
-        break;
-    }
-  }
-  return data->placement_;
-}
-
-
 void Scheduler::BuildCFG() {
-  Trace("---------------- CREATING CFG ------------------\n");
-  CFGBuilder cfg_builder(zone_, this);
-  cfg_builder.Run();
+  Trace("--- CREATING CFG -------------------------------------------\n");
+
+  // Instantiate a new control equivalence algorithm for the graph.
+  equivalence_ = new (zone_) ControlEquivalence(zone_, graph_);
+
+  // Build a control-flow graph for the main control-connected component
+  // spanned by the graph's start and end nodes.
+  control_flow_builder_ = new (zone_) CFGBuilder(zone_, this);
+  control_flow_builder_->Run();
+
   // Initialize per-block data.
   scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
 }
 
 
-BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
-  while (b1 != b2) {
-    int b1_rpo = GetRPONumber(b1);
-    int b2_rpo = GetRPONumber(b2);
-    DCHECK(b1_rpo != b2_rpo);
-    if (b1_rpo < b2_rpo) {
-      b2 = b2->dominator_;
-    } else {
-      b1 = b1->dominator_;
-    }
-  }
-  return b1;
-}
-
-
-void Scheduler::GenerateImmediateDominatorTree() {
-  // Build the dominator graph.  TODO(danno): consider using Lengauer & Tarjan's
-  // if this becomes really slow.
-  Trace("------------ IMMEDIATE BLOCK DOMINATORS -----------\n");
-  for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) {
-    BasicBlock* current_rpo = schedule_->rpo_order_[i];
-    if (current_rpo != schedule_->start()) {
-      BasicBlock::Predecessors::iterator current_pred =
-          current_rpo->predecessors().begin();
-      BasicBlock::Predecessors::iterator end =
-          current_rpo->predecessors().end();
-      DCHECK(current_pred != end);
-      BasicBlock* dominator = *current_pred;
-      ++current_pred;
-      // For multiple predecessors, walk up the rpo ordering until a common
-      // dominator is found.
-      int current_rpo_pos = GetRPONumber(current_rpo);
-      while (current_pred != end) {
-        // Don't examine backwards edges
-        BasicBlock* pred = *current_pred;
-        if (GetRPONumber(pred) < current_rpo_pos) {
-          dominator = GetCommonDominator(dominator, *current_pred);
-        }
-        ++current_pred;
-      }
-      current_rpo->dominator_ = dominator;
-      Trace("Block %d's idom is %d\n", current_rpo->id(), dominator->id());
-    }
-  }
-}
-
-
-class ScheduleEarlyNodeVisitor : public NullNodeVisitor {
- public:
-  explicit ScheduleEarlyNodeVisitor(Scheduler* scheduler)
-      : has_changed_rpo_constraints_(true),
-        scheduler_(scheduler),
-        schedule_(scheduler->schedule_) {}
-
-  GenericGraphVisit::Control Pre(Node* node) {
-    int max_rpo = 0;
-    // Fixed nodes already know their schedule early position.
-    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
-      BasicBlock* block = schedule_->block(node);
-      DCHECK(block != NULL);
-      max_rpo = block->rpo_number_;
-      if (scheduler_->GetData(node)->minimum_rpo_ != max_rpo) {
-        has_changed_rpo_constraints_ = true;
-      }
-      scheduler_->GetData(node)->minimum_rpo_ = max_rpo;
-      Trace("Preschedule #%d:%s minimum_rpo = %d\n", node->id(),
-            node->op()->mnemonic(), max_rpo);
-    }
-    return GenericGraphVisit::CONTINUE;
-  }
-
-  GenericGraphVisit::Control Post(Node* node) {
-    int max_rpo = 0;
-    // Otherwise, the minimum rpo for the node is the max of all of the inputs.
-    if (scheduler_->GetPlacement(node) != Scheduler::kFixed) {
-      for (InputIter i = node->inputs().begin(); i != node->inputs().end();
-           ++i) {
-        int control_rpo = scheduler_->GetData(*i)->minimum_rpo_;
-        if (control_rpo > max_rpo) {
-          max_rpo = control_rpo;
-        }
-      }
-      if (scheduler_->GetData(node)->minimum_rpo_ != max_rpo) {
-        has_changed_rpo_constraints_ = true;
-      }
-      scheduler_->GetData(node)->minimum_rpo_ = max_rpo;
-      Trace("Postschedule #%d:%s minimum_rpo = %d\n", node->id(),
-            node->op()->mnemonic(), max_rpo);
-    }
-    return GenericGraphVisit::CONTINUE;
-  }
-
-  // TODO(mstarzinger): Dirty hack to unblock others, schedule early should be
-  // rewritten to use a pre-order traversal from the start instead.
-  bool has_changed_rpo_constraints_;
-
- private:
-  Scheduler* scheduler_;
-  Schedule* schedule_;
-};
-
-
-void Scheduler::ScheduleEarly() {
-  Trace("------------------- SCHEDULE EARLY ----------------\n");
-
-  int fixpoint_count = 0;
-  ScheduleEarlyNodeVisitor visitor(this);
-  while (visitor.has_changed_rpo_constraints_) {
-    visitor.has_changed_rpo_constraints_ = false;
-    graph_->VisitNodeInputsFromEnd(&visitor);
-    fixpoint_count++;
-  }
-
-  Trace("It took %d iterations to determine fixpoint\n", fixpoint_count);
-}
-
-
-class PrepareUsesVisitor : public NullNodeVisitor {
- public:
-  explicit PrepareUsesVisitor(Scheduler* scheduler)
-      : scheduler_(scheduler), schedule_(scheduler->schedule_) {}
-
-  GenericGraphVisit::Control Pre(Node* node) {
-    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
-      // Fixed nodes are always roots for schedule late.
-      scheduler_->schedule_root_nodes_.push_back(node);
-      if (!schedule_->IsScheduled(node)) {
-        // Make sure root nodes are scheduled in their respective blocks.
-        Trace("  Scheduling fixed position node #%d:%s\n", node->id(),
-              node->op()->mnemonic());
-        IrOpcode::Value opcode = node->opcode();
-        BasicBlock* block =
-            opcode == IrOpcode::kParameter
-                ? schedule_->start()
-                : schedule_->block(NodeProperties::GetControlInput(node));
-        DCHECK(block != NULL);
-        schedule_->AddNode(block, node);
-      }
-    }
-
-    return GenericGraphVisit::CONTINUE;
-  }
-
-  void PostEdge(Node* from, int index, Node* to) {
-    // If the edge is from an unscheduled node, then tally it in the use count
-    // for all of its inputs. The same criterion will be used in ScheduleLate
-    // for decrementing use counts.
-    if (!schedule_->IsScheduled(from)) {
-      DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
-      ++(scheduler_->GetData(to)->unscheduled_count_);
-      Trace("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", to->id(),
-            to->op()->mnemonic(), from->id(), from->op()->mnemonic(),
-            scheduler_->GetData(to)->unscheduled_count_);
-    }
-  }
-
- private:
-  Scheduler* scheduler_;
-  Schedule* schedule_;
-};
-
-
-void Scheduler::PrepareUses() {
-  Trace("------------------- PREPARE USES ------------------\n");
-  // Count the uses of every node, it will be used to ensure that all of a
-  // node's uses are scheduled before the node itself.
-  PrepareUsesVisitor prepare_uses(this);
-  graph_->VisitNodeInputsFromEnd(&prepare_uses);
-}
-
-
-class ScheduleLateNodeVisitor : public NullNodeVisitor {
- public:
-  explicit ScheduleLateNodeVisitor(Scheduler* scheduler)
-      : scheduler_(scheduler), schedule_(scheduler_->schedule_) {}
-
-  GenericGraphVisit::Control Pre(Node* node) {
-    // Don't schedule nodes that are already scheduled.
-    if (schedule_->IsScheduled(node)) {
-      return GenericGraphVisit::CONTINUE;
-    }
-    Scheduler::SchedulerData* data = scheduler_->GetData(node);
-    DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
-
-    // If all the uses of a node have been scheduled, then the node itself can
-    // be scheduled.
-    bool eligible = data->unscheduled_count_ == 0;
-    Trace("Testing for schedule eligibility for #%d:%s = %s\n", node->id(),
-          node->op()->mnemonic(), eligible ? "true" : "false");
-    if (!eligible) return GenericGraphVisit::DEFER;
-
-    // Determine the dominating block for all of the uses of this node. It is
-    // the latest block that this node can be scheduled in.
-    BasicBlock* block = NULL;
-    for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end();
-         ++i) {
-      BasicBlock* use_block = GetBlockForUse(i.edge());
-      block = block == NULL ? use_block : use_block == NULL
-                                              ? block
-                                              : scheduler_->GetCommonDominator(
-                                                    block, use_block);
-    }
-    DCHECK(block != NULL);
-
-    int min_rpo = data->minimum_rpo_;
-    Trace(
-        "Schedule late conservative for #%d:%s is B%d at loop depth %d, "
-        "minimum_rpo = %d\n",
-        node->id(), node->op()->mnemonic(), block->id(), block->loop_depth_,
-        min_rpo);
-    // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
-    // into enclosing loop pre-headers until they would preceed their
-    // ScheduleEarly position.
-    BasicBlock* hoist_block = block;
-    while (hoist_block != NULL && hoist_block->rpo_number_ >= min_rpo) {
-      if (hoist_block->loop_depth_ < block->loop_depth_) {
-        block = hoist_block;
-        Trace("  hoisting #%d:%s to block %d\n", node->id(),
-              node->op()->mnemonic(), block->id());
-      }
-      // Try to hoist to the pre-header of the loop header.
-      hoist_block = hoist_block->loop_header();
-      if (hoist_block != NULL) {
-        BasicBlock* pre_header = hoist_block->dominator_;
-        DCHECK(pre_header == NULL ||
-               *hoist_block->predecessors().begin() == pre_header);
-        Trace(
-            "  hoist to pre-header B%d of loop header B%d, depth would be %d\n",
-            pre_header->id(), hoist_block->id(), pre_header->loop_depth_);
-        hoist_block = pre_header;
-      }
-    }
-
-    ScheduleNode(block, node);
-
-    return GenericGraphVisit::CONTINUE;
-  }
-
- private:
-  BasicBlock* GetBlockForUse(Node::Edge edge) {
-    Node* use = edge.from();
-    IrOpcode::Value opcode = use->opcode();
-    if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
-      // If the use is from a fixed (i.e. non-floating) phi, use the block
-      // of the corresponding control input to the merge.
-      int index = edge.index();
-      if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
-        Trace("  input@%d into a fixed phi #%d:%s\n", index, use->id(),
-              use->op()->mnemonic());
-        Node* merge = NodeProperties::GetControlInput(use, 0);
-        opcode = merge->opcode();
-        DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
-        use = NodeProperties::GetControlInput(merge, index);
-      }
-    }
-    BasicBlock* result = schedule_->block(use);
-    if (result == NULL) return NULL;
-    Trace("  must dominate use #%d:%s in B%d\n", use->id(),
-          use->op()->mnemonic(), result->id());
-    return result;
-  }
-
-  void ScheduleNode(BasicBlock* block, Node* node) {
-    schedule_->PlanNode(block, node);
-    scheduler_->scheduled_nodes_[block->id()].push_back(node);
-
-    // Reduce the use count of the node's inputs to potentially make them
-    // schedulable.
-    for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
-      Scheduler::SchedulerData* data = scheduler_->GetData(*i);
-      DCHECK(data->unscheduled_count_ > 0);
-      --data->unscheduled_count_;
-      if (FLAG_trace_turbo_scheduler) {
-        Trace("  Use count for #%d:%s (used by #%d:%s)-- = %d\n", (*i)->id(),
-              (*i)->op()->mnemonic(), i.edge().from()->id(),
-              i.edge().from()->op()->mnemonic(), data->unscheduled_count_);
-        if (data->unscheduled_count_ == 0) {
-          Trace("  newly eligible #%d:%s\n", (*i)->id(),
-                (*i)->op()->mnemonic());
-        }
-      }
-    }
-  }
-
-  Scheduler* scheduler_;
-  Schedule* schedule_;
-};
-
-
-void Scheduler::ScheduleLate() {
-  Trace("------------------- SCHEDULE LATE -----------------\n");
-  if (FLAG_trace_turbo_scheduler) {
-    Trace("roots: ");
-    for (NodeVectorIter i = schedule_root_nodes_.begin();
-         i != schedule_root_nodes_.end(); ++i) {
-      Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic());
-    }
-    Trace("\n");
-  }
-
-  // Schedule: Places nodes in dominator block of all their uses.
-  ScheduleLateNodeVisitor schedule_late_visitor(this);
-
-  {
-    Zone zone(zone_->isolate());
-    GenericGraphVisit::Visit<ScheduleLateNodeVisitor,
-                             NodeInputIterationTraits<Node> >(
-        graph_, &zone, schedule_root_nodes_.begin(), schedule_root_nodes_.end(),
-        &schedule_late_visitor);
-  }
-
-  // Add collected nodes for basic blocks to their blocks in the right order.
-  int block_num = 0;
-  for (NodeVectorVectorIter i = scheduled_nodes_.begin();
-       i != scheduled_nodes_.end(); ++i) {
-    for (NodeVectorRIter j = i->rbegin(); j != i->rend(); ++j) {
-      schedule_->AddNode(schedule_->all_blocks_.at(block_num), *j);
-    }
-    block_num++;
-  }
-}
-
-
-bool Scheduler::ConnectFloatingControl() {
-  if (!has_floating_control_) return false;
-
-  Trace("Connecting floating control...\n");
-
-  // Process blocks and instructions backwards to find and connect floating
-  // control nodes into the control graph according to the block they were
-  // scheduled into.
-  int max = static_cast<int>(schedule_->rpo_order()->size());
-  for (int i = max - 1; i >= 0; i--) {
-    BasicBlock* block = schedule_->rpo_order()->at(i);
-    // TODO(titzer): we place at most one floating control structure per
-    // basic block because scheduling currently can interleave phis from
-    // one subgraph with the merges from another subgraph.
-    bool one_placed = false;
-    for (int j = static_cast<int>(block->nodes_.size()) - 1; j >= 0; j--) {
-      Node* node = block->nodes_[j];
-      SchedulerData* data = GetData(node);
-      if (data->is_floating_control_ && !data->is_connected_control_ &&
-          !one_placed) {
-        Trace("  Floating control #%d:%s was scheduled in B%d\n", node->id(),
-              node->op()->mnemonic(), block->id());
-        ConnectFloatingControlSubgraph(block, node);
-        one_placed = true;
-      }
-    }
-  }
-
-  return true;
-}
-
-
-void Scheduler::ConnectFloatingControlSubgraph(BasicBlock* block, Node* end) {
-  Node* block_start = block->nodes_[0];
-  DCHECK(IrOpcode::IsControlOpcode(block_start->opcode()));
-  // Find the current "control successor" of the node that starts the block
-  // by searching the control uses for a control input edge from a connected
-  // control node.
-  Node* control_succ = NULL;
-  for (UseIter i = block_start->uses().begin(); i != block_start->uses().end();
-       ++i) {
-    Node::Edge edge = i.edge();
-    if (NodeProperties::IsControlEdge(edge) &&
-        GetData(edge.from())->is_connected_control_) {
-      DCHECK_EQ(NULL, control_succ);
-      control_succ = edge.from();
-      control_succ->ReplaceInput(edge.index(), end);
-    }
-  }
-  DCHECK_NE(NULL, control_succ);
-  Trace("  Inserting floating control end %d:%s between %d:%s -> %d:%s\n",
-        end->id(), end->op()->mnemonic(), control_succ->id(),
-        control_succ->op()->mnemonic(), block_start->id(),
-        block_start->op()->mnemonic());
-
-  // Find the "start" node of the control subgraph, which should be the
-  // unique node that is itself floating control but has a control input that
-  // is not floating.
-  Node* start = NULL;
-  ZoneQueue<Node*> queue(zone_);
-  queue.push(end);
-  GetData(end)->is_connected_control_ = true;
-  while (!queue.empty()) {
-    Node* node = queue.front();
-    queue.pop();
-    Trace("  Search #%d:%s for control subgraph start\n", node->id(),
-          node->op()->mnemonic());
-    int max = NodeProperties::PastControlIndex(node);
-    for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
-      Node* input = node->InputAt(i);
-      SchedulerData* data = GetData(input);
-      if (data->is_floating_control_) {
-        // {input} is floating control.
-        if (!data->is_connected_control_) {
-          // First time seeing {input} during this traversal, queue it.
-          queue.push(input);
-          data->is_connected_control_ = true;
-        }
-      } else {
-        // Otherwise, {node} is the start node, because it is floating control
-        // but is connected to {input} that is not floating control.
-        DCHECK_EQ(NULL, start);  // There can be only one.
-        start = node;
-      }
-    }
-  }
-
-  DCHECK_NE(NULL, start);
-  start->ReplaceInput(NodeProperties::FirstControlIndex(start), block_start);
-
-  Trace("  Connecting floating control start %d:%s to %d:%s\n", start->id(),
-        start->op()->mnemonic(), block_start->id(),
-        block_start->op()->mnemonic());
-}
-
-
-// Numbering for BasicBlockData.rpo_number_ for this block traversal:
-static const int kBlockOnStack = -2;
-static const int kBlockVisited1 = -3;
-static const int kBlockVisited2 = -4;
-static const int kBlockUnvisited1 = -1;
-static const int kBlockUnvisited2 = kBlockVisited1;
-
-struct SpecialRPOStackFrame {
-  BasicBlock* block;
-  int index;
-};
-
-struct BlockList {
-  BasicBlock* block;
-  BlockList* next;
-
-  BlockList* Add(Zone* zone, BasicBlock* b) {
-    BlockList* list = static_cast<BlockList*>(zone->New(sizeof(BlockList)));
-    list->block = b;
-    list->next = this;
-    return list;
-  }
-
-  void Serialize(BasicBlockVector* final_order) {
-    for (BlockList* l = this; l != NULL; l = l->next) {
-      l->block->rpo_number_ = static_cast<int>(final_order->size());
-      final_order->push_back(l->block);
-    }
-  }
-};
-
-struct LoopInfo {
-  BasicBlock* header;
-  ZoneList<BasicBlock*>* outgoing;
-  BitVector* members;
-  LoopInfo* prev;
-  BlockList* end;
-  BlockList* start;
-
-  void AddOutgoing(Zone* zone, BasicBlock* block) {
-    if (outgoing == NULL) outgoing = new (zone) ZoneList<BasicBlock*>(2, zone);
-    outgoing->Add(block, zone);
-  }
-};
-
-
-static int Push(SpecialRPOStackFrame* stack, int depth, BasicBlock* child,
-                int unvisited) {
-  if (child->rpo_number_ == unvisited) {
-    stack[depth].block = child;
-    stack[depth].index = 0;
-    child->rpo_number_ = kBlockOnStack;
-    return depth + 1;
-  }
-  return depth;
-}
-
-
-// Computes loop membership from the backedges of the control flow graph.
-static LoopInfo* ComputeLoopInfo(
-    Zone* zone, SpecialRPOStackFrame* queue, int num_loops, int num_blocks,
-    ZoneList<std::pair<BasicBlock*, int> >* backedges) {
-  LoopInfo* loops = zone->NewArray<LoopInfo>(num_loops);
-  memset(loops, 0, num_loops * sizeof(LoopInfo));
-
-  // Compute loop membership starting from backedges.
-  // O(max(loop_depth) * max(|loop|)
-  for (int i = 0; i < backedges->length(); i++) {
-    BasicBlock* member = backedges->at(i).first;
-    BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
-    int loop_num = header->loop_end_;
-    if (loops[loop_num].header == NULL) {
-      loops[loop_num].header = header;
-      loops[loop_num].members = new (zone) BitVector(num_blocks, zone);
-    }
-
-    int queue_length = 0;
-    if (member != header) {
-      // As long as the header doesn't have a backedge to itself,
-      // Push the member onto the queue and process its predecessors.
-      if (!loops[loop_num].members->Contains(member->id())) {
-        loops[loop_num].members->Add(member->id());
-      }
-      queue[queue_length++].block = member;
-    }
-
-    // Propagate loop membership backwards. All predecessors of M up to the
-    // loop header H are members of the loop too. O(|blocks between M and H|).
-    while (queue_length > 0) {
-      BasicBlock* block = queue[--queue_length].block;
-      for (int i = 0; i < block->PredecessorCount(); i++) {
-        BasicBlock* pred = block->PredecessorAt(i);
-        if (pred != header) {
-          if (!loops[loop_num].members->Contains(pred->id())) {
-            loops[loop_num].members->Add(pred->id());
-            queue[queue_length++].block = pred;
-          }
-        }
-      }
-    }
-  }
-  return loops;
-}
-
-
-#if DEBUG
-static void PrintRPO(int num_loops, LoopInfo* loops, BasicBlockVector* order) {
-  PrintF("-- RPO with %d loops ", num_loops);
-  if (num_loops > 0) {
-    PrintF("(");
-    for (int i = 0; i < num_loops; i++) {
-      if (i > 0) PrintF(" ");
-      PrintF("B%d", loops[i].header->id());
-    }
-    PrintF(") ");
-  }
-  PrintF("-- \n");
-
-  for (int i = 0; i < static_cast<int>(order->size()); i++) {
-    BasicBlock* block = (*order)[i];
-    int bid = block->id();
-    PrintF("%5d:", i);
-    for (int i = 0; i < num_loops; i++) {
-      bool membership = loops[i].members->Contains(bid);
-      bool range = loops[i].header->LoopContains(block);
-      PrintF(membership ? " |" : "  ");
-      PrintF(range ? "x" : " ");
-    }
-    PrintF("  B%d: ", bid);
-    if (block->loop_end_ >= 0) {
-      PrintF(" range: [%d, %d)", block->rpo_number_, block->loop_end_);
-    }
-    PrintF("\n");
-  }
-}
-
-
-static void VerifySpecialRPO(int num_loops, LoopInfo* loops,
-                             BasicBlockVector* order) {
-  DCHECK(order->size() > 0);
-  DCHECK((*order)[0]->id() == 0);  // entry should be first.
-
-  for (int i = 0; i < num_loops; i++) {
-    LoopInfo* loop = &loops[i];
-    BasicBlock* header = loop->header;
-
-    DCHECK(header != NULL);
-    DCHECK(header->rpo_number_ >= 0);
-    DCHECK(header->rpo_number_ < static_cast<int>(order->size()));
-    DCHECK(header->loop_end_ >= 0);
-    DCHECK(header->loop_end_ <= static_cast<int>(order->size()));
-    DCHECK(header->loop_end_ > header->rpo_number_);
-
-    // Verify the start ... end list relationship.
-    int links = 0;
-    BlockList* l = loop->start;
-    DCHECK(l != NULL && l->block == header);
-    bool end_found;
-    while (true) {
-      if (l == NULL || l == loop->end) {
-        end_found = (loop->end == l);
-        break;
-      }
-      // The list should be in same order as the final result.
-      DCHECK(l->block->rpo_number_ == links + loop->header->rpo_number_);
-      links++;
-      l = l->next;
-      DCHECK(links < static_cast<int>(2 * order->size()));  // cycle?
-    }
-    DCHECK(links > 0);
-    DCHECK(links == (header->loop_end_ - header->rpo_number_));
-    DCHECK(end_found);
-
-    // Check the contiguousness of loops.
-    int count = 0;
-    for (int j = 0; j < static_cast<int>(order->size()); j++) {
-      BasicBlock* block = order->at(j);
-      DCHECK(block->rpo_number_ == j);
-      if (j < header->rpo_number_ || j >= header->loop_end_) {
-        DCHECK(!loop->members->Contains(block->id()));
-      } else {
-        if (block == header) {
-          DCHECK(!loop->members->Contains(block->id()));
-        } else {
-          DCHECK(loop->members->Contains(block->id()));
-        }
-        count++;
-      }
-    }
-    DCHECK(links == count);
-  }
-}
-#endif  // DEBUG
+// -----------------------------------------------------------------------------
+// Phase 2: Compute special RPO and dominator tree.
 
 
 // Compute the special reverse-post-order block ordering, which is essentially
@@ -928,198 +539,945 @@
 //    headed at A.
 // 2. All loops are contiguous in the order (i.e. no intervening blocks that
 //    do not belong to the loop.)
-// Note a simple RPO traversal satisfies (1) but not (3).
-BasicBlockVector* Scheduler::ComputeSpecialRPO(Schedule* schedule) {
-  Zone tmp_zone(schedule->zone()->isolate());
-  Zone* zone = &tmp_zone;
-  Trace("------------- COMPUTING SPECIAL RPO ---------------\n");
-  // RPO should not have been computed for this schedule yet.
-  CHECK_EQ(kBlockUnvisited1, schedule->start()->rpo_number_);
-  CHECK_EQ(0, static_cast<int>(schedule->rpo_order_.size()));
+// Note a simple RPO traversal satisfies (1) but not (2).
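+//
+// For example (an illustrative CFG, not taken from the sources): with blocks
+//   A -> B, B -> {C, E}, C -> B (backedge closing the loop {B, C})
+// a plain RPO may yield [A, B, E, C], interleaving E with the loop body,
+// whereas the special RPO keeps the loop contiguous: [A, B, C, E].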
+class SpecialRPONumberer : public ZoneObject {
+ public:
+  SpecialRPONumberer(Zone* zone, Schedule* schedule)
+      : zone_(zone),
+        schedule_(schedule),
+        order_(NULL),
+        beyond_end_(NULL),
+        loops_(zone),
+        backedges_(zone),
+        stack_(zone),
+        previous_block_count_(0) {}
 
-  // Perform an iterative RPO traversal using an explicit stack,
-  // recording backedges that form cycles. O(|B|).
-  ZoneList<std::pair<BasicBlock*, int> > backedges(1, zone);
-  SpecialRPOStackFrame* stack =
-      zone->NewArray<SpecialRPOStackFrame>(schedule->BasicBlockCount());
-  BasicBlock* entry = schedule->start();
-  BlockList* order = NULL;
-  int stack_depth = Push(stack, 0, entry, kBlockUnvisited1);
-  int num_loops = 0;
-
-  while (stack_depth > 0) {
-    int current = stack_depth - 1;
-    SpecialRPOStackFrame* frame = stack + current;
-
-    if (frame->index < frame->block->SuccessorCount()) {
-      // Process the next successor.
-      BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
-      if (succ->rpo_number_ == kBlockVisited1) continue;
-      if (succ->rpo_number_ == kBlockOnStack) {
-        // The successor is on the stack, so this is a backedge (cycle).
-        backedges.Add(
-            std::pair<BasicBlock*, int>(frame->block, frame->index - 1), zone);
-        if (succ->loop_end_ < 0) {
-          // Assign a new loop number to the header if it doesn't have one.
-          succ->loop_end_ = num_loops++;
-        }
-      } else {
-        // Push the successor onto the stack.
-        DCHECK(succ->rpo_number_ == kBlockUnvisited1);
-        stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited1);
-      }
-    } else {
-      // Finished with all successors; pop the stack and add the block.
-      order = order->Add(zone, frame->block);
-      frame->block->rpo_number_ = kBlockVisited1;
-      stack_depth--;
-    }
+  // Computes the special reverse-post-order for the main control flow graph,
+  // that is for the graph spanned between the schedule's start and end blocks.
+  void ComputeSpecialRPO() {
+    DCHECK(schedule_->end()->SuccessorCount() == 0);
+    DCHECK_EQ(NULL, order_);  // Main order does not exist yet.
+    ComputeAndInsertSpecialRPO(schedule_->start(), schedule_->end());
   }
 
-  // If no loops were encountered, then the order we computed was correct.
-  LoopInfo* loops = NULL;
-  if (num_loops != 0) {
-    // Otherwise, compute the loop information from the backedges in order
-    // to perform a traversal that groups loop bodies together.
-    loops = ComputeLoopInfo(zone, stack, num_loops, schedule->BasicBlockCount(),
-                            &backedges);
+  // Computes the special reverse-post-order for a partial control flow graph,
+  // that is for the graph spanned between the given {entry} and {end} blocks,
+  // then updates the existing ordering with this new information.
+  void UpdateSpecialRPO(BasicBlock* entry, BasicBlock* end) {
+    DCHECK_NE(NULL, order_);  // Main order to be updated is present.
+    ComputeAndInsertSpecialRPO(entry, end);
+  }
 
-    // Initialize the "loop stack". Note the entry could be a loop header.
-    LoopInfo* loop = entry->IsLoopHeader() ? &loops[entry->loop_end_] : NULL;
-    order = NULL;
+  // Serialize the previously computed order as a special reverse-post-order
+  // numbering for basic blocks into the final schedule.
+  void SerializeRPOIntoSchedule() {
+    int32_t number = 0;
+    for (BasicBlock* b = order_; b != NULL; b = b->rpo_next()) {
+      b->set_rpo_number(number++);
+      schedule_->rpo_order()->push_back(b);
+    }
+    BeyondEndSentinel()->set_rpo_number(number);
+  }
 
-    // Perform an iterative post-order traversal, visiting loop bodies before
-    // edges that lead out of loops. Visits each block once, but linking loop
-    // sections together is linear in the loop size, so overall is
-    // O(|B| + max(loop_depth) * max(|loop|))
-    stack_depth = Push(stack, 0, entry, kBlockUnvisited2);
-    while (stack_depth > 0) {
-      SpecialRPOStackFrame* frame = stack + (stack_depth - 1);
-      BasicBlock* block = frame->block;
-      BasicBlock* succ = NULL;
+  // Print and verify the special reverse-post-order.
+  void PrintAndVerifySpecialRPO() {
+#if DEBUG
+    if (FLAG_trace_turbo_scheduler) PrintRPO();
+    VerifySpecialRPO();
+#endif
+  }
 
-      if (frame->index < block->SuccessorCount()) {
-        // Process the next normal successor.
-        succ = block->SuccessorAt(frame->index++);
-      } else if (block->IsLoopHeader()) {
-        // Process additional outgoing edges from the loop header.
-        if (block->rpo_number_ == kBlockOnStack) {
-          // Finish the loop body the first time the header is left on the
-          // stack.
-          DCHECK(loop != NULL && loop->header == block);
-          loop->start = order->Add(zone, block);
-          order = loop->end;
-          block->rpo_number_ = kBlockVisited2;
-          // Pop the loop stack and continue visiting outgoing edges within the
-          // the context of the outer loop, if any.
-          loop = loop->prev;
-          // We leave the loop header on the stack; the rest of this iteration
-          // and later iterations will go through its outgoing edges list.
-        }
+ private:
+  typedef std::pair<BasicBlock*, size_t> Backedge;
 
-        // Use the next outgoing edge if there are any.
-        int outgoing_index = frame->index - block->SuccessorCount();
-        LoopInfo* info = &loops[block->loop_end_];
-        DCHECK(loop != info);
-        if (info->outgoing != NULL &&
-            outgoing_index < info->outgoing->length()) {
-          succ = info->outgoing->at(outgoing_index);
-          frame->index++;
-        }
+  // Numbering for BasicBlock::rpo_number for this block traversal:
+  static const int kBlockOnStack = -2;
+  static const int kBlockVisited1 = -3;
+  static const int kBlockVisited2 = -4;
+  static const int kBlockUnvisited1 = -1;
+  static const int kBlockUnvisited2 = kBlockVisited1;
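+  // Note that kBlockUnvisited2 aliases kBlockVisited1 on purpose: after the
+  // first traversal every reachable block is marked kBlockVisited1, which the
+  // second traversal then reinterprets as "unvisited", avoiding a reset pass.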
+
+  struct SpecialRPOStackFrame {
+    BasicBlock* block;
+    size_t index;
+  };
+
+  struct LoopInfo {
+    BasicBlock* header;
+    ZoneList<BasicBlock*>* outgoing;
+    BitVector* members;
+    LoopInfo* prev;
+    BasicBlock* end;
+    BasicBlock* start;
+
+    void AddOutgoing(Zone* zone, BasicBlock* block) {
+      if (outgoing == NULL) {
+        outgoing = new (zone) ZoneList<BasicBlock*>(2, zone);
       }
+      outgoing->Add(block, zone);
+    }
+  };
 
-      if (succ != NULL) {
+  int Push(ZoneVector<SpecialRPOStackFrame>& stack, int depth,
+           BasicBlock* child, int unvisited) {
+    if (child->rpo_number() == unvisited) {
+      stack[depth].block = child;
+      stack[depth].index = 0;
+      child->set_rpo_number(kBlockOnStack);
+      return depth + 1;
+    }
+    return depth;
+  }
+
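+  // The block order is threaded through the blocks themselves as an
+  // intrusive, singly linked list via BasicBlock::rpo_next(): PushFront()
+  // prepends a block in O(1) as the post-order traversal pops it.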
+  BasicBlock* PushFront(BasicBlock* head, BasicBlock* block) {
+    block->set_rpo_next(head);
+    return block;
+  }
+
+  static int GetLoopNumber(BasicBlock* block) { return block->loop_number(); }
+  static void SetLoopNumber(BasicBlock* block, int loop_number) {
+    return block->set_loop_number(loop_number);
+  }
+  static bool HasLoopNumber(BasicBlock* block) {
+    return block->loop_number() >= 0;
+  }
+
+  // TODO(mstarzinger): We only need this special sentinel because some tests
+  // use the schedule's end block in actual control flow (e.g. with end having
+  // successors). Once this has been cleaned up we can use the end block here.
+  BasicBlock* BeyondEndSentinel() {
+    if (beyond_end_ == NULL) {
+      BasicBlock::Id id = BasicBlock::Id::FromInt(-1);
+      beyond_end_ = new (schedule_->zone()) BasicBlock(schedule_->zone(), id);
+    }
+    return beyond_end_;
+  }
+
+  // Compute special RPO for the control flow graph between {entry} and {end},
+  // mutating any existing order so that the result is still valid.
+  void ComputeAndInsertSpecialRPO(BasicBlock* entry, BasicBlock* end) {
+    // RPO should not have been serialized for this schedule yet.
+    CHECK_EQ(kBlockUnvisited1, schedule_->start()->loop_number());
+    CHECK_EQ(kBlockUnvisited1, schedule_->start()->rpo_number());
+    CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size()));
+
+    // Find correct insertion point within existing order.
+    BasicBlock* insertion_point = entry->rpo_next();
+    BasicBlock* order = insertion_point;
+
+    // Perform an iterative RPO traversal using an explicit stack,
+    // recording backedges that form cycles. O(|B|).
+    DCHECK_LT(previous_block_count_, schedule_->BasicBlockCount());
+    stack_.resize(schedule_->BasicBlockCount() - previous_block_count_);
+    previous_block_count_ = schedule_->BasicBlockCount();
+    int stack_depth = Push(stack_, 0, entry, kBlockUnvisited1);
+    int num_loops = static_cast<int>(loops_.size());
+
+    while (stack_depth > 0) {
+      int current = stack_depth - 1;
+      SpecialRPOStackFrame* frame = &stack_[current];
+
+      if (frame->block != end &&
+          frame->index < frame->block->SuccessorCount()) {
         // Process the next successor.
-        if (succ->rpo_number_ == kBlockOnStack) continue;
-        if (succ->rpo_number_ == kBlockVisited2) continue;
-        DCHECK(succ->rpo_number_ == kBlockUnvisited2);
-        if (loop != NULL && !loop->members->Contains(succ->id())) {
-          // The successor is not in the current loop or any nested loop.
-          // Add it to the outgoing edges of this loop and visit it later.
-          loop->AddOutgoing(zone, succ);
+        BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
+        if (succ->rpo_number() == kBlockVisited1) continue;
+        if (succ->rpo_number() == kBlockOnStack) {
+          // The successor is on the stack, so this is a backedge (cycle).
+          backedges_.push_back(Backedge(frame->block, frame->index - 1));
+          if (!HasLoopNumber(succ)) {
+            // Assign a new loop number to the header if it doesn't have one.
+            SetLoopNumber(succ, num_loops++);
+          }
         } else {
           // Push the successor onto the stack.
-          stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited2);
-          if (succ->IsLoopHeader()) {
-            // Push the inner loop onto the loop stack.
-            DCHECK(succ->loop_end_ >= 0 && succ->loop_end_ < num_loops);
-            LoopInfo* next = &loops[succ->loop_end_];
-            next->end = order;
-            next->prev = loop;
-            loop = next;
-          }
+          DCHECK(succ->rpo_number() == kBlockUnvisited1);
+          stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited1);
         }
       } else {
-        // Finished with all successors of the current block.
-        if (block->IsLoopHeader()) {
-          // If we are going to pop a loop header, then add its entire body.
-          LoopInfo* info = &loops[block->loop_end_];
-          for (BlockList* l = info->start; true; l = l->next) {
-            if (l->next == info->end) {
-              l->next = order;
-              info->end = order;
-              break;
-            }
-          }
-          order = info->start;
-        } else {
-          // Pop a single node off the stack and add it to the order.
-          order = order->Add(zone, block);
-          block->rpo_number_ = kBlockVisited2;
-        }
+        // Finished with all successors; pop the stack and add the block.
+        order = PushFront(order, frame->block);
+        frame->block->set_rpo_number(kBlockVisited1);
         stack_depth--;
       }
     }
-  }
 
-  // Construct the final order from the list.
-  BasicBlockVector* final_order = &schedule->rpo_order_;
-  order->Serialize(final_order);
+    // If no loops were encountered, then the order we computed was correct.
+    if (num_loops > static_cast<int>(loops_.size())) {
+      // Otherwise, compute the loop information from the backedges in order
+      // to perform a traversal that groups loop bodies together.
+      ComputeLoopInfo(stack_, num_loops, &backedges_);
 
-  // Compute the correct loop header for every block and set the correct loop
-  // ends.
-  LoopInfo* current_loop = NULL;
-  BasicBlock* current_header = NULL;
-  int loop_depth = 0;
-  for (BasicBlockVectorIter i = final_order->begin(); i != final_order->end();
-       ++i) {
-    BasicBlock* current = *i;
-    current->loop_header_ = current_header;
-    if (current->IsLoopHeader()) {
-      loop_depth++;
-      current_loop = &loops[current->loop_end_];
-      BlockList* end = current_loop->end;
-      current->loop_end_ = end == NULL ? static_cast<int>(final_order->size())
-                                       : end->block->rpo_number_;
-      current_header = current_loop->header;
-      Trace("B%d is a loop header, increment loop depth to %d\n", current->id(),
-            loop_depth);
-    } else {
-      while (current_header != NULL &&
-             current->rpo_number_ >= current_header->loop_end_) {
+      // Initialize the "loop stack". Note the entry could be a loop header.
+      LoopInfo* loop =
+          HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : NULL;
+      order = insertion_point;
+
+      // Perform an iterative post-order traversal, visiting loop bodies before
+      // edges that lead out of loops. Visits each block once, but linking loop
+      // sections together is linear in the loop size, so overall is
+      // O(|B| + max(loop_depth) * max(|loop|))
+      stack_depth = Push(stack_, 0, entry, kBlockUnvisited2);
+      while (stack_depth > 0) {
+        SpecialRPOStackFrame* frame = &stack_[stack_depth - 1];
+        BasicBlock* block = frame->block;
+        BasicBlock* succ = NULL;
+
+        if (block != end && frame->index < block->SuccessorCount()) {
+          // Process the next normal successor.
+          succ = block->SuccessorAt(frame->index++);
+        } else if (HasLoopNumber(block)) {
+          // Process additional outgoing edges from the loop header.
+          if (block->rpo_number() == kBlockOnStack) {
+            // Finish the loop body the first time the header is left on the
+            // stack.
+            DCHECK(loop != NULL && loop->header == block);
+            loop->start = PushFront(order, block);
+            order = loop->end;
+            block->set_rpo_number(kBlockVisited2);
+            // Pop the loop stack and continue visiting outgoing edges within
+            // the context of the outer loop, if any.
+            loop = loop->prev;
+            // We leave the loop header on the stack; the rest of this iteration
+            // and later iterations will go through its outgoing edges list.
+          }
+
+          // Use the next outgoing edge if there are any.
+          int outgoing_index =
+              static_cast<int>(frame->index - block->SuccessorCount());
+          LoopInfo* info = &loops_[GetLoopNumber(block)];
+          DCHECK(loop != info);
+          if (block != entry && info->outgoing != NULL &&
+              outgoing_index < info->outgoing->length()) {
+            succ = info->outgoing->at(outgoing_index);
+            frame->index++;
+          }
+        }
+
+        if (succ != NULL) {
+          // Process the next successor.
+          if (succ->rpo_number() == kBlockOnStack) continue;
+          if (succ->rpo_number() == kBlockVisited2) continue;
+          DCHECK(succ->rpo_number() == kBlockUnvisited2);
+          if (loop != NULL && !loop->members->Contains(succ->id().ToInt())) {
+            // The successor is not in the current loop or any nested loop.
+            // Add it to the outgoing edges of this loop and visit it later.
+            loop->AddOutgoing(zone_, succ);
+          } else {
+            // Push the successor onto the stack.
+            stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited2);
+            if (HasLoopNumber(succ)) {
+              // Push the inner loop onto the loop stack.
+              DCHECK(GetLoopNumber(succ) < num_loops);
+              LoopInfo* next = &loops_[GetLoopNumber(succ)];
+              next->end = order;
+              next->prev = loop;
+              loop = next;
+            }
+          }
+        } else {
+          // Finished with all successors of the current block.
+          if (HasLoopNumber(block)) {
+            // If we are going to pop a loop header, then add its entire body.
+            LoopInfo* info = &loops_[GetLoopNumber(block)];
+            for (BasicBlock* b = info->start; true; b = b->rpo_next()) {
+              if (b->rpo_next() == info->end) {
+                b->set_rpo_next(order);
+                info->end = order;
+                break;
+              }
+            }
+            order = info->start;
+          } else {
+            // Pop a single node off the stack and add it to the order.
+            order = PushFront(order, block);
+            block->set_rpo_number(kBlockVisited2);
+          }
+          stack_depth--;
+        }
+      }
+    }
+
+    // Publish new order the first time.
+    if (order_ == NULL) order_ = order;
+
+    // Compute the correct loop headers and set the correct loop ends.
+    LoopInfo* current_loop = NULL;
+    BasicBlock* current_header = entry->loop_header();
+    int32_t loop_depth = entry->loop_depth();
+    if (entry->IsLoopHeader()) --loop_depth;  // Entry might be a loop header.
+    for (BasicBlock* b = order; b != insertion_point; b = b->rpo_next()) {
+      BasicBlock* current = b;
+
+      // Reset BasicBlock::rpo_number again.
+      current->set_rpo_number(kBlockUnvisited1);
+
+      // Finish the previous loop(s) if we just exited them.
+      while (current_header != NULL && current == current_header->loop_end()) {
         DCHECK(current_header->IsLoopHeader());
         DCHECK(current_loop != NULL);
         current_loop = current_loop->prev;
         current_header = current_loop == NULL ? NULL : current_loop->header;
         --loop_depth;
       }
+      current->set_loop_header(current_header);
+
+      // Push a new loop onto the stack if this loop is a loop header.
+      if (HasLoopNumber(current)) {
+        ++loop_depth;
+        current_loop = &loops_[GetLoopNumber(current)];
+        BasicBlock* end = current_loop->end;
+        current->set_loop_end(end == NULL ? BeyondEndSentinel() : end);
+        current_header = current_loop->header;
+        Trace("B%d is a loop header, increment loop depth to %d\n",
+              current->id().ToInt(), loop_depth);
+      }
+
+      current->set_loop_depth(loop_depth);
+
+      if (current->loop_header() == NULL) {
+        Trace("B%d is not in a loop (depth == %d)\n", current->id().ToInt(),
+              current->loop_depth());
+      } else {
+        Trace("B%d has loop header B%d, (depth == %d)\n", current->id().ToInt(),
+              current->loop_header()->id().ToInt(), current->loop_depth());
+      }
     }
-    current->loop_depth_ = loop_depth;
-    if (current->loop_header_ == NULL) {
-      Trace("B%d is not in a loop (depth == %d)\n", current->id(),
-            current->loop_depth_);
-    } else {
-      Trace("B%d has loop header B%d, (depth == %d)\n", current->id(),
-            current->loop_header_->id(), current->loop_depth_);
+  }
+
+  // Computes loop membership from the backedges of the control flow graph.
+  void ComputeLoopInfo(ZoneVector<SpecialRPOStackFrame>& queue,
+                       size_t num_loops, ZoneVector<Backedge>* backedges) {
+    // Extend existing loop membership vectors.
+    for (LoopInfo& loop : loops_) {
+      BitVector* new_members = new (zone_)
+          BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
+      new_members->CopyFrom(*loop.members);
+      loop.members = new_members;
+    }
+
+    // Extend loop information vector.
+    loops_.resize(num_loops, LoopInfo());
+
+    // Compute loop membership starting from backedges.
+    // O(max(loop_depth) * max(|loop|))
+    for (size_t i = 0; i < backedges->size(); i++) {
+      BasicBlock* member = backedges->at(i).first;
+      BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
+      size_t loop_num = GetLoopNumber(header);
+      if (loops_[loop_num].header == NULL) {
+        loops_[loop_num].header = header;
+        loops_[loop_num].members = new (zone_)
+            BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
+      }
+
+      int queue_length = 0;
+      if (member != header) {
+        // As long as the header doesn't have a backedge to itself, push the
+        // member onto the queue and process its predecessors.
+        if (!loops_[loop_num].members->Contains(member->id().ToInt())) {
+          loops_[loop_num].members->Add(member->id().ToInt());
+        }
+        queue[queue_length++].block = member;
+      }
+
+      // Propagate loop membership backwards. All predecessors of M up to the
+      // loop header H are members of the loop too. O(|blocks between M and H|).
+      while (queue_length > 0) {
+        BasicBlock* block = queue[--queue_length].block;
+        for (size_t i = 0; i < block->PredecessorCount(); i++) {
+          BasicBlock* pred = block->PredecessorAt(i);
+          if (pred != header) {
+            if (!loops_[loop_num].members->Contains(pred->id().ToInt())) {
+              loops_[loop_num].members->Add(pred->id().ToInt());
+              queue[queue_length++].block = pred;
+            }
+          }
+        }
+      }
     }
   }
 
 #if DEBUG
-  if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order);
-  VerifySpecialRPO(num_loops, loops, final_order);
+  void PrintRPO() {
+    OFStream os(stdout);
+    os << "RPO with " << loops_.size() << " loops";
+    if (loops_.size() > 0) {
+      os << " (";
+      for (size_t i = 0; i < loops_.size(); i++) {
+        if (i > 0) os << " ";
+        os << "B" << loops_[i].header->id();
+      }
+      os << ")";
+    }
+    os << ":\n";
+
+    for (BasicBlock* block = order_; block != NULL; block = block->rpo_next()) {
+      BasicBlock::Id bid = block->id();
+      // TODO(jarin,svenpanne): Add formatting here once we have support for
+      // that in streams (we want an equivalent of PrintF("%5d:", x) here).
+      os << "  " << block->rpo_number() << ":";
+      for (size_t i = 0; i < loops_.size(); i++) {
+        bool range = loops_[i].header->LoopContains(block);
+        bool membership = loops_[i].header != block && range;
+        os << (membership ? " |" : "  ");
+        os << (range ? "x" : " ");
+      }
+      os << "  B" << bid << ": ";
+      if (block->loop_end() != NULL) {
+        os << " range: [" << block->rpo_number() << ", "
+           << block->loop_end()->rpo_number() << ")";
+      }
+      if (block->loop_header() != NULL) {
+        os << " header: B" << block->loop_header()->id();
+      }
+      if (block->loop_depth() > 0) {
+        os << " depth: " << block->loop_depth();
+      }
+      os << "\n";
+    }
+  }
+
+  void VerifySpecialRPO() {
+    BasicBlockVector* order = schedule_->rpo_order();
+    DCHECK(order->size() > 0);
+    DCHECK((*order)[0]->id().ToInt() == 0);  // entry should be first.
+
+    for (size_t i = 0; i < loops_.size(); i++) {
+      LoopInfo* loop = &loops_[i];
+      BasicBlock* header = loop->header;
+      BasicBlock* end = header->loop_end();
+
+      DCHECK(header != NULL);
+      DCHECK(header->rpo_number() >= 0);
+      DCHECK(header->rpo_number() < static_cast<int>(order->size()));
+      DCHECK(end != NULL);
+      DCHECK(end->rpo_number() <= static_cast<int>(order->size()));
+      DCHECK(end->rpo_number() > header->rpo_number());
+      DCHECK(header->loop_header() != header);
+
+      // Verify the start ... end list relationship.
+      int links = 0;
+      BasicBlock* block = loop->start;
+      DCHECK_EQ(header, block);
+      bool end_found;
+      while (true) {
+        if (block == NULL || block == loop->end) {
+          end_found = (loop->end == block);
+          break;
+        }
+        // The list should be in the same order as the final result.
+        DCHECK(block->rpo_number() == links + header->rpo_number());
+        links++;
+        block = block->rpo_next();
+        DCHECK(links < static_cast<int>(2 * order->size()));  // cycle?
+      }
+      DCHECK(links > 0);
+      DCHECK(links == end->rpo_number() - header->rpo_number());
+      DCHECK(end_found);
+
+      // Check loop depth of the header.
+      int loop_depth = 0;
+      for (LoopInfo* outer = loop; outer != NULL; outer = outer->prev) {
+        loop_depth++;
+      }
+      DCHECK_EQ(loop_depth, header->loop_depth());
+
+      // Check the contiguousness of loops.
+      int count = 0;
+      for (int j = 0; j < static_cast<int>(order->size()); j++) {
+        BasicBlock* block = order->at(j);
+        DCHECK(block->rpo_number() == j);
+        if (j < header->rpo_number() || j >= end->rpo_number()) {
+          DCHECK(!header->LoopContains(block));
+        } else {
+          DCHECK(header->LoopContains(block));
+          DCHECK_GE(block->loop_depth(), loop_depth);
+          count++;
+        }
+      }
+      DCHECK(links == count);
+    }
+  }
+#endif  // DEBUG
+
+  Zone* zone_;
+  Schedule* schedule_;
+  BasicBlock* order_;
+  BasicBlock* beyond_end_;
+  ZoneVector<LoopInfo> loops_;
+  ZoneVector<Backedge> backedges_;
+  ZoneVector<SpecialRPOStackFrame> stack_;
+  size_t previous_block_count_;
+};
+
+
+BasicBlockVector* Scheduler::ComputeSpecialRPO(Zone* zone, Schedule* schedule) {
+  SpecialRPONumberer numberer(zone, schedule);
+  numberer.ComputeSpecialRPO();
+  numberer.SerializeRPOIntoSchedule();
+  numberer.PrintAndVerifySpecialRPO();
+  return schedule->rpo_order();
+}
+
+
+void Scheduler::ComputeSpecialRPONumbering() {
+  Trace("--- COMPUTING SPECIAL RPO ----------------------------------\n");
+
+  // Compute the special reverse-post-order for basic blocks.
+  special_rpo_ = new (zone_) SpecialRPONumberer(zone_, schedule_);
+  special_rpo_->ComputeSpecialRPO();
+}
+
+
+void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
+  for (/*nop*/; block != NULL; block = block->rpo_next()) {
+    BasicBlock::Predecessors::iterator pred = block->predecessors_begin();
+    BasicBlock::Predecessors::iterator end = block->predecessors_end();
+    DCHECK(pred != end);  // All blocks except start have predecessors.
+    BasicBlock* dominator = *pred;
+    // For multiple predecessors, walk up the dominator tree until a common
+    // dominator is found. Visitation order guarantees that all predecessors
+    // except for backwards edges have been visited.
+    for (++pred; pred != end; ++pred) {
+      // Don't examine backwards edges.
+      if ((*pred)->dominator_depth() < 0) continue;
+      dominator = GetCommonDominator(dominator, *pred);
+    }
+    block->set_dominator(dominator);
+    block->set_dominator_depth(dominator->dominator_depth() + 1);
+    // Propagate "deferredness" of the dominator.
+    if (dominator->deferred()) block->set_deferred(true);
+    Trace("Block B%d's idom is B%d, depth = %d\n", block->id().ToInt(),
+          dominator->id().ToInt(), block->dominator_depth());
+  }
+}
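+
+// A note on GetCommonDominator() (declared in scheduler.h): with the
+// dominator depths assigned above, the common dominator of two blocks can be
+// found by repeatedly lifting whichever block is deeper, e.g. (sketch only):
+//
+//   BasicBlock* CommonDominator(BasicBlock* b1, BasicBlock* b2) {
+//     while (b1 != b2) {
+//       if (b1->dominator_depth() < b2->dominator_depth()) std::swap(b1, b2);
+//       b1 = b1->dominator();
+//     }
+//     return b1;
+//   }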
+
+
+void Scheduler::GenerateImmediateDominatorTree() {
+  Trace("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");
+
+  // Seed start block to be the first dominator.
+  schedule_->start()->set_dominator_depth(0);
+
+  // Build the block dominator tree resulting from the above seed.
+  PropagateImmediateDominators(schedule_->start()->rpo_next());
+}
+
+
+// -----------------------------------------------------------------------------
+// Phase 3: Prepare use counts for nodes.
+
+
+class PrepareUsesVisitor : public NullNodeVisitor {
+ public:
+  explicit PrepareUsesVisitor(Scheduler* scheduler)
+      : scheduler_(scheduler), schedule_(scheduler->schedule_) {}
+
+  void Pre(Node* node) {
+    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
+      // Fixed nodes are always roots for schedule late.
+      scheduler_->schedule_root_nodes_.push_back(node);
+      if (!schedule_->IsScheduled(node)) {
+        // Make sure root nodes are scheduled in their respective blocks.
+        Trace("Scheduling fixed position node #%d:%s\n", node->id(),
+              node->op()->mnemonic());
+        IrOpcode::Value opcode = node->opcode();
+        BasicBlock* block =
+            opcode == IrOpcode::kParameter
+                ? schedule_->start()
+                : schedule_->block(NodeProperties::GetControlInput(node));
+        DCHECK(block != NULL);
+        schedule_->AddNode(block, node);
+      }
+    }
+  }
+
+  void PostEdge(Node* from, int index, Node* to) {
+    // If the edge is from an unscheduled node, then tally it in the use count
+    // for all of its inputs. The same criterion will be used in ScheduleLate
+    // for decrementing use counts.
+    if (!schedule_->IsScheduled(from)) {
+      DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
+      scheduler_->IncrementUnscheduledUseCount(to, index, from);
+    }
+  }
+
+ private:
+  Scheduler* scheduler_;
+  Schedule* schedule_;
+};
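+
+// The use counts collected by this visitor drive the worklist in phase 5:
+// a node becomes eligible for ScheduleLate only once its unscheduled_count_
+// has dropped to zero, i.e. once every one of its uses has been placed.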
+
+
+void Scheduler::PrepareUses() {
+  Trace("--- PREPARE USES -------------------------------------------\n");
+
+  // Count the uses of every node; these counts are used to ensure that all
+  // of a node's uses are scheduled before the node itself.
+  PrepareUsesVisitor prepare_uses(this);
+  graph_->VisitNodeInputsFromEnd(&prepare_uses);
+}
+
+
+// -----------------------------------------------------------------------------
+// Phase 4: Schedule nodes early.
+
+
+class ScheduleEarlyNodeVisitor {
+ public:
+  ScheduleEarlyNodeVisitor(Zone* zone, Scheduler* scheduler)
+      : scheduler_(scheduler), schedule_(scheduler->schedule_), queue_(zone) {}
+
+  // Run the schedule early algorithm on a set of fixed root nodes.
+  void Run(NodeVector* roots) {
+    for (NodeVectorIter i = roots->begin(); i != roots->end(); ++i) {
+      queue_.push(*i);
+      while (!queue_.empty()) {
+        VisitNode(queue_.front());
+        queue_.pop();
+      }
+    }
+  }
+
+ private:
+  // Visits one node from the queue and propagates its current schedule early
+  // position to all uses. This in turn might push more nodes onto the queue.
+  void VisitNode(Node* node) {
+    Scheduler::SchedulerData* data = scheduler_->GetData(node);
+
+    // Fixed nodes already know their schedule early position.
+    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
+      data->minimum_block_ = schedule_->block(node);
+      Trace("Fixing #%d:%s minimum_block = B%d, dominator_depth = %d\n",
+            node->id(), node->op()->mnemonic(),
+            data->minimum_block_->id().ToInt(),
+            data->minimum_block_->dominator_depth());
+    }
+
+    // No need to propagate unconstrained schedule early positions.
+    if (data->minimum_block_ == schedule_->start()) return;
+
+    // Propagate schedule early position.
+    DCHECK(data->minimum_block_ != NULL);
+    Node::Uses uses = node->uses();
+    for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) {
+      PropagateMinimumPositionToNode(data->minimum_block_, *i);
+    }
+  }
+
+  // Propagates {block} as another minimum position into the given {node}. The
+  // net effect is that, once the queue has been fully processed, the node's
+  // minimum block is the deepest block in the dominator tree that is still
+  // dominated by the minimum positions of all of the node's inputs.
+  void PropagateMinimumPositionToNode(BasicBlock* block, Node* node) {
+    Scheduler::SchedulerData* data = scheduler_->GetData(node);
+
+    // No need to propagate to fixed node, it's guaranteed to be a root.
+    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) return;
+
+    // Coupled nodes influence schedule early position of their control.
+    if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
+      Node* control = NodeProperties::GetControlInput(node);
+      PropagateMinimumPositionToNode(block, control);
+    }
+
+    // Propagate new position if it is deeper down the dominator tree than the
+    // current. Note that all inputs need to have minimum block position inside
+    // the dominator chain of {node}'s minimum block position.
+    DCHECK(InsideSameDominatorChain(block, data->minimum_block_));
+    if (block->dominator_depth() > data->minimum_block_->dominator_depth()) {
+      data->minimum_block_ = block;
+      queue_.push(node);
+      Trace("Propagating #%d:%s minimum_block = B%d, dominator_depth = %d\n",
+            node->id(), node->op()->mnemonic(),
+            data->minimum_block_->id().ToInt(),
+            data->minimum_block_->dominator_depth());
+    }
+  }
+
+#if DEBUG
+  bool InsideSameDominatorChain(BasicBlock* b1, BasicBlock* b2) {
+    BasicBlock* dominator = scheduler_->GetCommonDominator(b1, b2);
+    return dominator == b1 || dominator == b2;
+  }
 #endif
-  return final_order;
+
+  Scheduler* scheduler_;
+  Schedule* schedule_;
+  ZoneQueue<Node*> queue_;
+};
+
+
+void Scheduler::ScheduleEarly() {
+  Trace("--- SCHEDULE EARLY -----------------------------------------\n");
+  if (FLAG_trace_turbo_scheduler) {
+    Trace("roots: ");
+    for (Node* node : schedule_root_nodes_) {
+      Trace("#%d:%s ", node->id(), node->op()->mnemonic());
+    }
+    Trace("\n");
+  }
+
+  // Compute the minimum block for each node thereby determining the earliest
+  // position each node could be placed within a valid schedule.
+  ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
+  schedule_early_visitor.Run(&schedule_root_nodes_);
 }
+
+
+// -----------------------------------------------------------------------------
+// Phase 5: Schedule nodes late.
+
+
+class ScheduleLateNodeVisitor {
+ public:
+  ScheduleLateNodeVisitor(Zone* zone, Scheduler* scheduler)
+      : scheduler_(scheduler), schedule_(scheduler_->schedule_) {}
+
+  // Run the schedule late algorithm on a set of fixed root nodes.
+  void Run(NodeVector* roots) {
+    for (NodeVectorIter i = roots->begin(); i != roots->end(); ++i) {
+      ProcessQueue(*i);
+    }
+  }
+
+ private:
+  void ProcessQueue(Node* root) {
+    ZoneQueue<Node*>* queue = &(scheduler_->schedule_queue_);
+    for (Node* node : root->inputs()) {
+      // Don't schedule coupled nodes on their own.
+      if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
+        node = NodeProperties::GetControlInput(node);
+      }
+
+      // Test schedulability condition by looking at unscheduled use count.
+      if (scheduler_->GetData(node)->unscheduled_count_ != 0) continue;
+
+      queue->push(node);
+      while (!queue->empty()) {
+        VisitNode(queue->front());
+        queue->pop();
+      }
+    }
+  }
+
+  // Visits one node from the queue of schedulable nodes and determines its
+  // schedule late position. Also hoists nodes out of loops to find a more
+  // optimal scheduling position.
+  void VisitNode(Node* node) {
+    DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
+
+    // Don't schedule nodes that are already scheduled.
+    if (schedule_->IsScheduled(node)) return;
+    DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node));
+
+    // Determine the dominating block for all of the uses of this node. It is
+    // the latest block that this node can be scheduled in.
+    Trace("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic());
+    BasicBlock* block = GetCommonDominatorOfUses(node);
+    DCHECK_NOT_NULL(block);
+
+    // The schedule early block dominates the schedule late block.
+    BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_;
+    DCHECK_EQ(min_block, scheduler_->GetCommonDominator(block, min_block));
+    Trace("Schedule late of #%d:%s is B%d at loop depth %d, minimum = B%d\n",
+          node->id(), node->op()->mnemonic(), block->id().ToInt(),
+          block->loop_depth(), min_block->id().ToInt());
+
+    // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
+    // into enclosing loop pre-headers until they would precede their schedule
+    // early position.
+    BasicBlock* hoist_block = GetPreHeader(block);
+    while (hoist_block != NULL &&
+           hoist_block->dominator_depth() >= min_block->dominator_depth()) {
+      Trace("  hoisting #%d:%s to block B%d\n", node->id(),
+            node->op()->mnemonic(), hoist_block->id().ToInt());
+      DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
+      block = hoist_block;
+      hoist_block = GetPreHeader(hoist_block);
+    }
+
+    // Schedule the node or a floating control structure.
+    if (NodeProperties::IsControl(node)) {
+      ScheduleFloatingControl(block, node);
+    } else {
+      ScheduleNode(block, node);
+    }
+  }
+
+  BasicBlock* GetPreHeader(BasicBlock* block) {
+    if (block->IsLoopHeader()) {
+      return block->dominator();
+    } else if (block->loop_header() != NULL) {
+      return block->loop_header()->dominator();
+    } else {
+      return NULL;
+    }
+  }
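+
+  // Note: the "pre-header" used for hoisting is approximated above by the
+  // immediate dominator of the (enclosing) loop's header, a block which lies
+  // outside that loop.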
+
+  BasicBlock* GetCommonDominatorOfUses(Node* node) {
+    BasicBlock* block = NULL;
+    for (Edge edge : node->use_edges()) {
+      BasicBlock* use_block = GetBlockForUse(edge);
+      if (use_block == NULL) continue;
+      block = (block == NULL)
+                  ? use_block
+                  : scheduler_->GetCommonDominator(block, use_block);
+    }
+    return block;
+  }
+
+  BasicBlock* GetBlockForUse(Edge edge) {
+    Node* use = edge.from();
+    IrOpcode::Value opcode = use->opcode();
+    if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
+      // If the use is from a coupled (i.e. floating) phi, compute the common
+      // dominator of its uses. This will not recurse more than one level.
+      if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
+        Trace("  inspecting uses of coupled #%d:%s\n", use->id(),
+              use->op()->mnemonic());
+        DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
+        return GetCommonDominatorOfUses(use);
+      }
+      // If the use is from a fixed (i.e. non-floating) phi, use the block
+      // of the corresponding control input to the merge.
+      if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
+        Trace("  input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
+              use->op()->mnemonic());
+        Node* merge = NodeProperties::GetControlInput(use, 0);
+        opcode = merge->opcode();
+        DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
+        use = NodeProperties::GetControlInput(merge, edge.index());
+      }
+    }
+    BasicBlock* result = schedule_->block(use);
+    if (result == NULL) return NULL;
+    Trace("  must dominate use #%d:%s in B%d\n", use->id(),
+          use->op()->mnemonic(), result->id().ToInt());
+    return result;
+  }
+
+  void ScheduleFloatingControl(BasicBlock* block, Node* node) {
+    scheduler_->FuseFloatingControl(block, node);
+  }
+
+  void ScheduleNode(BasicBlock* block, Node* node) {
+    schedule_->PlanNode(block, node);
+    scheduler_->scheduled_nodes_[block->id().ToSize()].push_back(node);
+    scheduler_->UpdatePlacement(node, Scheduler::kScheduled);
+  }
+
+  Scheduler* scheduler_;
+  Schedule* schedule_;
+};
+
+
+void Scheduler::ScheduleLate() {
+  Trace("--- SCHEDULE LATE ------------------------------------------\n");
+  if (FLAG_trace_turbo_scheduler) {
+    Trace("roots: ");
+    for (Node* node : schedule_root_nodes_) {
+      Trace("#%d:%s ", node->id(), node->op()->mnemonic());
+    }
+    Trace("\n");
+  }
+
+  // Schedule: place nodes in the common dominator block of all their uses.
+  ScheduleLateNodeVisitor schedule_late_visitor(zone_, this);
+  schedule_late_visitor.Run(&schedule_root_nodes_);
 }
+
+
+// -----------------------------------------------------------------------------
+// Phase 6: Seal the final schedule.
+
+
+void Scheduler::SealFinalSchedule() {
+  Trace("--- SEAL FINAL SCHEDULE ------------------------------------\n");
+
+  // Serialize the assembly order and reverse-post-order numbering.
+  special_rpo_->SerializeRPOIntoSchedule();
+  special_rpo_->PrintAndVerifySpecialRPO();
+
+  // Add collected nodes for basic blocks to their blocks in the right order.
+  int block_num = 0;
+  for (NodeVector& nodes : scheduled_nodes_) {
+    BasicBlock::Id id = BasicBlock::Id::FromInt(block_num++);
+    BasicBlock* block = schedule_->GetBlockById(id);
+    for (NodeVectorRIter i = nodes.rbegin(); i != nodes.rend(); ++i) {
+      schedule_->AddNode(block, *i);
+    }
+  }
 }
-}  // namespace v8::internal::compiler
+
+
+// -----------------------------------------------------------------------------
+
+
+void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
+  Trace("--- FUSE FLOATING CONTROL ----------------------------------\n");
+  if (FLAG_trace_turbo_scheduler) {
+    OFStream os(stdout);
+    os << "Schedule before control flow fusion:\n" << *schedule_;
+  }
+
+  // Iterate on phase 1: Build control-flow graph.
+  control_flow_builder_->Run(block, node);
+
+  // Iterate on phase 2: Compute special RPO and dominator tree.
+  special_rpo_->UpdateSpecialRPO(block, schedule_->block(node));
+  // TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that.
+  for (BasicBlock* b = block->rpo_next(); b != NULL; b = b->rpo_next()) {
+    b->set_dominator_depth(-1);
+    b->set_dominator(NULL);
+  }
+  PropagateImmediateDominators(block->rpo_next());
+
+  // Iterate on phase 4: Schedule nodes early.
+  // TODO(mstarzinger): The following loop gathering the propagation roots is a
+  // temporary solution and should be merged into the rest of the scheduler as
+  // soon as the approach settled for all floating loops.
+  NodeVector propagation_roots(control_flow_builder_->control_);
+  for (Node* node : control_flow_builder_->control_) {
+    for (Node* use : node->uses()) {
+      if (use->opcode() == IrOpcode::kPhi ||
+          use->opcode() == IrOpcode::kEffectPhi) {
+        propagation_roots.push_back(use);
+      }
+    }
+  }
+  if (FLAG_trace_turbo_scheduler) {
+    Trace("propagation roots: ");
+    for (Node* node : propagation_roots) {
+      Trace("#%d:%s ", node->id(), node->op()->mnemonic());
+    }
+    Trace("\n");
+  }
+  ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
+  schedule_early_visitor.Run(&propagation_roots);
+
+  // Move previously planned nodes.
+  // TODO(mstarzinger): Improve that by supporting bulk moves.
+  scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
+  MovePlannedNodes(block, schedule_->block(node));
+
+  if (FLAG_trace_turbo_scheduler) {
+    OFStream os(stdout);
+    os << "Schedule after control flow fusion:\n" << *schedule_;
+  }
+}
+
+
+void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) {
+  Trace("Move planned nodes from B%d to B%d\n", from->id().ToInt(),
+        to->id().ToInt());
+  NodeVector* nodes = &(scheduled_nodes_[from->id().ToSize()]);
+  for (NodeVectorIter i = nodes->begin(); i != nodes->end(); ++i) {
+    schedule_->SetBlockForNode(to, *i);
+    scheduled_nodes_[to->id().ToSize()].push_back(*i);
+  }
+  nodes->clear();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/scheduler.h b/src/compiler/scheduler.h
index b21662f..9da0b6d 100644
--- a/src/compiler/scheduler.h
+++ b/src/compiler/scheduler.h
@@ -9,89 +9,106 @@
 
 #include "src/compiler/opcodes.h"
 #include "src/compiler/schedule.h"
+#include "src/compiler/zone-pool.h"
 #include "src/zone-containers.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
+class CFGBuilder;
+class ControlEquivalence;
+class SpecialRPONumberer;
+
 // Computes a schedule from a graph, placing nodes into basic blocks and
 // ordering the basic blocks in the special RPO order.
 class Scheduler {
  public:
-  // The complete scheduling algorithm.
-  // Create a new schedule and place all nodes from the graph into it.
-  static Schedule* ComputeSchedule(Graph* graph);
+  // The complete scheduling algorithm. Creates a new schedule and places all
+  // nodes from the graph into it.
+  static Schedule* ComputeSchedule(Zone* zone, Graph* graph);
 
   // Compute the RPO of blocks in an existing schedule.
-  static BasicBlockVector* ComputeSpecialRPO(Schedule* schedule);
-
-  // (Exposed for testing only)
-  // Build and connect the CFG for a node graph, but don't schedule nodes.
-  static void ComputeCFG(Graph* graph, Schedule* schedule);
+  static BasicBlockVector* ComputeSpecialRPO(Zone* zone, Schedule* schedule);
 
  private:
-  enum Placement { kUnknown, kSchedulable, kFixed };
+  // Placement of a node changes during scheduling. The placement state
+  // transitions over time while the scheduler is choosing a position:
+  //
+  //                   +---------------------+-----+----> kFixed
+  //                  /                     /     /
+  //    kUnknown ----+------> kCoupled ----+     /
+  //                  \                         /
+  //                   +----> kSchedulable ----+--------> kScheduled
+  //
+  // 1) GetPlacement(): kUnknown -> kCoupled|kSchedulable|kFixed
+  // 2) UpdatePlacement(): kCoupled|kSchedulable -> kFixed|kScheduled
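+  //
+  // A node is kCoupled when it floats together with its control input (e.g. a
+  // Phi hanging off a floating Merge); it is never scheduled on its own but
+  // instead follows the placement of that control node.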
+  enum Placement { kUnknown, kSchedulable, kFixed, kCoupled, kScheduled };
 
   // Per-node data tracked during scheduling.
   struct SchedulerData {
+    BasicBlock* minimum_block_;  // Minimum legal RPO placement.
     int unscheduled_count_;      // Number of unscheduled uses of this node.
-    int minimum_rpo_;            // Minimum legal RPO placement.
-    bool is_connected_control_;  // {true} if control-connected to the end node.
-    bool is_floating_control_;   // {true} if control, but not control-connected
-                                 // to the end node.
-    Placement placement_ : 3;    // Whether the node is fixed, schedulable,
-                                 // or not yet known.
+    Placement placement_;        // Whether the node is fixed, schedulable,
+                                 // coupled to another node, or not yet known.
   };
 
   Zone* zone_;
   Graph* graph_;
   Schedule* schedule_;
-  NodeVectorVector scheduled_nodes_;
-  NodeVector schedule_root_nodes_;
-  ZoneVector<SchedulerData> node_data_;
-  bool has_floating_control_;
+  NodeVectorVector scheduled_nodes_;     // Per-block list of nodes in reverse.
+  NodeVector schedule_root_nodes_;       // Fixed root nodes seed the worklist.
+  ZoneQueue<Node*> schedule_queue_;      // Worklist of schedulable nodes.
+  ZoneVector<SchedulerData> node_data_;  // Per-node data for all nodes.
+  CFGBuilder* control_flow_builder_;     // Builds basic blocks for controls.
+  SpecialRPONumberer* special_rpo_;      // Special RPO numbering of blocks.
+  ControlEquivalence* equivalence_;      // Control dependence equivalence.
 
   Scheduler(Zone* zone, Graph* graph, Schedule* schedule);
 
-  SchedulerData DefaultSchedulerData();
-
-  SchedulerData* GetData(Node* node) {
-    DCHECK(node->id() < static_cast<int>(node_data_.size()));
-    return &node_data_[node->id()];
-  }
-
-  void BuildCFG();
+  inline SchedulerData DefaultSchedulerData();
+  inline SchedulerData* GetData(Node* node);
 
   Placement GetPlacement(Node* node);
+  void UpdatePlacement(Node* node, Placement placement);
 
-  int GetRPONumber(BasicBlock* block) {
-    DCHECK(block->rpo_number_ >= 0 &&
-           block->rpo_number_ < static_cast<int>(schedule_->rpo_order_.size()));
-    DCHECK(schedule_->rpo_order_[block->rpo_number_] == block);
-    return block->rpo_number_;
-  }
+  inline bool IsCoupledControlEdge(Node* node, int index);
+  void IncrementUnscheduledUseCount(Node* node, int index, Node* from);
+  void DecrementUnscheduledUseCount(Node* node, int index, Node* from);
 
-  void GenerateImmediateDominatorTree();
   BasicBlock* GetCommonDominator(BasicBlock* b1, BasicBlock* b2);
+  void PropagateImmediateDominators(BasicBlock* block);
 
+  // Phase 1: Build control-flow graph.
   friend class CFGBuilder;
+  void BuildCFG();
 
-  friend class ScheduleEarlyNodeVisitor;
-  void ScheduleEarly();
+  // Phase 2: Compute special RPO and dominator tree.
+  friend class SpecialRPONumberer;
+  void ComputeSpecialRPONumbering();
+  void GenerateImmediateDominatorTree();
 
+  // Phase 3: Prepare use counts for nodes.
   friend class PrepareUsesVisitor;
   void PrepareUses();
 
+  // Phase 4: Schedule nodes early.
+  friend class ScheduleEarlyNodeVisitor;
+  void ScheduleEarly();
+
+  // Phase 5: Schedule nodes late.
   friend class ScheduleLateNodeVisitor;
   void ScheduleLate();
 
-  bool ConnectFloatingControl();
+  // Phase 6: Seal the final schedule.
+  void SealFinalSchedule();
 
-  void ConnectFloatingControlSubgraph(BasicBlock* block, Node* node);
+  void FuseFloatingControl(BasicBlock* block, Node* node);
+  void MovePlannedNodes(BasicBlock* from, BasicBlock* to);
 };
-}
-}
-}  // namespace v8::internal::compiler
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_COMPILER_SCHEDULER_H_
diff --git a/src/compiler/select-lowering.cc b/src/compiler/select-lowering.cc
new file mode 100644
index 0000000..edecf58
--- /dev/null
+++ b/src/compiler/select-lowering.cc
@@ -0,0 +1,86 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/select-lowering.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/diamond.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+SelectLowering::SelectLowering(Graph* graph, CommonOperatorBuilder* common)
+    : common_(common),
+      graph_(graph),
+      merges_(Merges::key_compare(), Merges::allocator_type(graph->zone())) {}
+
+
+SelectLowering::~SelectLowering() {}
+
+
+Reduction SelectLowering::Reduce(Node* node) {
+  if (node->opcode() != IrOpcode::kSelect) return NoChange();
+  SelectParameters const p = SelectParametersOf(node->op());
+
+  Node* cond = node->InputAt(0);
+  Node* vthen = node->InputAt(1);
+  Node* velse = node->InputAt(2);
+  Node* merge = nullptr;
+
+  // Check if we already have a diamond for this condition.
+  auto range = merges_.equal_range(cond);
+  for (auto i = range.first;; ++i) {
+    if (i == range.second) {
+      // Create a new diamond for this condition and remember its merge node.
+      Diamond d(graph(), common(), cond, p.hint());
+      merges_.insert(std::make_pair(cond, d.merge));
+      merge = d.merge;
+      break;
+    }
+
+    // If the diamond is reachable from the Select, merging them would result in
+    // an unschedulable graph, so we cannot reuse the diamond in that case.
+    merge = i->second;
+    if (!ReachableFrom(merge, node)) {
+      break;
+    }
+  }
+
+  // Create a Phi hanging off the previously determined merge.
+  node->set_op(common()->Phi(p.type(), 2));
+  node->ReplaceInput(0, vthen);
+  node->ReplaceInput(1, velse);
+  node->ReplaceInput(2, merge);
+  return Changed(node);
+}
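+
+// After this reduction, a Select(cond, vthen, velse) node has become a
+// two-input Phi hanging off the merge of a branch on {cond}, reusing an
+// existing diamond for the same condition whenever that is safe.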
+
+
+bool SelectLowering::ReachableFrom(Node* const sink, Node* const source) {
+  // TODO(turbofan): This is probably horribly expensive, and it should be moved
+  // into node.h or somewhere else?!
+  Zone zone(graph()->zone()->isolate());
+  std::queue<Node*, NodeDeque> queue((NodeDeque(&zone)));
+  BoolVector visited(graph()->NodeCount(), false, &zone);
+  queue.push(source);
+  visited[source->id()] = true;
+  while (!queue.empty()) {
+    Node* current = queue.front();
+    if (current == sink) return true;
+    queue.pop();
+    for (auto input : current->inputs()) {
+      if (!visited[input->id()]) {
+        queue.push(input);
+        visited[input->id()] = true;
+      }
+    }
+  }
+  return false;
+}
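+
+// Note: ReachableFrom() is a plain breadth-first search over input edges, so
+// each query costs O(nodes + edges) in the worst case; see the TODO above.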
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/select-lowering.h b/src/compiler/select-lowering.h
new file mode 100644
index 0000000..05ea0e0
--- /dev/null
+++ b/src/compiler/select-lowering.h
@@ -0,0 +1,48 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SELECT_LOWERING_H_
+#define V8_COMPILER_SELECT_LOWERING_H_
+
+#include <map>
+
+#include "src/compiler/graph-reducer.h"
+#include "src/zone-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class Graph;
+
+
+// Lowers Select nodes to diamonds.
+class SelectLowering FINAL : public Reducer {
+ public:
+  SelectLowering(Graph* graph, CommonOperatorBuilder* common);
+  ~SelectLowering();
+
+  Reduction Reduce(Node* node) OVERRIDE;
+
+ private:
+  typedef std::multimap<Node*, Node*, std::less<Node*>,
+                        zone_allocator<std::pair<Node* const, Node*>>> Merges;
+
+  bool ReachableFrom(Node* const sink, Node* const source);
+
+  CommonOperatorBuilder* common() const { return common_; }
+  Graph* graph() const { return graph_; }
+
+  CommonOperatorBuilder* common_;
+  Graph* graph_;
+  Merges merges_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_SELECT_LOWERING_H_
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
index f794525..1461709 100644
--- a/src/compiler/simplified-lowering.cc
+++ b/src/compiler/simplified-lowering.cc
@@ -4,10 +4,15 @@
 
 #include "src/compiler/simplified-lowering.h"
 
+#include <limits>
+
 #include "src/base/bits.h"
 #include "src/code-factory.h"
 #include "src/compiler/common-operator.h"
+#include "src/compiler/diamond.h"
 #include "src/compiler/graph-inl.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties-inl.h"
 #include "src/compiler/representation-change.h"
 #include "src/compiler/simplified-lowering.h"
@@ -66,11 +71,18 @@
         info_(zone->NewArray<NodeInfo>(count_)),
         nodes_(zone),
         replacements_(zone),
-        contains_js_nodes_(false),
         phase_(PROPAGATE),
         changer_(changer),
         queue_(zone) {
     memset(info_, 0, sizeof(NodeInfo) * count_);
+
+    Factory* f = zone->isolate()->factory();
+    safe_bit_range_ =
+        Type::Union(Type::Boolean(),
+                    Type::Range(f->NewNumber(0), f->NewNumber(1), zone), zone);
+    safe_int_additive_range_ =
+        Type::Range(f->NewNumber(-std::pow(2.0, 52.0)),
+                    f->NewNumber(std::pow(2.0, 52.0)), zone);
   }
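
The ±2^52 bound on safe_int_additive_range_ is what makes the additive lowerings below sound: float64 represents every integer of magnitude up to 2^53 exactly, so a sum or difference of two operands from [-2^52, 2^52] can never round, and truncating that exact result to 32 bits agrees with the float64 computation. A quick check of the property in plain C++:

#include <cassert>
#include <cmath>

int main() {
  const double limit = std::pow(2.0, 52.0);
  // Two operands at the edge of the safe range: the sum is 2^53, which is
  // still exactly representable, so no rounding occurs.
  assert(limit + limit == std::pow(2.0, 53.0));
  assert((limit + limit) - limit == limit);
  // One step past the safe zone, addition starts to round (ties-to-even).
  assert(std::pow(2.0, 53.0) + 1.0 == std::pow(2.0, 53.0));
  return 0;
}
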
 
   void Run(SimplifiedLowering* lowering) {
@@ -164,6 +176,30 @@
            NodeProperties::GetBounds(node->InputAt(1)).upper->Is(type);
   }
 
+  void ProcessTruncateWord32Input(Node* node, int index, MachineTypeUnion use) {
+    Node* input = node->InputAt(index);
+    if (phase_ == PROPAGATE) {
+      // In the propagate phase, propagate the usage information backward.
+      Enqueue(input, use);
+    } else {
+      // In the change phase, insert a change before the use if necessary.
+      MachineTypeUnion output = GetInfo(input)->output;
+      if ((output & (kRepBit | kRepWord8 | kRepWord16 | kRepWord32)) == 0) {
+        // Output representation doesn't match usage.
+        TRACE(("  truncate-to-int32: #%d:%s(@%d #%d:%s) ", node->id(),
+               node->op()->mnemonic(), index, input->id(),
+               input->op()->mnemonic()));
+        TRACE((" from "));
+        PrintInfo(output);
+        TRACE((" to "));
+        PrintInfo(use);
+        TRACE(("\n"));
+        Node* n = changer_->GetTruncatedWord32For(input, output);
+        node->ReplaceInput(index, n);
+      }
+    }
+  }
+
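
ProcessTruncateWord32Input follows the selector's two-phase shape: PROPAGATE only accumulates use bits backward through the graph, and the later phase revisits each node once, splicing in a conversion wherever the producer's output representation does not satisfy the use. A hypothetical sketch of the fixpoint driver behind the propagation (plain ids and a std::queue instead of the real zone-allocated machinery):

#include <cstdint>
#include <queue>
#include <vector>

using MachineTypeUnion = uint32_t;  // bit set of kRep*/kType* flags

struct UseInfo {
  MachineTypeUnion use = 0;  // union of all uses seen so far
  bool queued = false;
};

// Re-enqueue a node only when a use bit it has not yet seen arrives; since
// the masks only grow, every node is revisited at most once per new flag
// and the worklist reaches a fixpoint.
void Enqueue(std::vector<UseInfo>& info, std::queue<int>& worklist, int id,
             MachineTypeUnion use) {
  MachineTypeUnion combined = info[id].use | use;
  if (combined == info[id].use) return;  // nothing new to propagate
  info[id].use = combined;
  if (!info[id].queued) {
    info[id].queued = true;
    worklist.push(id);
  }
}
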
   void ProcessInput(Node* node, int index, MachineTypeUnion use) {
     Node* input = node->InputAt(index);
     if (phase_ == PROPAGATE) {
@@ -206,22 +242,19 @@
   // context, effect, and control inputs, assuming that value inputs should have
   // {kRepTagged} representation and can observe all output values {kTypeAny}.
   void VisitInputs(Node* node) {
-    InputIter i = node->inputs().begin();
-    for (int j = OperatorProperties::GetValueInputCount(node->op()); j > 0;
-         ++i, j--) {
-      ProcessInput(node, i.index(), kMachAnyTagged);  // Value inputs
+    auto i = node->input_edges().begin();
+    for (int j = node->op()->ValueInputCount(); j > 0; ++i, j--) {
+      ProcessInput(node, (*i).index(), kMachAnyTagged);  // Value inputs
     }
     for (int j = OperatorProperties::GetContextInputCount(node->op()); j > 0;
          ++i, j--) {
-      ProcessInput(node, i.index(), kMachAnyTagged);  // Context inputs
+      ProcessInput(node, (*i).index(), kMachAnyTagged);  // Context inputs
     }
-    for (int j = OperatorProperties::GetEffectInputCount(node->op()); j > 0;
-         ++i, j--) {
-      Enqueue(*i);  // Effect inputs: just visit
+    for (int j = node->op()->EffectInputCount(); j > 0; ++i, j--) {
+      Enqueue((*i).to());  // Effect inputs: just visit
     }
-    for (int j = OperatorProperties::GetControlInputCount(node->op()); j > 0;
-         ++i, j--) {
-      Enqueue(*i);  // Control inputs: just visit
+    for (int j = node->op()->ControlInputCount(); j > 0; ++i, j--) {
+      Enqueue((*i).to());  // Control inputs: just visit
     }
     SetOutput(node, kMachAnyTagged);
   }
@@ -267,60 +300,83 @@
   void VisitInt64Cmp(Node* node) { VisitBinop(node, kMachInt64, kRepBit); }
   void VisitUint64Cmp(Node* node) { VisitBinop(node, kMachUint64, kRepBit); }
 
-  // Helper for handling phis.
-  void VisitPhi(Node* node, MachineTypeUnion use,
-                SimplifiedLowering* lowering) {
-    // First, propagate the usage information to inputs of the phi.
-    if (!lower()) {
-      int values = OperatorProperties::GetValueInputCount(node->op());
-      // Propagate {use} of the phi to value inputs, and 0 to control.
-      Node::Inputs inputs = node->inputs();
-      for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
-           ++iter, --values) {
-        // TODO(titzer): it'd be nice to have distinguished edge kinds here.
-        ProcessInput(node, iter.index(), values > 0 ? use : 0);
-      }
-    }
-    // Phis adapt to whatever output representation their uses demand,
-    // pushing representation changes to their inputs.
-    MachineTypeUnion use_rep = GetUseInfo(node) & kRepMask;
-    MachineTypeUnion use_type = GetUseInfo(node) & kTypeMask;
-    MachineTypeUnion rep = 0;
-    if (use_rep & kRepTagged) {
-      rep = kRepTagged;  // Tagged overrides everything.
-    } else if (use_rep & kRepFloat64) {
-      rep = kRepFloat64;
-    } else if (use_rep & kRepWord64) {
-      rep = kRepWord64;
-    } else if (use_rep & kRepWord32) {
-      rep = kRepWord32;
-    } else if (use_rep & kRepBit) {
-      rep = kRepBit;
-    } else {
-      // There was no representation associated with any of the uses.
-      // TODO(titzer): Select the best rep using phi's type, not the usage type?
-      if (use_type & kTypeAny) {
-        rep = kRepTagged;
-      } else if (use_type & kTypeNumber) {
-        rep = kRepFloat64;
-      } else if (use_type & kTypeInt64 || use_type & kTypeUint64) {
-        rep = kRepWord64;
-      } else if (use_type & kTypeInt32 || use_type & kTypeUint32) {
-        rep = kRepWord32;
-      } else if (use_type & kTypeBool) {
-        rep = kRepBit;
-      } else {
-        UNREACHABLE();  // should have at least a usage type!
-      }
-    }
-    // Preserve the usage type, but set the representation.
+  // Infer representation for phi-like nodes.
+  MachineType GetRepresentationForPhi(Node* node, MachineTypeUnion use) {
+    // Phis adapt to the output representation their uses demand.
     Type* upper = NodeProperties::GetBounds(node).upper;
-    MachineTypeUnion output_type = rep | changer_->TypeFromUpperBound(upper);
+    if ((use & kRepMask) == kRepTagged) {
+      // Only tagged uses.
+      return kRepTagged;
+    } else if (upper->Is(Type::Integral32())) {
+      // Integer within the [-2^31, 2^32) range.
+      if ((use & kRepMask) == kRepFloat64) {
+        // Only float64 uses.
+        return kRepFloat64;
+      } else if (upper->Is(Type::Signed32()) || upper->Is(Type::Unsigned32())) {
+        // Multiple uses, but the value fits in 32 bits => pick kRepWord32.
+        return kRepWord32;
+      } else if ((use & kRepMask) == kRepWord32 ||
+                 (use & kTypeMask) == kTypeInt32 ||
+                 (use & kTypeMask) == kTypeUint32) {
+        // We only use 32 bits or we use the result consistently.
+        return kRepWord32;
+      } else {
+        return kRepFloat64;
+      }
+    } else if (IsSafeBitOperand(node)) {
+      // Multiple uses => pick kRepBit.
+      return kRepBit;
+    } else if (upper->Is(Type::Number())) {
+      // Multiple uses => pick kRepFloat64.
+      return kRepFloat64;
+    }
+    return kRepTagged;
+  }
+
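
Read top to bottom, GetRepresentationForPhi is a preference order over the use mask and the phi's static type: tagged uses win outright, provably 32-bit integers prefer word32 unless every use wants float64, other numbers go to float64, and everything else stays tagged. A deliberately simplified restatement of the 32-bit slice (hypothetical mask constants, bit case omitted):

#include <cassert>
#include <cstdint>

// Hypothetical stand-ins for the representation bits used above.
enum : uint32_t {
  kRepBit = 1u << 0, kRepWord32 = 1u << 1, kRepFloat64 = 1u << 2,
  kRepTagged = 1u << 3, kRepMask = 0xFu,
};

uint32_t PickPhiRep(uint32_t use, bool is_int32, bool is_number) {
  if ((use & kRepMask) == kRepTagged) return kRepTagged;  // tagged wins
  if (is_int32)  // provably signed or unsigned 32-bit
    return (use & kRepMask) == kRepFloat64 ? kRepFloat64 : kRepWord32;
  if (is_number) return kRepFloat64;
  return kRepTagged;
}

int main() {
  // An int32 phi whose only uses are float64 stays float64; with mixed
  // uses it keeps word32 and pushes changes to the float64 users.
  assert(PickPhiRep(kRepFloat64, true, true) == kRepFloat64);
  assert(PickPhiRep(kRepFloat64 | kRepWord32, true, true) == kRepWord32);
  return 0;
}
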
+  // Helper for handling selects.
+  void VisitSelect(Node* node, MachineTypeUnion use,
+                   SimplifiedLowering* lowering) {
+    ProcessInput(node, 0, kRepBit);
+    MachineType output = GetRepresentationForPhi(node, use);
+
+    Type* upper = NodeProperties::GetBounds(node).upper;
+    MachineType output_type =
+        static_cast<MachineType>(changer_->TypeFromUpperBound(upper) | output);
     SetOutput(node, output_type);
 
     if (lower()) {
-      int values = OperatorProperties::GetValueInputCount(node->op());
+      // Update the select operator.
+      SelectParameters p = SelectParametersOf(node->op());
+      MachineType type = static_cast<MachineType>(output_type);
+      if (type != p.type()) {
+        node->set_op(lowering->common()->Select(type, p.hint()));
+      }
 
+      // Convert inputs to the output representation of this select.
+      ProcessInput(node, 1, output_type);
+      ProcessInput(node, 2, output_type);
+    } else {
+      // Propagate {use} of the select to value inputs.
+      MachineType use_type =
+          static_cast<MachineType>((use & kTypeMask) | output);
+      ProcessInput(node, 1, use_type);
+      ProcessInput(node, 2, use_type);
+    }
+  }
+
+  // Helper for handling phis.
+  void VisitPhi(Node* node, MachineTypeUnion use,
+                SimplifiedLowering* lowering) {
+    MachineType output = GetRepresentationForPhi(node, use);
+
+    Type* upper = NodeProperties::GetBounds(node).upper;
+    MachineType output_type =
+        static_cast<MachineType>(changer_->TypeFromUpperBound(upper) | output);
+    SetOutput(node, output_type);
+
+    int values = node->op()->ValueInputCount();
+
+    if (lower()) {
       // Update the phi operator.
       MachineType type = static_cast<MachineType>(output_type);
       if (type != OpParameter<MachineType>(node)) {
@@ -328,11 +384,19 @@
       }
 
       // Convert inputs to the output representation of this phi.
-      Node::Inputs inputs = node->inputs();
-      for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
-           ++iter, --values) {
+      for (Edge const edge : node->input_edges()) {
         // TODO(titzer): it'd be nice to have distinguished edge kinds here.
-        ProcessInput(node, iter.index(), values > 0 ? output_type : 0);
+        ProcessInput(node, edge.index(), values > 0 ? output_type : 0);
+        values--;
+      }
+    } else {
+      // Propagate {use} of the phi to value inputs, and 0 to control.
+      MachineType use_type =
+          static_cast<MachineType>((use & kTypeMask) | output);
+      for (Edge const edge : node->input_edges()) {
+        // TODO(titzer): it'd be nice to have distinguished edge kinds here.
+        ProcessInput(node, edge.index(), values > 0 ? use_type : 0);
+        values--;
       }
     }
   }
@@ -349,13 +413,55 @@
     return changer_->Float64OperatorFor(node->opcode());
   }
 
-  static MachineType AssumeImplicitFloat32Change(MachineType type) {
-    // TODO(titzer): Assume loads of float32 change representation to float64.
-    // Fix this with full support for float32 representations.
-    if (type & kRepFloat32) {
-      return static_cast<MachineType>((type & ~kRepFloat32) | kRepFloat64);
-    }
-    return type;
+  bool CanLowerToInt32Binop(Node* node, MachineTypeUnion use) {
+    return BothInputsAre(node, Type::Signed32()) && !CanObserveNonInt32(use);
+  }
+
+  bool IsSafeBitOperand(Node* node) {
+    Type* type = NodeProperties::GetBounds(node).upper;
+    return type->Is(safe_bit_range_);
+  }
+
+  bool IsSafeIntAdditiveOperand(Node* node) {
+    Type* type = NodeProperties::GetBounds(node).upper;
+    // TODO(jarin): Unfortunately, bitset types are not subtypes of larger
+    // range types, so we have to explicitly check for Integral32 here
+    // (in addition to the safe integer range). Once we fix subtyping for
+    // ranges, we should simplify this.
+    return type->Is(safe_int_additive_range_) || type->Is(Type::Integral32());
+  }
+
+  bool CanLowerToInt32AdditiveBinop(Node* node, MachineTypeUnion use) {
+    return IsSafeIntAdditiveOperand(node->InputAt(0)) &&
+           IsSafeIntAdditiveOperand(node->InputAt(1)) &&
+           !CanObserveNonInt32(use);
+  }
+
+  bool CanLowerToUint32Binop(Node* node, MachineTypeUnion use) {
+    return BothInputsAre(node, Type::Unsigned32()) && !CanObserveNonUint32(use);
+  }
+
+  bool CanLowerToUint32AdditiveBinop(Node* node, MachineTypeUnion use) {
+    return IsSafeIntAdditiveOperand(node->InputAt(0)) &&
+           IsSafeIntAdditiveOperand(node->InputAt(1)) &&
+           !CanObserveNonUint32(use);
+  }
+
+  bool CanObserveNonInt32(MachineTypeUnion use) {
+    return (use & (kTypeUint32 | kTypeNumber | kTypeAny)) != 0;
+  }
+
+  bool CanObserveMinusZero(MachineTypeUnion use) {
+    // TODO(turbofan): technically Uint32 cannot observe minus zero either.
+    return (use & (kTypeUint32 | kTypeNumber | kTypeAny)) != 0;
+  }
+
+  bool CanObserveNaN(MachineTypeUnion use) {
+    return (use & (kTypeNumber | kTypeAny)) != 0;
+  }
+
+  bool CanObserveNonUint32(MachineTypeUnion use) {
+    return (use & (kTypeInt32 | kTypeNumber | kTypeAny)) != 0;
   }
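
The CanObserve* predicates capture when a narrowing is visible to a consumer: Int32Add/Sub produce the same bits regardless of signedness, so the truncating lowerings are only unsound if some use reads the value under the other signedness or as a full number. A concrete instance of the hazard in plain C++:

#include <cassert>
#include <cstdint>

int main() {
  // The same 32 bits mean different numbers to signed and unsigned
  // observers, which is exactly why a kTypeUint32 use blocks the signed
  // int32 lowering (and vice versa).
  int32_t difference = 0 - 1;  // what Int32Sub computes for 0 - 1
  uint32_t observed = static_cast<uint32_t>(difference);
  assert(difference == -1);
  assert(observed == 4294967295u);  // not the JS number -1
  return 0;
}
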
 
   // Dispatching routine for visiting the node {node} with the usage {use}.
@@ -401,6 +507,8 @@
         ProcessInput(node, 0, kRepBit);
         Enqueue(NodeProperties::GetControlInput(node, 0));
         break;
+      case IrOpcode::kSelect:
+        return VisitSelect(node, use, lowering);
       case IrOpcode::kPhi:
         return VisitPhi(node, use, lowering);
 
@@ -415,19 +523,40 @@
 #define DEFINE_JS_CASE(x) case IrOpcode::k##x:
         JS_OP_LIST(DEFINE_JS_CASE)
 #undef DEFINE_JS_CASE
-        contains_js_nodes_ = true;
         VisitInputs(node);
         return SetOutput(node, kRepTagged);
 
       //------------------------------------------------------------------
       // Simplified operators.
       //------------------------------------------------------------------
+      case IrOpcode::kAnyToBoolean: {
+        if (IsSafeBitOperand(node->InputAt(0))) {
+          VisitUnop(node, kRepBit, kRepBit);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else {
+          VisitUnop(node, kMachAnyTagged, kTypeBool | kRepTagged);
+          if (lower()) {
+            // AnyToBoolean(x) => Call(ToBooleanStub, x, no-context)
+            Operator::Properties properties = node->op()->properties();
+            Callable callable = CodeFactory::ToBoolean(
+                jsgraph_->isolate(), ToBooleanStub::RESULT_AS_ODDBALL);
+            CallDescriptor::Flags flags = CallDescriptor::kPatchableCallSite;
+            CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+                callable.descriptor(), 0, flags, properties, jsgraph_->zone());
+            node->set_op(jsgraph_->common()->Call(desc));
+            node->InsertInput(jsgraph_->zone(), 0,
+                              jsgraph_->HeapConstant(callable.code()));
+            node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
+          }
+        }
+        break;
+      }
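
For inputs that are not already safe bits, the stub performs the full JS ToBoolean conversion. Restricted to plain numbers, that rule is just "nonzero and not NaN"; a scalar sketch of that slice only (the real stub also handles undefined, null, strings, and objects):

#include <cassert>
#include <cmath>

// JS ToBoolean on a double: false for +0, -0 and NaN, true otherwise.
bool NumberToBoolean(double value) {
  return value == value && value != 0.0;  // NaN != NaN; -0.0 == 0.0
}

int main() {
  assert(!NumberToBoolean(0.0));
  assert(!NumberToBoolean(-0.0));
  assert(!NumberToBoolean(std::nan("")));
  assert(NumberToBoolean(-3.5));
  return 0;
}
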
       case IrOpcode::kBooleanNot: {
         if (lower()) {
           MachineTypeUnion input = GetInfo(node->InputAt(0))->output;
           if (input & kRepBit) {
-            // BooleanNot(x: kRepBit) => WordEqual(x, #0)
-            node->set_op(lowering->machine()->WordEqual());
+            // BooleanNot(x: kRepBit) => Word32Equal(x, #0)
+            node->set_op(lowering->machine()->Word32Equal());
             node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0));
           } else {
             // BooleanNot(x: kRepTagged) => WordEqual(x, #false)
@@ -482,16 +611,26 @@
       case IrOpcode::kNumberSubtract: {
         // Add and subtract reduce to Int32Add/Sub if the inputs
         // are already integers and all uses are truncating.
-        if (BothInputsAre(node, Type::Signed32()) &&
-            (use & (kTypeUint32 | kTypeNumber | kTypeAny)) == 0) {
+        if (CanLowerToInt32Binop(node, use)) {
           // => signed Int32Add/Sub
           VisitInt32Binop(node);
           if (lower()) node->set_op(Int32Op(node));
-        } else if (BothInputsAre(node, Type::Unsigned32()) &&
-                   (use & (kTypeInt32 | kTypeNumber | kTypeAny)) == 0) {
+        } else if (CanLowerToInt32AdditiveBinop(node, use)) {
+          // => signed Int32Add/Sub, truncating inputs
+          ProcessTruncateWord32Input(node, 0, kTypeInt32);
+          ProcessTruncateWord32Input(node, 1, kTypeInt32);
+          SetOutput(node, kMachInt32);
+          if (lower()) node->set_op(Int32Op(node));
+        } else if (CanLowerToUint32Binop(node, use)) {
           // => unsigned Int32Add/Sub
           VisitUint32Binop(node);
           if (lower()) node->set_op(Uint32Op(node));
+        } else if (CanLowerToUint32AdditiveBinop(node, use)) {
+          // => unsigned Int32Add/Sub, truncating inputs
+          ProcessTruncateWord32Input(node, 0, kTypeUint32);
+          ProcessTruncateWord32Input(node, 1, kTypeUint32);
+          SetOutput(node, kMachUint32);
+          if (lower()) node->set_op(Uint32Op(node));
         } else {
           // => Float64Add/Sub
           VisitFloat64Binop(node);
@@ -499,54 +638,110 @@
         }
         break;
       }
-      case IrOpcode::kNumberMultiply:
-      case IrOpcode::kNumberDivide:
+      case IrOpcode::kNumberMultiply: {
+        NumberMatcher right(node->InputAt(1));
+        if (right.IsInRange(-1048576, 1048576)) {  // must fit double mantissa.
+          if (CanLowerToInt32Binop(node, use)) {
+            // => signed Int32Mul
+            VisitInt32Binop(node);
+            if (lower()) node->set_op(Int32Op(node));
+            break;
+          }
+        }
+        // => Float64Mul
+        VisitFloat64Binop(node);
+        if (lower()) node->set_op(Float64Op(node));
+        break;
+      }
+      case IrOpcode::kNumberDivide: {
+        if (CanLowerToInt32Binop(node, use)) {
+          // => signed Int32Div
+          VisitInt32Binop(node);
+          if (lower()) DeferReplacement(node, lowering->Int32Div(node));
+          break;
+        }
+        if (BothInputsAre(node, Type::Unsigned32()) && !CanObserveNaN(use)) {
+          // => unsigned Uint32Div
+          VisitUint32Binop(node);
+          if (lower()) DeferReplacement(node, lowering->Uint32Div(node));
+          break;
+        }
+        // => Float64Div
+        VisitFloat64Binop(node);
+        if (lower()) node->set_op(Float64Op(node));
+        break;
+      }
       case IrOpcode::kNumberModulus: {
-        // Float64Mul/Div/Mod
+        if (CanLowerToInt32Binop(node, use)) {
+          // => signed Int32Mod
+          VisitInt32Binop(node);
+          if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+          break;
+        }
+        if (BothInputsAre(node, Type::Unsigned32()) && !CanObserveNaN(use)) {
+          // => unsigned Uint32Mod
+          VisitUint32Binop(node);
+          if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
+          break;
+        }
+        // => Float64Mod
         VisitFloat64Binop(node);
         if (lower()) node->set_op(Float64Op(node));
         break;
       }
       case IrOpcode::kNumberToInt32: {
         MachineTypeUnion use_rep = use & kRepMask;
-        if (lower()) {
-          MachineTypeUnion in = GetInfo(node->InputAt(0))->output;
-          if ((in & kTypeMask) == kTypeInt32 || (in & kRepMask) == kRepWord32) {
-            // If the input has type int32, or is already a word32, just change
-            // representation if necessary.
-            VisitUnop(node, kTypeInt32 | use_rep, kTypeInt32 | use_rep);
-            DeferReplacement(node, node->InputAt(0));
-          } else {
-            // Require the input in float64 format and perform truncation.
-            // TODO(turbofan): avoid a truncation with a smi check.
-            VisitUnop(node, kTypeInt32 | kRepFloat64, kTypeInt32 | kRepWord32);
-            node->set_op(lowering->machine()->TruncateFloat64ToInt32());
-          }
+        Node* input = node->InputAt(0);
+        Type* in_upper = NodeProperties::GetBounds(input).upper;
+        MachineTypeUnion in = GetInfo(input)->output;
+        if (in_upper->Is(Type::Signed32())) {
+          // If the input has type int32, pass through representation.
+          VisitUnop(node, kTypeInt32 | use_rep, kTypeInt32 | use_rep);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else if ((in & kTypeMask) == kTypeUint32 ||
+                   in_upper->Is(Type::Unsigned32())) {
+          // Just change representation if necessary.
+          VisitUnop(node, kTypeUint32 | kRepWord32, kTypeInt32 | kRepWord32);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else if ((in & kTypeMask) == kTypeInt32 ||
+                   (in & kRepMask) == kRepWord32) {
+          // Just change representation if necessary.
+          VisitUnop(node, kTypeInt32 | kRepWord32, kTypeInt32 | kRepWord32);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
         } else {
-          // Propagate a type to the input, but pass through representation.
-          VisitUnop(node, kTypeInt32, kTypeInt32 | use_rep);
+          // Require the input in float64 format and perform truncation.
+          // TODO(turbofan): avoid a truncation with a smi check.
+          VisitUnop(node, kTypeInt32 | kRepFloat64, kTypeInt32 | kRepWord32);
+          if (lower())
+            node->set_op(lowering->machine()->TruncateFloat64ToInt32());
         }
         break;
       }
       case IrOpcode::kNumberToUint32: {
         MachineTypeUnion use_rep = use & kRepMask;
-        if (lower()) {
-          MachineTypeUnion in = GetInfo(node->InputAt(0))->output;
-          if ((in & kTypeMask) == kTypeUint32 ||
-              (in & kRepMask) == kRepWord32) {
-            // The input has type int32, just change representation.
-            VisitUnop(node, kTypeUint32 | use_rep, kTypeUint32 | use_rep);
-            DeferReplacement(node, node->InputAt(0));
-          } else {
-            // Require the input in float64 format to perform truncation.
-            // TODO(turbofan): avoid the truncation with a smi check.
-            VisitUnop(node, kTypeUint32 | kRepFloat64,
-                      kTypeUint32 | kRepWord32);
-            node->set_op(lowering->machine()->TruncateFloat64ToInt32());
-          }
+        Node* input = node->InputAt(0);
+        Type* in_upper = NodeProperties::GetBounds(input).upper;
+        MachineTypeUnion in = GetInfo(input)->output;
+        if (in_upper->Is(Type::Unsigned32())) {
+          // If the input has type uint32, pass through representation.
+          VisitUnop(node, kTypeUint32 | use_rep, kTypeUint32 | use_rep);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else if ((in & kTypeMask) == kTypeUint32 ||
+                   in_upper->Is(Type::Unsigned32())) {
+          // Just change representation if necessary.
+          VisitUnop(node, kTypeUint32 | kRepWord32, kTypeUint32 | kRepWord32);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else if ((in & kTypeMask) == kTypeInt32 ||
+                   (in & kRepMask) == kRepWord32) {
+          // Just change representation if necessary.
+          VisitUnop(node, kTypeInt32 | kRepWord32, kTypeUint32 | kRepWord32);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
         } else {
-          // Propagate a type to the input, but pass through representation.
-          VisitUnop(node, kTypeUint32, kTypeUint32 | use_rep);
+          // Require the input in float64 format and perform truncation.
+          // TODO(turbofan): avoid a truncation with a smi check.
+          VisitUnop(node, kTypeUint32 | kRepFloat64, kTypeUint32 | kRepWord32);
+          if (lower())
+            node->set_op(lowering->machine()->TruncateFloat64ToInt32());
         }
         break;
       }
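
Both conversions bottom out in TruncateFloat64ToInt32 when the input is only available as float64. JS ToInt32 truncates toward zero and then wraps modulo 2^32; a scalar model of that fallback path in plain C++:

#include <cassert>
#include <cmath>
#include <cstdint>

// JS ToInt32 semantics: NaN and infinities map to 0; otherwise truncate
// toward zero and wrap modulo 2^32 into the signed range.
int32_t ToInt32(double value) {
  if (!std::isfinite(value)) return 0;
  double wrapped = std::fmod(std::trunc(value), 4294967296.0);  // mod 2^32
  if (wrapped >= 2147483648.0) wrapped -= 4294967296.0;
  if (wrapped < -2147483648.0) wrapped += 4294967296.0;
  return static_cast<int32_t>(wrapped);
}

int main() {
  assert(ToInt32(3.9) == 3);
  assert(ToInt32(-3.9) == -3);
  assert(ToInt32(4294967296.0 + 5.0) == 5);  // wraps around 2^32
  assert(ToInt32(std::nan("")) == 0);        // NaN -> 0
  return 0;
}

ToUint32 is the same wrap read back as unsigned, which is why both cases can share one truncation operator.
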
@@ -579,47 +774,115 @@
         FieldAccess access = FieldAccessOf(node->op());
         ProcessInput(node, 0, changer_->TypeForBasePointer(access));
         ProcessRemainingInputs(node, 1);
-        SetOutput(node, AssumeImplicitFloat32Change(access.machine_type));
+        SetOutput(node, access.machine_type);
         if (lower()) lowering->DoLoadField(node);
         break;
       }
       case IrOpcode::kStoreField: {
         FieldAccess access = FieldAccessOf(node->op());
         ProcessInput(node, 0, changer_->TypeForBasePointer(access));
-        ProcessInput(node, 1, AssumeImplicitFloat32Change(access.machine_type));
+        ProcessInput(node, 1, access.machine_type);
         ProcessRemainingInputs(node, 2);
         SetOutput(node, 0);
         if (lower()) lowering->DoStoreField(node);
         break;
       }
-      case IrOpcode::kLoadElement: {
-        ElementAccess access = ElementAccessOf(node->op());
-        ProcessInput(node, 0, changer_->TypeForBasePointer(access));
-        ProcessInput(node, 1, kMachInt32);  // element index
+      case IrOpcode::kLoadBuffer: {
+        BufferAccess access = BufferAccessOf(node->op());
+        ProcessInput(node, 0, kMachPtr);    // buffer
+        ProcessInput(node, 1, kMachInt32);  // offset
         ProcessInput(node, 2, kMachInt32);  // length
         ProcessRemainingInputs(node, 3);
-        SetOutput(node, AssumeImplicitFloat32Change(access.machine_type));
+        // Tagged overrides everything if we have to do a typed array bounds
+        // check, because we may need to return undefined then.
+        MachineType output_type;
+        if (use & kRepTagged) {
+          output_type = kMachAnyTagged;
+        } else if (use & kRepFloat64) {
+          if (access.machine_type() & kRepFloat32) {
+            output_type = access.machine_type();
+          } else {
+            output_type = kMachFloat64;
+          }
+        } else if (use & kRepFloat32) {
+          output_type = kMachFloat32;
+        } else {
+          output_type = access.machine_type();
+        }
+        SetOutput(node, output_type);
+        if (lower()) lowering->DoLoadBuffer(node, output_type, changer_);
+        break;
+      }
+      case IrOpcode::kStoreBuffer: {
+        BufferAccess access = BufferAccessOf(node->op());
+        ProcessInput(node, 0, kMachPtr);               // buffer
+        ProcessInput(node, 1, kMachInt32);             // offset
+        ProcessInput(node, 2, kMachInt32);             // length
+        ProcessInput(node, 3, access.machine_type());  // value
+        ProcessRemainingInputs(node, 4);
+        SetOutput(node, 0);
+        if (lower()) lowering->DoStoreBuffer(node);
+        break;
+      }
+      case IrOpcode::kLoadElement: {
+        ElementAccess access = ElementAccessOf(node->op());
+        ProcessInput(node, 0, changer_->TypeForBasePointer(access));  // base
+        ProcessInput(node, 1, kMachInt32);                            // index
+        ProcessRemainingInputs(node, 2);
+        SetOutput(node, access.machine_type);
         if (lower()) lowering->DoLoadElement(node);
         break;
       }
       case IrOpcode::kStoreElement: {
         ElementAccess access = ElementAccessOf(node->op());
-        ProcessInput(node, 0, changer_->TypeForBasePointer(access));
-        ProcessInput(node, 1, kMachInt32);  // element index
-        ProcessInput(node, 2, kMachInt32);  // length
-        ProcessInput(node, 3, AssumeImplicitFloat32Change(access.machine_type));
-        ProcessRemainingInputs(node, 4);
+        ProcessInput(node, 0, changer_->TypeForBasePointer(access));  // base
+        ProcessInput(node, 1, kMachInt32);                            // index
+        ProcessInput(node, 2, access.machine_type);                   // value
+        ProcessRemainingInputs(node, 3);
         SetOutput(node, 0);
         if (lower()) lowering->DoStoreElement(node);
         break;
       }
+      case IrOpcode::kObjectIsSmi: {
+        ProcessInput(node, 0, kMachAnyTagged);
+        SetOutput(node, kRepBit | kTypeBool);
+        if (lower()) {
+          Node* is_tagged = jsgraph_->graph()->NewNode(
+              jsgraph_->machine()->WordAnd(), node->InputAt(0),
+              jsgraph_->Int32Constant(static_cast<int>(kSmiTagMask)));
+          Node* is_smi = jsgraph_->graph()->NewNode(
+              jsgraph_->machine()->WordEqual(), is_tagged,
+              jsgraph_->Int32Constant(kSmiTag));
+          DeferReplacement(node, is_smi);
+        }
+        break;
+      }
+      case IrOpcode::kObjectIsNonNegativeSmi: {
+        ProcessInput(node, 0, kMachAnyTagged);
+        SetOutput(node, kRepBit | kTypeBool);
+        if (lower()) {
+          Node* is_tagged = jsgraph_->graph()->NewNode(
+              jsgraph_->machine()->WordAnd(), node->InputAt(0),
+              jsgraph_->Int32Constant(static_cast<int>(kSmiTagMask)));
+          Node* is_smi = jsgraph_->graph()->NewNode(
+              jsgraph_->machine()->WordEqual(), is_tagged,
+              jsgraph_->Int32Constant(kSmiTag));
+          Node* is_non_neg = jsgraph_->graph()->NewNode(
+              jsgraph_->machine()->IntLessThanOrEqual(),
+              jsgraph_->Int32Constant(0), node->InputAt(0));
+          Node* is_non_neg_smi = jsgraph_->graph()->NewNode(
+              jsgraph_->machine()->Word32And(), is_smi, is_non_neg);
+          DeferReplacement(node, is_non_neg_smi);
+        }
+        break;
+      }
 
       //------------------------------------------------------------------
       // Machine-level operators.
       //------------------------------------------------------------------
       case IrOpcode::kLoad: {
         // TODO(titzer): machine loads/stores need to know BaseTaggedness!?
-        MachineType tBase = kRepTagged;
+        MachineTypeUnion tBase = kRepTagged | kMachPtr;
         LoadRepresentation rep = OpParameter<LoadRepresentation>(node);
         ProcessInput(node, 0, tBase);   // pointer or object
         ProcessInput(node, 1, kMachInt32);  // index
@@ -629,7 +892,7 @@
       }
       case IrOpcode::kStore: {
         // TODO(titzer): machine loads/stores need to know BaseTaggedness!?
-        MachineType tBase = kRepTagged;
+        MachineTypeUnion tBase = kRepTagged | kMachPtr;
         StoreRepresentation rep = OpParameter<StoreRepresentation>(node);
         ProcessInput(node, 0, tBase);   // pointer or object
         ProcessInput(node, 1, kMachInt32);  // index
@@ -640,7 +903,7 @@
       }
       case IrOpcode::kWord32Shr:
        // We output unsigned int32 because JavaScript's >>> is unsigned.
-        return VisitBinop(node, kRepWord32, kRepWord32 | kTypeUint32);
+        return VisitBinop(node, kMachUint32, kMachUint32);
       case IrOpcode::kWord32And:
       case IrOpcode::kWord32Or:
       case IrOpcode::kWord32Xor:
@@ -656,11 +919,13 @@
       case IrOpcode::kInt32Add:
       case IrOpcode::kInt32Sub:
       case IrOpcode::kInt32Mul:
+      case IrOpcode::kInt32MulHigh:
       case IrOpcode::kInt32Div:
       case IrOpcode::kInt32Mod:
         return VisitInt32Binop(node);
-      case IrOpcode::kInt32UDiv:
-      case IrOpcode::kInt32UMod:
+      case IrOpcode::kUint32Div:
+      case IrOpcode::kUint32Mod:
+      case IrOpcode::kUint32MulHigh:
         return VisitUint32Binop(node);
       case IrOpcode::kInt32LessThan:
       case IrOpcode::kInt32LessThanOrEqual:
@@ -680,8 +945,11 @@
       case IrOpcode::kInt64LessThanOrEqual:
         return VisitInt64Cmp(node);
 
-      case IrOpcode::kInt64UDiv:
-      case IrOpcode::kInt64UMod:
+      case IrOpcode::kUint64LessThan:
+        return VisitUint64Cmp(node);
+
+      case IrOpcode::kUint64Div:
+      case IrOpcode::kUint64Mod:
         return VisitUint64Binop(node);
 
       case IrOpcode::kWord64And:
@@ -700,11 +968,17 @@
       case IrOpcode::kChangeUint32ToUint64:
         return VisitUnop(node, kTypeUint32 | kRepWord32,
                          kTypeUint32 | kRepWord64);
+      case IrOpcode::kTruncateFloat64ToFloat32:
+        return VisitUnop(node, kTypeNumber | kRepFloat64,
+                         kTypeNumber | kRepFloat32);
       case IrOpcode::kTruncateInt64ToInt32:
         // TODO(titzer): Is kTypeInt32 correct here?
         return VisitUnop(node, kTypeInt32 | kRepWord64,
                          kTypeInt32 | kRepWord32);
 
+      case IrOpcode::kChangeFloat32ToFloat64:
+        return VisitUnop(node, kTypeNumber | kRepFloat32,
+                         kTypeNumber | kRepFloat64);
       case IrOpcode::kChangeInt32ToFloat64:
         return VisitUnop(node, kTypeInt32 | kRepWord32,
                          kTypeInt32 | kRepFloat64);
@@ -725,11 +999,23 @@
       case IrOpcode::kFloat64Mod:
         return VisitFloat64Binop(node);
       case IrOpcode::kFloat64Sqrt:
+      case IrOpcode::kFloat64Floor:
+      case IrOpcode::kFloat64Ceil:
+      case IrOpcode::kFloat64RoundTruncate:
+      case IrOpcode::kFloat64RoundTiesAway:
         return VisitUnop(node, kMachFloat64, kMachFloat64);
       case IrOpcode::kFloat64Equal:
       case IrOpcode::kFloat64LessThan:
       case IrOpcode::kFloat64LessThanOrEqual:
         return VisitFloat64Cmp(node);
+      case IrOpcode::kLoadStackPointer:
+        return VisitLeaf(node, kMachPtr);
+      case IrOpcode::kStateValues:
+        for (int i = 0; i < node->InputCount(); i++) {
+          ProcessInput(node, i, kTypeAny);
+        }
+        SetOutput(node, kMachAnyTagged);
+        break;
       default:
         VisitInputs(node);
         break;
@@ -737,6 +1023,11 @@
   }
 
   void DeferReplacement(Node* node, Node* replacement) {
+    if (FLAG_trace_representation) {
+      TRACE(("defer replacement #%d:%s with #%d:%s\n", node->id(),
+             node->op()->mnemonic(), replacement->id(),
+             replacement->op()->mnemonic()));
+    }
     if (replacement->id() < count_) {
       // Replace with a previously existing node eagerly.
       node->ReplaceUses(replacement);
@@ -770,10 +1061,11 @@
   NodeInfo* info_;                  // node id -> usage information
   NodeVector nodes_;                // collected nodes
   NodeVector replacements_;         // replacements to be done after lowering
-  bool contains_js_nodes_;          // {true} if a JS operator was seen
   Phase phase_;                     // current phase of algorithm
   RepresentationChanger* changer_;  // for inserting representation changes
   ZoneQueue<Node*> queue_;          // queue for traversing the graph
+  Type* safe_bit_range_;
+  Type* safe_int_additive_range_;
 
   NodeInfo* GetInfo(Node* node) {
     DCHECK(node->id() >= 0);
@@ -797,7 +1089,7 @@
   SimplifiedOperatorBuilder simplified(graph()->zone());
   RepresentationChanger changer(jsgraph(), &simplified,
                                 graph()->zone()->isolate());
-  RepresentationSelector selector(jsgraph(), zone(), &changer);
+  RepresentationSelector selector(jsgraph(), zone_, &changer);
   selector.Run(this);
 }
 
@@ -837,8 +1129,8 @@
 void SimplifiedLowering::DoLoadField(Node* node) {
   const FieldAccess& access = FieldAccessOf(node->op());
   node->set_op(machine()->Load(access.machine_type));
-  Node* offset = jsgraph()->Int32Constant(access.offset - access.tag());
-  node->InsertInput(zone(), 1, offset);
+  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
+  node->InsertInput(graph()->zone(), 1, offset);
 }
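
The offset arithmetic works because V8 heap pointers carry a low tag bit (kHeapObjectTag is 1), so subtracting the tag from the field offset once, as a constant, is equivalent to untagging the base pointer before every access. A small illustration with plain integers:

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kTag = 1;  // V8's heap-object tag bit
  intptr_t untagged_base = 0x1000;
  intptr_t tagged_base = untagged_base + kTag;
  intptr_t field_offset = 8;
  // Folding "- tag" into the constant offset gives the same address as
  // untagging the base first, without an extra subtraction at runtime.
  assert(tagged_base + (field_offset - kTag) == untagged_base + field_offset);
  return 0;
}
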
 
 
@@ -848,22 +1140,96 @@
       access.base_is_tagged, access.machine_type, access.type);
   node->set_op(
       machine()->Store(StoreRepresentation(access.machine_type, kind)));
-  Node* offset = jsgraph()->Int32Constant(access.offset - access.tag());
-  node->InsertInput(zone(), 1, offset);
+  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
+  node->InsertInput(graph()->zone(), 1, offset);
 }
 
 
 Node* SimplifiedLowering::ComputeIndex(const ElementAccess& access,
-                                       Node* index) {
-  int element_size = ElementSizeOf(access.machine_type);
-  if (element_size != 1) {
-    index = graph()->NewNode(machine()->Int32Mul(),
-                             jsgraph()->Int32Constant(element_size), index);
+                                       Node* const key) {
+  Node* index = key;
+  const int element_size_shift = ElementSizeLog2Of(access.machine_type);
+  if (element_size_shift) {
+    index = graph()->NewNode(machine()->Word32Shl(), index,
+                             jsgraph()->Int32Constant(element_size_shift));
   }
-  int fixed_offset = access.header_size - access.tag();
-  if (fixed_offset == 0) return index;
-  return graph()->NewNode(machine()->Int32Add(), index,
-                          jsgraph()->Int32Constant(fixed_offset));
+  const int fixed_offset = access.header_size - access.tag();
+  if (fixed_offset) {
+    index = graph()->NewNode(machine()->Int32Add(), index,
+                             jsgraph()->Int32Constant(fixed_offset));
+  }
+  if (machine()->Is64()) {
+    // TODO(turbofan): This is probably only correct for typed arrays, and only
+    // if the typed arrays are at most 2GiB in size, which happens to match
+    // exactly our current situation.
+    index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index);
+  }
+  return index;
+}
+
+
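
ComputeIndex reduces element addressing to a shift and an add: scale the key by the element size (a power of two, hence Word32Shl) and fold the header size and the pointer tag into one constant. A scalar model of the byte-offset computation:

#include <cassert>
#include <cstdint>

// Mirrors ComputeIndex above for 32-bit offsets: byte offset =
// (key << log2(element_size)) + header_size - tag.
int32_t ComputeByteOffset(int32_t key, int element_size_log2,
                          int header_size, int tag) {
  int32_t index = key << element_size_log2;  // Word32Shl
  return index + (header_size - tag);        // Int32Add of a folded constant
}

int main() {
  // Hypothetical layout: 8-byte elements behind a 16-byte header on a
  // tagged base. Element 3 then lives at byte offset 3*8 + 16 - 1 = 39.
  assert(ComputeByteOffset(3, 3, 16, 1) == 39);
  return 0;
}
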
+void SimplifiedLowering::DoLoadBuffer(Node* node, MachineType output_type,
+                                      RepresentationChanger* changer) {
+  DCHECK_EQ(IrOpcode::kLoadBuffer, node->opcode());
+  DCHECK_NE(kMachNone, RepresentationOf(output_type));
+  MachineType const type = BufferAccessOf(node->op()).machine_type();
+  if (output_type != type) {
+    Node* const buffer = node->InputAt(0);
+    Node* const offset = node->InputAt(1);
+    Node* const length = node->InputAt(2);
+    Node* const effect = node->InputAt(3);
+    Node* const control = node->InputAt(4);
+    Node* const index =
+        machine()->Is64()
+            ? graph()->NewNode(machine()->ChangeUint32ToUint64(), offset)
+            : offset;
+
+    Node* check = graph()->NewNode(machine()->Uint32LessThan(), offset, length);
+    Node* branch =
+        graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+    Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+    Node* etrue =
+        graph()->NewNode(machine()->Load(type), buffer, index, effect, if_true);
+    Node* vtrue = changer->GetRepresentationFor(etrue, type, output_type);
+
+    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+    Node* efalse = effect;
+    Node* vfalse;
+    if (output_type & kRepTagged) {
+      vfalse = jsgraph()->UndefinedConstant();
+    } else if (output_type & kRepFloat64) {
+      vfalse =
+          jsgraph()->Float64Constant(std::numeric_limits<double>::quiet_NaN());
+    } else if (output_type & kRepFloat32) {
+      vfalse =
+          jsgraph()->Float32Constant(std::numeric_limits<float>::quiet_NaN());
+    } else {
+      vfalse = jsgraph()->Int32Constant(0);
+    }
+
+    Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+    Node* ephi = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, merge);
+
+    // Replace effect uses of {node} with the {ephi}.
+    NodeProperties::ReplaceWithValue(node, node, ephi);
+
+    // Turn the {node} into a Phi.
+    node->set_op(common()->Phi(output_type, 2));
+    node->ReplaceInput(0, vtrue);
+    node->ReplaceInput(1, vfalse);
+    node->ReplaceInput(2, merge);
+    node->TrimInputCount(3);
+  } else {
+    node->set_op(machine()->CheckedLoad(type));
+  }
+}
+
+
+void SimplifiedLowering::DoStoreBuffer(Node* node) {
+  DCHECK_EQ(IrOpcode::kStoreBuffer, node->opcode());
+  MachineType const type = BufferAccessOf(node->op()).machine_type();
+  node->set_op(machine()->CheckedStore(type));
 }
 
 
@@ -871,32 +1237,32 @@
   const ElementAccess& access = ElementAccessOf(node->op());
   node->set_op(machine()->Load(access.machine_type));
   node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
-  node->RemoveInput(2);
 }
 
 
 void SimplifiedLowering::DoStoreElement(Node* node) {
   const ElementAccess& access = ElementAccessOf(node->op());
-  WriteBarrierKind kind = ComputeWriteBarrierKind(
-      access.base_is_tagged, access.machine_type, access.type);
-  node->set_op(
-      machine()->Store(StoreRepresentation(access.machine_type, kind)));
+  node->set_op(machine()->Store(StoreRepresentation(
+      access.machine_type,
+      ComputeWriteBarrierKind(access.base_is_tagged, access.machine_type,
+                              access.type))));
   node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
-  node->RemoveInput(2);
 }
 
 
 void SimplifiedLowering::DoStringAdd(Node* node) {
+  Operator::Properties properties = node->op()->properties();
   Callable callable = CodeFactory::StringAdd(
       zone()->isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
   CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
-  CallDescriptor* desc =
-      Linkage::GetStubCallDescriptor(callable.descriptor(), 0, flags, zone());
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      callable.descriptor(), 0, flags, properties, zone());
   node->set_op(common()->Call(desc));
-  node->InsertInput(zone(), 0, jsgraph()->HeapConstant(callable.code()));
-  node->AppendInput(zone(), jsgraph()->UndefinedConstant());
-  node->AppendInput(zone(), graph()->start());
-  node->AppendInput(zone(), graph()->start());
+  node->InsertInput(graph()->zone(), 0,
+                    jsgraph()->HeapConstant(callable.code()));
+  node->AppendInput(graph()->zone(), jsgraph()->UndefinedConstant());
+  node->AppendInput(graph()->zone(), graph()->start());
+  node->AppendInput(graph()->zone(), graph()->start());
 }
 
 
@@ -919,6 +1285,213 @@
 }
 
 
+Node* SimplifiedLowering::Int32Div(Node* const node) {
+  Int32BinopMatcher m(node);
+  Node* const zero = jsgraph()->Int32Constant(0);
+  Node* const lhs = m.left().node();
+  Node* const rhs = m.right().node();
+
+  if (m.right().Is(-1)) {
+    return graph()->NewNode(machine()->Int32Sub(), zero, lhs);
+  } else if (m.right().Is(0)) {
+    return rhs;
+  } else if (machine()->Int32DivIsSafe() || m.right().HasValue()) {
+    return graph()->NewNode(machine()->Int32Div(), lhs, rhs, graph()->start());
+  }
+
+  Diamond if_zero(graph(), common(),
+                  graph()->NewNode(machine()->Word32Equal(), rhs, zero),
+                  BranchHint::kFalse);
+
+  Diamond if_minus_one(graph(), common(),
+                       graph()->NewNode(machine()->Word32Equal(), rhs,
+                                        jsgraph()->Int32Constant(-1)),
+                       BranchHint::kFalse);
+  if_minus_one.Nest(if_zero, false);
+  Node* sub = graph()->NewNode(machine()->Int32Sub(), zero, lhs);
+  Node* div =
+      graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_minus_one.if_false);
+
+  return if_zero.Phi(kMachInt32, zero, if_minus_one.Phi(kMachInt32, sub, div));
+}
+
+
+Node* SimplifiedLowering::Int32Mod(Node* const node) {
+  Int32BinopMatcher m(node);
+  Node* const zero = jsgraph()->Int32Constant(0);
+  Node* const minus_one = jsgraph()->Int32Constant(-1);
+  Node* const lhs = m.left().node();
+  Node* const rhs = m.right().node();
+
+  if (m.right().Is(-1) || m.right().Is(0)) {
+    return zero;
+  } else if (m.right().HasValue()) {
+    return graph()->NewNode(machine()->Int32Mod(), lhs, rhs, graph()->start());
+  }
+
+  // General case for signed integer modulus, with an optimization for an
+  // (unknown) power-of-2 right-hand side.
+  //
+  //   if 0 < rhs then
+  //     msk = rhs - 1
+  //     if rhs & msk != 0 then
+  //       lhs % rhs
+  //     else
+  //       if lhs < 0 then
+  //         -(-lhs & msk)
+  //       else
+  //         lhs & msk
+  //   else
+  //     if rhs < -1 then
+  //       lhs % rhs
+  //     else
+  //       zero
+  //
+  // Note: We do not use the Diamond helper class here, because it really hurts
+  // readability with nested diamonds.
+  const Operator* const merge_op = common()->Merge(2);
+  const Operator* const phi_op = common()->Phi(kMachInt32, 2);
+
+  Node* check0 = graph()->NewNode(machine()->Int32LessThan(), zero, rhs);
+  Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
+                                   graph()->start());
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* true0;
+  {
+    Node* msk = graph()->NewNode(machine()->Int32Add(), rhs, minus_one);
+
+    Node* check1 = graph()->NewNode(machine()->Word32And(), rhs, msk);
+    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* true1 = graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_true1);
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* false1;
+    {
+      Node* check2 = graph()->NewNode(machine()->Int32LessThan(), lhs, zero);
+      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                       check2, if_false1);
+
+      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+      Node* true2 = graph()->NewNode(
+          machine()->Int32Sub(), zero,
+          graph()->NewNode(machine()->Word32And(),
+                           graph()->NewNode(machine()->Int32Sub(), zero, lhs),
+                           msk));
+
+      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+      Node* false2 = graph()->NewNode(machine()->Word32And(), lhs, msk);
+
+      if_false1 = graph()->NewNode(merge_op, if_true2, if_false2);
+      false1 = graph()->NewNode(phi_op, true2, false2, if_false1);
+    }
+
+    if_true0 = graph()->NewNode(merge_op, if_true1, if_false1);
+    true0 = graph()->NewNode(phi_op, true1, false1, if_true0);
+  }
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* false0;
+  {
+    Node* check1 = graph()->NewNode(machine()->Int32LessThan(), rhs, minus_one);
+    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+                                     check1, if_false0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* true1 = graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_true1);
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* false1 = zero;
+
+    if_false0 = graph()->NewNode(merge_op, if_true1, if_false1);
+    false0 = graph()->NewNode(phi_op, true1, false1, if_false0);
+  }
+
+  Node* merge0 = graph()->NewNode(merge_op, if_true0, if_false0);
+  return graph()->NewNode(phi_op, true0, false0, merge0);
+}
+
+
+Node* SimplifiedLowering::Uint32Div(Node* const node) {
+  Uint32BinopMatcher m(node);
+  Node* const zero = jsgraph()->Uint32Constant(0);
+  Node* const lhs = m.left().node();
+  Node* const rhs = m.right().node();
+
+  if (m.right().Is(0)) {
+    return zero;
+  } else if (machine()->Uint32DivIsSafe() || m.right().HasValue()) {
+    return graph()->NewNode(machine()->Uint32Div(), lhs, rhs, graph()->start());
+  }
+
+  Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
+  Diamond d(graph(), common(), check, BranchHint::kFalse);
+  Node* div = graph()->NewNode(machine()->Uint32Div(), lhs, rhs, d.if_false);
+  return d.Phi(kMachUint32, zero, div);
+}
+
+
+Node* SimplifiedLowering::Uint32Mod(Node* const node) {
+  Uint32BinopMatcher m(node);
+  Node* const minus_one = jsgraph()->Int32Constant(-1);
+  Node* const zero = jsgraph()->Uint32Constant(0);
+  Node* const lhs = m.left().node();
+  Node* const rhs = m.right().node();
+
+  if (m.right().Is(0)) {
+    return zero;
+  } else if (m.right().HasValue()) {
+    return graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, graph()->start());
+  }
+
+  // General case for unsigned integer modulus, with an optimization for an
+  // (unknown) power-of-2 right-hand side.
+  //
+  //   if rhs then
+  //     msk = rhs - 1
+  //     if rhs & msk != 0 then
+  //       lhs % rhs
+  //     else
+  //       lhs & msk
+  //   else
+  //     zero
+  //
+  // Note: We do not use the Diamond helper class here, because it really hurts
+  // readability with nested diamonds.
+  const Operator* const merge_op = common()->Merge(2);
+  const Operator* const phi_op = common()->Phi(kMachInt32, 2);
+
+  Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), rhs,
+                                   graph()->start());
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* true0;
+  {
+    Node* msk = graph()->NewNode(machine()->Int32Add(), rhs, minus_one);
+
+    Node* check1 = graph()->NewNode(machine()->Word32And(), rhs, msk);
+    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* true1 = graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, if_true1);
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* false1 = graph()->NewNode(machine()->Word32And(), lhs, msk);
+
+    if_true0 = graph()->NewNode(merge_op, if_true1, if_false1);
+    true0 = graph()->NewNode(phi_op, true1, false1, if_true0);
+  }
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* false0 = zero;
+
+  Node* merge0 = graph()->NewNode(merge_op, if_true0, if_false0);
+  return graph()->NewNode(phi_op, true0, false0, merge0);
+}
+
+
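
The common thread in these helpers is that JS integer division must not trap where the machine instruction would: a zero divisor yields 0 after truncation, and INT_MIN / -1 must wrap rather than fault. Scalar semantics of the guarded graphs above (the power-of-2 mask fast path is omitted; plain % stands in for it):

#include <cassert>
#include <cstdint>
#include <limits>

int32_t GuardedDiv(int32_t lhs, int32_t rhs) {
  if (rhs == 0) return 0;  // the if_zero diamond
  if (rhs == -1) {
    // 0 - lhs with wrapping, as Int32Sub does; avoids the INT_MIN / -1 trap.
    return static_cast<int32_t>(0u - static_cast<uint32_t>(lhs));
  }
  return lhs / rhs;  // truncating division, as Int32Div
}

int32_t GuardedMod(int32_t lhs, int32_t rhs) {
  if (rhs == 0 || rhs == -1) return 0;  // both merge to the zero constant
  return lhs % rhs;
}

int main() {
  const int32_t kMin = std::numeric_limits<int32_t>::min();
  assert(GuardedDiv(7, 0) == 0);
  assert(GuardedDiv(kMin, -1) == kMin);  // wraps instead of faulting
  assert(GuardedMod(kMin, -1) == 0);
  assert(GuardedMod(7, 4) == 3);
  return 0;
}
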
 void SimplifiedLowering::DoStringEqual(Node* node) {
   node->set_op(machine()->WordEqual());
   node->ReplaceInput(0, StringComparison(node, false));
@@ -939,7 +1512,6 @@
   node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
 }
 
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/simplified-lowering.h b/src/compiler/simplified-lowering.h
index 2ba7e3b..b21cf21 100644
--- a/src/compiler/simplified-lowering.h
+++ b/src/compiler/simplified-lowering.h
@@ -14,16 +14,26 @@
 namespace internal {
 namespace compiler {
 
-class SimplifiedLowering {
+// Forward declarations.
+class RepresentationChanger;
+
+
+class SimplifiedLowering FINAL {
  public:
-  explicit SimplifiedLowering(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
-  virtual ~SimplifiedLowering() {}
+  SimplifiedLowering(JSGraph* jsgraph, Zone* zone)
+      : jsgraph_(jsgraph), zone_(zone) {}
+  ~SimplifiedLowering() {}
 
   void LowerAllNodes();
 
   // TODO(titzer): These are exposed for direct testing. Use a friend class.
   void DoLoadField(Node* node);
   void DoStoreField(Node* node);
+  // TODO(turbofan): The output_type can be removed once the result of the
+  // representation analysis is stored in the node bounds.
+  void DoLoadBuffer(Node* node, MachineType output_type,
+                    RepresentationChanger* changer);
+  void DoStoreBuffer(Node* node);
   void DoLoadElement(Node* node);
   void DoStoreElement(Node* node);
   void DoStringAdd(Node* node);
@@ -32,14 +42,19 @@
   void DoStringLessThanOrEqual(Node* node);
 
  private:
-  JSGraph* jsgraph_;
+  JSGraph* const jsgraph_;
+  Zone* const zone_;
 
   Node* SmiTag(Node* node);
   Node* IsTagged(Node* node);
   Node* Untag(Node* node);
   Node* OffsetMinusTagConstant(int32_t offset);
-  Node* ComputeIndex(const ElementAccess& access, Node* index);
+  Node* ComputeIndex(const ElementAccess& access, Node* const key);
   Node* StringComparison(Node* node, bool requires_ordering);
+  Node* Int32Div(Node* const node);
+  Node* Int32Mod(Node* const node);
+  Node* Uint32Div(Node* const node);
+  Node* Uint32Mod(Node* const node);
 
   friend class RepresentationSelector;
 
diff --git a/src/compiler/simplified-operator-reducer-unittest.cc b/src/compiler/simplified-operator-reducer-unittest.cc
deleted file mode 100644
index 739264e..0000000
--- a/src/compiler/simplified-operator-reducer-unittest.cc
+++ /dev/null
@@ -1,483 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/graph-unittest.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/simplified-operator.h"
-#include "src/compiler/simplified-operator-reducer.h"
-#include "src/compiler/typer.h"
-#include "src/conversions.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class SimplifiedOperatorReducerTest : public GraphTest {
- public:
-  explicit SimplifiedOperatorReducerTest(int num_parameters = 1)
-      : GraphTest(num_parameters), simplified_(zone()) {}
-  virtual ~SimplifiedOperatorReducerTest() {}
-
- protected:
-  Reduction Reduce(Node* node) {
-    Typer typer(zone());
-    MachineOperatorBuilder machine;
-    JSOperatorBuilder javascript(zone());
-    JSGraph jsgraph(graph(), common(), &javascript, &typer, &machine);
-    SimplifiedOperatorReducer reducer(&jsgraph);
-    return reducer.Reduce(node);
-  }
-
-  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
-
- private:
-  SimplifiedOperatorBuilder simplified_;
-};
-
-
-template <typename T>
-class SimplifiedOperatorReducerTestWithParam
-    : public SimplifiedOperatorReducerTest,
-      public ::testing::WithParamInterface<T> {
- public:
-  explicit SimplifiedOperatorReducerTestWithParam(int num_parameters = 1)
-      : SimplifiedOperatorReducerTest(num_parameters) {}
-  virtual ~SimplifiedOperatorReducerTestWithParam() {}
-};
-
-
-namespace {
-
-static const double kFloat64Values[] = {
-    -V8_INFINITY,  -6.52696e+290, -1.05768e+290, -5.34203e+268, -1.01997e+268,
-    -8.22758e+266, -1.58402e+261, -5.15246e+241, -5.92107e+226, -1.21477e+226,
-    -1.67913e+188, -1.6257e+184,  -2.60043e+170, -2.52941e+168, -3.06033e+116,
-    -4.56201e+52,  -3.56788e+50,  -9.9066e+38,   -3.07261e+31,  -2.1271e+09,
-    -1.91489e+09,  -1.73053e+09,  -9.30675e+08,  -26030,        -20453,
-    -15790,        -11699,        -111,          -97,           -78,
-    -63,           -58,           -1.53858e-06,  -2.98914e-12,  -1.14741e-39,
-    -8.20347e-57,  -1.48932e-59,  -3.17692e-66,  -8.93103e-81,  -3.91337e-83,
-    -6.0489e-92,   -8.83291e-113, -4.28266e-117, -1.92058e-178, -2.0567e-192,
-    -1.68167e-194, -1.51841e-214, -3.98738e-234, -7.31851e-242, -2.21875e-253,
-    -1.11612e-293, -0.0,          0.0,           2.22507e-308,  1.06526e-307,
-    4.16643e-227,  6.76624e-223,  2.0432e-197,   3.16254e-184,  1.37315e-173,
-    2.88603e-172,  1.54155e-99,   4.42923e-81,   1.40539e-73,   5.4462e-73,
-    1.24064e-58,   3.11167e-58,   2.75826e-39,   0.143815,      58,
-    67,            601,           7941,          11644,         13697,
-    25680,         29882,         1.32165e+08,   1.62439e+08,   4.16837e+08,
-    9.59097e+08,   1.32491e+09,   1.8728e+09,    1.0672e+17,    2.69606e+46,
-    1.98285e+79,   1.0098e+82,    7.93064e+88,   3.67444e+121,  9.36506e+123,
-    7.27954e+162,  3.05316e+168,  1.16171e+175,  1.64771e+189,  1.1622e+202,
-    2.00748e+239,  2.51778e+244,  3.90282e+306,  1.79769e+308,  V8_INFINITY};
-
-
-static const int32_t kInt32Values[] = {
-    -2147483647 - 1, -2104508227, -2103151830, -1435284490, -1378926425,
-    -1318814539,     -1289388009, -1287537572, -1279026536, -1241605942,
-    -1226046939,     -941837148,  -779818051,  -413830641,  -245798087,
-    -184657557,      -127145950,  -105483328,  -32325,      -26653,
-    -23858,          -23834,      -22363,      -19858,      -19044,
-    -18744,          -15528,      -5309,       -3372,       -2093,
-    -104,            -98,         -97,         -93,         -84,
-    -80,             -78,         -76,         -72,         -58,
-    -57,             -56,         -55,         -45,         -40,
-    -34,             -32,         -25,         -24,         -5,
-    -2,              0,           3,           10,          24,
-    34,              42,          46,          47,          48,
-    52,              56,          64,          65,          71,
-    76,              79,          81,          82,          97,
-    102,             103,         104,         106,         107,
-    109,             116,         122,         3653,        4485,
-    12405,           16504,       26262,       28704,       29755,
-    30554,           16476817,    605431957,   832401070,   873617242,
-    914205764,       1062628108,  1087581664,  1488498068,  1534668023,
-    1661587028,      1696896187,  1866841746,  2032089723,  2147483647};
-
-
-static const uint32_t kUint32Values[] = {
-    0x0,        0x5,        0x8,        0xc,        0xd,        0x26,
-    0x28,       0x29,       0x30,       0x34,       0x3e,       0x42,
-    0x50,       0x5b,       0x63,       0x71,       0x77,       0x7c,
-    0x83,       0x88,       0x96,       0x9c,       0xa3,       0xfa,
-    0x7a7,      0x165d,     0x234d,     0x3acb,     0x43a5,     0x4573,
-    0x5b4f,     0x5f14,     0x6996,     0x6c6e,     0x7289,     0x7b9a,
-    0x7bc9,     0x86bb,     0xa839,     0xaa41,     0xb03b,     0xc942,
-    0xce68,     0xcf4c,     0xd3ad,     0xdea3,     0xe90c,     0xed86,
-    0xfba5,     0x172dcc6,  0x114d8fc1, 0x182d6c9d, 0x1b1e3fad, 0x1db033bf,
-    0x1e1de755, 0x1f625c80, 0x28f6cf00, 0x2acb6a94, 0x2c20240e, 0x2f0fe54e,
-    0x31863a7c, 0x33325474, 0x3532fae3, 0x3bab82ea, 0x4c4b83a2, 0x4cd93d1e,
-    0x4f7331d4, 0x5491b09b, 0x57cc6ff9, 0x60d3b4dc, 0x653f5904, 0x690ae256,
-    0x69fe3276, 0x6bebf0ba, 0x6e2c69a3, 0x73b84ff7, 0x7b3a1924, 0x7ed032d9,
-    0x84dd734b, 0x8552ea53, 0x8680754f, 0x8e9660eb, 0x94fe2b9c, 0x972d30cf,
-    0x9b98c482, 0xb158667e, 0xb432932c, 0xb5b70989, 0xb669971a, 0xb7c359d1,
-    0xbeb15c0d, 0xc171c53d, 0xc743dd38, 0xc8e2af50, 0xc98e2df0, 0xd9d1cdf9,
-    0xdcc91049, 0xe46f396d, 0xee991950, 0xef64e521, 0xf7aeefc9, 0xffffffff};
-
-
-MATCHER(IsNaN, std::string(negation ? "isn't" : "is") + " NaN") {
-  return std::isnan(arg);
-}
-
-}  // namespace
-
-
-// -----------------------------------------------------------------------------
-// Unary operators
-
-
-namespace {
-
-struct UnaryOperator {
-  const Operator* (SimplifiedOperatorBuilder::*constructor)();
-  const char* constructor_name;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const UnaryOperator& unop) {
-  return os << unop.constructor_name;
-}
-
-
-static const UnaryOperator kUnaryOperators[] = {
-    {&SimplifiedOperatorBuilder::BooleanNot, "BooleanNot"},
-    {&SimplifiedOperatorBuilder::ChangeBitToBool, "ChangeBitToBool"},
-    {&SimplifiedOperatorBuilder::ChangeBoolToBit, "ChangeBoolToBit"},
-    {&SimplifiedOperatorBuilder::ChangeFloat64ToTagged,
-     "ChangeFloat64ToTagged"},
-    {&SimplifiedOperatorBuilder::ChangeInt32ToTagged, "ChangeInt32ToTagged"},
-    {&SimplifiedOperatorBuilder::ChangeTaggedToFloat64,
-     "ChangeTaggedToFloat64"},
-    {&SimplifiedOperatorBuilder::ChangeTaggedToInt32, "ChangeTaggedToInt32"},
-    {&SimplifiedOperatorBuilder::ChangeTaggedToUint32, "ChangeTaggedToUint32"},
-    {&SimplifiedOperatorBuilder::ChangeUint32ToTagged, "ChangeUint32ToTagged"}};
-
-}  // namespace
-
-
-typedef SimplifiedOperatorReducerTestWithParam<UnaryOperator>
-    SimplifiedUnaryOperatorTest;
-
-
-TEST_P(SimplifiedUnaryOperatorTest, Parameter) {
-  const UnaryOperator& unop = GetParam();
-  Reduction reduction = Reduce(
-      graph()->NewNode((simplified()->*unop.constructor)(), Parameter(0)));
-  EXPECT_FALSE(reduction.Changed());
-}
-
-
-INSTANTIATE_TEST_CASE_P(SimplifiedOperatorReducerTest,
-                        SimplifiedUnaryOperatorTest,
-                        ::testing::ValuesIn(kUnaryOperators));
-
-
-// -----------------------------------------------------------------------------
-// BooleanNot
-
-
-TEST_F(SimplifiedOperatorReducerTest, BooleanNotWithBooleanNot) {
-  Node* param0 = Parameter(0);
-  Reduction reduction = Reduce(
-      graph()->NewNode(simplified()->BooleanNot(),
-                       graph()->NewNode(simplified()->BooleanNot(), param0)));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_EQ(param0, reduction.replacement());
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, BooleanNotWithFalseConstant) {
-  Reduction reduction0 =
-      Reduce(graph()->NewNode(simplified()->BooleanNot(), FalseConstant()));
-  ASSERT_TRUE(reduction0.Changed());
-  EXPECT_THAT(reduction0.replacement(), IsTrueConstant());
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, BooleanNotWithTrueConstant) {
-  Reduction reduction1 =
-      Reduce(graph()->NewNode(simplified()->BooleanNot(), TrueConstant()));
-  ASSERT_TRUE(reduction1.Changed());
-  EXPECT_THAT(reduction1.replacement(), IsFalseConstant());
-}
-
-
-// -----------------------------------------------------------------------------
-// ChangeBitToBool
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeBitToBoolWithChangeBoolToBit) {
-  Node* param0 = Parameter(0);
-  Reduction reduction = Reduce(graph()->NewNode(
-      simplified()->ChangeBitToBool(),
-      graph()->NewNode(simplified()->ChangeBoolToBit(), param0)));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_EQ(param0, reduction.replacement());
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeBitToBoolWithZeroConstant) {
-  Reduction reduction = Reduce(
-      graph()->NewNode(simplified()->ChangeBitToBool(), Int32Constant(0)));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_THAT(reduction.replacement(), IsFalseConstant());
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeBitToBoolWithOneConstant) {
-  Reduction reduction = Reduce(
-      graph()->NewNode(simplified()->ChangeBitToBool(), Int32Constant(1)));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_THAT(reduction.replacement(), IsTrueConstant());
-}
-
-
-// -----------------------------------------------------------------------------
-// ChangeBoolToBit
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeBoolToBitWithFalseConstant) {
-  Reduction reduction = Reduce(
-      graph()->NewNode(simplified()->ChangeBoolToBit(), FalseConstant()));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeBoolToBitWithTrueConstant) {
-  Reduction reduction =
-      Reduce(graph()->NewNode(simplified()->ChangeBoolToBit(), TrueConstant()));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_THAT(reduction.replacement(), IsInt32Constant(1));
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeBoolToBitWithChangeBitToBool) {
-  Node* param0 = Parameter(0);
-  Reduction reduction = Reduce(graph()->NewNode(
-      simplified()->ChangeBoolToBit(),
-      graph()->NewNode(simplified()->ChangeBitToBool(), param0)));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_EQ(param0, reduction.replacement());
-}
-
-
-// -----------------------------------------------------------------------------
-// ChangeFloat64ToTagged
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeFloat64ToTaggedWithConstant) {
-  TRACED_FOREACH(double, n, kFloat64Values) {
-    Reduction reduction = Reduce(graph()->NewNode(
-        simplified()->ChangeFloat64ToTagged(), Float64Constant(n)));
-    ASSERT_TRUE(reduction.Changed());
-    EXPECT_THAT(reduction.replacement(), IsNumberConstant(n));
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// ChangeInt32ToTagged
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeInt32ToTaggedWithConstant) {
-  TRACED_FOREACH(int32_t, n, kInt32Values) {
-    Reduction reduction = Reduce(graph()->NewNode(
-        simplified()->ChangeInt32ToTagged(), Int32Constant(n)));
-    ASSERT_TRUE(reduction.Changed());
-    EXPECT_THAT(reduction.replacement(), IsNumberConstant(FastI2D(n)));
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// ChangeTaggedToFloat64
-
-
-TEST_F(SimplifiedOperatorReducerTest,
-       ChangeTaggedToFloat64WithChangeFloat64ToTagged) {
-  Node* param0 = Parameter(0);
-  Reduction reduction = Reduce(graph()->NewNode(
-      simplified()->ChangeTaggedToFloat64(),
-      graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_EQ(param0, reduction.replacement());
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest,
-       ChangeTaggedToFloat64WithChangeInt32ToTagged) {
-  Node* param0 = Parameter(0);
-  Reduction reduction = Reduce(graph()->NewNode(
-      simplified()->ChangeTaggedToFloat64(),
-      graph()->NewNode(simplified()->ChangeInt32ToTagged(), param0)));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_THAT(reduction.replacement(), IsChangeInt32ToFloat64(param0));
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest,
-       ChangeTaggedToFloat64WithChangeUint32ToTagged) {
-  Node* param0 = Parameter(0);
-  Reduction reduction = Reduce(graph()->NewNode(
-      simplified()->ChangeTaggedToFloat64(),
-      graph()->NewNode(simplified()->ChangeUint32ToTagged(), param0)));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_THAT(reduction.replacement(), IsChangeUint32ToFloat64(param0));
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithConstant) {
-  TRACED_FOREACH(double, n, kFloat64Values) {
-    Reduction reduction = Reduce(graph()->NewNode(
-        simplified()->ChangeTaggedToFloat64(), NumberConstant(n)));
-    ASSERT_TRUE(reduction.Changed());
-    EXPECT_THAT(reduction.replacement(), IsFloat64Constant(n));
-  }
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithNaNConstant1) {
-  Reduction reduction =
-      Reduce(graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
-                              NumberConstant(-base::OS::nan_value())));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_THAT(reduction.replacement(), IsFloat64Constant(IsNaN()));
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithNaNConstant2) {
-  Reduction reduction =
-      Reduce(graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
-                              NumberConstant(base::OS::nan_value())));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_THAT(reduction.replacement(), IsFloat64Constant(IsNaN()));
-}
-
-
-// -----------------------------------------------------------------------------
-// ChangeTaggedToInt32
-
-
-TEST_F(SimplifiedOperatorReducerTest,
-       ChangeTaggedToInt32WithChangeFloat64ToTagged) {
-  Node* param0 = Parameter(0);
-  Reduction reduction = Reduce(graph()->NewNode(
-      simplified()->ChangeTaggedToInt32(),
-      graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToInt32(param0));
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest,
-       ChangeTaggedToInt32WithChangeInt32ToTagged) {
-  Node* param0 = Parameter(0);
-  Reduction reduction = Reduce(graph()->NewNode(
-      simplified()->ChangeTaggedToInt32(),
-      graph()->NewNode(simplified()->ChangeInt32ToTagged(), param0)));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_EQ(param0, reduction.replacement());
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithConstant) {
-  TRACED_FOREACH(double, n, kFloat64Values) {
-    Reduction reduction = Reduce(graph()->NewNode(
-        simplified()->ChangeTaggedToInt32(), NumberConstant(n)));
-    ASSERT_TRUE(reduction.Changed());
-    EXPECT_THAT(reduction.replacement(), IsInt32Constant(DoubleToInt32(n)));
-  }
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithNaNConstant1) {
-  Reduction reduction =
-      Reduce(graph()->NewNode(simplified()->ChangeTaggedToInt32(),
-                              NumberConstant(-base::OS::nan_value())));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithNaNConstant2) {
-  Reduction reduction =
-      Reduce(graph()->NewNode(simplified()->ChangeTaggedToInt32(),
-                              NumberConstant(base::OS::nan_value())));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
-}
-
-
-// -----------------------------------------------------------------------------
-// ChangeTaggedToUint32
-
-
-TEST_F(SimplifiedOperatorReducerTest,
-       ChangeTaggedToUint32WithChangeFloat64ToTagged) {
-  Node* param0 = Parameter(0);
-  Reduction reduction = Reduce(graph()->NewNode(
-      simplified()->ChangeTaggedToUint32(),
-      graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToUint32(param0));
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest,
-       ChangeTaggedToUint32WithChangeUint32ToTagged) {
-  Node* param0 = Parameter(0);
-  Reduction reduction = Reduce(graph()->NewNode(
-      simplified()->ChangeTaggedToUint32(),
-      graph()->NewNode(simplified()->ChangeUint32ToTagged(), param0)));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_EQ(param0, reduction.replacement());
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithConstant) {
-  TRACED_FOREACH(double, n, kFloat64Values) {
-    Reduction reduction = Reduce(graph()->NewNode(
-        simplified()->ChangeTaggedToUint32(), NumberConstant(n)));
-    ASSERT_TRUE(reduction.Changed());
-    EXPECT_THAT(reduction.replacement(),
-                IsInt32Constant(bit_cast<int32_t>(DoubleToUint32(n))));
-  }
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithNaNConstant1) {
-  Reduction reduction =
-      Reduce(graph()->NewNode(simplified()->ChangeTaggedToUint32(),
-                              NumberConstant(-base::OS::nan_value())));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithNaNConstant2) {
-  Reduction reduction =
-      Reduce(graph()->NewNode(simplified()->ChangeTaggedToUint32(),
-                              NumberConstant(base::OS::nan_value())));
-  ASSERT_TRUE(reduction.Changed());
-  EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
-}
-
-
-// -----------------------------------------------------------------------------
-// ChangeUint32ToTagged
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeUint32ToTagged) {
-  TRACED_FOREACH(uint32_t, n, kUint32Values) {
-    Reduction reduction =
-        Reduce(graph()->NewNode(simplified()->ChangeUint32ToTagged(),
-                                Int32Constant(bit_cast<int32_t>(n))));
-    ASSERT_TRUE(reduction.Changed());
-    EXPECT_THAT(reduction.replacement(), IsNumberConstant(FastUI2D(n)));
-  }
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
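As a minimal stand-alone sketch of the algebraic identities the deleted tests
above exercise (plain C++ with assert, not the V8 graph API; the real reducer
replaces the outer node with its grand-input rather than evaluating anything):

    #include <cassert>

    // BooleanNot(BooleanNot(x)) => x, plus the constant folds
    // BooleanNot(false) => true and BooleanNot(true) => false.
    static bool BooleanNot(bool x) { return !x; }

    int main() {
      for (bool x : {false, true}) {
        assert(BooleanNot(BooleanNot(x)) == x);
      }
      assert(BooleanNot(false) == true);
      assert(BooleanNot(true) == false);
    }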
diff --git a/src/compiler/simplified-operator-reducer.cc b/src/compiler/simplified-operator-reducer.cc
index f6181ea..9d45e5b 100644
--- a/src/compiler/simplified-operator-reducer.cc
+++ b/src/compiler/simplified-operator-reducer.cc
@@ -2,21 +2,29 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/simplified-operator-reducer.h"
+
+#include "src/compiler/access-builder.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node-matchers.h"
-#include "src/compiler/simplified-operator-reducer.h"
+#include "src/compiler/node-properties-inl.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
+SimplifiedOperatorReducer::SimplifiedOperatorReducer(JSGraph* jsgraph)
+    : jsgraph_(jsgraph), simplified_(jsgraph->zone()) {}
+
+
 SimplifiedOperatorReducer::~SimplifiedOperatorReducer() {}
 
 
 Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
   switch (node->opcode()) {
+    case IrOpcode::kAnyToBoolean:
+      return ReduceAnyToBoolean(node);
     case IrOpcode::kBooleanNot: {
       HeapObjectMatcher<HeapObject> m(node->InputAt(0));
       if (m.Is(Unique<HeapObject>::CreateImmovable(factory()->false_value()))) {
@@ -102,8 +110,36 @@
 }
 
 
+Reduction SimplifiedOperatorReducer::ReduceAnyToBoolean(Node* node) {
+  Node* const input = NodeProperties::GetValueInput(node, 0);
+  Type* const input_type = NodeProperties::GetBounds(input).upper;
+  if (input_type->Is(Type::Boolean())) {
+    // AnyToBoolean(x:boolean) => x
+    return Replace(input);
+  }
+  if (input_type->Is(Type::OrderedNumber())) {
+    // AnyToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x, #0))
+    Node* compare = graph()->NewNode(simplified()->NumberEqual(), input,
+                                     jsgraph()->ZeroConstant());
+    return Change(node, simplified()->BooleanNot(), compare);
+  }
+  if (input_type->Is(Type::String())) {
+    // AnyToBoolean(x:string) => BooleanNot(NumberEqual(x.length, #0))
+    FieldAccess const access = AccessBuilder::ForStringLength();
+    Node* length = graph()->NewNode(simplified()->LoadField(access), input,
+                                    graph()->start(), graph()->start());
+    Node* compare = graph()->NewNode(simplified()->NumberEqual(), length,
+                                     jsgraph()->ZeroConstant());
+    return Change(node, simplified()->BooleanNot(), compare);
+  }
+  return NoChange();
+}
+
+
 Reduction SimplifiedOperatorReducer::Change(Node* node, const Operator* op,
                                             Node* a) {
+  DCHECK_EQ(node->InputCount(), OperatorProperties::GetTotalInputCount(op));
+  DCHECK_LE(1, node->InputCount());
   node->set_op(op);
   node->ReplaceInput(0, a);
   return Changed(node);
@@ -138,6 +174,11 @@
 }
 
 
+CommonOperatorBuilder* SimplifiedOperatorReducer::common() const {
+  return jsgraph()->common();
+}
+
+
 MachineOperatorBuilder* SimplifiedOperatorReducer::machine() const {
   return jsgraph()->machine();
 }
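The AnyToBoolean lowering above encodes ToBoolean for the three statically
known input classes. A hedged scalar sketch of the same semantics (plain C++;
OrderedNumber excludes NaN by construction, so comparing against zero
suffices, and -0.0 == 0.0 gives the correct false result for minus zero):

    #include <string>

    // AnyToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x, #0))
    static bool ToBooleanOrderedNumber(double x) { return !(x == 0.0); }

    // AnyToBoolean(x:string) => BooleanNot(NumberEqual(x.length, #0))
    static bool ToBooleanString(const std::string& s) {
      return !(s.size() == 0);
    }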
diff --git a/src/compiler/simplified-operator-reducer.h b/src/compiler/simplified-operator-reducer.h
index 32f49ad..1e565b8 100644
--- a/src/compiler/simplified-operator-reducer.h
+++ b/src/compiler/simplified-operator-reducer.h
@@ -6,6 +6,7 @@
 #define V8_COMPILER_SIMPLIFIED_OPERATOR_REDUCER_H_
 
 #include "src/compiler/graph-reducer.h"
+#include "src/compiler/simplified-operator.h"
 
 namespace v8 {
 namespace internal {
@@ -16,17 +17,20 @@
 namespace compiler {
 
 // Forward declarations.
+class CommonOperatorBuilder;
 class JSGraph;
 class MachineOperatorBuilder;
 
 class SimplifiedOperatorReducer FINAL : public Reducer {
  public:
-  explicit SimplifiedOperatorReducer(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
-  virtual ~SimplifiedOperatorReducer();
+  explicit SimplifiedOperatorReducer(JSGraph* jsgraph);
+  ~SimplifiedOperatorReducer() FINAL;
 
-  virtual Reduction Reduce(Node* node) OVERRIDE;
+  Reduction Reduce(Node* node) FINAL;
 
  private:
+  Reduction ReduceAnyToBoolean(Node* node);
+
   Reduction Change(Node* node, const Operator* op, Node* a);
   Reduction ReplaceFloat64(double value);
   Reduction ReplaceInt32(int32_t value);
@@ -39,9 +43,12 @@
   Graph* graph() const;
   Factory* factory() const;
   JSGraph* jsgraph() const { return jsgraph_; }
+  CommonOperatorBuilder* common() const;
   MachineOperatorBuilder* machine() const;
+  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
 
   JSGraph* jsgraph_;
+  SimplifiedOperatorBuilder simplified_;
 
   DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorReducer);
 };
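A hedged usage sketch for the reworked interface (jsgraph and node are
placeholders for an existing JSGraph* and Node*, not part of this change; the
reducer now owns a SimplifiedOperatorBuilder allocated in the graph's zone):

    SimplifiedOperatorReducer reducer(jsgraph);
    Reduction r = reducer.Reduce(node);
    if (r.Changed()) {
      // Use r.replacement() in place of node.
    }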
diff --git a/src/compiler/simplified-operator-unittest.cc b/src/compiler/simplified-operator-unittest.cc
deleted file mode 100644
index 4014f24..0000000
--- a/src/compiler/simplified-operator-unittest.cc
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/simplified-operator.h"
-
-#include "src/compiler/operator-properties-inl.h"
-#include "src/test/test-utils.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// TODO(bmeurer): Drop once we use std::ostream instead of our OStream.
-inline std::ostream& operator<<(std::ostream& os, const ElementAccess& access) {
-  OStringStream ost;
-  ost << access;
-  return os << ost.c_str();
-}
-
-
-// -----------------------------------------------------------------------------
-// Pure operators.
-
-
-namespace {
-
-struct PureOperator {
-  const Operator* (SimplifiedOperatorBuilder::*constructor)();
-  IrOpcode::Value opcode;
-  Operator::Properties properties;
-  int value_input_count;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const PureOperator& pop) {
-  return os << IrOpcode::Mnemonic(pop.opcode);
-}
-
-
-const PureOperator kPureOperators[] = {
-#define PURE(Name, properties, input_count)              \
-  {                                                      \
-    &SimplifiedOperatorBuilder::Name, IrOpcode::k##Name, \
-        Operator::kPure | properties, input_count        \
-  }
-    PURE(BooleanNot, Operator::kNoProperties, 1),
-    PURE(NumberEqual, Operator::kCommutative, 2),
-    PURE(NumberLessThan, Operator::kNoProperties, 2),
-    PURE(NumberLessThanOrEqual, Operator::kNoProperties, 2),
-    PURE(NumberAdd, Operator::kCommutative, 2),
-    PURE(NumberSubtract, Operator::kNoProperties, 2),
-    PURE(NumberMultiply, Operator::kCommutative, 2),
-    PURE(NumberDivide, Operator::kNoProperties, 2),
-    PURE(NumberModulus, Operator::kNoProperties, 2),
-    PURE(NumberToInt32, Operator::kNoProperties, 1),
-    PURE(NumberToUint32, Operator::kNoProperties, 1),
-    PURE(StringEqual, Operator::kCommutative, 2),
-    PURE(StringLessThan, Operator::kNoProperties, 2),
-    PURE(StringLessThanOrEqual, Operator::kNoProperties, 2),
-    PURE(StringAdd, Operator::kNoProperties, 2),
-    PURE(ChangeTaggedToInt32, Operator::kNoProperties, 1),
-    PURE(ChangeTaggedToUint32, Operator::kNoProperties, 1),
-    PURE(ChangeTaggedToFloat64, Operator::kNoProperties, 1),
-    PURE(ChangeInt32ToTagged, Operator::kNoProperties, 1),
-    PURE(ChangeUint32ToTagged, Operator::kNoProperties, 1),
-    PURE(ChangeFloat64ToTagged, Operator::kNoProperties, 1),
-    PURE(ChangeBoolToBit, Operator::kNoProperties, 1),
-    PURE(ChangeBitToBool, Operator::kNoProperties, 1)
-#undef PURE
-};
-
-}  // namespace
-
-
-class SimplifiedPureOperatorTest
-    : public TestWithZone,
-      public ::testing::WithParamInterface<PureOperator> {};
-
-
-TEST_P(SimplifiedPureOperatorTest, InstancesAreGloballyShared) {
-  const PureOperator& pop = GetParam();
-  SimplifiedOperatorBuilder simplified1(zone());
-  SimplifiedOperatorBuilder simplified2(zone());
-  EXPECT_EQ((simplified1.*pop.constructor)(), (simplified2.*pop.constructor)());
-}
-
-
-TEST_P(SimplifiedPureOperatorTest, NumberOfInputsAndOutputs) {
-  SimplifiedOperatorBuilder simplified(zone());
-  const PureOperator& pop = GetParam();
-  const Operator* op = (simplified.*pop.constructor)();
-
-  EXPECT_EQ(pop.value_input_count, OperatorProperties::GetValueInputCount(op));
-  EXPECT_EQ(0, OperatorProperties::GetEffectInputCount(op));
-  EXPECT_EQ(0, OperatorProperties::GetControlInputCount(op));
-  EXPECT_EQ(pop.value_input_count, OperatorProperties::GetTotalInputCount(op));
-
-  EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
-  EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op));
-  EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
-}
-
-
-TEST_P(SimplifiedPureOperatorTest, OpcodeIsCorrect) {
-  SimplifiedOperatorBuilder simplified(zone());
-  const PureOperator& pop = GetParam();
-  const Operator* op = (simplified.*pop.constructor)();
-  EXPECT_EQ(pop.opcode, op->opcode());
-}
-
-
-TEST_P(SimplifiedPureOperatorTest, Properties) {
-  SimplifiedOperatorBuilder simplified(zone());
-  const PureOperator& pop = GetParam();
-  const Operator* op = (simplified.*pop.constructor)();
-  EXPECT_EQ(pop.properties, op->properties() & pop.properties);
-}
-
-INSTANTIATE_TEST_CASE_P(SimplifiedOperatorTest, SimplifiedPureOperatorTest,
-                        ::testing::ValuesIn(kPureOperators));
-
-
-// -----------------------------------------------------------------------------
-// Element access operators.
-
-namespace {
-
-const ElementAccess kElementAccesses[] = {
-    {kTaggedBase, FixedArray::kHeaderSize, Type::Any(), kMachAnyTagged},
-    {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
-     kMachInt8},
-    {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
-     kMachInt16},
-    {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
-     kMachInt32},
-    {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
-     kMachUint8},
-    {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
-     kMachUint16},
-    {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
-     kMachUint32},
-    {kUntaggedBase, 0, Type::Signed32(), kMachInt8},
-    {kUntaggedBase, 0, Type::Unsigned32(), kMachUint8},
-    {kUntaggedBase, 0, Type::Signed32(), kMachInt16},
-    {kUntaggedBase, 0, Type::Unsigned32(), kMachUint16},
-    {kUntaggedBase, 0, Type::Signed32(), kMachInt32},
-    {kUntaggedBase, 0, Type::Unsigned32(), kMachUint32},
-    {kUntaggedBase, 0, Type::Number(), kRepFloat32},
-    {kUntaggedBase, 0, Type::Number(), kRepFloat64},
-    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
-     kMachInt8},
-    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
-     kMachUint8},
-    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
-     kMachInt16},
-    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
-     kMachUint16},
-    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
-     kMachInt32},
-    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
-     kMachUint32},
-    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
-     kRepFloat32},
-    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
-     kRepFloat64}};
-
-}  // namespace
-
-
-class SimplifiedElementAccessOperatorTest
-    : public TestWithZone,
-      public ::testing::WithParamInterface<ElementAccess> {};
-
-
-TEST_P(SimplifiedElementAccessOperatorTest, LoadElement) {
-  SimplifiedOperatorBuilder simplified(zone());
-  const ElementAccess& access = GetParam();
-  const Operator* op = simplified.LoadElement(access);
-
-  EXPECT_EQ(IrOpcode::kLoadElement, op->opcode());
-  EXPECT_EQ(Operator::kNoThrow | Operator::kNoWrite, op->properties());
-  EXPECT_EQ(access, ElementAccessOf(op));
-
-  EXPECT_EQ(3, OperatorProperties::GetValueInputCount(op));
-  EXPECT_EQ(1, OperatorProperties::GetEffectInputCount(op));
-  EXPECT_EQ(0, OperatorProperties::GetControlInputCount(op));
-  EXPECT_EQ(4, OperatorProperties::GetTotalInputCount(op));
-
-  EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
-  EXPECT_EQ(1, OperatorProperties::GetEffectOutputCount(op));
-  EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
-}
-
-
-TEST_P(SimplifiedElementAccessOperatorTest, StoreElement) {
-  SimplifiedOperatorBuilder simplified(zone());
-  const ElementAccess& access = GetParam();
-  const Operator* op = simplified.StoreElement(access);
-
-  EXPECT_EQ(IrOpcode::kStoreElement, op->opcode());
-  EXPECT_EQ(Operator::kNoRead | Operator::kNoThrow, op->properties());
-  EXPECT_EQ(access, ElementAccessOf(op));
-
-  EXPECT_EQ(4, OperatorProperties::GetValueInputCount(op));
-  EXPECT_EQ(1, OperatorProperties::GetEffectInputCount(op));
-  EXPECT_EQ(1, OperatorProperties::GetControlInputCount(op));
-  EXPECT_EQ(6, OperatorProperties::GetTotalInputCount(op));
-
-  EXPECT_EQ(0, OperatorProperties::GetValueOutputCount(op));
-  EXPECT_EQ(1, OperatorProperties::GetEffectOutputCount(op));
-  EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
-}
-
-
-INSTANTIATE_TEST_CASE_P(SimplifiedOperatorTest,
-                        SimplifiedElementAccessOperatorTest,
-                        ::testing::ValuesIn(kElementAccesses));
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
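The InstancesAreGloballyShared expectation deleted above is exactly what the
new SimplifiedOperatorGlobalCache below preserves: pure operators are
process-wide singletons, so pointer equality doubles as operator equality.
A sketch under that assumption (zone() is a placeholder for any live Zone*):

    SimplifiedOperatorBuilder simplified1(zone());
    SimplifiedOperatorBuilder simplified2(zone());
    // Both builders hand out the same cached instance.
    DCHECK_EQ(simplified1.BooleanNot(), simplified2.BooleanNot());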
diff --git a/src/compiler/simplified-operator.cc b/src/compiler/simplified-operator.cc
index 642ffc7..9d88d12 100644
--- a/src/compiler/simplified-operator.cc
+++ b/src/compiler/simplified-operator.cc
@@ -13,7 +13,7 @@
 namespace internal {
 namespace compiler {
 
-OStream& operator<<(OStream& os, BaseTaggedness base_taggedness) {
+std::ostream& operator<<(std::ostream& os, BaseTaggedness base_taggedness) {
   switch (base_taggedness) {
     case kUntaggedBase:
       return os << "untagged base";
@@ -25,9 +25,99 @@
 }
 
 
+MachineType BufferAccess::machine_type() const {
+  switch (external_array_type_) {
+    case kExternalUint8Array:
+    case kExternalUint8ClampedArray:
+      return kMachUint8;
+    case kExternalInt8Array:
+      return kMachInt8;
+    case kExternalUint16Array:
+      return kMachUint16;
+    case kExternalInt16Array:
+      return kMachInt16;
+    case kExternalUint32Array:
+      return kMachUint32;
+    case kExternalInt32Array:
+      return kMachInt32;
+    case kExternalFloat32Array:
+      return kMachFloat32;
+    case kExternalFloat64Array:
+      return kMachFloat64;
+  }
+  UNREACHABLE();
+  return kMachNone;
+}
+
+
+bool operator==(BufferAccess lhs, BufferAccess rhs) {
+  return lhs.external_array_type() == rhs.external_array_type();
+}
+
+
+bool operator!=(BufferAccess lhs, BufferAccess rhs) { return !(lhs == rhs); }
+
+
+size_t hash_value(BufferAccess access) {
+  return base::hash<ExternalArrayType>()(access.external_array_type());
+}
+
+
+std::ostream& operator<<(std::ostream& os, BufferAccess access) {
+  switch (access.external_array_type()) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+  case kExternal##Type##Array:                          \
+    return os << #Type;
+    TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+BufferAccess const BufferAccessOf(const Operator* op) {
+  DCHECK(op->opcode() == IrOpcode::kLoadBuffer ||
+         op->opcode() == IrOpcode::kStoreBuffer);
+  return OpParameter<BufferAccess>(op);
+}
+
+
+bool operator==(FieldAccess const& lhs, FieldAccess const& rhs) {
+  return lhs.base_is_tagged == rhs.base_is_tagged && lhs.offset == rhs.offset &&
+         lhs.machine_type == rhs.machine_type;
+}
+
+
+bool operator!=(FieldAccess const& lhs, FieldAccess const& rhs) {
+  return !(lhs == rhs);
+}
+
+
+size_t hash_value(FieldAccess const& access) {
+  return base::hash_combine(access.base_is_tagged, access.offset,
+                            access.machine_type);
+}
+
+
+std::ostream& operator<<(std::ostream& os, FieldAccess const& access) {
+  os << "[" << access.base_is_tagged << ", " << access.offset << ", ";
+#ifdef OBJECT_PRINT
+  Handle<Name> name;
+  if (access.name.ToHandle(&name)) {
+    name->Print(os);
+    os << ", ";
+  }
+#endif
+  access.type->PrintTo(os);
+  os << ", " << access.machine_type << "]";
+  return os;
+}
+
+
 bool operator==(ElementAccess const& lhs, ElementAccess const& rhs) {
   return lhs.base_is_tagged == rhs.base_is_tagged &&
-         lhs.header_size == rhs.header_size && lhs.type == rhs.type &&
+         lhs.header_size == rhs.header_size &&
          lhs.machine_type == rhs.machine_type;
 }
 
@@ -37,10 +127,16 @@
 }
 
 
-OStream& operator<<(OStream& os, ElementAccess const& access) {
-  os << "[" << access.base_is_tagged << ", " << access.header_size << ", ";
+size_t hash_value(ElementAccess const& access) {
+  return base::hash_combine(access.base_is_tagged, access.header_size,
+                            access.machine_type);
+}
+
+
+std::ostream& operator<<(std::ostream& os, ElementAccess const& access) {
+  os << access.base_is_tagged << ", " << access.header_size << ", ";
   access.type->PrintTo(os);
-  os << ", " << access.machine_type << "]";
+  os << ", " << access.machine_type;
   return os;
 }
 
@@ -61,41 +157,8 @@
 }
 
 
-// Specialization for static parameters of type {FieldAccess}.
-template <>
-struct StaticParameterTraits<FieldAccess> {
-  static OStream& PrintTo(OStream& os, const FieldAccess& val) {
-    return os << val.offset;
-  }
-  static int HashCode(const FieldAccess& val) {
-    return (val.offset < 16) | (val.machine_type & 0xffff);
-  }
-  static bool Equals(const FieldAccess& lhs, const FieldAccess& rhs) {
-    return lhs.base_is_tagged == rhs.base_is_tagged &&
-           lhs.offset == rhs.offset && lhs.machine_type == rhs.machine_type &&
-           lhs.type->Is(rhs.type);
-  }
-};
-
-
-// Specialization for static parameters of type {ElementAccess}.
-template <>
-struct StaticParameterTraits<ElementAccess> {
-  static OStream& PrintTo(OStream& os, const ElementAccess& access) {
-    return os << access;
-  }
-  static int HashCode(const ElementAccess& access) {
-    return (access.header_size < 16) | (access.machine_type & 0xffff);
-  }
-  static bool Equals(const ElementAccess& lhs, const ElementAccess& rhs) {
-    return lhs.base_is_tagged == rhs.base_is_tagged &&
-           lhs.header_size == rhs.header_size &&
-           lhs.machine_type == rhs.machine_type && lhs.type->Is(rhs.type);
-  }
-};
-
-
 #define PURE_OP_LIST(V)                                \
+  V(AnyToBoolean, Operator::kNoProperties, 1)          \
   V(BooleanNot, Operator::kNoProperties, 1)            \
   V(BooleanToNumber, Operator::kNoProperties, 1)       \
   V(NumberEqual, Operator::kCommutative, 2)            \
@@ -119,56 +182,106 @@
   V(ChangeUint32ToTagged, Operator::kNoProperties, 1)  \
   V(ChangeFloat64ToTagged, Operator::kNoProperties, 1) \
   V(ChangeBoolToBit, Operator::kNoProperties, 1)       \
-  V(ChangeBitToBool, Operator::kNoProperties, 1)
+  V(ChangeBitToBool, Operator::kNoProperties, 1)       \
+  V(ObjectIsSmi, Operator::kNoProperties, 1)           \
+  V(ObjectIsNonNegativeSmi, Operator::kNoProperties, 1)
 
 
-#define ACCESS_OP_LIST(V)                                 \
-  V(LoadField, FieldAccess, Operator::kNoWrite, 1, 1)     \
-  V(StoreField, FieldAccess, Operator::kNoRead, 2, 0)     \
-  V(LoadElement, ElementAccess, Operator::kNoWrite, 3, 1) \
-  V(StoreElement, ElementAccess, Operator::kNoRead, 4, 0)
-
-
-struct SimplifiedOperatorBuilderImpl FINAL {
-#define PURE(Name, properties, input_count)                               \
-  struct Name##Operator FINAL : public SimpleOperator {                   \
-    Name##Operator()                                                      \
-        : SimpleOperator(IrOpcode::k##Name, Operator::kPure | properties, \
-                         input_count, 1, #Name) {}                        \
-  };                                                                      \
+struct SimplifiedOperatorGlobalCache FINAL {
+#define PURE(Name, properties, input_count)                                \
+  struct Name##Operator FINAL : public Operator {                          \
+    Name##Operator()                                                       \
+        : Operator(IrOpcode::k##Name, Operator::kPure | properties, #Name, \
+                   input_count, 0, 0, 1, 0, 0) {}                          \
+  };                                                                       \
   Name##Operator k##Name;
   PURE_OP_LIST(PURE)
 #undef PURE
+
+#define BUFFER_ACCESS(Type, type, TYPE, ctype, size)                          \
+  struct LoadBuffer##Type##Operator FINAL : public Operator1<BufferAccess> {  \
+    LoadBuffer##Type##Operator()                                              \
+        : Operator1<BufferAccess>(IrOpcode::kLoadBuffer,                      \
+                                  Operator::kNoThrow | Operator::kNoWrite,    \
+                                  "LoadBuffer", 3, 1, 1, 1, 1, 0,             \
+                                  BufferAccess(kExternal##Type##Array)) {}    \
+  };                                                                          \
+  struct StoreBuffer##Type##Operator FINAL : public Operator1<BufferAccess> { \
+    StoreBuffer##Type##Operator()                                             \
+        : Operator1<BufferAccess>(IrOpcode::kStoreBuffer,                     \
+                                  Operator::kNoRead | Operator::kNoThrow,     \
+                                  "StoreBuffer", 4, 1, 1, 0, 1, 0,            \
+                                  BufferAccess(kExternal##Type##Array)) {}    \
+  };                                                                          \
+  LoadBuffer##Type##Operator kLoadBuffer##Type;                               \
+  StoreBuffer##Type##Operator kStoreBuffer##Type;
+  TYPED_ARRAYS(BUFFER_ACCESS)
+#undef BUFFER_ACCESS
 };
 
 
-static base::LazyInstance<SimplifiedOperatorBuilderImpl>::type kImpl =
+static base::LazyInstance<SimplifiedOperatorGlobalCache>::type kCache =
     LAZY_INSTANCE_INITIALIZER;
 
 
 SimplifiedOperatorBuilder::SimplifiedOperatorBuilder(Zone* zone)
-    : impl_(kImpl.Get()), zone_(zone) {}
+    : cache_(kCache.Get()), zone_(zone) {}
 
 
 #define PURE(Name, properties, input_count) \
-  const Operator* SimplifiedOperatorBuilder::Name() { return &impl_.k##Name; }
+  const Operator* SimplifiedOperatorBuilder::Name() { return &cache_.k##Name; }
 PURE_OP_LIST(PURE)
 #undef PURE
 
 
 const Operator* SimplifiedOperatorBuilder::ReferenceEqual(Type* type) {
   // TODO(titzer): What about the type parameter?
-  return new (zone()) SimpleOperator(IrOpcode::kReferenceEqual,
-                                     Operator::kCommutative | Operator::kPure,
-                                     2, 1, "ReferenceEqual");
+  return new (zone()) Operator(IrOpcode::kReferenceEqual,
+                               Operator::kCommutative | Operator::kPure,
+                               "ReferenceEqual", 2, 0, 0, 1, 0, 0);
 }
 
 
-#define ACCESS(Name, Type, properties, input_count, output_count)           \
-  const Operator* SimplifiedOperatorBuilder::Name(const Type& access) {     \
-    return new (zone())                                                     \
-        Operator1<Type>(IrOpcode::k##Name, Operator::kNoThrow | properties, \
-                        input_count, output_count, #Name, access);          \
+const Operator* SimplifiedOperatorBuilder::LoadBuffer(BufferAccess access) {
+  switch (access.external_array_type()) {
+#define LOAD_BUFFER(Type, type, TYPE, ctype, size) \
+  case kExternal##Type##Array:                     \
+    return &cache_.kLoadBuffer##Type;
+    TYPED_ARRAYS(LOAD_BUFFER)
+#undef LOAD_BUFFER
+  }
+  UNREACHABLE();
+  return nullptr;
+}
+
+
+const Operator* SimplifiedOperatorBuilder::StoreBuffer(BufferAccess access) {
+  switch (access.external_array_type()) {
+#define STORE_BUFFER(Type, type, TYPE, ctype, size) \
+  case kExternal##Type##Array:                      \
+    return &cache_.kStoreBuffer##Type;
+    TYPED_ARRAYS(STORE_BUFFER)
+#undef STORE_BUFFER
+  }
+  UNREACHABLE();
+  return nullptr;
+}
+
+
+#define ACCESS_OP_LIST(V)                                    \
+  V(LoadField, FieldAccess, Operator::kNoWrite, 1, 1, 1)     \
+  V(StoreField, FieldAccess, Operator::kNoRead, 2, 1, 0)     \
+  V(LoadElement, ElementAccess, Operator::kNoWrite, 2, 1, 1) \
+  V(StoreElement, ElementAccess, Operator::kNoRead, 3, 1, 0)
+
+
+#define ACCESS(Name, Type, properties, value_input_count, control_input_count, \
+               output_count)                                                   \
+  const Operator* SimplifiedOperatorBuilder::Name(const Type& access) {        \
+    return new (zone())                                                        \
+        Operator1<Type>(IrOpcode::k##Name, Operator::kNoThrow | properties,    \
+                        #Name, value_input_count, 1, control_input_count,      \
+                        output_count, 1, 0, access);                           \
   }
 ACCESS_OP_LIST(ACCESS)
 #undef ACCESS
diff --git a/src/compiler/simplified-operator.h b/src/compiler/simplified-operator.h
index 32f0e8b..22664fa 100644
--- a/src/compiler/simplified-operator.h
+++ b/src/compiler/simplified-operator.h
@@ -5,6 +5,8 @@
 #ifndef V8_COMPILER_SIMPLIFIED_OPERATOR_H_
 #define V8_COMPILER_SIMPLIFIED_OPERATOR_H_
 
+#include <iosfwd>
+
 #include "src/compiler/machine-type.h"
 #include "src/handles.h"
 
@@ -23,12 +25,36 @@
 
 // Forward declarations.
 class Operator;
-struct SimplifiedOperatorBuilderImpl;
+struct SimplifiedOperatorGlobalCache;
 
 
 enum BaseTaggedness { kUntaggedBase, kTaggedBase };
 
-OStream& operator<<(OStream&, BaseTaggedness);
+std::ostream& operator<<(std::ostream&, BaseTaggedness);
+
+
+// An access descriptor for loads/stores of array buffers.
+class BufferAccess FINAL {
+ public:
+  explicit BufferAccess(ExternalArrayType external_array_type)
+      : external_array_type_(external_array_type) {}
+
+  ExternalArrayType external_array_type() const { return external_array_type_; }
+  MachineType machine_type() const;
+
+ private:
+  ExternalArrayType const external_array_type_;
+};
+
+bool operator==(BufferAccess, BufferAccess);
+bool operator!=(BufferAccess, BufferAccess);
+
+size_t hash_value(BufferAccess);
+
+std::ostream& operator<<(std::ostream&, BufferAccess);
+
+BufferAccess const BufferAccessOf(const Operator* op) WARN_UNUSED_RESULT;
+
 
 // An access descriptor for loads/stores of fixed structures like field
 // accesses of heap objects. Accesses from either tagged or untagged base
@@ -36,13 +62,22 @@
 struct FieldAccess {
   BaseTaggedness base_is_tagged;  // specifies if the base pointer is tagged.
   int offset;                     // offset of the field, without tag.
-  Handle<Name> name;              // debugging only.
+  MaybeHandle<Name> name;         // debugging only.
   Type* type;                     // type of the field.
   MachineType machine_type;       // machine type of the field.
 
   int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
 };
 
+bool operator==(FieldAccess const&, FieldAccess const&);
+bool operator!=(FieldAccess const&, FieldAccess const&);
+
+size_t hash_value(FieldAccess const&);
+
+std::ostream& operator<<(std::ostream&, FieldAccess const&);
+
+FieldAccess const& FieldAccessOf(const Operator* op) WARN_UNUSED_RESULT;
+
 
 // An access descriptor for loads/stores of indexed structures like characters
 // in strings or off-heap backing stores. Accesses from either tagged or
@@ -57,18 +92,14 @@
   int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
 };
 
-bool operator==(ElementAccess const& lhs, ElementAccess const& rhs);
-bool operator!=(ElementAccess const& lhs, ElementAccess const& rhs);
+bool operator==(ElementAccess const&, ElementAccess const&);
+bool operator!=(ElementAccess const&, ElementAccess const&);
 
-OStream& operator<<(OStream&, ElementAccess const&);
+size_t hash_value(ElementAccess const&);
 
+std::ostream& operator<<(std::ostream&, ElementAccess const&);
 
-// If the accessed object is not a heap object, add this to the header_size.
-static const int kNonHeapObjectHeaderSize = kHeapObjectTag;
-
-
-const FieldAccess& FieldAccessOf(const Operator* op) WARN_UNUSED_RESULT;
-const ElementAccess& ElementAccessOf(const Operator* op) WARN_UNUSED_RESULT;
+ElementAccess const& ElementAccessOf(const Operator* op) WARN_UNUSED_RESULT;
 
 
 // Interface for building simplified operators, which represent the
@@ -97,6 +128,8 @@
  public:
   explicit SimplifiedOperatorBuilder(Zone* zone);
 
+  const Operator* AnyToBoolean();
+
   const Operator* BooleanNot();
   const Operator* BooleanToNumber();
 
@@ -127,8 +160,17 @@
   const Operator* ChangeBoolToBit();
   const Operator* ChangeBitToBool();
 
-  const Operator* LoadField(const FieldAccess&);
-  const Operator* StoreField(const FieldAccess&);
+  const Operator* ObjectIsSmi();
+  const Operator* ObjectIsNonNegativeSmi();
+
+  const Operator* LoadField(FieldAccess const&);
+  const Operator* StoreField(FieldAccess const&);
+
+  // load-buffer buffer, offset, length
+  const Operator* LoadBuffer(BufferAccess);
+
+  // store-buffer buffer, offset, length, value
+  const Operator* StoreBuffer(BufferAccess);
 
   // load-element [base + index], length
   const Operator* LoadElement(ElementAccess const&);
@@ -139,7 +181,7 @@
  private:
   Zone* zone() const { return zone_; }
 
-  const SimplifiedOperatorBuilderImpl& impl_;
+  const SimplifiedOperatorGlobalCache& cache_;
   Zone* const zone_;
 
   DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorBuilder);
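The ==/hash_value pairs added for FieldAccess and ElementAccess follow the
usual contract: equal values hash equally (both fold in base_is_tagged,
offset/header_size and machine_type, ignoring name and type). A sketch using
only fields from this header; the offset value is arbitrary:

    FieldAccess a = {kTaggedBase, kPointerSize, MaybeHandle<Name>(),
                     Type::Any(), kMachAnyTagged};
    FieldAccess b = a;
    DCHECK(a == b);
    DCHECK_EQ(hash_value(a), hash_value(b));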
diff --git a/src/compiler/source-position.cc b/src/compiler/source-position.cc
index 1178390..9e21ae4 100644
--- a/src/compiler/source-position.cc
+++ b/src/compiler/source-position.cc
@@ -10,12 +10,12 @@
 namespace internal {
 namespace compiler {
 
-class SourcePositionTable::Decorator : public GraphDecorator {
+class SourcePositionTable::Decorator FINAL : public GraphDecorator {
  public:
   explicit Decorator(SourcePositionTable* source_positions)
       : source_positions_(source_positions) {}
 
-  virtual void Decorate(Node* node) {
+  void Decorate(Node* node) FINAL {
     DCHECK(!source_positions_->current_position_.IsInvalid());
     source_positions_->table_.Set(node, source_positions_->current_position_);
   }
@@ -46,7 +46,7 @@
 }
 
 
-SourcePosition SourcePositionTable::GetSourcePosition(Node* node) {
+SourcePosition SourcePositionTable::GetSourcePosition(Node* node) const {
   return table_.Get(node);
 }
 
diff --git a/src/compiler/source-position.h b/src/compiler/source-position.h
index 778f067..390a17d 100644
--- a/src/compiler/source-position.h
+++ b/src/compiler/source-position.h
@@ -79,7 +79,7 @@
   void AddDecorator();
   void RemoveDecorator();
 
-  SourcePosition GetSourcePosition(Node* node);
+  SourcePosition GetSourcePosition(Node* node) const;
 
  private:
   class Decorator;
diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc
index bfecdef..137829e 100644
--- a/src/compiler/typer.cc
+++ b/src/compiler/typer.cc
@@ -2,7 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/bootstrapper.h"
 #include "src/compiler/graph-inl.h"
+#include "src/compiler/graph-reducer.h"
 #include "src/compiler/js-operator.h"
 #include "src/compiler/node.h"
 #include "src/compiler/node-properties-inl.h"
@@ -14,64 +16,270 @@
 namespace internal {
 namespace compiler {
 
-Typer::Typer(Zone* zone) : zone_(zone) {
-  Type* number = Type::Number(zone);
-  Type* signed32 = Type::Signed32(zone);
-  Type* unsigned32 = Type::Unsigned32(zone);
-  Type* integral32 = Type::Integral32(zone);
-  Type* object = Type::Object(zone);
-  Type* undefined = Type::Undefined(zone);
+#define NATIVE_TYPES(V) \
+  V(Int8)               \
+  V(Uint8)              \
+  V(Int16)              \
+  V(Uint16)             \
+  V(Int32)              \
+  V(Uint32)             \
+  V(Float32)            \
+  V(Float64)
+
+enum LazyCachedType {
+  kNumberFunc0,
+  kNumberFunc1,
+  kNumberFunc2,
+  kImulFunc,
+  kClz32Func,
+  kArrayBufferFunc,
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+  k##Type, k##Type##Array, k##Type##ArrayFunc,
+  TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+      kNumLazyCachedTypes
+};
+
+
+// Constructs and caches types lazily.
+// TODO(turbofan): these types could be globally cached or cached per isolate.
+class LazyTypeCache FINAL : public ZoneObject {
+ public:
+  explicit LazyTypeCache(Zone* zone) : zone_(zone) {
+    memset(cache_, 0, sizeof(cache_));
+  }
+
+  inline Type* Get(LazyCachedType type) {
+    int index = static_cast<int>(type);
+    DCHECK(index < kNumLazyCachedTypes);
+    if (cache_[index] == NULL) cache_[index] = Create(type);
+    return cache_[index];
+  }
+
+ private:
+  Type* Create(LazyCachedType type) {
+    switch (type) {
+      case kInt8:
+        return CreateNative(CreateRange<int8_t>(), Type::UntaggedSigned8());
+      case kUint8:
+        return CreateNative(CreateRange<uint8_t>(), Type::UntaggedUnsigned8());
+      case kInt16:
+        return CreateNative(CreateRange<int16_t>(), Type::UntaggedSigned16());
+      case kUint16:
+        return CreateNative(CreateRange<uint16_t>(),
+                            Type::UntaggedUnsigned16());
+      case kInt32:
+        return CreateNative(Type::Signed32(), Type::UntaggedSigned32());
+      case kUint32:
+        return CreateNative(Type::Unsigned32(), Type::UntaggedUnsigned32());
+      case kFloat32:
+        return CreateNative(Type::Number(), Type::UntaggedFloat32());
+      case kFloat64:
+        return CreateNative(Type::Number(), Type::UntaggedFloat64());
+      case kUint8Clamped:
+        return Get(kUint8);
+      case kNumberFunc0:
+        return Type::Function(Type::Number(), zone());
+      case kNumberFunc1:
+        return Type::Function(Type::Number(), Type::Number(), zone());
+      case kNumberFunc2:
+        return Type::Function(Type::Number(), Type::Number(), Type::Number(),
+                              zone());
+      case kImulFunc:
+        return Type::Function(Type::Signed32(), Type::Integral32(),
+                              Type::Integral32(), zone());
+      case kClz32Func:
+        return Type::Function(CreateRange(0, 32), Type::Number(), zone());
+      case kArrayBufferFunc:
+        return Type::Function(Type::Object(zone()), Type::Unsigned32(), zone());
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+  case k##Type##Array:                                  \
+    return CreateArray(Get(k##Type));                   \
+  case k##Type##ArrayFunc:                              \
+    return CreateArrayFunction(Get(k##Type##Array));
+        TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+      case kNumLazyCachedTypes:
+        break;
+    }
+    UNREACHABLE();
+    return NULL;
+  }
+
+  Type* CreateArray(Type* element) const {
+    return Type::Array(element, zone());
+  }
+
+  Type* CreateArrayFunction(Type* array) const {
+    Type* arg1 = Type::Union(Type::Unsigned32(), Type::Object(), zone());
+    Type* arg2 = Type::Union(Type::Unsigned32(), Type::Undefined(), zone());
+    Type* arg3 = arg2;
+    return Type::Function(array, arg1, arg2, arg3, zone());
+  }
+
+  Type* CreateNative(Type* semantic, Type* representation) const {
+    return Type::Intersect(semantic, representation, zone());
+  }
+
+  template <typename T>
+  Type* CreateRange() const {
+    return CreateRange(std::numeric_limits<T>::min(),
+                       std::numeric_limits<T>::max());
+  }
+
+  Type* CreateRange(double min, double max) const {
+    return Type::Range(factory()->NewNumber(min), factory()->NewNumber(max),
+                       zone());
+  }
+
+  Factory* factory() const { return isolate()->factory(); }
+  Isolate* isolate() const { return zone()->isolate(); }
+  Zone* zone() const { return zone_; }
+
+  Type* cache_[kNumLazyCachedTypes];
+  Zone* zone_;
+};
+
+
+class Typer::Decorator FINAL : public GraphDecorator {
+ public:
+  explicit Decorator(Typer* typer) : typer_(typer) {}
+  void Decorate(Node* node) FINAL;
+
+ private:
+  Typer* typer_;
+};
+
+
+Typer::Typer(Graph* graph, MaybeHandle<Context> context)
+    : graph_(graph),
+      context_(context),
+      decorator_(NULL),
+      cache_(new (graph->zone()) LazyTypeCache(graph->zone())),
+      weaken_min_limits_(graph->zone()),
+      weaken_max_limits_(graph->zone()) {
+  Zone* zone = this->zone();
+  Factory* f = zone->isolate()->factory();
+
+  Handle<Object> zero = f->NewNumber(0);
+  Handle<Object> one = f->NewNumber(1);
+  Handle<Object> infinity = f->NewNumber(+V8_INFINITY);
+  Handle<Object> minusinfinity = f->NewNumber(-V8_INFINITY);
+
+  Type* number = Type::Number();
+  Type* signed32 = Type::Signed32();
+  Type* unsigned32 = Type::Unsigned32();
+  Type* nan_or_minuszero = Type::Union(Type::NaN(), Type::MinusZero(), zone);
+  Type* truncating_to_zero =
+      Type::Union(Type::Union(Type::Constant(infinity, zone),
+                              Type::Constant(minusinfinity, zone), zone),
+                  nan_or_minuszero, zone);
+
+  boolean_or_number = Type::Union(Type::Boolean(), Type::Number(), zone);
+  undefined_or_null = Type::Union(Type::Undefined(), Type::Null(), zone);
+  undefined_or_number = Type::Union(Type::Undefined(), Type::Number(), zone);
+  singleton_false = Type::Constant(f->false_value(), zone);
+  singleton_true = Type::Constant(f->true_value(), zone);
+  singleton_zero = Type::Range(zero, zero, zone);
+  singleton_one = Type::Range(one, one, zone);
+  zero_or_one = Type::Union(singleton_zero, singleton_one, zone);
+  zeroish = Type::Union(singleton_zero, nan_or_minuszero, zone);
+  signed32ish = Type::Union(signed32, truncating_to_zero, zone);
+  unsigned32ish = Type::Union(unsigned32, truncating_to_zero, zone);
+  falsish = Type::Union(Type::Undetectable(),
+                        Type::Union(Type::Union(singleton_false, zeroish, zone),
+                                    undefined_or_null, zone),
+                        zone);
+  truish = Type::Union(
+      singleton_true,
+      Type::Union(Type::DetectableReceiver(), Type::Symbol(), zone), zone);
+  integer = Type::Range(minusinfinity, infinity, zone);
+  weakint = Type::Union(integer, nan_or_minuszero, zone);
+
   number_fun0_ = Type::Function(number, zone);
   number_fun1_ = Type::Function(number, number, zone);
   number_fun2_ = Type::Function(number, number, number, zone);
-  imul_fun_ = Type::Function(signed32, integral32, integral32, zone);
 
-#define NATIVE_TYPE(sem, rep) \
-  Type::Intersect(Type::sem(zone), Type::rep(zone), zone)
-  // TODO(rossberg): Use range types for more precision, once we have them.
-  Type* int8 = NATIVE_TYPE(SignedSmall, UntaggedInt8);
-  Type* int16 = NATIVE_TYPE(SignedSmall, UntaggedInt16);
-  Type* int32 = NATIVE_TYPE(Signed32, UntaggedInt32);
-  Type* uint8 = NATIVE_TYPE(UnsignedSmall, UntaggedInt8);
-  Type* uint16 = NATIVE_TYPE(UnsignedSmall, UntaggedInt16);
-  Type* uint32 = NATIVE_TYPE(Unsigned32, UntaggedInt32);
-  Type* float32 = NATIVE_TYPE(Number, UntaggedFloat32);
-  Type* float64 = NATIVE_TYPE(Number, UntaggedFloat64);
-#undef NATIVE_TYPE
-  Type* buffer = Type::Buffer(zone);
-  Type* int8_array = Type::Array(int8, zone);
-  Type* int16_array = Type::Array(int16, zone);
-  Type* int32_array = Type::Array(int32, zone);
-  Type* uint8_array = Type::Array(uint8, zone);
-  Type* uint16_array = Type::Array(uint16, zone);
-  Type* uint32_array = Type::Array(uint32, zone);
-  Type* float32_array = Type::Array(float32, zone);
-  Type* float64_array = Type::Array(float64, zone);
-  Type* arg1 = Type::Union(unsigned32, object, zone);
-  Type* arg2 = Type::Union(unsigned32, undefined, zone);
-  Type* arg3 = arg2;
-  array_buffer_fun_ = Type::Function(buffer, unsigned32, zone);
-  int8_array_fun_ = Type::Function(int8_array, arg1, arg2, arg3, zone);
-  int16_array_fun_ = Type::Function(int16_array, arg1, arg2, arg3, zone);
-  int32_array_fun_ = Type::Function(int32_array, arg1, arg2, arg3, zone);
-  uint8_array_fun_ = Type::Function(uint8_array, arg1, arg2, arg3, zone);
-  uint16_array_fun_ = Type::Function(uint16_array, arg1, arg2, arg3, zone);
-  uint32_array_fun_ = Type::Function(uint32_array, arg1, arg2, arg3, zone);
-  float32_array_fun_ = Type::Function(float32_array, arg1, arg2, arg3, zone);
-  float64_array_fun_ = Type::Function(float64_array, arg1, arg2, arg3, zone);
+  weakint_fun1_ = Type::Function(weakint, number, zone);
+  random_fun_ = Type::Function(Type::OrderedNumber(), zone);
+
+  const int limits_count = 20;
+
+  weaken_min_limits_.reserve(limits_count + 1);
+  weaken_max_limits_.reserve(limits_count + 1);
+
+  double limit = 1 << 30;
+  weaken_min_limits_.push_back(f->NewNumber(0));
+  weaken_max_limits_.push_back(f->NewNumber(0));
+  for (int i = 0; i < limits_count; i++) {
+    weaken_min_limits_.push_back(f->NewNumber(-limit));
+    weaken_max_limits_.push_back(f->NewNumber(limit - 1));
+    limit *= 2;
+  }
+
+  decorator_ = new (zone) Decorator(this);
+  graph_->AddDecorator(decorator_);
 }
 
 
-class Typer::Visitor : public NullNodeVisitor {
+Typer::~Typer() {
+  graph_->RemoveDecorator(decorator_);
+}
+
+
+class Typer::Visitor : public Reducer {
  public:
-  Visitor(Typer* typer, MaybeHandle<Context> context)
-      : typer_(typer), context_(context) {}
+  explicit Visitor(Typer* typer) : typer_(typer) {}
+
+  Reduction Reduce(Node* node) OVERRIDE {
+    if (node->op()->ValueOutputCount() == 0) return NoChange();
+    switch (node->opcode()) {
+#define DECLARE_CASE(x) \
+  case IrOpcode::k##x:  \
+    return UpdateBounds(node, TypeBinaryOp(node, x##Typer));
+      JS_SIMPLE_BINOP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+
+#define DECLARE_CASE(x) \
+  case IrOpcode::k##x:  \
+    return UpdateBounds(node, Type##x(node));
+      DECLARE_CASE(Start)
+      // VALUE_OP_LIST without JS_SIMPLE_BINOP_LIST:
+      COMMON_OP_LIST(DECLARE_CASE)
+      SIMPLIFIED_OP_LIST(DECLARE_CASE)
+      MACHINE_OP_LIST(DECLARE_CASE)
+      JS_SIMPLE_UNOP_LIST(DECLARE_CASE)
+      JS_OBJECT_OP_LIST(DECLARE_CASE)
+      JS_CONTEXT_OP_LIST(DECLARE_CASE)
+      JS_OTHER_OP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+
+#define DECLARE_CASE(x) case IrOpcode::k##x:
+      DECLARE_CASE(End)
+      INNER_CONTROL_OP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+      break;
+    }
+    return NoChange();
+  }
 
   Bounds TypeNode(Node* node) {
     switch (node->opcode()) {
+#define DECLARE_CASE(x) \
+      case IrOpcode::k##x: return TypeBinaryOp(node, x##Typer);
+      JS_SIMPLE_BINOP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+
 #define DECLARE_CASE(x) case IrOpcode::k##x: return Type##x(node);
       DECLARE_CASE(Start)
-      VALUE_OP_LIST(DECLARE_CASE)
+      // VALUE_OP_LIST without JS_SIMPLE_BINOP_LIST:
+      COMMON_OP_LIST(DECLARE_CASE)
+      SIMPLIFIED_OP_LIST(DECLARE_CASE)
+      MACHINE_OP_LIST(DECLARE_CASE)
+      JS_SIMPLE_UNOP_LIST(DECLARE_CASE)
+      JS_OBJECT_OP_LIST(DECLARE_CASE)
+      JS_CONTEXT_OP_LIST(DECLARE_CASE)
+      JS_OTHER_OP_LIST(DECLARE_CASE)
 #undef DECLARE_CASE
 
 #define DECLARE_CASE(x) case IrOpcode::k##x:
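UpdateBounds in the hunk below implements the monotone half of the fixpoint:
recomputed bounds are widened against the previous ones so types only grow,
which is what bounds the number of revisits. A scalar sketch of that
discipline (Interval stands in for Bounds and is purely illustrative; the
real Weaken additionally snaps range limits to the doubling ladder built in
weaken_min_limits_/weaken_max_limits_ above to guarantee quick termination):

    struct Interval { double lo, hi; };

    // Widen "current" so it never becomes less precise than "previous".
    static Interval Weaken(Interval current, Interval previous) {
      if (current.lo > previous.lo) current.lo = previous.lo;
      if (current.hi < previous.hi) current.hi = previous.hi;
      return current;
    }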
@@ -86,142 +294,288 @@
 
   Type* TypeConstant(Handle<Object> value);
 
- protected:
+ private:
+  Typer* typer_;
+  MaybeHandle<Context> context_;
+
 #define DECLARE_METHOD(x) inline Bounds Type##x(Node* node);
   DECLARE_METHOD(Start)
   VALUE_OP_LIST(DECLARE_METHOD)
 #undef DECLARE_METHOD
 
-  Bounds OperandType(Node* node, int i) {
-    return NodeProperties::GetBounds(NodeProperties::GetValueInput(node, i));
+  Bounds BoundsOrNone(Node* node) {
+    return NodeProperties::IsTyped(node) ? NodeProperties::GetBounds(node)
+                                         : Bounds(Type::None());
   }
 
-  Type* ContextType(Node* node) {
-    Bounds result =
-        NodeProperties::GetBounds(NodeProperties::GetContextInput(node));
+  Bounds Operand(Node* node, int i) {
+    Node* operand_node = NodeProperties::GetValueInput(node, i);
+    return BoundsOrNone(operand_node);
+  }
+
+  Bounds ContextOperand(Node* node) {
+    Bounds result = BoundsOrNone(NodeProperties::GetContextInput(node));
     DCHECK(result.upper->Maybe(Type::Internal()));
     // TODO(rossberg): More precisely, instead of the above assertion, we should
     // back-propagate the constraint that it has to be a subtype of Internal.
-    return result.upper;
+    return result;
   }
 
+  Type* Weaken(Type* current_type, Type* previous_type);
+
   Zone* zone() { return typer_->zone(); }
   Isolate* isolate() { return typer_->isolate(); }
-  MaybeHandle<Context> context() { return context_; }
+  Graph* graph() { return typer_->graph(); }
+  MaybeHandle<Context> context() { return typer_->context(); }
 
- private:
-  Typer* typer_;
-  MaybeHandle<Context> context_;
+  typedef Type* (*UnaryTyperFun)(Type*, Typer* t);
+  typedef Type* (*BinaryTyperFun)(Type*, Type*, Typer* t);
+
+  Bounds TypeUnaryOp(Node* node, UnaryTyperFun);
+  Bounds TypeBinaryOp(Node* node, BinaryTyperFun);
+
+  static Type* Invert(Type*, Typer*);
+  static Type* FalsifyUndefined(Type*, Typer*);
+  static Type* Rangify(Type*, Typer*);
+
+  static Type* ToPrimitive(Type*, Typer*);
+  static Type* ToBoolean(Type*, Typer*);
+  static Type* ToNumber(Type*, Typer*);
+  static Type* ToString(Type*, Typer*);
+  static Type* NumberToInt32(Type*, Typer*);
+  static Type* NumberToUint32(Type*, Typer*);
+
+  static Type* JSAddRanger(Type::RangeType*, Type::RangeType*, Typer*);
+  static Type* JSSubtractRanger(Type::RangeType*, Type::RangeType*, Typer*);
+  static Type* JSMultiplyRanger(Type::RangeType*, Type::RangeType*, Typer*);
+  static Type* JSDivideRanger(Type::RangeType*, Type::RangeType*, Typer*);
+  static Type* JSModulusRanger(Type::RangeType*, Type::RangeType*, Typer*);
+
+  static Type* JSCompareTyper(Type*, Type*, Typer*);
+
+#define DECLARE_METHOD(x) static Type* x##Typer(Type*, Type*, Typer*);
+  JS_SIMPLE_BINOP_LIST(DECLARE_METHOD)
+#undef DECLARE_METHOD
+
+  static Type* JSUnaryNotTyper(Type*, Typer*);
+  static Type* JSLoadPropertyTyper(Type*, Type*, Typer*);
+  static Type* JSCallFunctionTyper(Type*, Typer*);
+
+  Reduction UpdateBounds(Node* node, Bounds current) {
+    if (NodeProperties::IsTyped(node)) {
+      // Widen the bounds of a previously typed node.
+      Bounds previous = NodeProperties::GetBounds(node);
+      // Speed up termination in the presence of range types:
+      current.upper = Weaken(current.upper, previous.upper);
+      current.lower = Weaken(current.lower, previous.lower);
+
+      // Types should not get less precise.
+      DCHECK(previous.lower->Is(current.lower));
+      DCHECK(previous.upper->Is(current.upper));
+
+      NodeProperties::SetBounds(node, current);
+      if (!(previous.Narrows(current) && current.Narrows(previous))) {
+        // If something changed, revisit all uses.
+        return Changed(node);
+      }
+      return NoChange();
+    } else {
+      // No previous type, simply update the bounds.
+      NodeProperties::SetBounds(node, current);
+      return Changed(node);
+    }
+  }
 };
 
 
-class Typer::RunVisitor : public Typer::Visitor {
- public:
-  RunVisitor(Typer* typer, MaybeHandle<Context> context)
-      : Visitor(typer, context),
-        redo(NodeSet::key_compare(), NodeSet::allocator_type(typer->zone())) {}
-
-  GenericGraphVisit::Control Post(Node* node) {
-    if (OperatorProperties::HasValueOutput(node->op())) {
-      Bounds bounds = TypeNode(node);
-      NodeProperties::SetBounds(node, bounds);
-      // Remember incompletely typed nodes for least fixpoint iteration.
-      int arity = OperatorProperties::GetValueInputCount(node->op());
-      for (int i = 0; i < arity; ++i) {
-        // TODO(rossberg): change once IsTyped is available.
-        // if (!NodeProperties::IsTyped(NodeProperties::GetValueInput(node, i)))
-        if (OperandType(node, i).upper->Is(Type::None())) {
-          redo.insert(node);
-          break;
+void Typer::Run() {
+  {
+    // TODO(titzer): this is a hack. Reset types for interior nodes first.
+    NodeDeque deque(zone());
+    NodeMarker<bool> marked(graph(), 2);
+    deque.push_front(graph()->end());
+    marked.Set(graph()->end(), true);
+    while (!deque.empty()) {
+      Node* node = deque.front();
+      deque.pop_front();
+      // TODO(titzer): there shouldn't be a need to retype constants.
+      if (node->op()->ValueOutputCount() > 0)
+        NodeProperties::RemoveBounds(node);
+      for (Node* input : node->inputs()) {
+        if (!marked.Get(input)) {
+          marked.Set(input, true);
+          deque.push_back(input);
         }
       }
     }
-    return GenericGraphVisit::CONTINUE;
   }
 
-  NodeSet redo;
-};
+  Visitor visitor(this);
+  GraphReducer graph_reducer(graph(), zone());
+  graph_reducer.AddReducer(&visitor);
+  graph_reducer.ReduceGraph();
+}
 
 
-class Typer::NarrowVisitor : public Typer::Visitor {
- public:
-  NarrowVisitor(Typer* typer, MaybeHandle<Context> context)
-      : Visitor(typer, context) {}
-
-  GenericGraphVisit::Control Pre(Node* node) {
-    if (OperatorProperties::HasValueOutput(node->op())) {
-      Bounds previous = NodeProperties::GetBounds(node);
-      Bounds bounds = TypeNode(node);
-      NodeProperties::SetBounds(node, Bounds::Both(bounds, previous, zone()));
-      DCHECK(bounds.Narrows(previous));
-      // Stop when nothing changed (but allow re-entry in case it does later).
-      return previous.Narrows(bounds)
-          ? GenericGraphVisit::DEFER : GenericGraphVisit::REENTER;
-    } else {
-      return GenericGraphVisit::SKIP;
+void Typer::Decorator::Decorate(Node* node) {
+  if (node->op()->ValueOutputCount() > 0) {
+    // Only eagerly type-decorate nodes with known input types.
+    // Other cases will generally require a proper fixpoint iteration with Run.
+    bool is_typed = NodeProperties::IsTyped(node);
+    if (is_typed || NodeProperties::AllValueInputsAreTyped(node)) {
+      Visitor typing(typer_);
+      Bounds bounds = typing.TypeNode(node);
+      if (is_typed) {
+        bounds =
+          Bounds::Both(bounds, NodeProperties::GetBounds(node), typer_->zone());
+      }
+      NodeProperties::SetBounds(node, bounds);
     }
   }
-
-  GenericGraphVisit::Control Post(Node* node) {
-    return GenericGraphVisit::REENTER;
-  }
-};
-
-
-class Typer::WidenVisitor : public Typer::Visitor {
- public:
-  WidenVisitor(Typer* typer, MaybeHandle<Context> context)
-      : Visitor(typer, context) {}
-
-  GenericGraphVisit::Control Pre(Node* node) {
-    if (OperatorProperties::HasValueOutput(node->op())) {
-      Bounds previous = NodeProperties::GetBounds(node);
-      Bounds bounds = TypeNode(node);
-      DCHECK(previous.lower->Is(bounds.lower));
-      DCHECK(previous.upper->Is(bounds.upper));
-      NodeProperties::SetBounds(node, bounds);  // TODO(rossberg): Either?
-      // Stop when nothing changed (but allow re-entry in case it does later).
-      return bounds.Narrows(previous)
-          ? GenericGraphVisit::DEFER : GenericGraphVisit::REENTER;
-    } else {
-      return GenericGraphVisit::SKIP;
-    }
-  }
-
-  GenericGraphVisit::Control Post(Node* node) {
-    return GenericGraphVisit::REENTER;
-  }
-};
-
-
-void Typer::Run(Graph* graph, MaybeHandle<Context> context) {
-  RunVisitor typing(this, context);
-  graph->VisitNodeInputsFromEnd(&typing);
-  // Find least fixpoint.
-  for (NodeSetIter i = typing.redo.begin(); i != typing.redo.end(); ++i) {
-    Widen(graph, *i, context);
-  }
 }
 
 
-void Typer::Narrow(Graph* graph, Node* start, MaybeHandle<Context> context) {
-  NarrowVisitor typing(this, context);
-  graph->VisitNodeUsesFrom(start, &typing);
+// -----------------------------------------------------------------------------
+
+// Helper functions that lift a function f on types to a function on bounds,
+// and use it to type the given node.  Note that f is never called with None
+// as an argument.
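+//
+// For example, lifting NumberToUint32 over the bounds [Range(0, 0), Signed32]
+// yields [Range(0, 0), Unsigned32]: f maps the lower bound to itself and maps
+// the upper bound to Unsigned32.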
+
+
+Bounds Typer::Visitor::TypeUnaryOp(Node* node, UnaryTyperFun f) {
+  Bounds input = Operand(node, 0);
+  Type* upper = input.upper->Is(Type::None())
+      ? Type::None()
+      : f(input.upper, typer_);
+  Type* lower = input.lower->Is(Type::None())
+      ? Type::None()
+      : (input.lower == input.upper || upper->IsConstant())
+      ? upper  // TODO(neis): Extend this to Range(x,x), NaN, MinusZero, ...?
+      : f(input.lower, typer_);
+  // TODO(neis): Figure out what to do with lower bound.
+  return Bounds(lower, upper);
 }
 
 
-void Typer::Widen(Graph* graph, Node* start, MaybeHandle<Context> context) {
-  WidenVisitor typing(this, context);
-  graph->VisitNodeUsesFrom(start, &typing);
+Bounds Typer::Visitor::TypeBinaryOp(Node* node, BinaryTyperFun f) {
+  Bounds left = Operand(node, 0);
+  Bounds right = Operand(node, 1);
+  Type* upper = left.upper->Is(Type::None()) || right.upper->Is(Type::None())
+      ? Type::None()
+      : f(left.upper, right.upper, typer_);
+  Type* lower = left.lower->Is(Type::None()) || right.lower->Is(Type::None())
+      ? Type::None()
+      : ((left.lower == left.upper && right.lower == right.upper) ||
+         upper->IsConstant())
+      ? upper
+      : f(left.lower, right.lower, typer_);
+  // TODO(neis): Figure out what to do with lower bound.
+  return Bounds(lower, upper);
 }
 
 
-void Typer::Init(Node* node) {
-  if (OperatorProperties::HasValueOutput(node->op())) {
-    Visitor typing(this, MaybeHandle<Context>());
-    Bounds bounds = typing.TypeNode(node);
-    NodeProperties::SetBounds(node, bounds);
+Type* Typer::Visitor::Invert(Type* type, Typer* t) {
+  if (type->Is(t->singleton_false)) return t->singleton_true;
+  if (type->Is(t->singleton_true)) return t->singleton_false;
+  return type;
+}
+
+
+Type* Typer::Visitor::FalsifyUndefined(Type* type, Typer* t) {
+  if (type->Is(Type::Undefined())) return t->singleton_false;
+  return type;
+}
+
+
+Type* Typer::Visitor::Rangify(Type* type, Typer* t) {
+  if (type->IsRange()) return type;        // Shortcut.
+  if (!type->Is(t->integer) && !type->Is(Type::Integral32())) {
+    return type;  // Give up on non-integer types.
   }
+  double min = type->Min();
+  double max = type->Max();
+  // Handle the degenerate case of empty bitset types (such as
+  // OtherUnsigned31 and OtherSigned32 on 64-bit architectures).
+  if (std::isnan(min)) {
+    DCHECK(std::isnan(max));
+    return type;
+  }
+  Factory* f = t->isolate()->factory();
+  return Type::Range(f->NewNumber(min), f->NewNumber(max), t->zone());
+}
+
+
+// Type conversion.
+
+
+Type* Typer::Visitor::ToPrimitive(Type* type, Typer* t) {
+  if (type->Is(Type::Primitive()) && !type->Maybe(Type::Receiver())) {
+    return type;
+  }
+  return Type::Primitive();
+}
+
+
+Type* Typer::Visitor::ToBoolean(Type* type, Typer* t) {
+  if (type->Is(Type::Boolean())) return type;
+  if (type->Is(t->falsish)) return t->singleton_false;
+  if (type->Is(t->truish)) return t->singleton_true;
+  if (type->Is(Type::PlainNumber()) && (type->Max() < 0 || 0 < type->Min())) {
+    return t->singleton_true;  // Ruled out nan, -0 and +0.
+  }
+  return Type::Boolean();
+}
+
+
+Type* Typer::Visitor::ToNumber(Type* type, Typer* t) {
+  if (type->Is(Type::Number())) return type;
+  if (type->Is(Type::Null())) return t->singleton_zero;
+  if (type->Is(Type::Undefined())) return Type::NaN();
+  if (type->Is(t->undefined_or_null)) {
+    return Type::Union(Type::NaN(), t->singleton_zero, t->zone());
+  }
+  if (type->Is(t->undefined_or_number)) {
+    return Type::Union(Type::Intersect(type, Type::Number(), t->zone()),
+                       Type::NaN(), t->zone());
+  }
+  if (type->Is(t->singleton_false)) return t->singleton_zero;
+  if (type->Is(t->singleton_true)) return t->singleton_one;
+  if (type->Is(Type::Boolean())) return t->zero_or_one;
+  if (type->Is(t->boolean_or_number)) {
+    return Type::Union(Type::Intersect(type, Type::Number(), t->zone()),
+                       t->zero_or_one, t->zone());
+  }
+  return Type::Number();
+}
+
+
+Type* Typer::Visitor::ToString(Type* type, Typer* t) {
+  if (type->Is(Type::String())) return type;
+  return Type::String();
+}
+
+
+Type* Typer::Visitor::NumberToInt32(Type* type, Typer* t) {
+  // TODO(neis): DCHECK(type->Is(Type::Number()));
+  if (type->Is(Type::Signed32())) return type;
+  if (type->Is(t->zeroish)) return t->singleton_zero;
+  if (type->Is(t->signed32ish)) {
+    return Type::Intersect(Type::Union(type, t->singleton_zero, t->zone()),
+                           Type::Signed32(), t->zone());
+  }
+  return Type::Signed32();
+}
+
+
+Type* Typer::Visitor::NumberToUint32(Type* type, Typer* t) {
+  // TODO(neis): DCHECK(type->Is(Type::Number()));
+  if (type->Is(Type::Unsigned32())) return type;
+  if (type->Is(t->zeroish)) return t->singleton_zero;
+  if (type->Is(t->unsigned32ish)) {
+    return Type::Intersect(Type::Union(type, t->singleton_zero, t->zone()),
+                           Type::Unsigned32(), t->zone());
+  }
+  return Type::Unsigned32();
 }
 
 
@@ -230,64 +584,75 @@
 
 // Control operators.
 
+
 Bounds Typer::Visitor::TypeStart(Node* node) {
-  return Bounds(Type::Internal(zone()));
+  return Bounds(Type::None(zone()), Type::Internal(zone()));
 }
 
 
 // Common operators.
 
+
 Bounds Typer::Visitor::TypeParameter(Node* node) {
   return Bounds::Unbounded(zone());
 }
 
 
 Bounds Typer::Visitor::TypeInt32Constant(Node* node) {
-  // TODO(titzer): only call Type::Of() if the type is not already known.
-  return Bounds(Type::Of(OpParameter<int32_t>(node), zone()));
+  Factory* f = isolate()->factory();
+  Handle<Object> number = f->NewNumber(OpParameter<int32_t>(node));
+  return Bounds(Type::Intersect(
+      Type::Range(number, number, zone()), Type::UntaggedSigned32(), zone()));
 }
 
 
 Bounds Typer::Visitor::TypeInt64Constant(Node* node) {
-  // TODO(titzer): only call Type::Of() if the type is not already known.
-  return Bounds(
-      Type::Of(static_cast<double>(OpParameter<int64_t>(node)), zone()));
+  // TODO(rossberg): This actually seems to be a PointerConstant so far...
+  return Bounds(Type::Internal());  // TODO(rossberg): Add int64 bitset type?
 }
 
 
 Bounds Typer::Visitor::TypeFloat32Constant(Node* node) {
-  // TODO(titzer): only call Type::Of() if the type is not already known.
-  return Bounds(Type::Of(OpParameter<float>(node), zone()));
+  return Bounds(Type::Intersect(
+      Type::Of(OpParameter<float>(node), zone()),
+      Type::UntaggedFloat32(), zone()));
 }
 
 
 Bounds Typer::Visitor::TypeFloat64Constant(Node* node) {
-  // TODO(titzer): only call Type::Of() if the type is not already known.
-  return Bounds(Type::Of(OpParameter<double>(node), zone()));
+  return Bounds(Type::Intersect(
+      Type::Of(OpParameter<double>(node), zone()),
+      Type::UntaggedFloat64(), zone()));
 }
 
 
 Bounds Typer::Visitor::TypeNumberConstant(Node* node) {
-  // TODO(titzer): only call Type::Of() if the type is not already known.
-  return Bounds(Type::Of(OpParameter<double>(node), zone()));
+  Factory* f = isolate()->factory();
+  return Bounds(Type::Constant(
+      f->NewNumber(OpParameter<double>(node)), zone()));
 }
 
 
 Bounds Typer::Visitor::TypeHeapConstant(Node* node) {
-  return Bounds(TypeConstant(OpParameter<Unique<Object> >(node).handle()));
+  return Bounds(TypeConstant(OpParameter<Unique<HeapObject> >(node).handle()));
 }
 
 
 Bounds Typer::Visitor::TypeExternalConstant(Node* node) {
-  return Bounds(Type::Internal(zone()));
+  return Bounds(Type::None(zone()), Type::Internal(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeSelect(Node* node) {
+  return Bounds::Either(Operand(node, 1), Operand(node, 2), zone());
 }
 
 
 Bounds Typer::Visitor::TypePhi(Node* node) {
-  int arity = OperatorProperties::GetValueInputCount(node->op());
-  Bounds bounds = OperandType(node, 0);
+  int arity = node->op()->ValueInputCount();
+  Bounds bounds = Operand(node, 0);
   for (int i = 1; i < arity; ++i) {
-    bounds = Bounds::Either(bounds, OperandType(node, i), zone());
+    bounds = Bounds::Either(bounds, Operand(node, i), zone());
   }
   return bounds;
 }
@@ -299,12 +664,6 @@
 }
 
 
-Bounds Typer::Visitor::TypeControlEffect(Node* node) {
-  UNREACHABLE();
-  return Bounds();
-}
-
-
 Bounds Typer::Visitor::TypeValueEffect(Node* node) {
   UNREACHABLE();
   return Bounds();
@@ -312,18 +671,18 @@
 
 
 Bounds Typer::Visitor::TypeFinish(Node* node) {
-  return OperandType(node, 0);
+  return Operand(node, 0);
 }
 
 
 Bounds Typer::Visitor::TypeFrameState(Node* node) {
   // TODO(rossberg): Ideally FrameState wouldn't have a value output.
-  return Bounds(Type::Internal(zone()));
+  return Bounds(Type::None(zone()), Type::Internal(zone()));
 }
 
 
 Bounds Typer::Visitor::TypeStateValues(Node* node) {
-  return Bounds(Type::Internal(zone()));
+  return Bounds(Type::None(zone()), Type::Internal(zone()));
 }
 
 
@@ -340,159 +699,541 @@
 
 // JS comparison operators.
 
-#define DEFINE_METHOD(x)                       \
-  Bounds Typer::Visitor::Type##x(Node* node) { \
-    return Bounds(Type::Boolean(zone()));      \
+
+Type* Typer::Visitor::JSEqualTyper(Type* lhs, Type* rhs, Typer* t) {
+  if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return t->singleton_false;
+  if (lhs->Is(t->undefined_or_null) && rhs->Is(t->undefined_or_null)) {
+    return t->singleton_true;
   }
-JS_COMPARE_BINOP_LIST(DEFINE_METHOD)
-#undef DEFINE_METHOD
+  if (lhs->Is(Type::Number()) && rhs->Is(Type::Number()) &&
+      (lhs->Max() < rhs->Min() || lhs->Min() > rhs->Max())) {
+    return t->singleton_false;
+  }
+  if (lhs->IsConstant() && rhs->Is(lhs)) {
+    // Types are equal and are inhabited only by a single semantic value,
+    // which is not nan due to the earlier check.
+    // TODO(neis): Extend this to Range(x,x), MinusZero, ...?
+    return t->singleton_true;
+  }
+  return Type::Boolean();
+}
+
+
+Type* Typer::Visitor::JSNotEqualTyper(Type* lhs, Type* rhs, Typer* t) {
+  return Invert(JSEqualTyper(lhs, rhs, t), t);
+}
+
+
+static Type* JSType(Type* type) {
+  if (type->Is(Type::Boolean())) return Type::Boolean();
+  if (type->Is(Type::String())) return Type::String();
+  if (type->Is(Type::Number())) return Type::Number();
+  if (type->Is(Type::Undefined())) return Type::Undefined();
+  if (type->Is(Type::Null())) return Type::Null();
+  if (type->Is(Type::Symbol())) return Type::Symbol();
+  if (type->Is(Type::Receiver())) return Type::Receiver();  // JS "Object"
+  return Type::Any();
+}
+
+
+Type* Typer::Visitor::JSStrictEqualTyper(Type* lhs, Type* rhs, Typer* t) {
+  if (!JSType(lhs)->Maybe(JSType(rhs))) return t->singleton_false;
+  if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return t->singleton_false;
+  if (lhs->Is(Type::Number()) && rhs->Is(Type::Number()) &&
+      (lhs->Max() < rhs->Min() || lhs->Min() > rhs->Max())) {
+    return t->singleton_false;
+  }
+  if (lhs->IsConstant() && rhs->Is(lhs)) {
+    // Types are equal and are inhabited only by a single semantic value,
+    // which is not nan due to the earlier check.
+    return t->singleton_true;
+  }
+  return Type::Boolean();
+}
+
+
+Type* Typer::Visitor::JSStrictNotEqualTyper(Type* lhs, Type* rhs, Typer* t) {
+  return Invert(JSStrictEqualTyper(lhs, rhs, t), t);
+}
+
+
+// The ECMAScript specification defines the four relational comparison operators
+// (<, <=, >=, >) with the help of a single abstract one.  It behaves like <
+// but returns undefined when the inputs cannot be compared.
+// We implement the typing analogously.
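+// For example, for lhs : Range(1, 3) and rhs : Range(5, 9) the comparison
+// lhs < rhs is always true, since lhs->Max() < rhs->Min() and neither input
+// can be NaN.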
+Type* Typer::Visitor::JSCompareTyper(Type* lhs, Type* rhs, Typer* t) {
+  lhs = ToPrimitive(lhs, t);
+  rhs = ToPrimitive(rhs, t);
+  if (lhs->Maybe(Type::String()) && rhs->Maybe(Type::String())) {
+    return Type::Boolean();
+  }
+  lhs = ToNumber(lhs, t);
+  rhs = ToNumber(rhs, t);
+  if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::Undefined();
+  if (lhs->IsConstant() && rhs->Is(lhs)) {
+    // Types are equal and are inhabited only by a single semantic value,
+    // which is not NaN due to the previous check.
+    return t->singleton_false;
+  }
+  if (lhs->Min() >= rhs->Max()) return t->singleton_false;
+  if (lhs->Max() < rhs->Min() &&
+      !lhs->Maybe(Type::NaN()) && !rhs->Maybe(Type::NaN())) {
+    return t->singleton_true;
+  }
+  return Type::Boolean();
+}
+
+
+Type* Typer::Visitor::JSLessThanTyper(Type* lhs, Type* rhs, Typer* t) {
+  return FalsifyUndefined(JSCompareTyper(lhs, rhs, t), t);
+}
+
+
+Type* Typer::Visitor::JSGreaterThanTyper(Type* lhs, Type* rhs, Typer* t) {
+  return FalsifyUndefined(JSCompareTyper(rhs, lhs, t), t);
+}
+
+
+Type* Typer::Visitor::JSLessThanOrEqualTyper(Type* lhs, Type* rhs, Typer* t) {
+  return FalsifyUndefined(Invert(JSCompareTyper(rhs, lhs, t), t), t);
+}
+
+
+Type* Typer::Visitor::JSGreaterThanOrEqualTyper(
+    Type* lhs, Type* rhs, Typer* t) {
+  return FalsifyUndefined(Invert(JSCompareTyper(lhs, rhs, t), t), t);
+}
 
 
 // JS bitwise operators.
 
-Bounds Typer::Visitor::TypeJSBitwiseOr(Node* node) {
-  Bounds left = OperandType(node, 0);
-  Bounds right = OperandType(node, 1);
-  Type* upper = Type::Union(left.upper, right.upper, zone());
-  if (!upper->Is(Type::Signed32())) upper = Type::Signed32(zone());
-  Type* lower = Type::Intersect(Type::SignedSmall(zone()), upper, zone());
-  return Bounds(lower, upper);
+
+Type* Typer::Visitor::JSBitwiseOrTyper(Type* lhs, Type* rhs, Typer* t) {
+  Factory* f = t->isolate()->factory();
+  lhs = NumberToInt32(ToNumber(lhs, t), t);
+  rhs = NumberToInt32(ToNumber(rhs, t), t);
+  double lmin = lhs->Min();
+  double rmin = rhs->Min();
+  double lmax = lhs->Max();
+  double rmax = rhs->Max();
+  // Or-ing two values x and y results in a value no smaller than min(x, y),
+  // and even no smaller than max(x, y) if both values are non-negative.
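+  // For example, for lhs : Range(1, 4) and rhs : Range(8, 8) every result
+  // lies in [9, 12], so min = std::max(lmin, rmin) = 8 is a sound lower bound.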
+  double min =
+      lmin >= 0 && rmin >= 0 ? std::max(lmin, rmin) : std::min(lmin, rmin);
+  double max = Type::Signed32()->Max();
+
+  // Or-ing with 0 is essentially a conversion to int32.
+  if (rmin == 0 && rmax == 0) {
+    min = lmin;
+    max = lmax;
+  }
+  if (lmin == 0 && lmax == 0) {
+    min = rmin;
+    max = rmax;
+  }
+
+  if (lmax < 0 || rmax < 0) {
+    // Or-ing two values of which at least one is negative results in a negative
+    // value.
+    max = std::min(max, -1.0);
+  }
+  return Type::Range(f->NewNumber(min), f->NewNumber(max), t->zone());
+  // TODO(neis): Be precise for singleton inputs, here and elsewhere.
 }
 
 
-Bounds Typer::Visitor::TypeJSBitwiseAnd(Node* node) {
-  Bounds left = OperandType(node, 0);
-  Bounds right = OperandType(node, 1);
-  Type* upper = Type::Union(left.upper, right.upper, zone());
-  if (!upper->Is(Type::Signed32())) upper = Type::Signed32(zone());
-  Type* lower = Type::Intersect(Type::SignedSmall(zone()), upper, zone());
-  return Bounds(lower, upper);
+Type* Typer::Visitor::JSBitwiseAndTyper(Type* lhs, Type* rhs, Typer* t) {
+  Factory* f = t->isolate()->factory();
+  lhs = NumberToInt32(ToNumber(lhs, t), t);
+  rhs = NumberToInt32(ToNumber(rhs, t), t);
+  double lmin = lhs->Min();
+  double rmin = rhs->Min();
+  double lmax = lhs->Max();
+  double rmax = rhs->Max();
+  double min = Type::Signed32()->Min();
+  // And-ing two values x and y results in a value no larger than max(x, y),
+  // and even no larger than min(x, y) if both values are non-negative.
+  double max =
+      lmin >= 0 && rmin >= 0 ? std::min(lmax, rmax) : std::max(lmax, rmax);
+  // And-ing with a non-negative value x causes the result to be between
+  // zero and x.
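+  // For example, masking with rhs : Range(255, 255) yields a result in
+  // [0, 255] regardless of lhs, as when extracting a byte with x & 0xff.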
+  if (lmin >= 0) {
+    min = 0;
+    max = std::min(max, lmax);
+  }
+  if (rmin >= 0) {
+    min = 0;
+    max = std::min(max, rmax);
+  }
+  return Type::Range(f->NewNumber(min), f->NewNumber(max), t->zone());
 }
 
 
-Bounds Typer::Visitor::TypeJSBitwiseXor(Node* node) {
-  return Bounds(Type::SignedSmall(zone()), Type::Signed32(zone()));
+Type* Typer::Visitor::JSBitwiseXorTyper(Type* lhs, Type* rhs, Typer* t) {
+  lhs = NumberToInt32(ToNumber(lhs, t), t);
+  rhs = NumberToInt32(ToNumber(rhs, t), t);
+  double lmin = lhs->Min();
+  double rmin = rhs->Min();
+  double lmax = lhs->Max();
+  double rmax = rhs->Max();
+  if ((lmin >= 0 && rmin >= 0) || (lmax < 0 && rmax < 0)) {
+    // Xor-ing negative or non-negative values results in a non-negative value.
+    return Type::NonNegativeSigned32();
+  }
+  if ((lmax < 0 && rmin >= 0) || (lmin >= 0 && rmax < 0)) {
+    // Xor-ing a negative and a non-negative value results in a negative value.
+    // TODO(jarin) Use a range here.
+    return Type::NegativeSigned32();
+  }
+  return Type::Signed32();
 }
 
 
-Bounds Typer::Visitor::TypeJSShiftLeft(Node* node) {
-  return Bounds(Type::SignedSmall(zone()), Type::Signed32(zone()));
+Type* Typer::Visitor::JSShiftLeftTyper(Type* lhs, Type* rhs, Typer* t) {
+  return Type::Signed32();
 }
 
 
-Bounds Typer::Visitor::TypeJSShiftRight(Node* node) {
-  return Bounds(Type::SignedSmall(zone()), Type::Signed32(zone()));
+Type* Typer::Visitor::JSShiftRightTyper(Type* lhs, Type* rhs, Typer* t) {
+  lhs = NumberToInt32(ToNumber(lhs, t), t);
+  rhs = NumberToUint32(ToNumber(rhs, t), t);
+  double min = kMinInt;
+  double max = kMaxInt;
+  if (lhs->Min() >= 0) {
+    // Right-shifting a non-negative value cannot make it negative, nor larger.
+    min = std::max(min, 0.0);
+    max = std::min(max, lhs->Max());
+  }
+  if (lhs->Max() < 0) {
+    // Right-shifting a negative value cannot make it non-negative, nor smaller.
+    min = std::max(min, lhs->Min());
+    max = std::min(max, -1.0);
+  }
+  if (rhs->Min() > 0 && rhs->Max() <= 31) {
+    // Right-shifting by a positive value yields a small integer value.
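+    // For example, rhs : Range(16, 31) gives shift_min = kMinInt >> 16 =
+    // -32768 and shift_max = kMaxInt >> 16 = 32767.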
+    double shift_min = kMinInt >> static_cast<int>(rhs->Min());
+    double shift_max = kMaxInt >> static_cast<int>(rhs->Min());
+    min = std::max(min, shift_min);
+    max = std::min(max, shift_max);
+  }
+  // TODO(jarin) Ideally, the following micro-optimization should be performed
+  // by the type constructor.
+  if (max != Type::Signed32()->Max() || min != Type::Signed32()->Min()) {
+    Factory* f = t->isolate()->factory();
+    return Type::Range(f->NewNumber(min), f->NewNumber(max), t->zone());
+  }
+  return Type::Signed32();
 }
 
 
-Bounds Typer::Visitor::TypeJSShiftRightLogical(Node* node) {
-  return Bounds(Type::UnsignedSmall(zone()), Type::Unsigned32(zone()));
+Type* Typer::Visitor::JSShiftRightLogicalTyper(Type* lhs, Type* rhs, Typer* t) {
+  lhs = NumberToUint32(ToNumber(lhs, t), t);
+  Factory* f = t->isolate()->factory();
+  // Logical right-shifting any value cannot make it larger.
+  Handle<Object> min = f->NewNumber(0);
+  Handle<Object> max = f->NewNumber(lhs->Max());
+  return Type::Range(min, max, t->zone());
 }
 
 
 // JS arithmetic operators.
 
-Bounds Typer::Visitor::TypeJSAdd(Node* node) {
-  Bounds left = OperandType(node, 0);
-  Bounds right = OperandType(node, 1);
-  Type* lower =
-      left.lower->Is(Type::None()) || right.lower->Is(Type::None()) ?
-          Type::None(zone()) :
-      left.lower->Is(Type::Number()) && right.lower->Is(Type::Number()) ?
-          Type::SignedSmall(zone()) :
-      left.lower->Is(Type::String()) || right.lower->Is(Type::String()) ?
-          Type::String(zone()) : Type::None(zone());
-  Type* upper =
-      left.upper->Is(Type::None()) && right.upper->Is(Type::None()) ?
-          Type::None(zone()) :
-      left.upper->Is(Type::Number()) && right.upper->Is(Type::Number()) ?
-          Type::Number(zone()) :
-      left.upper->Is(Type::String()) || right.upper->Is(Type::String()) ?
-          Type::String(zone()) : Type::NumberOrString(zone());
-  return Bounds(lower, upper);
+
+// Returns the array's least element, ignoring NaN.
+// There must be at least one non-NaN element.
+// Any -0 is converted to 0.
+static double array_min(double a[], size_t n) {
+  DCHECK(n != 0);
+  double x = +V8_INFINITY;
+  for (size_t i = 0; i < n; ++i) {
+    if (!std::isnan(a[i])) {
+      x = std::min(a[i], x);
+    }
+  }
+  DCHECK(!std::isnan(x));
+  return x == 0 ? 0 : x;  // -0 -> 0
 }
 
 
-Bounds Typer::Visitor::TypeJSSubtract(Node* node) {
-  return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+// Returns the array's greatest element, ignoring NaN.
+// There must be at least one non-NaN element.
+// Any -0 is converted to 0.
+static double array_max(double a[], size_t n) {
+  DCHECK(n != 0);
+  double x = -V8_INFINITY;
+  for (size_t i = 0; i < n; ++i) {
+    if (!std::isnan(a[i])) {
+      x = std::max(a[i], x);
+    }
+  }
+  DCHECK(!std::isnan(x));
+  return x == 0 ? 0 : x;  // -0 -> 0
 }
 
 
-Bounds Typer::Visitor::TypeJSMultiply(Node* node) {
-  return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+Type* Typer::Visitor::JSAddRanger(Type::RangeType* lhs, Type::RangeType* rhs,
+                                  Typer* t) {
+  double results[4];
+  results[0] = lhs->Min()->Number() + rhs->Min()->Number();
+  results[1] = lhs->Min()->Number() + rhs->Max()->Number();
+  results[2] = lhs->Max()->Number() + rhs->Min()->Number();
+  results[3] = lhs->Max()->Number() + rhs->Max()->Number();
+  // Since none of the inputs can be -0, the result cannot be -0 either.
+  // However, it can be nan (the sum of two infinities of opposite sign).
+  // On the other hand, if none of the "results" above is nan, then the actual
+  // result cannot be nan either.
+  int nans = 0;
+  for (int i = 0; i < 4; ++i) {
+    if (std::isnan(results[i])) ++nans;
+  }
+  if (nans == 4) return Type::NaN();  // [-inf..-inf] + [inf..inf] or vice versa
+  Factory* f = t->isolate()->factory();
+  Type* range = Type::Range(f->NewNumber(array_min(results, 4)),
+                            f->NewNumber(array_max(results, 4)), t->zone());
+  return nans == 0 ? range : Type::Union(range, Type::NaN(), t->zone());
+  // Examples:
+  //   [-inf, -inf] + [+inf, +inf] = NaN
+  //   [-inf, -inf] + [n, +inf] = [-inf, -inf] \/ NaN
+  //   [-inf, +inf] + [n, +inf] = [-inf, +inf] \/ NaN
+  //   [-inf, m] + [n, +inf] = [-inf, +inf] \/ NaN
 }
 
 
-Bounds Typer::Visitor::TypeJSDivide(Node* node) {
-  return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+Type* Typer::Visitor::JSAddTyper(Type* lhs, Type* rhs, Typer* t) {
+  lhs = ToPrimitive(lhs, t);
+  rhs = ToPrimitive(rhs, t);
+  if (lhs->Maybe(Type::String()) || rhs->Maybe(Type::String())) {
+    if (lhs->Is(Type::String()) || rhs->Is(Type::String())) {
+      return Type::String();
+    } else {
+      return Type::NumberOrString();
+    }
+  }
+  lhs = Rangify(ToNumber(lhs, t), t);
+  rhs = Rangify(ToNumber(rhs, t), t);
+  if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
+  if (lhs->IsRange() && rhs->IsRange()) {
+    return JSAddRanger(lhs->AsRange(), rhs->AsRange(), t);
+  }
+  // TODO(neis): Deal with numeric bitsets here and elsewhere.
+  return Type::Number();
 }
 
 
-Bounds Typer::Visitor::TypeJSModulus(Node* node) {
-  return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+Type* Typer::Visitor::JSSubtractRanger(Type::RangeType* lhs,
+                                       Type::RangeType* rhs, Typer* t) {
+  double results[4];
+  results[0] = lhs->Min()->Number() - rhs->Min()->Number();
+  results[1] = lhs->Min()->Number() - rhs->Max()->Number();
+  results[2] = lhs->Max()->Number() - rhs->Min()->Number();
+  results[3] = lhs->Max()->Number() - rhs->Max()->Number();
+  // Since none of the inputs can be -0, the result cannot be -0.
+  // However, it can be nan (the subtraction of two infinities of same sign).
+  // On the other hand, if none of the "results" above is nan, then the actual
+  // result cannot be nan either.
+  int nans = 0;
+  for (int i = 0; i < 4; ++i) {
+    if (std::isnan(results[i])) ++nans;
+  }
+  if (nans == 4) return Type::NaN();  // [inf..inf] - [inf..inf] (all same sign)
+  Factory* f = t->isolate()->factory();
+  Type* range = Type::Range(f->NewNumber(array_min(results, 4)),
+                            f->NewNumber(array_max(results, 4)), t->zone());
+  return nans == 0 ? range : Type::Union(range, Type::NaN(), t->zone());
+  // Examples:
+  //   [-inf, +inf] - [-inf, +inf] = [-inf, +inf] \/ NaN
+  //   [-inf, -inf] - [-inf, -inf] = NaN
+  //   [-inf, -inf] - [n, +inf] = [-inf, -inf] \/ NaN
+  //   [m, +inf] - [-inf, n] = [-inf, +inf] \/ NaN
+}
+
+
+Type* Typer::Visitor::JSSubtractTyper(Type* lhs, Type* rhs, Typer* t) {
+  lhs = Rangify(ToNumber(lhs, t), t);
+  rhs = Rangify(ToNumber(rhs, t), t);
+  if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
+  if (lhs->IsRange() && rhs->IsRange()) {
+    return JSSubtractRanger(lhs->AsRange(), rhs->AsRange(), t);
+  }
+  return Type::Number();
+}
+
+
+Type* Typer::Visitor::JSMultiplyRanger(Type::RangeType* lhs,
+                                       Type::RangeType* rhs, Typer* t) {
+  double results[4];
+  double lmin = lhs->Min()->Number();
+  double lmax = lhs->Max()->Number();
+  double rmin = rhs->Min()->Number();
+  double rmax = rhs->Max()->Number();
+  results[0] = lmin * rmin;
+  results[1] = lmin * rmax;
+  results[2] = lmax * rmin;
+  results[3] = lmax * rmax;
+  // If the result may be nan, we give up on calculating a precise type, because
+  // the discontinuity makes it too complicated.  Note that even if none of the
+  // "results" above is nan, the actual result may still be, so we have to do a
+  // different check:
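+  // For example, for lhs : Range(-1, 1) and rhs : Range(1, V8_INFINITY) all
+  // four corner products are non-nan, yet 0 * V8_INFINITY = nan is a possible
+  // result.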
+  bool maybe_nan = (lhs->Maybe(t->singleton_zero) &&
+                    (rmin == -V8_INFINITY || rmax == +V8_INFINITY)) ||
+                   (rhs->Maybe(t->singleton_zero) &&
+                    (lmin == -V8_INFINITY || lmax == +V8_INFINITY));
+  if (maybe_nan) return t->weakint;  // Giving up.
+  bool maybe_minuszero = (lhs->Maybe(t->singleton_zero) && rmin < 0) ||
+                         (rhs->Maybe(t->singleton_zero) && lmin < 0);
+  Factory* f = t->isolate()->factory();
+  Type* range = Type::Range(f->NewNumber(array_min(results, 4)),
+                            f->NewNumber(array_max(results, 4)), t->zone());
+  return maybe_minuszero ? Type::Union(range, Type::MinusZero(), t->zone())
+                         : range;
+}
+
+
+Type* Typer::Visitor::JSMultiplyTyper(Type* lhs, Type* rhs, Typer* t) {
+  lhs = Rangify(ToNumber(lhs, t), t);
+  rhs = Rangify(ToNumber(rhs, t), t);
+  if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
+  if (lhs->IsRange() && rhs->IsRange()) {
+    return JSMultiplyRanger(lhs->AsRange(), rhs->AsRange(), t);
+  }
+  return Type::Number();
+}
+
+
+Type* Typer::Visitor::JSDivideTyper(Type* lhs, Type* rhs, Typer* t) {
+  lhs = ToNumber(lhs, t);
+  rhs = ToNumber(rhs, t);
+  if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
+  // Division is tricky, so all we do is try ruling out nan.
+  // TODO(neis): try ruling out -0 as well?
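+  // A nan result can only come from a nan operand, a zero divided by a zero,
+  // or an infinity divided by an infinity; the test below over-approximates
+  // all three cases.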
+  bool maybe_nan =
+      lhs->Maybe(Type::NaN()) || rhs->Maybe(t->zeroish) ||
+      ((lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) &&
+       (rhs->Min() == -V8_INFINITY || rhs->Max() == +V8_INFINITY));
+  return maybe_nan ? Type::Number() : Type::OrderedNumber();
+}
+
+
+Type* Typer::Visitor::JSModulusRanger(Type::RangeType* lhs,
+                                      Type::RangeType* rhs, Typer* t) {
+  double lmin = lhs->Min()->Number();
+  double lmax = lhs->Max()->Number();
+  double rmin = rhs->Min()->Number();
+  double rmax = rhs->Max()->Number();
+
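+  // For these integral ranges, |lhs % rhs| is bounded both by the largest
+  // magnitude of lhs and by the largest magnitude of rhs minus one; the
+  // smaller of the two, {abs}, limits the result on either side of zero.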
+  double labs = std::max(std::abs(lmin), std::abs(lmax));
+  double rabs = std::max(std::abs(rmin), std::abs(rmax)) - 1;
+  double abs = std::min(labs, rabs);
+  bool maybe_minus_zero = false;
+  double omin = 0;
+  double omax = 0;
+  if (lmin >= 0) {  // {lhs} positive.
+    omin = 0;
+    omax = abs;
+  } else if (lmax <= 0) {  // {lhs} negative.
+    omin = 0 - abs;
+    omax = 0;
+    maybe_minus_zero = true;
+  } else {
+    omin = 0 - abs;
+    omax = abs;
+    maybe_minus_zero = true;
+  }
+
+  Factory* f = t->isolate()->factory();
+  Type* result = Type::Range(f->NewNumber(omin), f->NewNumber(omax), t->zone());
+  if (maybe_minus_zero)
+    result = Type::Union(result, Type::MinusZero(), t->zone());
+  return result;
+}
+
+
+Type* Typer::Visitor::JSModulusTyper(Type* lhs, Type* rhs, Typer* t) {
+  lhs = ToNumber(lhs, t);
+  rhs = ToNumber(rhs, t);
+  if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
+
+  if (lhs->Maybe(Type::NaN()) || rhs->Maybe(t->zeroish) ||
+      lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) {
+    // Result may be NaN.
+    return Type::Number();
+  }
+
+  lhs = Rangify(lhs, t);
+  rhs = Rangify(rhs, t);
+  if (lhs->IsRange() && rhs->IsRange()) {
+    return JSModulusRanger(lhs->AsRange(), rhs->AsRange(), t);
+  }
+  return Type::OrderedNumber();
 }
 
 
 // JS unary operators.
 
+
+Type* Typer::Visitor::JSUnaryNotTyper(Type* type, Typer* t) {
+  return Invert(ToBoolean(type, t), t);
+}
+
+
 Bounds Typer::Visitor::TypeJSUnaryNot(Node* node) {
-  return Bounds(Type::Boolean(zone()));
+  return TypeUnaryOp(node, JSUnaryNotTyper);
 }
 
 
 Bounds Typer::Visitor::TypeJSTypeOf(Node* node) {
-  return Bounds(Type::InternalizedString(zone()));
+  return Bounds(Type::None(zone()), Type::InternalizedString(zone()));
 }
 
 
 // JS conversion operators.
 
+
 Bounds Typer::Visitor::TypeJSToBoolean(Node* node) {
-  return Bounds(Type::Boolean(zone()));
+  return TypeUnaryOp(node, ToBoolean);
 }
 
 
 Bounds Typer::Visitor::TypeJSToNumber(Node* node) {
-  return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+  return TypeUnaryOp(node, ToNumber);
 }
 
 
 Bounds Typer::Visitor::TypeJSToString(Node* node) {
-  return Bounds(Type::None(zone()), Type::String(zone()));
+  return TypeUnaryOp(node, ToString);
 }
 
 
 Bounds Typer::Visitor::TypeJSToName(Node* node) {
-  return Bounds(Type::None(zone()), Type::Name(zone()));
+  return Bounds(Type::None(), Type::Name());
 }
 
 
 Bounds Typer::Visitor::TypeJSToObject(Node* node) {
-  return Bounds(Type::None(zone()), Type::Receiver(zone()));
+  return Bounds(Type::None(), Type::Receiver());
 }
 
 
 // JS object operators.
 
+
 Bounds Typer::Visitor::TypeJSCreate(Node* node) {
-  return Bounds(Type::None(zone()), Type::Object(zone()));
+  return Bounds(Type::None(), Type::Object());
+}
+
+
+Type* Typer::Visitor::JSLoadPropertyTyper(Type* object, Type* name, Typer* t) {
+  // TODO(rossberg): Use range types and sized array types to filter undefined.
+  if (object->IsArray() && name->Is(Type::Integral32())) {
+    return Type::Union(
+        object->AsArray()->Element(), Type::Undefined(), t->zone());
+  }
+  return Type::Any();
 }
 
 
 Bounds Typer::Visitor::TypeJSLoadProperty(Node* node) {
-  Bounds object = OperandType(node, 0);
-  Bounds name = OperandType(node, 1);
-  Bounds result = Bounds::Unbounded(zone());
-  // TODO(rossberg): Use range types and sized array types to filter undefined.
-  if (object.lower->IsArray() && name.lower->Is(Type::Integral32())) {
-    result.lower = Type::Union(
-        object.lower->AsArray()->Element(), Type::Undefined(zone()), zone());
-  }
-  if (object.upper->IsArray() && name.upper->Is(Type::Integral32())) {
-    result.upper = Type::Union(
-        object.upper->AsArray()->Element(),  Type::Undefined(zone()), zone());
-  }
-  return result;
+  return TypeBinaryOp(node, JSLoadPropertyTyper);
 }
 
 
@@ -501,6 +1242,52 @@
 }
 
 
+// Returns a somewhat larger range if we previously assigned
+// a (smaller) range to this node. This is used to speed up
+// the fixpoint calculation in case there appears to be a loop
+// in the graph. In the current implementation, we increase
+// the limits to the closest power of two.
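+// For example, if a loop keeps bumping a node's range maximum 1 -> 2 -> 3
+// -> ..., Weaken jumps it to the first entry of weaken_max_limits_ that is
+// at least the new maximum (2^30 - 1 at first), so the fixpoint is reached
+// after a few widening steps instead of one step per loop iteration.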
+Type* Typer::Visitor::Weaken(Type* current_type, Type* previous_type) {
+  Type::RangeType* previous = previous_type->GetRange();
+  Type::RangeType* current = current_type->GetRange();
+  if (previous != NULL && current != NULL) {
+    double current_min = current->Min()->Number();
+    Handle<Object> new_min = current->Min();
+
+    // Find the closest lower entry in the list of allowed
+    // minima (or negative infinity if there is no such entry).
+    if (current_min != previous->Min()->Number()) {
+      new_min = typer_->integer->AsRange()->Min();
+      for (const auto val : typer_->weaken_min_limits_) {
+        if (val->Number() <= current_min) {
+          new_min = val;
+          break;
+        }
+      }
+    }
+
+    double current_max = current->Max()->Number();
+    Handle<Object> new_max = current->Max();
+    // Find the closest greater entry in the list of allowed
+    // maxima (or infinity if there is no such entry).
+    if (current_max != previous->Max()->Number()) {
+      new_max = typer_->integer->AsRange()->Max();
+      for (const auto val : typer_->weaken_max_limits_) {
+        if (val->Number() >= current_max) {
+          new_max = val;
+          break;
+        }
+      }
+    }
+
+    return Type::Union(current_type,
+                       Type::Range(new_min, new_max, typer_->zone()),
+                       typer_->zone());
+  }
+  return current_type;
+}
+
+
 Bounds Typer::Visitor::TypeJSStoreProperty(Node* node) {
   UNREACHABLE();
   return Bounds();
@@ -514,30 +1301,36 @@
 
 
 Bounds Typer::Visitor::TypeJSDeleteProperty(Node* node) {
-  return Bounds(Type::Boolean(zone()));
+  return Bounds(Type::None(zone()), Type::Boolean(zone()));
 }
 
 
 Bounds Typer::Visitor::TypeJSHasProperty(Node* node) {
-  return Bounds(Type::Boolean(zone()));
+  return Bounds(Type::None(zone()), Type::Boolean(zone()));
 }
 
 
 Bounds Typer::Visitor::TypeJSInstanceOf(Node* node) {
-  return Bounds(Type::Boolean(zone()));
+  return Bounds(Type::None(zone()), Type::Boolean(zone()));
 }
 
 
 // JS context operators.
 
+
 Bounds Typer::Visitor::TypeJSLoadContext(Node* node) {
-  Bounds outer = OperandType(node, 0);
-  DCHECK(outer.upper->Maybe(Type::Internal()));
+  Bounds outer = Operand(node, 0);
+  Type* context_type = outer.upper;
+  if (context_type->Is(Type::None())) {
+    // Upper bound of context is not yet known.
+    return Bounds(Type::None(), Type::Any());
+  }
+
+  DCHECK(context_type->Maybe(Type::Internal()));
   // TODO(rossberg): More precisely, instead of the above assertion, we should
   // back-propagate the constraint that it has to be a subtype of Internal.
 
   ContextAccess access = OpParameter<ContextAccess>(node);
-  Type* context_type = outer.upper;
   MaybeHandle<Context> context;
   if (context_type->IsConstant()) {
     context = Handle<Context>::cast(context_type->AsConstant()->Value());
@@ -547,7 +1340,7 @@
   // bound.
   // TODO(rossberg): Could use scope info to fix upper bounds for constant
   // bindings if we know that this code is never shared.
-  for (int i = access.depth(); i > 0; --i) {
+  for (size_t i = access.depth(); i > 0; --i) {
     if (context_type->IsContext()) {
       context_type = context_type->AsContext()->Outer();
       if (context_type->IsConstant()) {
@@ -561,9 +1354,10 @@
     return Bounds::Unbounded(zone());
   } else {
     Handle<Object> value =
-        handle(context.ToHandleChecked()->get(access.index()), isolate());
+        handle(context.ToHandleChecked()->get(static_cast<int>(access.index())),
+               isolate());
     Type* lower = TypeConstant(value);
-    return Bounds(lower, Type::Any(zone()));
+    return Bounds(lower, Type::Any());
   }
 }
 
@@ -575,61 +1369,62 @@
 
 
 Bounds Typer::Visitor::TypeJSCreateFunctionContext(Node* node) {
-  Type* outer = ContextType(node);
-  return Bounds(Type::Context(outer, zone()));
+  Bounds outer = ContextOperand(node);
+  return Bounds(Type::Context(outer.upper, zone()));
 }
 
 
 Bounds Typer::Visitor::TypeJSCreateCatchContext(Node* node) {
-  Type* outer = ContextType(node);
-  return Bounds(Type::Context(outer, zone()));
+  Bounds outer = ContextOperand(node);
+  return Bounds(Type::Context(outer.upper, zone()));
 }
 
 
 Bounds Typer::Visitor::TypeJSCreateWithContext(Node* node) {
-  Type* outer = ContextType(node);
-  return Bounds(Type::Context(outer, zone()));
+  Bounds outer = ContextOperand(node);
+  return Bounds(Type::Context(outer.upper, zone()));
 }
 
 
 Bounds Typer::Visitor::TypeJSCreateBlockContext(Node* node) {
-  Type* outer = ContextType(node);
-  return Bounds(Type::Context(outer, zone()));
+  Bounds outer = ContextOperand(node);
+  return Bounds(Type::Context(outer.upper, zone()));
 }
 
 
 Bounds Typer::Visitor::TypeJSCreateModuleContext(Node* node) {
   // TODO(rossberg): this is probably incorrect
-  Type* outer = ContextType(node);
-  return Bounds(Type::Context(outer, zone()));
+  Bounds outer = ContextOperand(node);
+  return Bounds(Type::Context(outer.upper, zone()));
 }
 
 
-Bounds Typer::Visitor::TypeJSCreateGlobalContext(Node* node) {
-  Type* outer = ContextType(node);
-  return Bounds(Type::Context(outer, zone()));
+Bounds Typer::Visitor::TypeJSCreateScriptContext(Node* node) {
+  Bounds outer = ContextOperand(node);
+  return Bounds(Type::Context(outer.upper, zone()));
 }
 
 
 // JS other operators.
 
+
 Bounds Typer::Visitor::TypeJSYield(Node* node) {
   return Bounds::Unbounded(zone());
 }
 
 
 Bounds Typer::Visitor::TypeJSCallConstruct(Node* node) {
-  return Bounds(Type::None(zone()), Type::Receiver(zone()));
+  return Bounds(Type::None(), Type::Receiver());
+}
+
+
+Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
+  return fun->IsFunction() ? fun->AsFunction()->Result() : Type::Any();
 }
 
 
 Bounds Typer::Visitor::TypeJSCallFunction(Node* node) {
-  Bounds fun = OperandType(node, 0);
-  Type* lower = fun.lower->IsFunction()
-      ? fun.lower->AsFunction()->Result() : Type::None(zone());
-  Type* upper = fun.upper->IsFunction()
-      ? fun.upper->AsFunction()->Result() : Type::Any(zone());
-  return Bounds(lower, upper);
+  return TypeUnaryOp(node, JSCallFunctionTyper);  // We ignore argument types.
 }
 
 
@@ -645,143 +1440,177 @@
 
 // Simplified operators.
 
+
+Bounds Typer::Visitor::TypeAnyToBoolean(Node* node) {
+  return TypeUnaryOp(node, ToBoolean);
+}
+
+
 Bounds Typer::Visitor::TypeBooleanNot(Node* node) {
-  return Bounds(Type::Boolean(zone()));
+  return Bounds(Type::None(zone()), Type::Boolean(zone()));
 }
 
 
 Bounds Typer::Visitor::TypeBooleanToNumber(Node* node) {
-  return Bounds(Type::Number(zone()));
+  return Bounds(Type::None(zone()), typer_->zero_or_one);
 }
 
 
 Bounds Typer::Visitor::TypeNumberEqual(Node* node) {
-  return Bounds(Type::Boolean(zone()));
+  return Bounds(Type::None(zone()), Type::Boolean(zone()));
 }
 
 
 Bounds Typer::Visitor::TypeNumberLessThan(Node* node) {
-  return Bounds(Type::Boolean(zone()));
+  return Bounds(Type::None(zone()), Type::Boolean(zone()));
 }
 
 
 Bounds Typer::Visitor::TypeNumberLessThanOrEqual(Node* node) {
-  return Bounds(Type::Boolean(zone()));
+  return Bounds(Type::None(zone()), Type::Boolean(zone()));
 }
 
 
 Bounds Typer::Visitor::TypeNumberAdd(Node* node) {
-  return Bounds(Type::Number(zone()));
+  return Bounds(Type::None(zone()), Type::Number(zone()));
 }
 
 
 Bounds Typer::Visitor::TypeNumberSubtract(Node* node) {
-  return Bounds(Type::Number(zone()));
+  return Bounds(Type::None(zone()), Type::Number(zone()));
 }
 
 
 Bounds Typer::Visitor::TypeNumberMultiply(Node* node) {
-  return Bounds(Type::Number(zone()));
+  return Bounds(Type::None(zone()), Type::Number(zone()));
 }
 
 
 Bounds Typer::Visitor::TypeNumberDivide(Node* node) {
-  return Bounds(Type::Number(zone()));
+  return Bounds(Type::None(zone()), Type::Number(zone()));
 }
 
 
 Bounds Typer::Visitor::TypeNumberModulus(Node* node) {
-  return Bounds(Type::Number(zone()));
+  return Bounds(Type::None(zone()), Type::Number(zone()));
 }
 
 
 Bounds Typer::Visitor::TypeNumberToInt32(Node* node) {
-  Bounds arg = OperandType(node, 0);
-  Type* s32 = Type::Signed32(zone());
-  Type* lower = arg.lower->Is(s32) ? arg.lower : s32;
-  Type* upper = arg.upper->Is(s32) ? arg.upper : s32;
-  return Bounds(lower, upper);
+  return TypeUnaryOp(node, NumberToInt32);
 }
 
 
 Bounds Typer::Visitor::TypeNumberToUint32(Node* node) {
-  Bounds arg = OperandType(node, 0);
-  Type* u32 = Type::Unsigned32(zone());
-  Type* lower = arg.lower->Is(u32) ? arg.lower : u32;
-  Type* upper = arg.upper->Is(u32) ? arg.upper : u32;
-  return Bounds(lower, upper);
+  return TypeUnaryOp(node, NumberToUint32);
 }
 
 
 Bounds Typer::Visitor::TypeReferenceEqual(Node* node) {
-  return Bounds(Type::Boolean(zone()));
+  return Bounds(Type::None(zone()), Type::Boolean(zone()));
 }
 
 
 Bounds Typer::Visitor::TypeStringEqual(Node* node) {
-  return Bounds(Type::Boolean(zone()));
+  return Bounds(Type::None(zone()), Type::Boolean(zone()));
 }
 
 
 Bounds Typer::Visitor::TypeStringLessThan(Node* node) {
-  return Bounds(Type::Boolean(zone()));
+  return Bounds(Type::None(zone()), Type::Boolean(zone()));
 }
 
 
 Bounds Typer::Visitor::TypeStringLessThanOrEqual(Node* node) {
-  return Bounds(Type::Boolean(zone()));
+  return Bounds(Type::None(zone()), Type::Boolean(zone()));
 }
 
 
 Bounds Typer::Visitor::TypeStringAdd(Node* node) {
-  return Bounds(Type::String(zone()));
+  return Bounds(Type::None(zone()), Type::String(zone()));
+}
+
+
+static Type* ChangeRepresentation(Type* type, Type* rep, Zone* zone) {
+  // TODO(neis): Enable when expressible.
+  /*
+  return Type::Union(
+      Type::Intersect(type, Type::Semantic(), zone),
+      Type::Intersect(rep, Type::Representation(), zone), zone);
+  */
+  return type;
 }
 
 
 Bounds Typer::Visitor::TypeChangeTaggedToInt32(Node* node) {
-  // TODO(titzer): type is type of input, representation is Word32.
-  return Bounds(Type::Integral32());
+  Bounds arg = Operand(node, 0);
+  // TODO(neis): DCHECK(arg.upper->Is(Type::Signed32()));
+  return Bounds(
+      ChangeRepresentation(arg.lower, Type::UntaggedSigned32(), zone()),
+      ChangeRepresentation(arg.upper, Type::UntaggedSigned32(), zone()));
 }
 
 
 Bounds Typer::Visitor::TypeChangeTaggedToUint32(Node* node) {
-  return Bounds(Type::Integral32());  // TODO(titzer): add appropriate rep
+  Bounds arg = Operand(node, 0);
+  // TODO(neis): DCHECK(arg.upper->Is(Type::Unsigned32()));
+  return Bounds(
+      ChangeRepresentation(arg.lower, Type::UntaggedUnsigned32(), zone()),
+      ChangeRepresentation(arg.upper, Type::UntaggedUnsigned32(), zone()));
 }
 
 
 Bounds Typer::Visitor::TypeChangeTaggedToFloat64(Node* node) {
-  // TODO(titzer): type is type of input, representation is Float64.
-  return Bounds(Type::Number());
+  Bounds arg = Operand(node, 0);
+  // TODO(neis): DCHECK(arg.upper->Is(Type::Number()));
+  return Bounds(
+      ChangeRepresentation(arg.lower, Type::UntaggedFloat64(), zone()),
+      ChangeRepresentation(arg.upper, Type::UntaggedFloat64(), zone()));
 }
 
 
 Bounds Typer::Visitor::TypeChangeInt32ToTagged(Node* node) {
-  // TODO(titzer): type is type of input, representation is Tagged.
-  return Bounds(Type::Integral32());
+  Bounds arg = Operand(node, 0);
+  // TODO(neis): DCHECK(arg.upper->Is(Type::Signed32()));
+  return Bounds(
+      ChangeRepresentation(arg.lower, Type::Tagged(), zone()),
+      ChangeRepresentation(arg.upper, Type::Tagged(), zone()));
 }
 
 
 Bounds Typer::Visitor::TypeChangeUint32ToTagged(Node* node) {
-  // TODO(titzer): type is type of input, representation is Tagged.
-  return Bounds(Type::Unsigned32());
+  Bounds arg = Operand(node, 0);
+  // TODO(neis): DCHECK(arg.upper->Is(Type::Unsigned32()));
+  return Bounds(
+      ChangeRepresentation(arg.lower, Type::Tagged(), zone()),
+      ChangeRepresentation(arg.upper, Type::Tagged(), zone()));
 }
 
 
 Bounds Typer::Visitor::TypeChangeFloat64ToTagged(Node* node) {
-  // TODO(titzer): type is type of input, representation is Tagged.
-  return Bounds(Type::Number());
+  Bounds arg = Operand(node, 0);
+  // TODO(neis): CHECK(arg.upper->Is(Type::Number()));
+  return Bounds(
+      ChangeRepresentation(arg.lower, Type::Tagged(), zone()),
+      ChangeRepresentation(arg.upper, Type::Tagged(), zone()));
 }
 
 
 Bounds Typer::Visitor::TypeChangeBoolToBit(Node* node) {
-  // TODO(titzer): type is type of input, representation is Bit.
-  return Bounds(Type::Boolean());
+  Bounds arg = Operand(node, 0);
+  // TODO(neis): DCHECK(arg.upper->Is(Type::Boolean()));
+  return Bounds(
+      ChangeRepresentation(arg.lower, Type::UntaggedBit(), zone()),
+      ChangeRepresentation(arg.upper, Type::UntaggedBit(), zone()));
 }
 
 
 Bounds Typer::Visitor::TypeChangeBitToBool(Node* node) {
-  // TODO(titzer): type is type of input, representation is Tagged.
-  return Bounds(Type::Boolean());
+  Bounds arg = Operand(node, 0);
+  // TODO(neis): DCHECK(arg.upper->Is(Type::Boolean()));
+  return Bounds(
+      ChangeRepresentation(arg.lower, Type::TaggedPointer(), zone()),
+      ChangeRepresentation(arg.upper, Type::TaggedPointer(), zone()));
 }
 
 
@@ -790,6 +1619,21 @@
 }
 
 
+Bounds Typer::Visitor::TypeLoadBuffer(Node* node) {
+  // TODO(bmeurer): This typing is not yet correct. Since we can still access
+  // out of bounds, the type in the general case has to include Undefined.
+  switch (BufferAccessOf(node->op()).external_array_type()) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+  case kExternal##Type##Array:                          \
+    return Bounds(typer_->cache_->Get(k##Type));
+    TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+  }
+  UNREACHABLE();
+  return Bounds();
+}
+
+
 Bounds Typer::Visitor::TypeLoadElement(Node* node) {
   return Bounds(ElementAccessOf(node->op()).type);
 }
@@ -801,104 +1645,465 @@
 }
 
 
+Bounds Typer::Visitor::TypeStoreBuffer(Node* node) {
+  UNREACHABLE();
+  return Bounds();
+}
+
+
 Bounds Typer::Visitor::TypeStoreElement(Node* node) {
   UNREACHABLE();
   return Bounds();
 }
 
 
+Bounds Typer::Visitor::TypeObjectIsSmi(Node* node) {
+  return Bounds(Type::Boolean());
+}
+
+
+Bounds Typer::Visitor::TypeObjectIsNonNegativeSmi(Node* node) {
+  return Bounds(Type::Boolean());
+}
+
+
 // Machine operators.
 
-// TODO(rossberg): implement
-#define DEFINE_METHOD(x) \
-    Bounds Typer::Visitor::Type##x(Node* node) { return Bounds(Type::None()); }
-MACHINE_OP_LIST(DEFINE_METHOD)
-#undef DEFINE_METHOD
+Bounds Typer::Visitor::TypeLoad(Node* node) {
+  return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeStore(Node* node) {
+  UNREACHABLE();
+  return Bounds();
+}
+
+
+Bounds Typer::Visitor::TypeWord32And(Node* node) {
+  return Bounds(Type::Integral32());
+}
+
+
+Bounds Typer::Visitor::TypeWord32Or(Node* node) {
+  return Bounds(Type::Integral32());
+}
+
+
+Bounds Typer::Visitor::TypeWord32Xor(Node* node) {
+  return Bounds(Type::Integral32());
+}
+
+
+Bounds Typer::Visitor::TypeWord32Shl(Node* node) {
+  return Bounds(Type::Integral32());
+}
+
+
+Bounds Typer::Visitor::TypeWord32Shr(Node* node) {
+  return Bounds(Type::Integral32());
+}
+
+
+Bounds Typer::Visitor::TypeWord32Sar(Node* node) {
+  return Bounds(Type::Integral32());
+}
+
+
+Bounds Typer::Visitor::TypeWord32Ror(Node* node) {
+  return Bounds(Type::Integral32());
+}
+
+
+Bounds Typer::Visitor::TypeWord32Equal(Node* node) {
+  return Bounds(Type::Boolean());
+}
+
+
+Bounds Typer::Visitor::TypeWord64And(Node* node) {
+  return Bounds(Type::Internal());
+}
+
+
+Bounds Typer::Visitor::TypeWord64Or(Node* node) {
+  return Bounds(Type::Internal());
+}
+
+
+Bounds Typer::Visitor::TypeWord64Xor(Node* node) {
+  return Bounds(Type::Internal());
+}
+
+
+Bounds Typer::Visitor::TypeWord64Shl(Node* node) {
+  return Bounds(Type::Internal());
+}
+
+
+Bounds Typer::Visitor::TypeWord64Shr(Node* node) {
+  return Bounds(Type::Internal());
+}
+
+
+Bounds Typer::Visitor::TypeWord64Sar(Node* node) {
+  return Bounds(Type::Internal());
+}
+
+
+Bounds Typer::Visitor::TypeWord64Ror(Node* node) {
+  return Bounds(Type::Internal());
+}
+
+
+Bounds Typer::Visitor::TypeWord64Equal(Node* node) {
+  return Bounds(Type::Boolean());
+}
+
+
+Bounds Typer::Visitor::TypeInt32Add(Node* node) {
+  return Bounds(Type::Integral32());
+}
+
+
+Bounds Typer::Visitor::TypeInt32AddWithOverflow(Node* node) {
+  return Bounds(Type::Internal());
+}
+
+
+Bounds Typer::Visitor::TypeInt32Sub(Node* node) {
+  return Bounds(Type::Integral32());
+}
+
+
+Bounds Typer::Visitor::TypeInt32SubWithOverflow(Node* node) {
+  return Bounds(Type::Internal());
+}
+
+
+Bounds Typer::Visitor::TypeInt32Mul(Node* node) {
+  return Bounds(Type::Integral32());
+}
+
+
+Bounds Typer::Visitor::TypeInt32MulHigh(Node* node) {
+  return Bounds(Type::Signed32());
+}
+
+
+Bounds Typer::Visitor::TypeInt32Div(Node* node) {
+  return Bounds(Type::Integral32());
+}
+
+
+Bounds Typer::Visitor::TypeInt32Mod(Node* node) {
+  return Bounds(Type::Integral32());
+}
+
+
+Bounds Typer::Visitor::TypeInt32LessThan(Node* node) {
+  return Bounds(Type::Boolean());
+}
+
+
+Bounds Typer::Visitor::TypeInt32LessThanOrEqual(Node* node) {
+  return Bounds(Type::Boolean());
+}
+
+
+Bounds Typer::Visitor::TypeUint32Div(Node* node) {
+  return Bounds(Type::Unsigned32());
+}
+
+
+Bounds Typer::Visitor::TypeUint32LessThan(Node* node) {
+  return Bounds(Type::Boolean());
+}
+
+
+Bounds Typer::Visitor::TypeUint32LessThanOrEqual(Node* node) {
+  return Bounds(Type::Boolean());
+}
+
+
+Bounds Typer::Visitor::TypeUint32Mod(Node* node) {
+  return Bounds(Type::Unsigned32());
+}
+
+
+Bounds Typer::Visitor::TypeUint32MulHigh(Node* node) {
+  return Bounds(Type::Unsigned32());
+}
+
+
+Bounds Typer::Visitor::TypeInt64Add(Node* node) {
+  return Bounds(Type::Internal());
+}
+
+
+Bounds Typer::Visitor::TypeInt64Sub(Node* node) {
+  return Bounds(Type::Internal());
+}
+
+
+Bounds Typer::Visitor::TypeInt64Mul(Node* node) {
+  return Bounds(Type::Internal());
+}
+
+
+Bounds Typer::Visitor::TypeInt64Div(Node* node) {
+  return Bounds(Type::Internal());
+}
+
+
+Bounds Typer::Visitor::TypeInt64Mod(Node* node) {
+  return Bounds(Type::Internal());
+}
+
+
+Bounds Typer::Visitor::TypeInt64LessThan(Node* node) {
+  return Bounds(Type::Boolean());
+}
+
+
+Bounds Typer::Visitor::TypeInt64LessThanOrEqual(Node* node) {
+  return Bounds(Type::Boolean());
+}
+
+
+Bounds Typer::Visitor::TypeUint64Div(Node* node) {
+  return Bounds(Type::Internal());
+}
+
+
+Bounds Typer::Visitor::TypeUint64LessThan(Node* node) {
+  return Bounds(Type::Boolean());
+}
+
+
+Bounds Typer::Visitor::TypeUint64Mod(Node* node) {
+  return Bounds(Type::Internal());
+}
+
+
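+// Representation-change operators preserve the semantic part of the type and
+// switch the representation part, expressed as an intersection of the two.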
+Bounds Typer::Visitor::TypeChangeFloat32ToFloat64(Node* node) {
+  return Bounds(Type::Intersect(
+      Type::Number(), Type::UntaggedFloat64(), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeChangeFloat64ToInt32(Node* node) {
+  return Bounds(Type::Intersect(
+      Type::Signed32(), Type::UntaggedSigned32(), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeChangeFloat64ToUint32(Node* node) {
+  return Bounds(Type::Intersect(
+      Type::Unsigned32(), Type::UntaggedUnsigned32(), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeChangeInt32ToFloat64(Node* node) {
+  return Bounds(Type::Intersect(
+      Type::Signed32(), Type::UntaggedFloat64(), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeChangeInt32ToInt64(Node* node) {
+  return Bounds(Type::Internal());
+}
+
+
+Bounds Typer::Visitor::TypeChangeUint32ToFloat64(Node* node) {
+  return Bounds(Type::Intersect(
+      Type::Unsigned32(), Type::UntaggedFloat64(), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeChangeUint32ToUint64(Node* node) {
+  return Bounds(Type::Internal());
+}
+
+
+Bounds Typer::Visitor::TypeTruncateFloat64ToFloat32(Node* node) {
+  return Bounds(Type::Intersect(
+      Type::Number(), Type::UntaggedFloat32(), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeTruncateFloat64ToInt32(Node* node) {
+  return Bounds(Type::Intersect(
+      Type::Signed32(), Type::UntaggedSigned32(), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeTruncateInt64ToInt32(Node* node) {
+  return Bounds(Type::Intersect(
+      Type::Signed32(), Type::UntaggedSigned32(), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeFloat64Add(Node* node) {
+  return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeFloat64Sub(Node* node) {
+  return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeFloat64Mul(Node* node) {
+  return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeFloat64Div(Node* node) {
+  return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeFloat64Mod(Node* node) {
+  return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeFloat64Sqrt(Node* node) {
+  return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeFloat64Equal(Node* node) {
+  return Bounds(Type::Boolean());
+}
+
+
+Bounds Typer::Visitor::TypeFloat64LessThan(Node* node) {
+  return Bounds(Type::Boolean());
+}
+
+
+Bounds Typer::Visitor::TypeFloat64LessThanOrEqual(Node* node) {
+  return Bounds(Type::Boolean());
+}
+
+
+Bounds Typer::Visitor::TypeFloat64Floor(Node* node) {
+  // TODO(sigurds): We could have a tighter bound here.
+  return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeFloat64Ceil(Node* node) {
+  // TODO(sigurds): We could have a tighter bound here.
+  return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeFloat64RoundTruncate(Node* node) {
+  // TODO(sigurds): We could have a tighter bound here.
+  return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeFloat64RoundTiesAway(Node* node) {
+  // TODO(sigurds): We could have a tighter bound here.
+  return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeLoadStackPointer(Node* node) {
+  return Bounds(Type::Internal());
+}
+
+
+Bounds Typer::Visitor::TypeCheckedLoad(Node* node) {
+  return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeCheckedStore(Node* node) {
+  UNREACHABLE();
+  return Bounds();
+}
 
 
 // Heap constants.
 
+
 Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
-  if (value->IsJSFunction() && JSFunction::cast(*value)->IsBuiltin() &&
-      !context().is_null()) {
-    Handle<Context> native =
-        handle(context().ToHandleChecked()->native_context(), isolate());
-    if (*value == native->math_abs_fun()) {
-      return typer_->number_fun1_;  // TODO(rossberg): can't express overloading
-    } else if (*value == native->math_acos_fun()) {
-      return typer_->number_fun1_;
-    } else if (*value == native->math_asin_fun()) {
-      return typer_->number_fun1_;
-    } else if (*value == native->math_atan_fun()) {
-      return typer_->number_fun1_;
-    } else if (*value == native->math_atan2_fun()) {
-      return typer_->number_fun2_;
-    } else if (*value == native->math_ceil_fun()) {
-      return typer_->number_fun1_;
-    } else if (*value == native->math_cos_fun()) {
-      return typer_->number_fun1_;
-    } else if (*value == native->math_exp_fun()) {
-      return typer_->number_fun1_;
-    } else if (*value == native->math_floor_fun()) {
-      return typer_->number_fun1_;
-    } else if (*value == native->math_imul_fun()) {
-      return typer_->imul_fun_;
-    } else if (*value == native->math_log_fun()) {
-      return typer_->number_fun1_;
-    } else if (*value == native->math_pow_fun()) {
-      return typer_->number_fun2_;
-    } else if (*value == native->math_random_fun()) {
-      return typer_->number_fun0_;
-    } else if (*value == native->math_round_fun()) {
-      return typer_->number_fun1_;
-    } else if (*value == native->math_sin_fun()) {
-      return typer_->number_fun1_;
-    } else if (*value == native->math_sqrt_fun()) {
-      return typer_->number_fun1_;
-    } else if (*value == native->math_tan_fun()) {
-      return typer_->number_fun1_;
-    } else if (*value == native->array_buffer_fun()) {
-      return typer_->array_buffer_fun_;
-    } else if (*value == native->int8_array_fun()) {
-      return typer_->int8_array_fun_;
-    } else if (*value == native->int16_array_fun()) {
-      return typer_->int16_array_fun_;
-    } else if (*value == native->int32_array_fun()) {
-      return typer_->int32_array_fun_;
-    } else if (*value == native->uint8_array_fun()) {
-      return typer_->uint8_array_fun_;
-    } else if (*value == native->uint16_array_fun()) {
-      return typer_->uint16_array_fun_;
-    } else if (*value == native->uint32_array_fun()) {
-      return typer_->uint32_array_fun_;
-    } else if (*value == native->float32_array_fun()) {
-      return typer_->float32_array_fun_;
-    } else if (*value == native->float64_array_fun()) {
-      return typer_->float64_array_fun_;
+  if (value->IsJSFunction()) {
+    if (JSFunction::cast(*value)->shared()->HasBuiltinFunctionId()) {
+      switch (JSFunction::cast(*value)->shared()->builtin_function_id()) {
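+        // Builtins with a known function id get a precise function type from
+        // the shared type cache (e.g. Math.imul maps to kImulFunc, a
+        // (Number, Number) -> Signed32 function type).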
+        case kMathRandom:
+          return typer_->random_fun_;
+        case kMathFloor:
+          return typer_->weakint_fun1_;
+        case kMathRound:
+          return typer_->weakint_fun1_;
+        case kMathCeil:
+          return typer_->weakint_fun1_;
+        // Unary math functions.
+        case kMathAbs:  // TODO(rossberg): can't express overloading
+        case kMathLog:
+        case kMathExp:
+        case kMathSqrt:
+        case kMathCos:
+        case kMathSin:
+        case kMathTan:
+        case kMathAcos:
+        case kMathAsin:
+        case kMathAtan:
+        case kMathFround:
+          return typer_->cache_->Get(kNumberFunc1);
+        // Binary math functions.
+        case kMathAtan2:
+        case kMathPow:
+        case kMathMax:
+        case kMathMin:
+          return typer_->cache_->Get(kNumberFunc2);
+        case kMathImul:
+          return typer_->cache_->Get(kImulFunc);
+        case kMathClz32:
+          return typer_->cache_->Get(kClz32Func);
+        default:
+          break;
+      }
+    } else if (JSFunction::cast(*value)->IsBuiltin() && !context().is_null()) {
+      Handle<Context> native =
+          handle(context().ToHandleChecked()->native_context(), isolate());
+      if (*value == native->array_buffer_fun()) {
+        return typer_->cache_->Get(kArrayBufferFunc);
+      } else if (*value == native->int8_array_fun()) {
+        return typer_->cache_->Get(kInt8ArrayFunc);
+      } else if (*value == native->int16_array_fun()) {
+        return typer_->cache_->Get(kInt16ArrayFunc);
+      } else if (*value == native->int32_array_fun()) {
+        return typer_->cache_->Get(kInt32ArrayFunc);
+      } else if (*value == native->uint8_array_fun()) {
+        return typer_->cache_->Get(kUint8ArrayFunc);
+      } else if (*value == native->uint16_array_fun()) {
+        return typer_->cache_->Get(kUint16ArrayFunc);
+      } else if (*value == native->uint32_array_fun()) {
+        return typer_->cache_->Get(kUint32ArrayFunc);
+      } else if (*value == native->float32_array_fun()) {
+        return typer_->cache_->Get(kFloat32ArrayFunc);
+      } else if (*value == native->float64_array_fun()) {
+        return typer_->cache_->Get(kFloat64ArrayFunc);
+      }
+    }
+  } else if (value->IsJSTypedArray()) {
+    switch (JSTypedArray::cast(*value)->type()) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+  case kExternal##Type##Array:                          \
+    return typer_->cache_->Get(k##Type##Array);
+      TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
     }
   }
   return Type::Constant(value, zone());
 }
 
-
-namespace {
-
-class TyperDecorator : public GraphDecorator {
- public:
-  explicit TyperDecorator(Typer* typer) : typer_(typer) {}
-  virtual void Decorate(Node* node) { typer_->Init(node); }
-
- private:
-  Typer* typer_;
-};
-
-}
-
-
-void Typer::DecorateGraph(Graph* graph) {
-  graph->AddDecorator(new (zone()) TyperDecorator(this));
-}
-
-}
-}
-}  // namespace v8::internal::compiler
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/typer.h b/src/compiler/typer.h
index 2957e4b..b65a9a5 100644
--- a/src/compiler/typer.h
+++ b/src/compiler/typer.h
@@ -15,43 +15,62 @@
 namespace internal {
 namespace compiler {
 
+// Forward declarations.
+class LazyTypeCache;
+
+
 class Typer {
  public:
-  explicit Typer(Zone* zone);
+  explicit Typer(Graph* graph, MaybeHandle<Context> context);
+  ~Typer();
 
-  void Init(Node* node);
-  void Run(Graph* graph, MaybeHandle<Context> context);
-  void Narrow(Graph* graph, Node* node, MaybeHandle<Context> context);
-  void Widen(Graph* graph, Node* node, MaybeHandle<Context> context);
+  void Run();
 
-  void DecorateGraph(Graph* graph);
-
-  Zone* zone() { return zone_; }
-  Isolate* isolate() { return zone_->isolate(); }
+  Graph* graph() { return graph_; }
+  MaybeHandle<Context> context() { return context_; }
+  Zone* zone() { return graph_->zone(); }
+  Isolate* isolate() { return zone()->isolate(); }
 
  private:
   class Visitor;
-  class RunVisitor;
-  class NarrowVisitor;
-  class WidenVisitor;
+  class Decorator;
+
+  Graph* graph_;
+  MaybeHandle<Context> context_;
+  Decorator* decorator_;
 
   Zone* zone_;
+  Type* boolean_or_number;
+  Type* undefined_or_null;
+  Type* undefined_or_number;
+  Type* negative_signed32;
+  Type* non_negative_signed32;
+  Type* singleton_false;
+  Type* singleton_true;
+  Type* singleton_zero;
+  Type* singleton_one;
+  Type* zero_or_one;
+  Type* zeroish;
+  Type* signed32ish;
+  Type* unsigned32ish;
+  Type* falsish;
+  Type* truish;
+  Type* integer;
+  Type* weakint;
   Type* number_fun0_;
   Type* number_fun1_;
   Type* number_fun2_;
-  Type* imul_fun_;
-  Type* array_buffer_fun_;
-  Type* int8_array_fun_;
-  Type* int16_array_fun_;
-  Type* int32_array_fun_;
-  Type* uint8_array_fun_;
-  Type* uint16_array_fun_;
-  Type* uint32_array_fun_;
-  Type* float32_array_fun_;
-  Type* float64_array_fun_;
+  Type* weakint_fun1_;
+  Type* random_fun_;
+  LazyTypeCache* cache_;
+
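+  // Pre-computed limit constants for the typer's range-weakening (widening)
+  // step, which jumps growing range bounds of loop phis to the next limit so
+  // that the fixpoint iteration terminates.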
+  ZoneVector<Handle<Object> > weaken_min_limits_;
+  ZoneVector<Handle<Object> > weaken_max_limits_;
+  DISALLOW_COPY_AND_ASSIGN(Typer);
 };
-}
-}
-}  // namespace v8::internal::compiler
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_COMPILER_TYPER_H_
diff --git a/src/compiler/value-numbering-reducer-unittest.cc b/src/compiler/value-numbering-reducer-unittest.cc
deleted file mode 100644
index 8db6458..0000000
--- a/src/compiler/value-numbering-reducer-unittest.cc
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <limits>
-
-#include "src/compiler/graph.h"
-#include "src/compiler/value-numbering-reducer.h"
-#include "src/test/test-utils.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-namespace {
-
-const SimpleOperator kOp0(0, Operator::kNoProperties, 0, 1, "op0");
-const SimpleOperator kOp1(1, Operator::kNoProperties, 1, 1, "op1");
-
-}  // namespace
-
-
-class ValueNumberingReducerTest : public TestWithZone {
- public:
-  ValueNumberingReducerTest() : graph_(zone()), reducer_(zone()) {}
-
- protected:
-  Reduction Reduce(Node* node) { return reducer_.Reduce(node); }
-
-  Graph* graph() { return &graph_; }
-
- private:
-  Graph graph_;
-  ValueNumberingReducer reducer_;
-};
-
-
-TEST_F(ValueNumberingReducerTest, AllInputsAreChecked) {
-  Node* na = graph()->NewNode(&kOp0);
-  Node* nb = graph()->NewNode(&kOp0);
-  Node* n1 = graph()->NewNode(&kOp0, na);
-  Node* n2 = graph()->NewNode(&kOp0, nb);
-  EXPECT_FALSE(Reduce(n1).Changed());
-  EXPECT_FALSE(Reduce(n2).Changed());
-}
-
-
-TEST_F(ValueNumberingReducerTest, DeadNodesAreNeverReturned) {
-  Node* n0 = graph()->NewNode(&kOp0);
-  Node* n1 = graph()->NewNode(&kOp1, n0);
-  EXPECT_FALSE(Reduce(n1).Changed());
-  n1->Kill();
-  EXPECT_FALSE(Reduce(graph()->NewNode(&kOp1, n0)).Changed());
-}
-
-
-TEST_F(ValueNumberingReducerTest, OperatorEqualityNotIdentity) {
-  static const size_t kMaxInputCount = 16;
-  Node* inputs[kMaxInputCount];
-  for (size_t i = 0; i < arraysize(inputs); ++i) {
-    Operator::Opcode opcode = static_cast<Operator::Opcode>(
-        std::numeric_limits<Operator::Opcode>::max() - i);
-    inputs[i] = graph()->NewNode(new (zone()) SimpleOperator(
-        opcode, Operator::kNoProperties, 0, 1, "Operator"));
-  }
-  TRACED_FORRANGE(size_t, input_count, 0, arraysize(inputs)) {
-    const SimpleOperator op1(static_cast<Operator::Opcode>(input_count),
-                             Operator::kNoProperties,
-                             static_cast<int>(input_count), 1, "op");
-    Node* n1 = graph()->NewNode(&op1, static_cast<int>(input_count), inputs);
-    Reduction r1 = Reduce(n1);
-    EXPECT_FALSE(r1.Changed());
-
-    const SimpleOperator op2(static_cast<Operator::Opcode>(input_count),
-                             Operator::kNoProperties,
-                             static_cast<int>(input_count), 1, "op");
-    Node* n2 = graph()->NewNode(&op2, static_cast<int>(input_count), inputs);
-    Reduction r2 = Reduce(n2);
-    EXPECT_TRUE(r2.Changed());
-    EXPECT_EQ(n1, r2.replacement());
-  }
-}
-
-
-TEST_F(ValueNumberingReducerTest, SubsequentReductionsYieldTheSameNode) {
-  static const size_t kMaxInputCount = 16;
-  Node* inputs[kMaxInputCount];
-  for (size_t i = 0; i < arraysize(inputs); ++i) {
-    Operator::Opcode opcode = static_cast<Operator::Opcode>(
-        std::numeric_limits<Operator::Opcode>::max() - i);
-    inputs[i] = graph()->NewNode(new (zone()) SimpleOperator(
-        opcode, Operator::kNoProperties, 0, 1, "Operator"));
-  }
-  TRACED_FORRANGE(size_t, input_count, 0, arraysize(inputs)) {
-    const SimpleOperator op1(1, Operator::kNoProperties,
-                             static_cast<int>(input_count), 1, "op1");
-    Node* n = graph()->NewNode(&op1, static_cast<int>(input_count), inputs);
-    Reduction r = Reduce(n);
-    EXPECT_FALSE(r.Changed());
-
-    r = Reduce(graph()->NewNode(&op1, static_cast<int>(input_count), inputs));
-    ASSERT_TRUE(r.Changed());
-    EXPECT_EQ(n, r.replacement());
-
-    r = Reduce(graph()->NewNode(&op1, static_cast<int>(input_count), inputs));
-    ASSERT_TRUE(r.Changed());
-    EXPECT_EQ(n, r.replacement());
-  }
-}
-
-
-TEST_F(ValueNumberingReducerTest, WontReplaceNodeWithItself) {
-  Node* n = graph()->NewNode(&kOp0);
-  EXPECT_FALSE(Reduce(n).Changed());
-  EXPECT_FALSE(Reduce(n).Changed());
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/value-numbering-reducer.cc b/src/compiler/value-numbering-reducer.cc
index 595a4f3..734b3e8 100644
--- a/src/compiler/value-numbering-reducer.cc
+++ b/src/compiler/value-numbering-reducer.cc
@@ -4,6 +4,9 @@
 
 #include "src/compiler/value-numbering-reducer.h"
 
+#include <cstring>
+
+#include "src/base/functional.h"
 #include "src/compiler/node.h"
 
 namespace v8 {
@@ -12,7 +15,13 @@
 
 namespace {
 
-size_t HashCode(Node* node) { return node->op()->HashCode(); }
+size_t HashCode(Node* node) {
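+  // Combine the operator's hash with the input count and the id of each
+  // input, so that structurally identical nodes hash to the same slot.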
+  size_t h = base::hash_combine(node->op()->HashCode(), node->InputCount());
+  for (int j = 0; j < node->InputCount(); ++j) {
+    h = base::hash_combine(h, node->InputAt(j)->id());
+  }
+  return h;
+}
 
 
 bool Equals(Node* a, Node* b) {
@@ -33,40 +42,121 @@
 }  // namespace
 
 
-class ValueNumberingReducer::Entry FINAL : public ZoneObject {
- public:
-  Entry(Node* node, Entry* next) : node_(node), next_(next) {}
-
-  Node* node() const { return node_; }
-  Entry* next() const { return next_; }
-
- private:
-  Node* node_;
-  Entry* next_;
-};
-
-
-ValueNumberingReducer::ValueNumberingReducer(Zone* zone) : zone_(zone) {
-  for (size_t i = 0; i < arraysize(buckets_); ++i) {
-    buckets_[i] = NULL;
-  }
-}
+ValueNumberingReducer::ValueNumberingReducer(Zone* zone)
+    : entries_(nullptr), capacity_(0), size_(0), zone_(zone) {}
 
 
 ValueNumberingReducer::~ValueNumberingReducer() {}
 
 
 Reduction ValueNumberingReducer::Reduce(Node* node) {
-  Entry** head = &buckets_[HashCode(node) % arraysize(buckets_)];
-  for (Entry* entry = *head; entry; entry = entry->next()) {
-    if (entry->node()->IsDead()) continue;
-    if (entry->node() == node) return NoChange();
-    if (Equals(node, entry->node())) {
-      return Replace(entry->node());
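+  // Only nodes whose operator is marked kEliminatable participate in value
+  // numbering; all other nodes keep their identity.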
+  if (!node->op()->HasProperty(Operator::kEliminatable)) return NoChange();
+
+  const size_t hash = HashCode(node);
+  if (!entries_) {
+    DCHECK(size_ == 0);
+    DCHECK(capacity_ == 0);
+    // Allocate the initial entries and insert the first entry.
+    capacity_ = kInitialCapacity;
+    entries_ = zone()->NewArray<Node*>(kInitialCapacity);
+    memset(entries_, 0, sizeof(*entries_) * kInitialCapacity);
+    entries_[hash & (kInitialCapacity - 1)] = node;
+    size_ = 1;
+    return NoChange();
+  }
+
+  DCHECK(size_ < capacity_);
+  DCHECK(size_ * kCapacityToSizeRatio < capacity_);
+
+  const size_t mask = capacity_ - 1;
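+  // capacity_ is always a power of two, so {hash & mask} is equivalent to
+  // {hash % capacity_} and {(i + 1) & mask} wraps the linear probe around
+  // the table.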
+  size_t dead = capacity_;
+
+  for (size_t i = hash & mask;; i = (i + 1) & mask) {
+    Node* entry = entries_[i];
+    if (!entry) {
+      if (dead != capacity_) {
+        // Reuse dead entry that we discovered on the way.
+        entries_[dead] = node;
+      } else {
+        // Have to insert a new entry.
+        entries_[i] = node;
+        size_++;
+
+        // Resize to keep load factor below 1/kCapacityToSizeRatio.
+        if (size_ * kCapacityToSizeRatio >= capacity_) Grow();
+      }
+      DCHECK(size_ * kCapacityToSizeRatio < capacity_);
+      return NoChange();
+    }
+
+    if (entry == node) {
+      // We need to check for a certain class of collisions here. Imagine the
+      // following scenario:
+      //
+      //  1. We insert node1 with op1 and certain inputs at index i.
+      //  2. We insert node2 with op2 and certain inputs at index i+1.
+      //  3. Some other reducer changes node1 to op2 and the inputs from node2.
+      //
+      // Now we are called again to reduce node1, and we would return NoChange
+      // in this case because we find node1 first, but what we should actually
+      // do is return Replace(node2) instead.
+      for (size_t j = (i + 1) & mask;; j = (j + 1) & mask) {
+        Node* entry = entries_[j];
+        if (!entry) {
+          // No collision, {node} is fine.
+          return NoChange();
+        }
+        if (entry->IsDead()) {
+          continue;
+        }
+        if (Equals(entry, node)) {
+          // Overwrite the colliding entry with the actual entry.
+          entries_[i] = entry;
+          return Replace(entry);
+        }
+      }
+    }
+
+    // Skip dead entries, but remember the most recent one so we can reuse it.
+    if (entry->IsDead()) {
+      dead = i;
+      continue;
+    }
+    if (Equals(entry, node)) {
+      return Replace(entry);
     }
   }
-  *head = new (zone()) Entry(node, *head);
-  return NoChange();
+}
+
+
+void ValueNumberingReducer::Grow() {
+  // Allocate a new block of entries kCapacityToSizeRatio times the previous
+  // capacity.
+  Node** const old_entries = entries_;
+  size_t const old_capacity = capacity_;
+  capacity_ *= kCapacityToSizeRatio;
+  entries_ = zone()->NewArray<Node*>(static_cast<int>(capacity_));
+  memset(entries_, 0, sizeof(*entries_) * capacity_);
+  size_ = 0;
+  size_t const mask = capacity_ - 1;
+
+  // Insert the old entries into the new block (skipping dead nodes).
+  for (size_t i = 0; i < old_capacity; ++i) {
+    Node* const old_entry = old_entries[i];
+    if (!old_entry || old_entry->IsDead()) continue;
+    for (size_t j = HashCode(old_entry) & mask;; j = (j + 1) & mask) {
+      Node* const entry = entries_[j];
+      if (entry == old_entry) {
+        // Skip duplicate of the old entry.
+        break;
+      }
+      if (!entry) {
+        entries_[j] = old_entry;
+        size_++;
+        break;
+      }
+    }
+  }
 }
 
 }  // namespace compiler
diff --git a/src/compiler/value-numbering-reducer.h b/src/compiler/value-numbering-reducer.h
index 0d67e5d..546226c 100644
--- a/src/compiler/value-numbering-reducer.h
+++ b/src/compiler/value-numbering-reducer.h
@@ -16,16 +16,17 @@
   explicit ValueNumberingReducer(Zone* zone);
   ~ValueNumberingReducer();
 
-  virtual Reduction Reduce(Node* node) OVERRIDE;
+  Reduction Reduce(Node* node) OVERRIDE;
 
  private:
+  enum { kInitialCapacity = 256u, kCapacityToSizeRatio = 2u };
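+  // Growing as soon as size_ * kCapacityToSizeRatio reaches capacity_ keeps
+  // the table's load factor strictly below 1 / kCapacityToSizeRatio.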
+
+  void Grow();
   Zone* zone() const { return zone_; }
 
-  // TODO(turbofan): We currently use separate chaining with linked lists here,
-  // we may want to replace that with a more sophisticated data structure at
-  // some point in the future.
-  class Entry;
-  Entry* buckets_[117u];
+  Node** entries_;
+  size_t capacity_;
+  size_t size_;
   Zone* zone_;
 };
 
diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc
index 23cec7a..693b414 100644
--- a/src/compiler/verifier.cc
+++ b/src/compiler/verifier.cc
@@ -6,10 +6,11 @@
 
 #include <deque>
 #include <queue>
+#include <sstream>
+#include <string>
 
+#include "src/bit-vector.h"
 #include "src/compiler/generic-algorithm.h"
-#include "src/compiler/generic-node-inl.h"
-#include "src/compiler/generic-node.h"
 #include "src/compiler/graph-inl.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/node.h"
@@ -18,7 +19,8 @@
 #include "src/compiler/opcodes.h"
 #include "src/compiler/operator.h"
 #include "src/compiler/schedule.h"
-#include "src/data-flow.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/ostreams.h"
 
 namespace v8 {
 namespace internal {
@@ -45,28 +47,84 @@
 
 class Verifier::Visitor : public NullNodeVisitor {
  public:
-  explicit Visitor(Zone* zone)
-      : reached_from_start(NodeSet::key_compare(),
-                           NodeSet::allocator_type(zone)),
-        reached_from_end(NodeSet::key_compare(),
-                         NodeSet::allocator_type(zone)) {}
+  Visitor(Zone* z, Typing typed) : zone(z), typing(typed) {}
 
   // Fulfills the PreNodeCallback interface.
-  GenericGraphVisit::Control Pre(Node* node);
+  void Pre(Node* node);
 
-  bool from_start;
-  NodeSet reached_from_start;
-  NodeSet reached_from_end;
+  Zone* zone;
+  Typing typing;
+
+ private:
+  // TODO(rossberg): Get rid of these once we got rid of NodeProperties.
+  Bounds bounds(Node* node) { return NodeProperties::GetBounds(node); }
+  Node* ValueInput(Node* node, int i = 0) {
+    return NodeProperties::GetValueInput(node, i);
+  }
+  FieldAccess Field(Node* node) {
+    DCHECK(node->opcode() == IrOpcode::kLoadField ||
+           node->opcode() == IrOpcode::kStoreField);
+    return OpParameter<FieldAccess>(node);
+  }
+  ElementAccess Element(Node* node) {
+    DCHECK(node->opcode() == IrOpcode::kLoadElement ||
+           node->opcode() == IrOpcode::kStoreElement);
+    return OpParameter<ElementAccess>(node);
+  }
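+  // The Check* helpers below abort with a diagnostic message when a node's
+  // type bounds violate the per-opcode expectations; CheckNotTyped always
+  // applies, while the other helpers only fire in TYPED mode.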
+  void CheckNotTyped(Node* node) {
+    if (NodeProperties::IsTyped(node)) {
+      std::ostringstream str;
+      str << "TypeError: node #" << node->opcode() << ":"
+          << node->op()->mnemonic() << " should never have a type";
+      V8_Fatal(__FILE__, __LINE__, str.str().c_str());
+    }
+  }
+  void CheckUpperIs(Node* node, Type* type) {
+    if (typing == TYPED && !bounds(node).upper->Is(type)) {
+      std::ostringstream str;
+      str << "TypeError: node #" << node->opcode() << ":"
+          << node->op()->mnemonic() << " upper bound ";
+      bounds(node).upper->PrintTo(str);
+      str << " is not ";
+      type->PrintTo(str);
+      V8_Fatal(__FILE__, __LINE__, str.str().c_str());
+    }
+  }
+  void CheckUpperMaybe(Node* node, Type* type) {
+    if (typing == TYPED && !bounds(node).upper->Maybe(type)) {
+      std::ostringstream str;
+      str << "TypeError: node #" << node->opcode() << ":"
+          << node->op()->mnemonic() << " upper bound ";
+      bounds(node).upper->PrintTo(str);
+      str << " must intersect ";
+      type->PrintTo(str);
+      V8_Fatal(__FILE__, __LINE__, str.str().c_str());
+    }
+  }
+  void CheckValueInputIs(Node* node, int i, Type* type) {
+    Node* input = ValueInput(node, i);
+    if (typing == TYPED && !bounds(input).upper->Is(type)) {
+      std::ostringstream str;
+      str << "TypeError: node #" << node->opcode() << ":"
+          << node->op()->mnemonic() << "(input @" << i << " = "
+          << input->opcode() << ":" << input->op()->mnemonic()
+          << ") upper bound ";
+      bounds(input).upper->PrintTo(str);
+      str << " is not ";
+      type->PrintTo(str);
+      V8_Fatal(__FILE__, __LINE__, str.str().c_str());
+    }
+  }
 };
 
 
-GenericGraphVisit::Control Verifier::Visitor::Pre(Node* node) {
-  int value_count = OperatorProperties::GetValueInputCount(node->op());
+void Verifier::Visitor::Pre(Node* node) {
+  int value_count = node->op()->ValueInputCount();
   int context_count = OperatorProperties::GetContextInputCount(node->op());
   int frame_state_count =
       OperatorProperties::GetFrameStateInputCount(node->op());
-  int effect_count = OperatorProperties::GetEffectInputCount(node->op());
-  int control_count = OperatorProperties::GetControlInputCount(node->op());
+  int effect_count = node->op()->EffectInputCount();
+  int control_count = node->op()->ControlInputCount();
 
   // Verify number of inputs matches up.
   int input_count = value_count + context_count + frame_state_count +
@@ -87,7 +145,7 @@
   // Verify all value inputs actually produce a value.
   for (int i = 0; i < value_count; ++i) {
     Node* value = NodeProperties::GetValueInput(node, i);
-    CHECK(OperatorProperties::HasValueOutput(value->op()));
+    CHECK(value->op()->ValueOutputCount() > 0);
     CHECK(IsDefUseChainLinkPresent(value, node));
     CHECK(IsUseDefChainLinkPresent(value, node));
   }
@@ -95,7 +153,7 @@
   // Verify all context inputs are value nodes.
   for (int i = 0; i < context_count; ++i) {
     Node* context = NodeProperties::GetContextInput(node);
-    CHECK(OperatorProperties::HasValueOutput(context->op()));
+    CHECK(context->op()->ValueOutputCount() > 0);
     CHECK(IsDefUseChainLinkPresent(context, node));
     CHECK(IsUseDefChainLinkPresent(context, node));
   }
@@ -103,7 +161,7 @@
   // Verify all effect inputs actually have an effect.
   for (int i = 0; i < effect_count; ++i) {
     Node* effect = NodeProperties::GetEffectInput(node);
-    CHECK(OperatorProperties::HasEffectOutput(effect->op()));
+    CHECK(effect->op()->EffectOutputCount() > 0);
     CHECK(IsDefUseChainLinkPresent(effect, node));
     CHECK(IsUseDefChainLinkPresent(effect, node));
   }
@@ -111,18 +169,18 @@
   // Verify all control inputs are control nodes.
   for (int i = 0; i < control_count; ++i) {
     Node* control = NodeProperties::GetControlInput(node, i);
-    CHECK(OperatorProperties::HasControlOutput(control->op()));
+    CHECK(control->op()->ControlOutputCount() > 0);
     CHECK(IsDefUseChainLinkPresent(control, node));
     CHECK(IsUseDefChainLinkPresent(control, node));
   }
 
   // Verify all successors are projections if multiple value outputs exist.
-  if (OperatorProperties::GetValueOutputCount(node->op()) > 1) {
-    Node::Uses uses = node->uses();
-    for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) {
-      CHECK(!NodeProperties::IsValueEdge(it.edge()) ||
-            (*it)->opcode() == IrOpcode::kProjection ||
-            (*it)->opcode() == IrOpcode::kParameter);
+  if (node->op()->ValueOutputCount() > 1) {
+    for (Edge edge : node->use_edges()) {
+      Node* use = edge.from();
+      CHECK(!NodeProperties::IsValueEdge(edge) ||
+            use->opcode() == IrOpcode::kProjection ||
+            use->opcode() == IrOpcode::kParameter);
     }
   }
 
@@ -130,12 +188,17 @@
     case IrOpcode::kStart:
       // Start has no inputs.
       CHECK_EQ(0, input_count);
+      // Type is a tuple.
+      // TODO(rossberg): Multiple outputs are currently typed as Internal.
+      CheckUpperIs(node, Type::Internal());
       break;
     case IrOpcode::kEnd:
       // End has no outputs.
-      CHECK(!OperatorProperties::HasValueOutput(node->op()));
-      CHECK(!OperatorProperties::HasEffectOutput(node->op()));
-      CHECK(!OperatorProperties::HasControlOutput(node->op()));
+      CHECK(node->op()->ValueOutputCount() == 0);
+      CHECK(node->op()->EffectOutputCount() == 0);
+      CHECK(node->op()->ControlOutputCount() == 0);
+      // Type is empty.
+      CheckNotTyped(node);
       break;
     case IrOpcode::kDead:
       // Dead is never connected to the graph.
@@ -143,31 +206,50 @@
     case IrOpcode::kBranch: {
       // Branch uses are IfTrue and IfFalse.
       Node::Uses uses = node->uses();
-      bool got_true = false, got_false = false;
+      int count_true = 0, count_false = 0;
       for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) {
-        CHECK(((*it)->opcode() == IrOpcode::kIfTrue && !got_true) ||
-              ((*it)->opcode() == IrOpcode::kIfFalse && !got_false));
-        if ((*it)->opcode() == IrOpcode::kIfTrue) got_true = true;
-        if ((*it)->opcode() == IrOpcode::kIfFalse) got_false = true;
+        CHECK((*it)->opcode() == IrOpcode::kIfTrue ||
+              (*it)->opcode() == IrOpcode::kIfFalse);
+        if ((*it)->opcode() == IrOpcode::kIfTrue) ++count_true;
+        if ((*it)->opcode() == IrOpcode::kIfFalse) ++count_false;
       }
-      // TODO(rossberg): Currently fails for various tests.
-      // CHECK(got_true && got_false);
+      CHECK(count_true == 1 && count_false == 1);
+      // Type is empty.
+      CheckNotTyped(node);
       break;
     }
     case IrOpcode::kIfTrue:
     case IrOpcode::kIfFalse:
       CHECK_EQ(IrOpcode::kBranch,
                NodeProperties::GetControlInput(node, 0)->opcode());
+      // Type is empty.
+      CheckNotTyped(node);
       break;
     case IrOpcode::kLoop:
     case IrOpcode::kMerge:
+      CHECK_EQ(control_count, input_count);
+      // Type is empty.
+      CheckNotTyped(node);
       break;
     case IrOpcode::kReturn:
       // TODO(rossberg): check successor is End
+      // Type is empty.
+      CheckNotTyped(node);
       break;
     case IrOpcode::kThrow:
       // TODO(rossberg): what are the constraints on these?
+      // Type is empty.
+      CheckNotTyped(node);
       break;
+    case IrOpcode::kTerminate:
+      // Type is empty.
+      CheckNotTyped(node);
+      CHECK_EQ(1, control_count);
+      CHECK_EQ(input_count, 1 + effect_count);
+      break;
+
+    // Common operators
+    // ----------------
     case IrOpcode::kParameter: {
       // Parameters have the start node as inputs.
       CHECK_EQ(1, input_count);
@@ -177,95 +259,522 @@
       int index = OpParameter<int>(node);
       Node* input = NodeProperties::GetValueInput(node, 0);
       // Currently, parameter indices start at -1 instead of 0.
-      CHECK_GT(OperatorProperties::GetValueOutputCount(input->op()), index + 1);
+      CHECK_GT(input->op()->ValueOutputCount(), index + 1);
+      // Type can be anything.
+      CheckUpperIs(node, Type::Any());
       break;
     }
-    case IrOpcode::kInt32Constant:
+    case IrOpcode::kInt32Constant:  // TODO(rossberg): rename Word32Constant?
+      // Constants have no inputs.
+      CHECK_EQ(0, input_count);
+      // Type is a 32 bit integer, signed or unsigned.
+      CheckUpperIs(node, Type::Integral32());
+      break;
     case IrOpcode::kInt64Constant:
+      // Constants have no inputs.
+      CHECK_EQ(0, input_count);
+      // Type is internal.
+      // TODO(rossberg): Introduce proper Int64 type.
+      CheckUpperIs(node, Type::Internal());
+      break;
+    case IrOpcode::kFloat32Constant:
     case IrOpcode::kFloat64Constant:
-    case IrOpcode::kExternalConstant:
     case IrOpcode::kNumberConstant:
+      // Constants have no inputs.
+      CHECK_EQ(0, input_count);
+      // Type is a number.
+      CheckUpperIs(node, Type::Number());
+      break;
     case IrOpcode::kHeapConstant:
       // Constants have no inputs.
       CHECK_EQ(0, input_count);
+      // Type can be anything represented as a heap pointer.
+      CheckUpperIs(node, Type::TaggedPointer());
       break;
+    case IrOpcode::kExternalConstant:
+      // Constants have no inputs.
+      CHECK_EQ(0, input_count);
+      // Type is considered internal.
+      CheckUpperIs(node, Type::Internal());
+      break;
+    case IrOpcode::kProjection: {
+      // Projection has an input that produces enough values.
+      int index = static_cast<int>(OpParameter<size_t>(node->op()));
+      Node* input = NodeProperties::GetValueInput(node, 0);
+      CHECK_GT(input->op()->ValueOutputCount(), index);
+      // Type can be anything.
+      // TODO(rossberg): Introduce tuple types for this.
+      // TODO(titzer): Convince rossberg not to.
+      CheckUpperIs(node, Type::Any());
+      break;
+    }
+    case IrOpcode::kSelect: {
+      CHECK_EQ(0, effect_count);
+      CHECK_EQ(0, control_count);
+      CHECK_EQ(3, value_count);
+      break;
+    }
     case IrOpcode::kPhi: {
       // Phi input count matches parent control node.
+      CHECK_EQ(0, effect_count);
       CHECK_EQ(1, control_count);
       Node* control = NodeProperties::GetControlInput(node, 0);
-      CHECK_EQ(value_count,
-               OperatorProperties::GetControlInputCount(control->op()));
+      CHECK_EQ(value_count, control->op()->ControlInputCount());
+      CHECK_EQ(input_count, 1 + value_count);
+      // Type must be subsumed by all input types.
+      // TODO(rossberg): for now at least, narrowing does not really hold.
+      /*
+      for (int i = 0; i < value_count; ++i) {
+        // TODO(rossberg, jarin): Figure out what to do about lower bounds.
+        // CHECK(bounds(node).lower->Is(bounds(ValueInput(node, i)).lower));
+        CHECK(bounds(ValueInput(node, i)).upper->Is(bounds(node).upper));
+      }
+      */
       break;
     }
     case IrOpcode::kEffectPhi: {
       // EffectPhi input count matches parent control node.
+      CHECK_EQ(0, value_count);
       CHECK_EQ(1, control_count);
       Node* control = NodeProperties::GetControlInput(node, 0);
-      CHECK_EQ(effect_count,
-               OperatorProperties::GetControlInputCount(control->op()));
+      CHECK_EQ(effect_count, control->op()->ControlInputCount());
+      CHECK_EQ(input_count, 1 + effect_count);
+      break;
+    }
+    case IrOpcode::kValueEffect:
+      // TODO(rossberg): what are the constraints on these?
+      break;
+    case IrOpcode::kFinish: {
+      // TODO(rossberg): what are the constraints on these?
+      // Type must be subsumed by input type.
+      if (typing == TYPED) {
+        CHECK(bounds(ValueInput(node)).lower->Is(bounds(node).lower));
+        CHECK(bounds(ValueInput(node)).upper->Is(bounds(node).upper));
+      }
       break;
     }
     case IrOpcode::kFrameState:
       // TODO(jarin): what are the constraints on these?
       break;
+    case IrOpcode::kStateValues:
+      // TODO(jarin): what are the constraints on these?
+      break;
     case IrOpcode::kCall:
       // TODO(rossberg): what are the constraints on these?
       break;
-    case IrOpcode::kProjection: {
-      // Projection has an input that produces enough values.
-      size_t index = OpParameter<size_t>(node);
-      Node* input = NodeProperties::GetValueInput(node, 0);
-      CHECK_GT(OperatorProperties::GetValueOutputCount(input->op()),
-               static_cast<int>(index));
+
+    // JavaScript operators
+    // --------------------
+    case IrOpcode::kJSEqual:
+    case IrOpcode::kJSNotEqual:
+    case IrOpcode::kJSStrictEqual:
+    case IrOpcode::kJSStrictNotEqual:
+    case IrOpcode::kJSLessThan:
+    case IrOpcode::kJSGreaterThan:
+    case IrOpcode::kJSLessThanOrEqual:
+    case IrOpcode::kJSGreaterThanOrEqual:
+    case IrOpcode::kJSUnaryNot:
+      // Type is Boolean.
+      CheckUpperIs(node, Type::Boolean());
+      break;
+
+    case IrOpcode::kJSBitwiseOr:
+    case IrOpcode::kJSBitwiseXor:
+    case IrOpcode::kJSBitwiseAnd:
+    case IrOpcode::kJSShiftLeft:
+    case IrOpcode::kJSShiftRight:
+    case IrOpcode::kJSShiftRightLogical:
+      // Type is 32 bit integral.
+      CheckUpperIs(node, Type::Integral32());
+      break;
+    case IrOpcode::kJSAdd:
+      // Type is Number or String.
+      CheckUpperIs(node, Type::NumberOrString());
+      break;
+    case IrOpcode::kJSSubtract:
+    case IrOpcode::kJSMultiply:
+    case IrOpcode::kJSDivide:
+    case IrOpcode::kJSModulus:
+      // Type is Number.
+      CheckUpperIs(node, Type::Number());
+      break;
+
+    case IrOpcode::kJSToBoolean:
+      // Type is Boolean.
+      CheckUpperIs(node, Type::Boolean());
+      break;
+    case IrOpcode::kJSToNumber:
+      // Type is Number.
+      CheckUpperIs(node, Type::Number());
+      break;
+    case IrOpcode::kJSToString:
+      // Type is String.
+      CheckUpperIs(node, Type::String());
+      break;
+    case IrOpcode::kJSToName:
+      // Type is Name.
+      CheckUpperIs(node, Type::Name());
+      break;
+    case IrOpcode::kJSToObject:
+      // Type is Receiver.
+      CheckUpperIs(node, Type::Receiver());
+      break;
+
+    case IrOpcode::kJSCreate:
+      // Type is Object.
+      CheckUpperIs(node, Type::Object());
+      break;
+    case IrOpcode::kJSLoadProperty:
+    case IrOpcode::kJSLoadNamed:
+      // Type can be anything.
+      CheckUpperIs(node, Type::Any());
+      break;
+    case IrOpcode::kJSStoreProperty:
+    case IrOpcode::kJSStoreNamed:
+      // Type is empty.
+      CheckNotTyped(node);
+      break;
+    case IrOpcode::kJSDeleteProperty:
+    case IrOpcode::kJSHasProperty:
+    case IrOpcode::kJSInstanceOf:
+      // Type is Boolean.
+      CheckUpperIs(node, Type::Boolean());
+      break;
+    case IrOpcode::kJSTypeOf:
+      // Type is String.
+      CheckUpperIs(node, Type::String());
+      break;
+
+    case IrOpcode::kJSLoadContext:
+      // Type can be anything.
+      CheckUpperIs(node, Type::Any());
+      break;
+    case IrOpcode::kJSStoreContext:
+      // Type is empty.
+      CheckNotTyped(node);
+      break;
+    case IrOpcode::kJSCreateFunctionContext:
+    case IrOpcode::kJSCreateCatchContext:
+    case IrOpcode::kJSCreateWithContext:
+    case IrOpcode::kJSCreateBlockContext:
+    case IrOpcode::kJSCreateModuleContext:
+    case IrOpcode::kJSCreateScriptContext: {
+      // Type is Context, and operand is Internal.
+      Node* context = NodeProperties::GetContextInput(node);
+      // TODO(rossberg): This should really be Is(Internal), but the typer
+      // currently can't do backwards propagation.
+      CheckUpperMaybe(context, Type::Internal());
+      if (typing == TYPED) CHECK(bounds(node).upper->IsContext());
       break;
     }
-    default:
-      // TODO(rossberg): Check other node kinds.
+
+    case IrOpcode::kJSCallConstruct:
+      // Type is Receiver.
+      CheckUpperIs(node, Type::Receiver());
+      break;
+    case IrOpcode::kJSCallFunction:
+    case IrOpcode::kJSCallRuntime:
+    case IrOpcode::kJSYield:
+    case IrOpcode::kJSDebugger:
+      // Type can be anything.
+      CheckUpperIs(node, Type::Any());
+      break;
+
+    // Simplified operators
+    // -------------------------------
+    case IrOpcode::kAnyToBoolean:
+      // Type is Boolean.
+      CheckUpperIs(node, Type::Boolean());
+      break;
+    case IrOpcode::kBooleanNot:
+      // Boolean -> Boolean
+      CheckValueInputIs(node, 0, Type::Boolean());
+      CheckUpperIs(node, Type::Boolean());
+      break;
+    case IrOpcode::kBooleanToNumber:
+      // Boolean -> Number
+      CheckValueInputIs(node, 0, Type::Boolean());
+      CheckUpperIs(node, Type::Number());
+      break;
+    case IrOpcode::kNumberEqual:
+    case IrOpcode::kNumberLessThan:
+    case IrOpcode::kNumberLessThanOrEqual:
+      // (Number, Number) -> Boolean
+      CheckValueInputIs(node, 0, Type::Number());
+      CheckValueInputIs(node, 1, Type::Number());
+      CheckUpperIs(node, Type::Boolean());
+      break;
+    case IrOpcode::kNumberAdd:
+    case IrOpcode::kNumberSubtract:
+    case IrOpcode::kNumberMultiply:
+    case IrOpcode::kNumberDivide:
+    case IrOpcode::kNumberModulus:
+      // (Number, Number) -> Number
+      CheckValueInputIs(node, 0, Type::Number());
+      CheckValueInputIs(node, 1, Type::Number());
+      // TODO(rossberg): activate once we retype after opcode changes.
+      // CheckUpperIs(node, Type::Number());
+      break;
+    case IrOpcode::kNumberToInt32:
+      // Number -> Signed32
+      CheckValueInputIs(node, 0, Type::Number());
+      CheckUpperIs(node, Type::Signed32());
+      break;
+    case IrOpcode::kNumberToUint32:
+      // Number -> Unsigned32
+      CheckValueInputIs(node, 0, Type::Number());
+      CheckUpperIs(node, Type::Unsigned32());
+      break;
+    case IrOpcode::kStringEqual:
+    case IrOpcode::kStringLessThan:
+    case IrOpcode::kStringLessThanOrEqual:
+      // (String, String) -> Boolean
+      CheckValueInputIs(node, 0, Type::String());
+      CheckValueInputIs(node, 1, Type::String());
+      CheckUpperIs(node, Type::Boolean());
+      break;
+    case IrOpcode::kStringAdd:
+      // (String, String) -> String
+      CheckValueInputIs(node, 0, Type::String());
+      CheckValueInputIs(node, 1, Type::String());
+      CheckUpperIs(node, Type::String());
+      break;
+    case IrOpcode::kReferenceEqual: {
+      // (Unique, Any) -> Boolean  and
+      // (Any, Unique) -> Boolean
+      if (typing == TYPED) {
+        CHECK(bounds(ValueInput(node, 0)).upper->Is(Type::Unique()) ||
+              bounds(ValueInput(node, 1)).upper->Is(Type::Unique()));
+      }
+      CheckUpperIs(node, Type::Boolean());
+      break;
+    }
+    case IrOpcode::kObjectIsSmi:
+      CheckValueInputIs(node, 0, Type::Any());
+      CheckUpperIs(node, Type::Boolean());
+      break;
+    case IrOpcode::kObjectIsNonNegativeSmi:
+      CheckValueInputIs(node, 0, Type::Any());
+      CheckUpperIs(node, Type::Boolean());
+      break;
+
+    case IrOpcode::kChangeTaggedToInt32: {
+      // Signed32 /\ Tagged -> Signed32 /\ UntaggedInt32
+      // TODO(neis): Activate once ChangeRepresentation works in typer.
+      // Type* from = Type::Intersect(Type::Signed32(), Type::Tagged());
+      // Type* to = Type::Intersect(Type::Signed32(), Type::UntaggedInt32());
+      // CheckValueInputIs(node, 0, from);
+      // CheckUpperIs(node, to);
+      break;
+    }
+    case IrOpcode::kChangeTaggedToUint32: {
+      // Unsigned32 /\ Tagged -> Unsigned32 /\ UntaggedInt32
+      // TODO(neis): Activate once ChangeRepresentation works in typer.
+      // Type* from = Type::Intersect(Type::Unsigned32(), Type::Tagged());
+      // Type* to =Type::Intersect(Type::Unsigned32(), Type::UntaggedInt32());
+      // CheckValueInputIs(node, 0, from);
+      // CheckUpperIs(node, to);
+      break;
+    }
+    case IrOpcode::kChangeTaggedToFloat64: {
+      // Number /\ Tagged -> Number /\ UntaggedFloat64
+      // TODO(neis): Activate once ChangeRepresentation works in typer.
+      // Type* from = Type::Intersect(Type::Number(), Type::Tagged());
+      // Type* to = Type::Intersect(Type::Number(), Type::UntaggedFloat64());
+      // CheckValueInputIs(node, 0, from);
+      // CheckUpperIs(node, to);
+      break;
+    }
+    case IrOpcode::kChangeInt32ToTagged: {
+      // Signed32 /\ UntaggedInt32 -> Signed32 /\ Tagged
+      // TODO(neis): Activate once ChangeRepresentation works in typer.
+      // Type* from =Type::Intersect(Type::Signed32(), Type::UntaggedInt32());
+      // Type* to = Type::Intersect(Type::Signed32(), Type::Tagged());
+      // CheckValueInputIs(node, 0, from);
+      // CheckUpperIs(node, to);
+      break;
+    }
+    case IrOpcode::kChangeUint32ToTagged: {
+      // Unsigned32 /\ UntaggedInt32 -> Unsigned32 /\ Tagged
+      // TODO(neis): Activate once ChangeRepresentation works in typer.
+      // Type* from=Type::Intersect(Type::Unsigned32(),Type::UntaggedInt32());
+      // Type* to = Type::Intersect(Type::Unsigned32(), Type::Tagged());
+      // CheckValueInputIs(node, 0, from);
+      // CheckUpperIs(node, to);
+      break;
+    }
+    case IrOpcode::kChangeFloat64ToTagged: {
+      // Number /\ UntaggedFloat64 -> Number /\ Tagged
+      // TODO(neis): Activate once ChangeRepresentation works in typer.
+      // Type* from =Type::Intersect(Type::Number(), Type::UntaggedFloat64());
+      // Type* to = Type::Intersect(Type::Number(), Type::Tagged());
+      // CheckValueInputIs(node, 0, from);
+      // CheckUpperIs(node, to);
+      break;
+    }
+    case IrOpcode::kChangeBoolToBit: {
+      // Boolean /\ TaggedPtr -> Boolean /\ UntaggedInt1
+      // TODO(neis): Activate once ChangeRepresentation works in typer.
+      // Type* from = Type::Intersect(Type::Boolean(), Type::TaggedPtr());
+      // Type* to = Type::Intersect(Type::Boolean(), Type::UntaggedInt1());
+      // CheckValueInputIs(node, 0, from);
+      // CheckUpperIs(node, to);
+      break;
+    }
+    case IrOpcode::kChangeBitToBool: {
+      // Boolean /\ UntaggedInt1 -> Boolean /\ TaggedPtr
+      // TODO(neis): Activate once ChangeRepresentation works in typer.
+      // Type* from = Type::Intersect(Type::Boolean(), Type::UntaggedInt1());
+      // Type* to = Type::Intersect(Type::Boolean(), Type::TaggedPtr());
+      // CheckValueInputIs(node, 0, from);
+      // CheckUpperIs(node, to);
+      break;
+    }
+
+    case IrOpcode::kLoadField:
+      // Object -> fieldtype
+      // TODO(rossberg): activate once machine ops are typed.
+      // CheckValueInputIs(node, 0, Type::Object());
+      // CheckUpperIs(node, Field(node).type);
+      break;
+    case IrOpcode::kLoadBuffer:
+      break;
+    case IrOpcode::kLoadElement:
+      // Object -> elementtype
+      // TODO(rossberg): activate once machine ops are typed.
+      // CheckValueInputIs(node, 0, Type::Object());
+      // CheckUpperIs(node, Element(node).type);
+      break;
+    case IrOpcode::kStoreField:
+      // (Object, fieldtype) -> _|_
+      // TODO(rossberg): activate once machine ops are typed.
+      // CheckValueInputIs(node, 0, Type::Object());
+      // CheckValueInputIs(node, 1, Field(node).type);
+      CheckNotTyped(node);
+      break;
+    case IrOpcode::kStoreBuffer:
+      break;
+    case IrOpcode::kStoreElement:
+      // (Object, elementtype) -> _|_
+      // TODO(rossberg): activate once machine ops are typed.
+      // CheckValueInputIs(node, 0, Type::Object());
+      // CheckValueInputIs(node, 1, Element(node).type);
+      CheckNotTyped(node);
+      break;
+
+    // Machine operators
+    // -----------------------
+    case IrOpcode::kLoad:
+    case IrOpcode::kStore:
+    case IrOpcode::kWord32And:
+    case IrOpcode::kWord32Or:
+    case IrOpcode::kWord32Xor:
+    case IrOpcode::kWord32Shl:
+    case IrOpcode::kWord32Shr:
+    case IrOpcode::kWord32Sar:
+    case IrOpcode::kWord32Ror:
+    case IrOpcode::kWord32Equal:
+    case IrOpcode::kWord64And:
+    case IrOpcode::kWord64Or:
+    case IrOpcode::kWord64Xor:
+    case IrOpcode::kWord64Shl:
+    case IrOpcode::kWord64Shr:
+    case IrOpcode::kWord64Sar:
+    case IrOpcode::kWord64Ror:
+    case IrOpcode::kWord64Equal:
+    case IrOpcode::kInt32Add:
+    case IrOpcode::kInt32AddWithOverflow:
+    case IrOpcode::kInt32Sub:
+    case IrOpcode::kInt32SubWithOverflow:
+    case IrOpcode::kInt32Mul:
+    case IrOpcode::kInt32MulHigh:
+    case IrOpcode::kInt32Div:
+    case IrOpcode::kInt32Mod:
+    case IrOpcode::kInt32LessThan:
+    case IrOpcode::kInt32LessThanOrEqual:
+    case IrOpcode::kUint32Div:
+    case IrOpcode::kUint32Mod:
+    case IrOpcode::kUint32MulHigh:
+    case IrOpcode::kUint32LessThan:
+    case IrOpcode::kUint32LessThanOrEqual:
+    case IrOpcode::kInt64Add:
+    case IrOpcode::kInt64Sub:
+    case IrOpcode::kInt64Mul:
+    case IrOpcode::kInt64Div:
+    case IrOpcode::kInt64Mod:
+    case IrOpcode::kInt64LessThan:
+    case IrOpcode::kInt64LessThanOrEqual:
+    case IrOpcode::kUint64Div:
+    case IrOpcode::kUint64Mod:
+    case IrOpcode::kUint64LessThan:
+    case IrOpcode::kFloat64Add:
+    case IrOpcode::kFloat64Sub:
+    case IrOpcode::kFloat64Mul:
+    case IrOpcode::kFloat64Div:
+    case IrOpcode::kFloat64Mod:
+    case IrOpcode::kFloat64Sqrt:
+    case IrOpcode::kFloat64Floor:
+    case IrOpcode::kFloat64Ceil:
+    case IrOpcode::kFloat64RoundTruncate:
+    case IrOpcode::kFloat64RoundTiesAway:
+    case IrOpcode::kFloat64Equal:
+    case IrOpcode::kFloat64LessThan:
+    case IrOpcode::kFloat64LessThanOrEqual:
+    case IrOpcode::kTruncateInt64ToInt32:
+    case IrOpcode::kTruncateFloat64ToFloat32:
+    case IrOpcode::kTruncateFloat64ToInt32:
+    case IrOpcode::kChangeInt32ToInt64:
+    case IrOpcode::kChangeUint32ToUint64:
+    case IrOpcode::kChangeInt32ToFloat64:
+    case IrOpcode::kChangeUint32ToFloat64:
+    case IrOpcode::kChangeFloat32ToFloat64:
+    case IrOpcode::kChangeFloat64ToInt32:
+    case IrOpcode::kChangeFloat64ToUint32:
+    case IrOpcode::kLoadStackPointer:
+    case IrOpcode::kCheckedLoad:
+    case IrOpcode::kCheckedStore:
+      // TODO(rossberg): Check.
       break;
   }
-
-  if (from_start) {
-    reached_from_start.insert(node);
-  } else {
-    reached_from_end.insert(node);
-  }
-
-  return GenericGraphVisit::CONTINUE;
 }
 
 
-void Verifier::Run(Graph* graph) {
-  Visitor visitor(graph->zone());
-
+void Verifier::Run(Graph* graph, Typing typing) {
+  Visitor visitor(graph->zone(), typing);
   CHECK_NE(NULL, graph->start());
-  visitor.from_start = true;
-  graph->VisitNodeUsesFromStart(&visitor);
   CHECK_NE(NULL, graph->end());
-  visitor.from_start = false;
   graph->VisitNodeInputsFromEnd(&visitor);
-
-  // All control nodes reachable from end are reachable from start.
-  for (NodeSet::iterator it = visitor.reached_from_end.begin();
-       it != visitor.reached_from_end.end(); ++it) {
-    CHECK(!NodeProperties::IsControl(*it) ||
-          visitor.reached_from_start.count(*it));
-  }
 }
 
 
+// -----------------------------------------------------------------------------
+
 static bool HasDominatingDef(Schedule* schedule, Node* node,
                              BasicBlock* container, BasicBlock* use_block,
                              int use_pos) {
   BasicBlock* block = use_block;
   while (true) {
     while (use_pos >= 0) {
-      if (block->nodes_[use_pos] == node) return true;
+      if (block->NodeAt(use_pos) == node) return true;
       use_pos--;
     }
-    block = block->dominator_;
+    block = block->dominator();
     if (block == NULL) break;
-    use_pos = static_cast<int>(block->nodes_.size()) - 1;
-    if (node == block->control_input_) return true;
+    use_pos = static_cast<int>(block->NodeCount()) - 1;
+    if (node == block->control_input()) return true;
+  }
+  return false;
+}
+
+
+static bool Dominates(Schedule* schedule, Node* dominator, Node* dominatee) {
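+  // {dominator} dominates {dominatee} iff the block containing {dominator}
+  // occurs on the immediate-dominator chain starting at {dominatee}'s block.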
+  BasicBlock* dom = schedule->block(dominator);
+  BasicBlock* sub = schedule->block(dominatee);
+  while (sub != NULL) {
+    if (sub == dom) {
+      return true;
+    }
+    sub = sub->dominator();
   }
   return false;
 }
@@ -273,123 +782,146 @@
 
 static void CheckInputsDominate(Schedule* schedule, BasicBlock* block,
                                 Node* node, int use_pos) {
-  for (int j = OperatorProperties::GetValueInputCount(node->op()) - 1; j >= 0;
-       j--) {
+  for (int j = node->op()->ValueInputCount() - 1; j >= 0; j--) {
     BasicBlock* use_block = block;
     if (node->opcode() == IrOpcode::kPhi) {
       use_block = use_block->PredecessorAt(j);
-      use_pos = static_cast<int>(use_block->nodes_.size()) - 1;
+      use_pos = static_cast<int>(use_block->NodeCount()) - 1;
     }
     Node* input = node->InputAt(j);
     if (!HasDominatingDef(schedule, node->InputAt(j), block, use_block,
                           use_pos)) {
       V8_Fatal(__FILE__, __LINE__,
                "Node #%d:%s in B%d is not dominated by input@%d #%d:%s",
-               node->id(), node->op()->mnemonic(), block->id(), j, input->id(),
-               input->op()->mnemonic());
+               node->id(), node->op()->mnemonic(), block->id().ToInt(), j,
+               input->id(), input->op()->mnemonic());
+    }
+  }
+  // Ensure that nodes are dominated by their control inputs;
+  // kEnd is an exception, as unreachable blocks resulting from kMerge
+  // are not in the RPO.
+  if (node->op()->ControlInputCount() == 1 &&
+      node->opcode() != IrOpcode::kEnd) {
+    Node* ctl = NodeProperties::GetControlInput(node);
+    if (!Dominates(schedule, ctl, node)) {
+      V8_Fatal(__FILE__, __LINE__,
+               "Node #%d:%s in B%d is not dominated by control input #%d:%s",
+               node->id(), node->op()->mnemonic(), block->id().ToInt(),
+               ctl->id(), ctl->op()->mnemonic());
     }
   }
 }
 
 
 void ScheduleVerifier::Run(Schedule* schedule) {
-  const int count = schedule->BasicBlockCount();
+  const size_t count = schedule->BasicBlockCount();
   Zone tmp_zone(schedule->zone()->isolate());
   Zone* zone = &tmp_zone;
   BasicBlock* start = schedule->start();
   BasicBlockVector* rpo_order = schedule->rpo_order();
 
   // Verify the RPO order contains only blocks from this schedule.
-  CHECK_GE(count, static_cast<int>(rpo_order->size()));
+  CHECK_GE(count, rpo_order->size());
   for (BasicBlockVector::iterator b = rpo_order->begin(); b != rpo_order->end();
        ++b) {
     CHECK_EQ((*b), schedule->GetBlockById((*b)->id()));
+    // All predecessors and successors should be in rpo and in this schedule.
+    for (BasicBlock::Predecessors::iterator j = (*b)->predecessors_begin();
+         j != (*b)->predecessors_end(); ++j) {
+      CHECK_GE((*j)->rpo_number(), 0);
+      CHECK_EQ((*j), schedule->GetBlockById((*j)->id()));
+    }
+    for (BasicBlock::Successors::iterator j = (*b)->successors_begin();
+         j != (*b)->successors_end(); ++j) {
+      CHECK_GE((*j)->rpo_number(), 0);
+      CHECK_EQ((*j), schedule->GetBlockById((*j)->id()));
+    }
   }
 
   // Verify RPO numbers of blocks.
   CHECK_EQ(start, rpo_order->at(0));  // Start should be first.
   for (size_t b = 0; b < rpo_order->size(); b++) {
     BasicBlock* block = rpo_order->at(b);
-    CHECK_EQ(static_cast<int>(b), block->rpo_number_);
-    BasicBlock* dom = block->dominator_;
+    CHECK_EQ(static_cast<int>(b), block->rpo_number());
+    BasicBlock* dom = block->dominator();
     if (b == 0) {
       // The start block should not have a dominator.
       CHECK_EQ(NULL, dom);
     } else {
       // Check that the immediate dominator appears somewhere before the block.
       CHECK_NE(NULL, dom);
-      CHECK_LT(dom->rpo_number_, block->rpo_number_);
+      CHECK_LT(dom->rpo_number(), block->rpo_number());
     }
   }
 
   // Verify that all blocks reachable from start are in the RPO.
-  BoolVector marked(count, false, zone);
+  BoolVector marked(static_cast<int>(count), false, zone);
   {
     ZoneQueue<BasicBlock*> queue(zone);
     queue.push(start);
-    marked[start->id()] = true;
+    marked[start->id().ToSize()] = true;
     while (!queue.empty()) {
       BasicBlock* block = queue.front();
       queue.pop();
-      for (int s = 0; s < block->SuccessorCount(); s++) {
+      for (size_t s = 0; s < block->SuccessorCount(); s++) {
         BasicBlock* succ = block->SuccessorAt(s);
-        if (!marked[succ->id()]) {
-          marked[succ->id()] = true;
+        if (!marked[succ->id().ToSize()]) {
+          marked[succ->id().ToSize()] = true;
           queue.push(succ);
         }
       }
     }
   }
   // Verify marked blocks are in the RPO.
-  for (int i = 0; i < count; i++) {
-    BasicBlock* block = schedule->GetBlockById(i);
+  for (size_t i = 0; i < count; i++) {
+    BasicBlock* block = schedule->GetBlockById(BasicBlock::Id::FromSize(i));
     if (marked[i]) {
-      CHECK_GE(block->rpo_number_, 0);
-      CHECK_EQ(block, rpo_order->at(block->rpo_number_));
+      CHECK_GE(block->rpo_number(), 0);
+      CHECK_EQ(block, rpo_order->at(block->rpo_number()));
     }
   }
   // Verify RPO blocks are marked.
   for (size_t b = 0; b < rpo_order->size(); b++) {
-    CHECK(marked[rpo_order->at(b)->id()]);
+    CHECK(marked[rpo_order->at(b)->id().ToSize()]);
   }
 
   {
     // Verify the dominance relation.
-    ZoneList<BitVector*> dominators(count, zone);
-    dominators.Initialize(count, zone);
-    dominators.AddBlock(NULL, count, zone);
+    ZoneVector<BitVector*> dominators(zone);
+    dominators.resize(count, NULL);
 
     // Compute a set of all the nodes that dominate a given node by using
     // a forward fixpoint. O(n^2).
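+    // dominators[id] accumulates the set of blocks that strictly dominate
+    // the block with that id; the immediate dominator must end up a member.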
     ZoneQueue<BasicBlock*> queue(zone);
     queue.push(start);
-    dominators[start->id()] = new (zone) BitVector(count, zone);
+    dominators[start->id().ToSize()] =
+        new (zone) BitVector(static_cast<int>(count), zone);
     while (!queue.empty()) {
       BasicBlock* block = queue.front();
       queue.pop();
-      BitVector* block_doms = dominators[block->id()];
-      BasicBlock* idom = block->dominator_;
-      if (idom != NULL && !block_doms->Contains(idom->id())) {
+      BitVector* block_doms = dominators[block->id().ToSize()];
+      BasicBlock* idom = block->dominator();
+      if (idom != NULL && !block_doms->Contains(idom->id().ToInt())) {
         V8_Fatal(__FILE__, __LINE__, "Block B%d is not dominated by B%d",
-                 block->id(), idom->id());
+                 block->id().ToInt(), idom->id().ToInt());
       }
-      for (int s = 0; s < block->SuccessorCount(); s++) {
+      for (size_t s = 0; s < block->SuccessorCount(); s++) {
         BasicBlock* succ = block->SuccessorAt(s);
-        BitVector* succ_doms = dominators[succ->id()];
+        BitVector* succ_doms = dominators[succ->id().ToSize()];
 
         if (succ_doms == NULL) {
           // First time visiting the successor. S.doms = B U B.doms
-          succ_doms = new (zone) BitVector(count, zone);
+          succ_doms = new (zone) BitVector(static_cast<int>(count), zone);
           succ_doms->CopyFrom(*block_doms);
-          succ_doms->Add(block->id());
-          dominators[succ->id()] = succ_doms;
+          succ_doms->Add(block->id().ToInt());
+          dominators[succ->id().ToSize()] = succ_doms;
           queue.push(succ);
         } else {
           // Nth time visiting the successor. S.doms = S.doms ^ (B U B.doms)
-          bool had = succ_doms->Contains(block->id());
-          if (had) succ_doms->Remove(block->id());
+          bool had = succ_doms->Contains(block->id().ToInt());
+          if (had) succ_doms->Remove(block->id().ToInt());
           if (succ_doms->IntersectIsChanged(*block_doms)) queue.push(succ);
-          if (had) succ_doms->Add(block->id());
+          if (had) succ_doms->Add(block->id().ToInt());
         }
       }
     }
@@ -398,16 +930,18 @@
     for (BasicBlockVector::iterator b = rpo_order->begin();
          b != rpo_order->end(); ++b) {
       BasicBlock* block = *b;
-      BasicBlock* idom = block->dominator_;
+      BasicBlock* idom = block->dominator();
       if (idom == NULL) continue;
-      BitVector* block_doms = dominators[block->id()];
+      BitVector* block_doms = dominators[block->id().ToSize()];
 
       for (BitVector::Iterator it(block_doms); !it.Done(); it.Advance()) {
-        BasicBlock* dom = schedule->GetBlockById(it.Current());
-        if (dom != idom && !dominators[idom->id()]->Contains(dom->id())) {
+        BasicBlock* dom =
+            schedule->GetBlockById(BasicBlock::Id::FromInt(it.Current()));
+        if (dom != idom &&
+            !dominators[idom->id().ToSize()]->Contains(dom->id().ToInt())) {
           V8_Fatal(__FILE__, __LINE__,
-                   "Block B%d is not immediately dominated by B%d", block->id(),
-                   idom->id());
+                   "Block B%d is not immediately dominated by B%d",
+                   block->id().ToInt(), idom->id().ToInt());
         }
       }
     }
@@ -421,8 +955,7 @@
       if (phi->opcode() != IrOpcode::kPhi) continue;
       // TODO(titzer): Nasty special case. Phis from RawMachineAssembler
       // schedules don't have control inputs.
-      if (phi->InputCount() >
-          OperatorProperties::GetValueInputCount(phi->op())) {
+      if (phi->InputCount() > phi->op()->ValueInputCount()) {
         Node* control = NodeProperties::GetControlInput(phi);
         CHECK(control->opcode() == IrOpcode::kMerge ||
               control->opcode() == IrOpcode::kLoop);
@@ -437,15 +970,15 @@
     BasicBlock* block = *b;
 
     // Check inputs to control for this block.
-    Node* control = block->control_input_;
+    Node* control = block->control_input();
     if (control != NULL) {
       CHECK_EQ(block, schedule->block(control));
       CheckInputsDominate(schedule, block, control,
-                          static_cast<int>(block->nodes_.size()) - 1);
+                          static_cast<int>(block->NodeCount()) - 1);
     }
     // Check inputs for all nodes in the block.
-    for (size_t i = 0; i < block->nodes_.size(); i++) {
-      Node* node = block->nodes_[i];
+    for (size_t i = 0; i < block->NodeCount(); i++) {
+      Node* node = block->NodeAt(i);
       CheckInputsDominate(schedule, block, node, static_cast<int>(i) - 1);
     }
   }
diff --git a/src/compiler/verifier.h b/src/compiler/verifier.h
index b5c028e..67b7ba6 100644
--- a/src/compiler/verifier.h
+++ b/src/compiler/verifier.h
@@ -18,7 +18,9 @@
 // each node, etc.
 class Verifier {
  public:
-  static void Run(Graph* graph);
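+  // TYPED additionally verifies the types assigned to nodes; UNTYPED is
+  // presumably for graphs that have not been typed yet.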
+  enum Typing { TYPED, UNTYPED };
+
+  static void Run(Graph* graph, Typing typing = TYPED);
 
  private:
   class Visitor;
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
index f71d3bf..0480f9d 100644
--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -19,186 +19,487 @@
 #define __ masm()->
 
 
-// TODO(turbofan): Cleanup these hacks.
-enum Immediate64Type { kImm64Value, kImm64Handle, kImm64Reference };
-
-
-struct Immediate64 {
-  uint64_t value;
-  Handle<Object> handle;
-  ExternalReference reference;
-  Immediate64Type type;
-};
-
-
-enum RegisterOrOperandType { kRegister, kDoubleRegister, kOperand };
-
-
-struct RegisterOrOperand {
-  RegisterOrOperand() : operand(no_reg, 0) {}
-  Register reg;
-  DoubleRegister double_reg;
-  Operand operand;
-  RegisterOrOperandType type;
-};
-
-
 // Adds X64 specific methods for decoding operands.
 class X64OperandConverter : public InstructionOperandConverter {
  public:
   X64OperandConverter(CodeGenerator* gen, Instruction* instr)
       : InstructionOperandConverter(gen, instr) {}
 
-  RegisterOrOperand InputRegisterOrOperand(int index) {
-    return ToRegisterOrOperand(instr_->InputAt(index));
-  }
-
   Immediate InputImmediate(int index) {
     return ToImmediate(instr_->InputAt(index));
   }
 
-  RegisterOrOperand OutputRegisterOrOperand() {
-    return ToRegisterOrOperand(instr_->Output());
-  }
+  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
 
-  Immediate64 InputImmediate64(int index) {
-    return ToImmediate64(instr_->InputAt(index));
-  }
-
-  Immediate64 ToImmediate64(InstructionOperand* operand) {
-    Constant constant = ToConstant(operand);
-    Immediate64 immediate;
-    immediate.value = 0xbeefdeaddeefbeed;
-    immediate.type = kImm64Value;
-    switch (constant.type()) {
-      case Constant::kInt32:
-      case Constant::kInt64:
-        immediate.value = constant.ToInt64();
-        return immediate;
-      case Constant::kFloat64:
-        immediate.type = kImm64Handle;
-        immediate.handle =
-            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED);
-        return immediate;
-      case Constant::kExternalReference:
-        immediate.type = kImm64Reference;
-        immediate.reference = constant.ToExternalReference();
-        return immediate;
-      case Constant::kHeapObject:
-        immediate.type = kImm64Handle;
-        immediate.handle = constant.ToHeapObject();
-        return immediate;
-    }
-    UNREACHABLE();
-    return immediate;
-  }
+  Operand OutputOperand() { return ToOperand(instr_->Output()); }
 
   Immediate ToImmediate(InstructionOperand* operand) {
-    Constant constant = ToConstant(operand);
-    switch (constant.type()) {
-      case Constant::kInt32:
-        return Immediate(constant.ToInt32());
-      case Constant::kInt64:
-      case Constant::kFloat64:
-      case Constant::kExternalReference:
-      case Constant::kHeapObject:
-        break;
-    }
-    UNREACHABLE();
-    return Immediate(-1);
+    return Immediate(ToConstant(operand).ToInt32());
   }
 
   Operand ToOperand(InstructionOperand* op, int extra = 0) {
-    RegisterOrOperand result = ToRegisterOrOperand(op, extra);
-    DCHECK_EQ(kOperand, result.type);
-    return result.operand;
-  }
-
-  RegisterOrOperand ToRegisterOrOperand(InstructionOperand* op, int extra = 0) {
-    RegisterOrOperand result;
-    if (op->IsRegister()) {
-      DCHECK(extra == 0);
-      result.type = kRegister;
-      result.reg = ToRegister(op);
-      return result;
-    } else if (op->IsDoubleRegister()) {
-      DCHECK(extra == 0);
-      DCHECK(extra == 0);
-      result.type = kDoubleRegister;
-      result.double_reg = ToDoubleRegister(op);
-      return result;
-    }
-
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-
-    result.type = kOperand;
     // The linkage computes where all spill slots are located.
     FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
-    result.operand =
-        Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
-    return result;
+    return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
   }
 
-  Operand MemoryOperand(int* first_input) {
-    const int offset = *first_input;
-    switch (AddressingModeField::decode(instr_->opcode())) {
-      case kMode_MR1I: {
-        *first_input += 2;
-        Register index = InputRegister(offset + 1);
-        return Operand(InputRegister(offset + 0), index, times_1,
-                       0);  // TODO(dcarney): K != 0
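+  // Returns the current input index and post-increments it, consuming one
+  // input of a variable-length memory operand.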
+  static int NextOffset(int* offset) {
+    int i = *offset;
+    (*offset)++;
+    return i;
+  }
+
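+  // Maps an addressing mode onto its hardware scale factor; relies on the
+  // x1/x2/x4/x8 mode variants being declared contiguously starting at |one|.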
+  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
+    STATIC_ASSERT(0 == static_cast<int>(times_1));
+    STATIC_ASSERT(1 == static_cast<int>(times_2));
+    STATIC_ASSERT(2 == static_cast<int>(times_4));
+    STATIC_ASSERT(3 == static_cast<int>(times_8));
+    int scale = static_cast<int>(mode - one);
+    DCHECK(scale >= 0 && scale < 4);
+    return static_cast<ScaleFactor>(scale);
+  }
+
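+  // Decodes the addressing mode from the opcode and assembles the matching
+  // x64 Operand from the base, index, scale and displacement inputs.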
+  Operand MemoryOperand(int* offset) {
+    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
+    switch (mode) {
+      case kMode_MR: {
+        Register base = InputRegister(NextOffset(offset));
+        int32_t disp = 0;
+        return Operand(base, disp);
       }
-      case kMode_MRI:
-        *first_input += 2;
-        return Operand(InputRegister(offset + 0), InputInt32(offset + 1));
-      default:
+      case kMode_MRI: {
+        Register base = InputRegister(NextOffset(offset));
+        int32_t disp = InputInt32(NextOffset(offset));
+        return Operand(base, disp);
+      }
+      case kMode_MR1:
+      case kMode_MR2:
+      case kMode_MR4:
+      case kMode_MR8: {
+        Register base = InputRegister(NextOffset(offset));
+        Register index = InputRegister(NextOffset(offset));
+        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
+        int32_t disp = 0;
+        return Operand(base, index, scale, disp);
+      }
+      case kMode_MR1I:
+      case kMode_MR2I:
+      case kMode_MR4I:
+      case kMode_MR8I: {
+        Register base = InputRegister(NextOffset(offset));
+        Register index = InputRegister(NextOffset(offset));
+        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
+        int32_t disp = InputInt32(NextOffset(offset));
+        return Operand(base, index, scale, disp);
+      }
+      case kMode_M1: {
+        Register base = InputRegister(NextOffset(offset));
+        int32_t disp = 0;
+        return Operand(base, disp);
+      }
+      case kMode_M2:
+        UNREACHABLE();  // Should use kMode_MR with more compact encoding.
+        return Operand(no_reg, 0);
+      case kMode_M4:
+      case kMode_M8: {
+        Register index = InputRegister(NextOffset(offset));
+        ScaleFactor scale = ScaleFor(kMode_M1, mode);
+        int32_t disp = 0;
+        return Operand(index, scale, disp);
+      }
+      case kMode_M1I:
+      case kMode_M2I:
+      case kMode_M4I:
+      case kMode_M8I: {
+        Register index = InputRegister(NextOffset(offset));
+        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
+        int32_t disp = InputInt32(NextOffset(offset));
+        return Operand(index, scale, disp);
+      }
+      case kMode_None:
         UNREACHABLE();
         return Operand(no_reg, 0);
     }
+    UNREACHABLE();
+    return Operand(no_reg, 0);
   }
 
-  Operand MemoryOperand() {
-    int first_input = 0;
+  Operand MemoryOperand(int first_input = 0) {
     return MemoryOperand(&first_input);
   }
 };
 
 
-static bool HasImmediateInput(Instruction* instr, int index) {
+namespace {
+
+bool HasImmediateInput(Instruction* instr, int index) {
   return instr->InputAt(index)->IsImmediate();
 }
 
 
-#define ASSEMBLE_BINOP(asm_instr)                            \
-  do {                                                       \
-    if (HasImmediateInput(instr, 1)) {                       \
-      RegisterOrOperand input = i.InputRegisterOrOperand(0); \
-      if (input.type == kRegister) {                         \
-        __ asm_instr(input.reg, i.InputImmediate(1));        \
-      } else {                                               \
-        __ asm_instr(input.operand, i.InputImmediate(1));    \
-      }                                                      \
-    } else {                                                 \
-      RegisterOrOperand input = i.InputRegisterOrOperand(1); \
-      if (input.type == kRegister) {                         \
-        __ asm_instr(i.InputRegister(0), input.reg);         \
-      } else {                                               \
-        __ asm_instr(i.InputRegister(0), input.operand);     \
-      }                                                      \
-    }                                                        \
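+// Out-of-line results for checked loads with an out-of-bounds index: integer
+// loads produce zero; float loads produce a NaN (pcmpeqd of a register with
+// itself sets all bits, which is a quiet-NaN bit pattern for a double).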
+class OutOfLineLoadZero FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadZero(CodeGenerator* gen, Register result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL { __ xorl(result_, result_); }
+
+ private:
+  Register const result_;
+};
+
+
+class OutOfLineLoadNaN FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL { __ pcmpeqd(result_, result_); }
+
+ private:
+  XMMRegister const result_;
+};
+
+
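+// Slow path for kArchTruncateDoubleToI: spills the input to the stack and
+// computes the truncation via the MacroAssembler's SlowTruncateToI.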
+class OutOfLineTruncateDoubleToI FINAL : public OutOfLineCode {
+ public:
+  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
+                             XMMRegister input)
+      : OutOfLineCode(gen), result_(result), input_(input) {}
+
+  void Generate() FINAL {
+    __ subp(rsp, Immediate(kDoubleSize));
+    __ movsd(MemOperand(rsp, 0), input_);
+    __ SlowTruncateToI(result_, rsp, 0);
+    __ addp(rsp, Immediate(kDoubleSize));
+  }
+
+ private:
+  Register const result_;
+  XMMRegister const input_;
+};
+
+}  // namespace
+
+
+#define ASSEMBLE_UNOP(asm_instr)         \
+  do {                                   \
+    if (instr->Output()->IsRegister()) { \
+      __ asm_instr(i.OutputRegister());  \
+    } else {                             \
+      __ asm_instr(i.OutputOperand());   \
+    }                                    \
   } while (0)
 
 
-#define ASSEMBLE_SHIFT(asm_instr, width)                                 \
-  do {                                                                   \
-    if (HasImmediateInput(instr, 1)) {                                   \
-      __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
-    } else {                                                             \
-      __ asm_instr##_cl(i.OutputRegister());                             \
-    }                                                                    \
+#define ASSEMBLE_BINOP(asm_instr)                              \
+  do {                                                         \
+    if (HasImmediateInput(instr, 1)) {                         \
+      if (instr->InputAt(0)->IsRegister()) {                   \
+        __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
+      } else {                                                 \
+        __ asm_instr(i.InputOperand(0), i.InputImmediate(1));  \
+      }                                                        \
+    } else {                                                   \
+      if (instr->InputAt(1)->IsRegister()) {                   \
+        __ asm_instr(i.InputRegister(0), i.InputRegister(1));  \
+      } else {                                                 \
+        __ asm_instr(i.InputRegister(0), i.InputOperand(1));   \
+      }                                                        \
+    }                                                          \
   } while (0)
 
 
+#define ASSEMBLE_MULT(asm_instr)                              \
+  do {                                                        \
+    if (HasImmediateInput(instr, 1)) {                        \
+      if (instr->InputAt(0)->IsRegister()) {                  \
+        __ asm_instr(i.OutputRegister(), i.InputRegister(0),  \
+                     i.InputImmediate(1));                    \
+      } else {                                                \
+        __ asm_instr(i.OutputRegister(), i.InputOperand(0),   \
+                     i.InputImmediate(1));                    \
+      }                                                       \
+    } else {                                                  \
+      if (instr->InputAt(1)->IsRegister()) {                  \
+        __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
+      } else {                                                \
+        __ asm_instr(i.OutputRegister(), i.InputOperand(1));  \
+      }                                                       \
+    }                                                         \
+  } while (0)
+
+
+#define ASSEMBLE_SHIFT(asm_instr, width)                                   \
+  do {                                                                     \
+    if (HasImmediateInput(instr, 1)) {                                     \
+      if (instr->Output()->IsRegister()) {                                 \
+        __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
+      } else {                                                             \
+        __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1)));  \
+      }                                                                    \
+    } else {                                                               \
+      if (instr->Output()->IsRegister()) {                                 \
+        __ asm_instr##_cl(i.OutputRegister());                             \
+      } else {                                                             \
+        __ asm_instr##_cl(i.OutputOperand());                              \
+      }                                                                    \
+    }                                                                      \
+  } while (0)
+
+
+#define ASSEMBLE_DOUBLE_BINOP(asm_instr)                                \
+  do {                                                                  \
+    if (instr->InputAt(1)->IsDoubleRegister()) {                        \
+      __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
+    } else {                                                            \
+      __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1));        \
+    }                                                                   \
+  } while (0)
+
+
+#define ASSEMBLE_AVX_DOUBLE_BINOP(asm_instr)                           \
+  do {                                                                 \
+    CpuFeatureScope avx_scope(masm(), AVX);                            \
+    if (instr->InputAt(1)->IsDoubleRegister()) {                       \
+      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
+                   i.InputDoubleRegister(1));                          \
+    } else {                                                           \
+      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
+                   i.InputOperand(1));                                 \
+    }                                                                  \
+  } while (0)
+
+
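+// The ASSEMBLE_CHECKED_* macros below emit an inline bounds check against
+// the buffer length; out-of-bounds loads yield zero or NaN through the
+// out-of-line classes above, and out-of-bounds stores are simply skipped.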
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                               \
+  do {                                                                       \
+    auto result = i.OutputDoubleRegister();                                  \
+    auto buffer = i.InputRegister(0);                                        \
+    auto index1 = i.InputRegister(1);                                        \
+    auto index2 = i.InputInt32(2);                                           \
+    OutOfLineCode* ool;                                                      \
+    if (instr->InputAt(3)->IsRegister()) {                                   \
+      auto length = i.InputRegister(3);                                      \
+      DCHECK_EQ(0, index2);                                                  \
+      __ cmpl(index1, length);                                               \
+      ool = new (zone()) OutOfLineLoadNaN(this, result);                     \
+    } else {                                                                 \
+      auto length = i.InputInt32(3);                                         \
+      DCHECK_LE(index2, length);                                             \
+      __ cmpq(index1, Immediate(length - index2));                           \
+      class OutOfLineLoadFloat FINAL : public OutOfLineCode {                \
+       public:                                                               \
+        OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result,           \
+                           Register buffer, Register index1, int32_t index2, \
+                           int32_t length)                                   \
+            : OutOfLineCode(gen),                                            \
+              result_(result),                                               \
+              buffer_(buffer),                                               \
+              index1_(index1),                                               \
+              index2_(index2),                                               \
+              length_(length) {}                                             \
+                                                                             \
+        void Generate() FINAL {                                              \
+          __ leal(kScratchRegister, Operand(index1_, index2_));              \
+          __ pcmpeqd(result_, result_);                                      \
+          __ cmpl(kScratchRegister, Immediate(length_));                     \
+          __ j(above_equal, exit());                                         \
+          __ asm_instr(result_,                                              \
+                       Operand(buffer_, kScratchRegister, times_1, 0));      \
+        }                                                                    \
+                                                                             \
+       private:                                                              \
+        XMMRegister const result_;                                           \
+        Register const buffer_;                                              \
+        Register const index1_;                                              \
+        int32_t const index2_;                                               \
+        int32_t const length_;                                               \
+      };                                                                     \
+      ool = new (zone())                                                     \
+          OutOfLineLoadFloat(this, result, buffer, index1, index2, length);  \
+    }                                                                        \
+    __ j(above_equal, ool->entry());                                         \
+    __ asm_instr(result, Operand(buffer, index1, times_1, index2));          \
+    __ bind(ool->exit());                                                    \
+  } while (false)
+
+
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                               \
+  do {                                                                         \
+    auto result = i.OutputRegister();                                          \
+    auto buffer = i.InputRegister(0);                                          \
+    auto index1 = i.InputRegister(1);                                          \
+    auto index2 = i.InputInt32(2);                                             \
+    OutOfLineCode* ool;                                                        \
+    if (instr->InputAt(3)->IsRegister()) {                                     \
+      auto length = i.InputRegister(3);                                        \
+      DCHECK_EQ(0, index2);                                                    \
+      __ cmpl(index1, length);                                                 \
+      ool = new (zone()) OutOfLineLoadZero(this, result);                      \
+    } else {                                                                   \
+      auto length = i.InputInt32(3);                                           \
+      DCHECK_LE(index2, length);                                               \
+      __ cmpq(index1, Immediate(length - index2));                             \
+      class OutOfLineLoadInteger FINAL : public OutOfLineCode {                \
+       public:                                                                 \
+        OutOfLineLoadInteger(CodeGenerator* gen, Register result,              \
+                             Register buffer, Register index1, int32_t index2, \
+                             int32_t length)                                   \
+            : OutOfLineCode(gen),                                              \
+              result_(result),                                                 \
+              buffer_(buffer),                                                 \
+              index1_(index1),                                                 \
+              index2_(index2),                                                 \
+              length_(length) {}                                               \
+                                                                               \
+        void Generate() FINAL {                                                \
+          Label oob;                                                           \
+          __ leal(kScratchRegister, Operand(index1_, index2_));                \
+          __ cmpl(kScratchRegister, Immediate(length_));                       \
+          __ j(above_equal, &oob, Label::kNear);                               \
+          __ asm_instr(result_,                                                \
+                       Operand(buffer_, kScratchRegister, times_1, 0));        \
+          __ jmp(exit());                                                      \
+          __ bind(&oob);                                                       \
+          __ xorl(result_, result_);                                           \
+        }                                                                      \
+                                                                               \
+       private:                                                                \
+        Register const result_;                                                \
+        Register const buffer_;                                                \
+        Register const index1_;                                                \
+        int32_t const index2_;                                                 \
+        int32_t const length_;                                                 \
+      };                                                                       \
+      ool = new (zone())                                                       \
+          OutOfLineLoadInteger(this, result, buffer, index1, index2, length);  \
+    }                                                                          \
+    __ j(above_equal, ool->entry());                                           \
+    __ asm_instr(result, Operand(buffer, index1, times_1, index2));            \
+    __ bind(ool->exit());                                                      \
+  } while (false)
+
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)                              \
+  do {                                                                       \
+    auto buffer = i.InputRegister(0);                                        \
+    auto index1 = i.InputRegister(1);                                        \
+    auto index2 = i.InputInt32(2);                                           \
+    auto value = i.InputDoubleRegister(4);                                   \
+    if (instr->InputAt(3)->IsRegister()) {                                   \
+      auto length = i.InputRegister(3);                                      \
+      DCHECK_EQ(0, index2);                                                  \
+      Label done;                                                            \
+      __ cmpl(index1, length);                                               \
+      __ j(above_equal, &done, Label::kNear);                                \
+      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
+      __ bind(&done);                                                        \
+    } else {                                                                 \
+      auto length = i.InputInt32(3);                                         \
+      DCHECK_LE(index2, length);                                             \
+      __ cmpq(index1, Immediate(length - index2));                           \
+      class OutOfLineStoreFloat FINAL : public OutOfLineCode {               \
+       public:                                                               \
+        OutOfLineStoreFloat(CodeGenerator* gen, Register buffer,             \
+                            Register index1, int32_t index2, int32_t length, \
+                            XMMRegister value)                               \
+            : OutOfLineCode(gen),                                            \
+              buffer_(buffer),                                               \
+              index1_(index1),                                               \
+              index2_(index2),                                               \
+              length_(length),                                               \
+              value_(value) {}                                               \
+                                                                             \
+        void Generate() FINAL {                                              \
+          __ leal(kScratchRegister, Operand(index1_, index2_));              \
+          __ cmpl(kScratchRegister, Immediate(length_));                     \
+          __ j(above_equal, exit());                                         \
+          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),       \
+                       value_);                                              \
+        }                                                                    \
+                                                                             \
+       private:                                                              \
+        Register const buffer_;                                              \
+        Register const index1_;                                              \
+        int32_t const index2_;                                               \
+        int32_t const length_;                                               \
+        XMMRegister const value_;                                            \
+      };                                                                     \
+      auto ool = new (zone())                                                \
+          OutOfLineStoreFloat(this, buffer, index1, index2, length, value);  \
+      __ j(above_equal, ool->entry());                                       \
+      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
+      __ bind(ool->exit());                                                  \
+    }                                                                        \
+  } while (false)
+
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value)                  \
+  do {                                                                         \
+    auto buffer = i.InputRegister(0);                                          \
+    auto index1 = i.InputRegister(1);                                          \
+    auto index2 = i.InputInt32(2);                                             \
+    if (instr->InputAt(3)->IsRegister()) {                                     \
+      auto length = i.InputRegister(3);                                        \
+      DCHECK_EQ(0, index2);                                                    \
+      Label done;                                                              \
+      __ cmpl(index1, length);                                                 \
+      __ j(above_equal, &done, Label::kNear);                                  \
+      __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
+      __ bind(&done);                                                          \
+    } else {                                                                   \
+      auto length = i.InputInt32(3);                                           \
+      DCHECK_LE(index2, length);                                               \
+      __ cmpq(index1, Immediate(length - index2));                             \
+      class OutOfLineStoreInteger FINAL : public OutOfLineCode {               \
+       public:                                                                 \
+        OutOfLineStoreInteger(CodeGenerator* gen, Register buffer,             \
+                              Register index1, int32_t index2, int32_t length, \
+                              Value value)                                     \
+            : OutOfLineCode(gen),                                              \
+              buffer_(buffer),                                                 \
+              index1_(index1),                                                 \
+              index2_(index2),                                                 \
+              length_(length),                                                 \
+              value_(value) {}                                                 \
+                                                                               \
+        void Generate() FINAL {                                                \
+          __ leal(kScratchRegister, Operand(index1_, index2_));                \
+          __ cmpl(kScratchRegister, Immediate(length_));                       \
+          __ j(above_equal, exit());                                           \
+          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),         \
+                       value_);                                                \
+        }                                                                      \
+                                                                               \
+       private:                                                                \
+        Register const buffer_;                                                \
+        Register const index1_;                                                \
+        int32_t const index2_;                                                 \
+        int32_t const length_;                                                 \
+        Value const value_;                                                    \
+      };                                                                       \
+      auto ool = new (zone())                                                  \
+          OutOfLineStoreInteger(this, buffer, index1, index2, length, value);  \
+      __ j(above_equal, ool->entry());                                         \
+      __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
+      __ bind(ool->exit());                                                    \
+    }                                                                          \
+  } while (false)
+
+
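+// Binds |value| to either a Register or an Immediate and then expands the
+// shared implementation above with the matching type.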
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                \
+  do {                                                           \
+    if (instr->InputAt(4)->IsRegister()) {                       \
+      Register value = i.InputRegister(4);                       \
+      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register);  \
+    } else {                                                     \
+      Immediate value = i.InputImmediate(4);                     \
+      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
+    }                                                            \
+  } while (false)
+
+
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   X64OperandConverter i(this, instr);
@@ -230,7 +531,7 @@
       break;
     }
     case kArchJmp:
-      __ jmp(code_->GetLabel(i.InputBlock(0)));
+      AssembleArchJump(i.InputRpo(0));
       break;
     case kArchNop:
       // don't emit code for nops.
@@ -238,9 +539,19 @@
     case kArchRet:
       AssembleReturn();
       break;
-    case kArchTruncateDoubleToI:
-      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+    case kArchStackPointer:
+      __ movq(i.OutputRegister(), rsp);
       break;
+    case kArchTruncateDoubleToI: {
+      auto result = i.OutputRegister();
+      auto input = i.InputDoubleRegister(0);
+      auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
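+      // cvttsd2siq produces 0x8000000000000000 when the conversion fails.
+      // That sentinel is the only value for which subtracting 1 overflows,
+      // so the cmpq/j(overflow) pair dispatches exactly the failed cases.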
+      __ cvttsd2siq(result, input);
+      __ cmpq(result, Immediate(1));
+      __ j(overflow, ool->entry());
+      __ bind(ool->exit());
+      break;
+    }
     case kX64Add32:
       ASSEMBLE_BINOP(addl);
       break;
@@ -272,39 +583,23 @@
       ASSEMBLE_BINOP(testq);
       break;
     case kX64Imul32:
-      if (HasImmediateInput(instr, 1)) {
-        RegisterOrOperand input = i.InputRegisterOrOperand(0);
-        if (input.type == kRegister) {
-          __ imull(i.OutputRegister(), input.reg, i.InputImmediate(1));
-        } else {
-          __ movq(kScratchRegister, input.operand);
-          __ imull(i.OutputRegister(), kScratchRegister, i.InputImmediate(1));
-        }
-      } else {
-        RegisterOrOperand input = i.InputRegisterOrOperand(1);
-        if (input.type == kRegister) {
-          __ imull(i.OutputRegister(), input.reg);
-        } else {
-          __ imull(i.OutputRegister(), input.operand);
-        }
-      }
+      ASSEMBLE_MULT(imull);
       break;
     case kX64Imul:
-      if (HasImmediateInput(instr, 1)) {
-        RegisterOrOperand input = i.InputRegisterOrOperand(0);
-        if (input.type == kRegister) {
-          __ imulq(i.OutputRegister(), input.reg, i.InputImmediate(1));
-        } else {
-          __ movq(kScratchRegister, input.operand);
-          __ imulq(i.OutputRegister(), kScratchRegister, i.InputImmediate(1));
-        }
+      ASSEMBLE_MULT(imulq);
+      break;
+    case kX64ImulHigh32:
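+      // The one-operand imull multiplies rax by the operand and leaves the
+      // high 32 bits of the product in rdx (likewise mull below for the
+      // unsigned variant); the instruction selector presumably fixes rax/rdx.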
+      if (instr->InputAt(1)->IsRegister()) {
+        __ imull(i.InputRegister(1));
       } else {
-        RegisterOrOperand input = i.InputRegisterOrOperand(1);
-        if (input.type == kRegister) {
-          __ imulq(i.OutputRegister(), input.reg);
-        } else {
-          __ imulq(i.OutputRegister(), input.operand);
-        }
+        __ imull(i.InputOperand(1));
+      }
+      break;
+    case kX64UmulHigh32:
+      if (instr->InputAt(1)->IsRegister()) {
+        __ mull(i.InputRegister(1));
+      } else {
+        __ mull(i.InputOperand(1));
       }
       break;
     case kX64Idiv32:
@@ -323,42 +618,18 @@
       __ xorq(rdx, rdx);
       __ divq(i.InputRegister(1));
       break;
-    case kX64Not: {
-      RegisterOrOperand output = i.OutputRegisterOrOperand();
-      if (output.type == kRegister) {
-        __ notq(output.reg);
-      } else {
-        __ notq(output.operand);
-      }
+    case kX64Not:
+      ASSEMBLE_UNOP(notq);
       break;
-    }
-    case kX64Not32: {
-      RegisterOrOperand output = i.OutputRegisterOrOperand();
-      if (output.type == kRegister) {
-        __ notl(output.reg);
-      } else {
-        __ notl(output.operand);
-      }
+    case kX64Not32:
+      ASSEMBLE_UNOP(notl);
       break;
-    }
-    case kX64Neg: {
-      RegisterOrOperand output = i.OutputRegisterOrOperand();
-      if (output.type == kRegister) {
-        __ negq(output.reg);
-      } else {
-        __ negq(output.operand);
-      }
+    case kX64Neg:
+      ASSEMBLE_UNOP(negq);
       break;
-    }
-    case kX64Neg32: {
-      RegisterOrOperand output = i.OutputRegisterOrOperand();
-      if (output.type == kRegister) {
-        __ negl(output.reg);
-      } else {
-        __ negl(output.operand);
-      }
+    case kX64Neg32:
+      ASSEMBLE_UNOP(negl);
       break;
-    }
     case kX64Or32:
       ASSEMBLE_BINOP(orl);
       break;
@@ -395,26 +666,20 @@
     case kX64Ror:
       ASSEMBLE_SHIFT(rorq, 6);
       break;
-    case kSSEFloat64Cmp: {
-      RegisterOrOperand input = i.InputRegisterOrOperand(1);
-      if (input.type == kDoubleRegister) {
-        __ ucomisd(i.InputDoubleRegister(0), input.double_reg);
-      } else {
-        __ ucomisd(i.InputDoubleRegister(0), input.operand);
-      }
+    case kSSEFloat64Cmp:
+      ASSEMBLE_DOUBLE_BINOP(ucomisd);
       break;
-    }
     case kSSEFloat64Add:
-      __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      ASSEMBLE_DOUBLE_BINOP(addsd);
       break;
     case kSSEFloat64Sub:
-      __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      ASSEMBLE_DOUBLE_BINOP(subsd);
       break;
     case kSSEFloat64Mul:
-      __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      ASSEMBLE_DOUBLE_BINOP(mulsd);
       break;
     case kSSEFloat64Div:
-      __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      ASSEMBLE_DOUBLE_BINOP(divsd);
       break;
     case kSSEFloat64Mod: {
       __ subq(rsp, Immediate(kDoubleSize));
@@ -431,7 +696,8 @@
       __ fprem();
       // The following 2 instructions implicitly use rax.
       __ fnstsw_ax();
-      if (CpuFeatures::IsSupported(SAHF) && masm()->IsEnabled(SAHF)) {
+      if (CpuFeatures::IsSupported(SAHF)) {
+        CpuFeatureScope sahf_scope(masm(), SAHF);
         __ sahf();
       } else {
         __ shrl(rax, Immediate(8));
@@ -447,52 +713,97 @@
       __ addq(rsp, Immediate(kDoubleSize));
       break;
     }
-    case kSSEFloat64Sqrt: {
-      RegisterOrOperand input = i.InputRegisterOrOperand(0);
-      if (input.type == kDoubleRegister) {
-        __ sqrtsd(i.OutputDoubleRegister(), input.double_reg);
+    case kSSEFloat64Sqrt:
+      if (instr->InputAt(0)->IsDoubleRegister()) {
+        __ sqrtsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       } else {
-        __ sqrtsd(i.OutputDoubleRegister(), input.operand);
+        __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
       }
       break;
-    }
-    case kSSEFloat64ToInt32: {
-      RegisterOrOperand input = i.InputRegisterOrOperand(0);
-      if (input.type == kDoubleRegister) {
-        __ cvttsd2si(i.OutputRegister(), input.double_reg);
-      } else {
-        __ cvttsd2si(i.OutputRegister(), input.operand);
-      }
+    case kSSEFloat64Floor: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                 v8::internal::Assembler::kRoundDown);
       break;
     }
+    case kSSEFloat64Ceil: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                 v8::internal::Assembler::kRoundUp);
+      break;
+    }
+    case kSSEFloat64RoundTruncate: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                 v8::internal::Assembler::kRoundToZero);
+      break;
+    }
+    case kSSECvtss2sd:
+      if (instr->InputAt(0)->IsDoubleRegister()) {
+        __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      } else {
+        __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
+      }
+      break;
+    case kSSECvtsd2ss:
+      if (instr->InputAt(0)->IsDoubleRegister()) {
+        __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      } else {
+        __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
+      }
+      break;
+    case kSSEFloat64ToInt32:
+      if (instr->InputAt(0)->IsDoubleRegister()) {
+        __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
+      } else {
+        __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
+      }
+      break;
     case kSSEFloat64ToUint32: {
-      RegisterOrOperand input = i.InputRegisterOrOperand(0);
-      if (input.type == kDoubleRegister) {
-        __ cvttsd2siq(i.OutputRegister(), input.double_reg);
+      if (instr->InputAt(0)->IsDoubleRegister()) {
+        __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
       } else {
-        __ cvttsd2siq(i.OutputRegister(), input.operand);
+        __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
       }
-      __ andl(i.OutputRegister(), i.OutputRegister());  // clear upper bits.
-      // TODO(turbofan): generated code should not look at the upper 32 bits
-      // of the result, but those bits could escape to the outside world.
+      __ AssertZeroExtended(i.OutputRegister());
       break;
     }
-    case kSSEInt32ToFloat64: {
-      RegisterOrOperand input = i.InputRegisterOrOperand(0);
-      if (input.type == kRegister) {
-        __ cvtlsi2sd(i.OutputDoubleRegister(), input.reg);
+    case kSSEInt32ToFloat64:
+      if (instr->InputAt(0)->IsRegister()) {
+        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
       } else {
-        __ cvtlsi2sd(i.OutputDoubleRegister(), input.operand);
+        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
       }
       break;
-    }
-    case kSSEUint32ToFloat64: {
-      // TODO(turbofan): X64 SSE cvtqsi2sd should support operands.
-      __ cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
+    case kSSEUint32ToFloat64:
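+      // movl zero-extends the 32-bit input into the 64-bit scratch register,
+      // so the signed 64-bit conversion yields the unsigned 32-bit value.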
+      if (instr->InputAt(0)->IsRegister()) {
+        __ movl(kScratchRegister, i.InputRegister(0));
+      } else {
+        __ movl(kScratchRegister, i.InputOperand(0));
+      }
+      __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
       break;
-    }
+    case kAVXFloat64Add:
+      ASSEMBLE_AVX_DOUBLE_BINOP(vaddsd);
+      break;
+    case kAVXFloat64Sub:
+      ASSEMBLE_AVX_DOUBLE_BINOP(vsubsd);
+      break;
+    case kAVXFloat64Mul:
+      ASSEMBLE_AVX_DOUBLE_BINOP(vmulsd);
+      break;
+    case kAVXFloat64Div:
+      ASSEMBLE_AVX_DOUBLE_BINOP(vdivsd);
+      break;
     case kX64Movsxbl:
-      __ movsxbl(i.OutputRegister(), i.MemoryOperand());
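+      // Writing a 32-bit register implicitly clears the upper 32 bits, an
+      // invariant that AssertZeroExtended checks in debug code.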
+      if (instr->addressing_mode() != kMode_None) {
+        __ movsxbl(i.OutputRegister(), i.MemoryOperand());
+      } else if (instr->InputAt(0)->IsRegister()) {
+        __ movsxbl(i.OutputRegister(), i.InputRegister(0));
+      } else {
+        __ movsxbl(i.OutputRegister(), i.InputOperand(0));
+      }
+      __ AssertZeroExtended(i.OutputRegister());
       break;
     case kX64Movzxbl:
       __ movzxbl(i.OutputRegister(), i.MemoryOperand());
@@ -508,10 +819,18 @@
       break;
     }
     case kX64Movsxwl:
-      __ movsxwl(i.OutputRegister(), i.MemoryOperand());
+      if (instr->addressing_mode() != kMode_None) {
+        __ movsxwl(i.OutputRegister(), i.MemoryOperand());
+      } else if (instr->InputAt(0)->IsRegister()) {
+        __ movsxwl(i.OutputRegister(), i.InputRegister(0));
+      } else {
+        __ movsxwl(i.OutputRegister(), i.InputOperand(0));
+      }
+      __ AssertZeroExtended(i.OutputRegister());
       break;
     case kX64Movzxwl:
       __ movzxwl(i.OutputRegister(), i.MemoryOperand());
+      __ AssertZeroExtended(i.OutputRegister());
       break;
     case kX64Movw: {
       int index = 0;
@@ -526,15 +845,15 @@
     case kX64Movl:
       if (instr->HasOutput()) {
         if (instr->addressing_mode() == kMode_None) {
-          RegisterOrOperand input = i.InputRegisterOrOperand(0);
-          if (input.type == kRegister) {
-            __ movl(i.OutputRegister(), input.reg);
+          if (instr->InputAt(0)->IsRegister()) {
+            __ movl(i.OutputRegister(), i.InputRegister(0));
           } else {
-            __ movl(i.OutputRegister(), input.operand);
+            __ movl(i.OutputRegister(), i.InputOperand(0));
           }
         } else {
           __ movl(i.OutputRegister(), i.MemoryOperand());
         }
+        __ AssertZeroExtended(i.OutputRegister());
       } else {
         int index = 0;
         Operand operand = i.MemoryOperand(&index);
@@ -546,11 +865,10 @@
       }
       break;
     case kX64Movsxlq: {
-      RegisterOrOperand input = i.InputRegisterOrOperand(0);
-      if (input.type == kRegister) {
-        __ movsxlq(i.OutputRegister(), input.reg);
+      if (instr->InputAt(0)->IsRegister()) {
+        __ movsxlq(i.OutputRegister(), i.InputRegister(0));
       } else {
-        __ movsxlq(i.OutputRegister(), input.operand);
+        __ movsxlq(i.OutputRegister(), i.InputOperand(0));
       }
       break;
     }
@@ -570,12 +888,10 @@
     case kX64Movss:
       if (instr->HasOutput()) {
         __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
-        __ cvtss2sd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
       } else {
         int index = 0;
         Operand operand = i.MemoryOperand(&index);
-        __ cvtsd2ss(xmm0, i.InputDoubleRegister(index));
-        __ movss(operand, xmm0);
+        __ movss(operand, i.InputDoubleRegister(index));
       }
       break;
     case kX64Movsd:
@@ -587,15 +903,57 @@
         __ movsd(operand, i.InputDoubleRegister(index));
       }
       break;
+    case kX64Lea32: {
+      AddressingMode mode = AddressingModeField::decode(instr->opcode());
+      // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
+      // and addressing mode just happens to work out. The "addl"/"subl" forms
+      // in these cases are faster based on measurements.
+      if (i.InputRegister(0).is(i.OutputRegister())) {
+        if (mode == kMode_MRI) {
+          int32_t constant_summand = i.InputInt32(1);
+          if (constant_summand > 0) {
+            __ addl(i.OutputRegister(), Immediate(constant_summand));
+          } else if (constant_summand < 0) {
+            __ subl(i.OutputRegister(), Immediate(-constant_summand));
+          }
+        } else if (mode == kMode_MR1) {
+          if (i.InputRegister(1).is(i.OutputRegister())) {
+            __ shll(i.OutputRegister(), Immediate(1));
+          } else {
+            __ leal(i.OutputRegister(), i.MemoryOperand());
+          }
+        } else if (mode == kMode_M2) {
+          __ shll(i.OutputRegister(), Immediate(1));
+        } else if (mode == kMode_M4) {
+          __ shll(i.OutputRegister(), Immediate(2));
+        } else if (mode == kMode_M8) {
+          __ shll(i.OutputRegister(), Immediate(3));
+        } else {
+          __ leal(i.OutputRegister(), i.MemoryOperand());
+        }
+      } else {
+        __ leal(i.OutputRegister(), i.MemoryOperand());
+      }
+      __ AssertZeroExtended(i.OutputRegister());
+      break;
+    }
+    case kX64Lea:
+      __ leaq(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kX64Dec32:
+      __ decl(i.OutputRegister());
+      break;
+    case kX64Inc32:
+      __ incl(i.OutputRegister());
+      break;
     case kX64Push:
       if (HasImmediateInput(instr, 0)) {
         __ pushq(i.InputImmediate(0));
       } else {
-        RegisterOrOperand input = i.InputRegisterOrOperand(0);
-        if (input.type == kRegister) {
-          __ pushq(input.reg);
+        if (instr->InputAt(0)->IsRegister()) {
+          __ pushq(i.InputRegister(0));
         } else {
-          __ pushq(input.operand);
+          __ pushq(i.InputOperand(0));
         }
       }
       break;
@@ -606,31 +964,59 @@
       __ movsxlq(index, index);
       __ movq(Operand(object, index, times_1, 0), value);
       __ leaq(index, Operand(object, index, times_1, 0));
-      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
-                                ? kSaveFPRegs
-                                : kDontSaveFPRegs;
+      SaveFPRegsMode mode =
+          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
       __ RecordWrite(object, index, value, mode);
       break;
     }
+    case kCheckedLoadInt8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
+      break;
+    case kCheckedLoadUint8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
+      break;
+    case kCheckedLoadInt16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
+      break;
+    case kCheckedLoadUint16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
+      break;
+    case kCheckedLoadWord32:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
+      break;
+    case kCheckedLoadFloat32:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
+      break;
+    case kCheckedLoadFloat64:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
+      break;
+    case kCheckedStoreWord8:
+      ASSEMBLE_CHECKED_STORE_INTEGER(movb);
+      break;
+    case kCheckedStoreWord16:
+      ASSEMBLE_CHECKED_STORE_INTEGER(movw);
+      break;
+    case kCheckedStoreWord32:
+      ASSEMBLE_CHECKED_STORE_INTEGER(movl);
+      break;
+    case kCheckedStoreFloat32:
+      ASSEMBLE_CHECKED_STORE_FLOAT(movss);
+      break;
+    case kCheckedStoreFloat64:
+      ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
+      break;
   }
 }
 
 
 // Assembles branches after this instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr,
-                                       FlagsCondition condition) {
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
   X64OperandConverter i(this, instr);
-  Label done;
-
-  // Emit a branch. The true and false targets are always the last two inputs
-  // to the instruction.
-  BasicBlock* tblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 2);
-  BasicBlock* fblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 1);
-  bool fallthru = IsNextInAssemblyOrder(fblock);
-  Label* tlabel = code()->GetLabel(tblock);
-  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
-  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
-  switch (condition) {
+  Label::Distance flabel_distance =
+      branch->fallthru ? Label::kNear : Label::kFar;
+  Label* tlabel = branch->true_label;
+  Label* flabel = branch->false_label;
+  switch (branch->condition) {
     case kUnorderedEqual:
       __ j(parity_even, flabel, flabel_distance);
     // Fall through.
@@ -686,8 +1072,12 @@
       __ j(no_overflow, tlabel);
       break;
   }
-  if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
-  __ bind(&done);
+  if (!branch->fallthru) __ jmp(flabel, flabel_distance);
+}
+
+
+void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+  if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
 }
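
Note on the AssembleArchBranch/AssembleArchJump split: fallthrough knowledge now arrives via BranchInfo and the block's position in assembly order, so a branch no longer binds a local done label, and an unconditional transfer emits nothing when its target is the next block. A sketch of the elision rule, with a stand-in for BasicBlock::RpoNumber:

    // Blocks are laid out in reverse-postorder; a jump whose target is the
    // next block in that order can simply fall through, so no jmp is emitted.
    struct RpoNumber { int index; };

    bool NeedsJmp(RpoNumber target, RpoNumber next_in_assembly_order) {
      return target.index != next_in_assembly_order.index;
    }
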
 
 
@@ -700,7 +1090,7 @@
   // Materialize a full 64-bit 1 or 0 value. The result register is always the
   // last output of the instruction.
   Label check;
-  DCHECK_NE(0, instr->OutputCount());
+  DCHECK_NE(0, static_cast<int>(instr->OutputCount()));
   Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
   Condition cc = no_condition;
   switch (condition) {
@@ -802,27 +1192,10 @@
       frame()->SetRegisterSaveAreaSize(register_save_area_size);
     }
   } else if (descriptor->IsJSFunctionCall()) {
-    CompilationInfo* info = linkage()->info();
+    CompilationInfo* info = this->info();
     __ Prologue(info->IsCodePreAgingActive());
     frame()->SetRegisterSaveAreaSize(
         StandardFrameConstants::kFixedFrameSizeFromFp);
-
-    // Sloppy mode functions and builtins need to replace the receiver with the
-    // global proxy when called as functions (without an explicit receiver
-    // object).
-    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
-    if (info->strict_mode() == SLOPPY && !info->is_native()) {
-      Label ok;
-      StackArgumentsAccessor args(rbp, info->scope()->num_parameters());
-      __ movp(rcx, args.GetReceiverOperand());
-      __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
-      __ j(not_equal, &ok, Label::kNear);
-      __ movp(rcx, GlobalObjectOperand());
-      __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
-      __ movp(args.GetReceiverOperand(), rcx);
-      __ bind(&ok);
-    }
-
   } else {
     __ StubPrologue();
     frame()->SetRegisterSaveAreaSize(
@@ -899,31 +1272,57 @@
     }
   } else if (source->IsConstant()) {
     ConstantOperand* constant_source = ConstantOperand::cast(source);
+    Constant src = g.ToConstant(constant_source);
     if (destination->IsRegister() || destination->IsStackSlot()) {
       Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                                : kScratchRegister;
-      Immediate64 imm = g.ToImmediate64(constant_source);
-      switch (imm.type) {
-        case kImm64Value:
-          __ Set(dst, imm.value);
+      switch (src.type()) {
+        case Constant::kInt32:
+          // TODO(dcarney): don't need scratch in this case.
+          __ Set(dst, src.ToInt32());
           break;
-        case kImm64Reference:
-          __ Move(dst, imm.reference);
+        case Constant::kInt64:
+          __ Set(dst, src.ToInt64());
           break;
-        case kImm64Handle:
-          __ Move(dst, imm.handle);
+        case Constant::kFloat32:
+          __ Move(dst,
+                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+          break;
+        case Constant::kFloat64:
+          __ Move(dst,
+                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+          break;
+        case Constant::kExternalReference:
+          __ Move(dst, src.ToExternalReference());
+          break;
+        case Constant::kHeapObject:
+          __ Move(dst, src.ToHeapObject());
+          break;
+        case Constant::kRpoNumber:
+          UNREACHABLE();  // TODO(dcarney): load of labels on x64.
           break;
       }
       if (destination->IsStackSlot()) {
         __ movq(g.ToOperand(destination), kScratchRegister);
       }
-    } else {
-      __ movq(kScratchRegister,
-              bit_cast<uint64_t, double>(g.ToDouble(constant_source)));
+    } else if (src.type() == Constant::kFloat32) {
+      // TODO(turbofan): Can we do better here?
+      uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
       if (destination->IsDoubleRegister()) {
-        __ movq(g.ToDoubleRegister(destination), kScratchRegister);
+        __ Move(g.ToDoubleRegister(destination), src_const);
       } else {
         DCHECK(destination->IsDoubleStackSlot());
+        Operand dst = g.ToOperand(destination);
+        __ movl(dst, Immediate(src_const));
+      }
+    } else {
+      DCHECK_EQ(Constant::kFloat64, src.type());
+      uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
+      if (destination->IsDoubleRegister()) {
+        __ Move(g.ToDoubleRegister(destination), src_const);
+      } else {
+        DCHECK(destination->IsDoubleStackSlot());
+        __ movq(kScratchRegister, src_const);
         __ movq(g.ToOperand(destination), kScratchRegister);
       }
     }
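
Note on the constant moves: kFloat32 and kFloat64 constants headed for double registers or double stack slots are written as raw IEEE-754 bit patterns via bit_cast, with no heap number allocation. A standalone illustration of that bit view, using std::memcpy as the portable equivalent of bit_cast:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    uint32_t Float32Bits(float value) {
      uint32_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // what bit_cast<uint32_t> does
      return bits;
    }

    int main() {
      // movl dst, $0x3F800000 stores exactly these bits for 1.0f.
      assert(Float32Bits(1.0f) == 0x3F800000u);
      assert(Float32Bits(0.0f) == 0u);
      return 0;
    }
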
@@ -985,7 +1384,7 @@
     __ movsd(xmm0, src);
     __ movsd(src, dst);
     __ movsd(dst, xmm0);
-  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
     // XMM register-memory swap.  We rely on having xmm0
     // available as a fixed scratch register.
     XMMRegister src = g.ToDoubleRegister(source);
@@ -1005,7 +1404,7 @@
 
 void CodeGenerator::EnsureSpaceForLazyDeopt() {
   int space_needed = Deoptimizer::patch_size();
-  if (!linkage()->info()->IsStub()) {
+  if (!info()->IsStub()) {
     // Ensure that we have enough space after the previous lazy-bailout
     // instruction for patching the code here.
     int current_pc = masm()->pc_offset();
diff --git a/src/compiler/x64/instruction-codes-x64.h b/src/compiler/x64/instruction-codes-x64.h
index dfad203..77e3e52 100644
--- a/src/compiler/x64/instruction-codes-x64.h
+++ b/src/compiler/x64/instruction-codes-x64.h
@@ -28,6 +28,8 @@
   V(X64Sub32)                      \
   V(X64Imul)                       \
   V(X64Imul32)                     \
+  V(X64ImulHigh32)                 \
+  V(X64UmulHigh32)                 \
   V(X64Idiv)                       \
   V(X64Idiv32)                     \
   V(X64Udiv)                       \
@@ -51,10 +53,19 @@
   V(SSEFloat64Div)                 \
   V(SSEFloat64Mod)                 \
   V(SSEFloat64Sqrt)                \
+  V(SSEFloat64Floor)               \
+  V(SSEFloat64Ceil)                \
+  V(SSEFloat64RoundTruncate)       \
+  V(SSECvtss2sd)                   \
+  V(SSECvtsd2ss)                   \
   V(SSEFloat64ToInt32)             \
   V(SSEFloat64ToUint32)            \
   V(SSEInt32ToFloat64)             \
   V(SSEUint32ToFloat64)            \
+  V(AVXFloat64Add)                 \
+  V(AVXFloat64Sub)                 \
+  V(AVXFloat64Mul)                 \
+  V(AVXFloat64Div)                 \
   V(X64Movsxbl)                    \
   V(X64Movzxbl)                    \
   V(X64Movb)                       \
@@ -66,6 +77,10 @@
   V(X64Movq)                       \
   V(X64Movsd)                      \
   V(X64Movss)                      \
+  V(X64Lea32)                      \
+  V(X64Lea)                        \
+  V(X64Dec32)                      \
+  V(X64Inc32)                      \
   V(X64Push)                       \
   V(X64StoreWriteBarrier)
 
@@ -77,22 +92,30 @@
 //
 // We use the following local notation for addressing modes:
 //
-// R = register
-// O = register or stack slot
-// D = double register
-// I = immediate (handle, external, int32)
-// MR = [register]
-// MI = [immediate]
-// MRN = [register + register * N in {1, 2, 4, 8}]
-// MRI = [register + immediate]
-// MRNI = [register + register * N in {1, 2, 4, 8} + immediate]
+// M = memory operand
+// R = base register
+// N = index register * N for N in {1, 2, 4, 8}
+// I = immediate displacement (32-bit signed integer)
+
 #define TARGET_ADDRESSING_MODE_LIST(V) \
-  V(MR)   /* [%r1] */                  \
-  V(MRI)  /* [%r1 + K] */              \
-  V(MR1I) /* [%r1 + %r2 + K] */        \
+  V(MR)   /* [%r1            ] */      \
+  V(MRI)  /* [%r1         + K] */      \
+  V(MR1)  /* [%r1 + %r2*1    ] */      \
+  V(MR2)  /* [%r1 + %r2*2    ] */      \
+  V(MR4)  /* [%r1 + %r2*4    ] */      \
+  V(MR8)  /* [%r1 + %r2*8    ] */      \
+  V(MR1I) /* [%r1 + %r2*1 + K] */      \
   V(MR2I) /* [%r1 + %r2*2 + K] */      \
-  V(MR4I) /* [%r1 + %r2*4 + K] */      \
-  V(MR8I) /* [%r1 + %r2*8 + K] */
+  V(MR4I) /* [%r1 + %r2*4 + K] */      \
+  V(MR8I) /* [%r1 + %r2*8 + K] */      \
+  V(M1)   /* [      %r2*1    ] */      \
+  V(M2)   /* [      %r2*2    ] */      \
+  V(M4)   /* [      %r2*4    ] */      \
+  V(M8)   /* [      %r2*8    ] */      \
+  V(M1I)  /* [      %r2*1 + K] */      \
+  V(M2I)  /* [      %r2*2 + K] */      \
+  V(M4I)  /* [      %r2*4 + K] */      \
+  V(M8I)  /* [      %r2*8 + K] */
 
 }  // namespace compiler
 }  // namespace internal
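
Note on the naming scheme in the list above: M marks a memory operand, R a base register, a digit the index scale, and I an immediate displacement. A tiny classifier reproducing the names, with a hypothetical struct standing in for V8's matcher results:

    #include <string>

    // Hypothetical shape of an effective address [base + index*scale + K].
    struct EffectiveAddress {
      bool has_base;   // %r1 present
      bool has_index;  // %r2 present
      int scale;       // 1, 2, 4 or 8; meaningful only when has_index
      bool has_disp;   // K present
    };

    std::string ModeName(const EffectiveAddress& ea) {
      std::string name = "M";                               // memory operand
      if (ea.has_base) name += "R";                         // base register
      if (ea.has_index) name += std::to_string(ea.scale);   // index scale
      if (ea.has_disp) name += "I";                         // displacement
      return name;  // e.g. {base, index*4, K} -> "MR4I", {index*8} -> "M8"
    }
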
diff --git a/src/compiler/x64/instruction-selector-x64-unittest.cc b/src/compiler/x64/instruction-selector-x64-unittest.cc
deleted file mode 100644
index 22f0bce..0000000
--- a/src/compiler/x64/instruction-selector-x64-unittest.cc
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/instruction-selector-unittest.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// -----------------------------------------------------------------------------
-// Conversions.
-
-
-TEST_F(InstructionSelectorTest, ChangeInt32ToInt64WithParameter) {
-  StreamBuilder m(this, kMachInt64, kMachInt32);
-  m.Return(m.ChangeInt32ToInt64(m.Parameter(0)));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(kX64Movsxlq, s[0]->arch_opcode());
-}
-
-
-TEST_F(InstructionSelectorTest, ChangeUint32ToUint64WithParameter) {
-  StreamBuilder m(this, kMachUint64, kMachUint32);
-  m.Return(m.ChangeUint32ToUint64(m.Parameter(0)));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
-}
-
-
-TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
-  StreamBuilder m(this, kMachInt32, kMachInt64);
-  m.Return(m.TruncateInt64ToInt32(m.Parameter(0)));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
-}
-
-
-// -----------------------------------------------------------------------------
-// Loads and stores
-
-namespace {
-
-struct MemoryAccess {
-  MachineType type;
-  ArchOpcode load_opcode;
-  ArchOpcode store_opcode;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
-  OStringStream ost;
-  ost << memacc.type;
-  return os << ost.c_str();
-}
-
-
-static const MemoryAccess kMemoryAccesses[] = {
-    {kMachInt8, kX64Movsxbl, kX64Movb},
-    {kMachUint8, kX64Movzxbl, kX64Movb},
-    {kMachInt16, kX64Movsxwl, kX64Movw},
-    {kMachUint16, kX64Movzxwl, kX64Movw},
-    {kMachInt32, kX64Movl, kX64Movl},
-    {kMachUint32, kX64Movl, kX64Movl},
-    {kMachInt64, kX64Movq, kX64Movq},
-    {kMachUint64, kX64Movq, kX64Movq},
-    {kMachFloat32, kX64Movss, kX64Movss},
-    {kMachFloat64, kX64Movsd, kX64Movsd}};
-
-}  // namespace
-
-
-typedef InstructionSelectorTestWithParam<MemoryAccess>
-    InstructionSelectorMemoryAccessTest;
-
-
-TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
-  const MemoryAccess memacc = GetParam();
-  StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
-  m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(1U, s[0]->OutputCount());
-}
-
-
-TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
-  const MemoryAccess memacc = GetParam();
-  StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
-  m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
-  m.Return(m.Int32Constant(0));
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
-  EXPECT_EQ(3U, s[0]->InputCount());
-  EXPECT_EQ(0U, s[0]->OutputCount());
-}
-
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
-                        InstructionSelectorMemoryAccessTest,
-                        ::testing::ValuesIn(kMemoryAccesses));
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index 5fe7bad..aba480d 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -20,37 +20,86 @@
                                            Register::ToAllocationIndex(reg));
   }
 
-  InstructionOperand* UseByteRegister(Node* node) {
-    // TODO(dcarney): relax constraint.
-    return UseFixed(node, rdx);
-  }
-
-  InstructionOperand* UseImmediate64(Node* node) { return UseImmediate(node); }
-
   bool CanBeImmediate(Node* node) {
     switch (node->opcode()) {
       case IrOpcode::kInt32Constant:
         return true;
+      case IrOpcode::kInt64Constant: {
+        const int64_t value = OpParameter<int64_t>(node);
+        return value == static_cast<int64_t>(static_cast<int32_t>(value));
+      }
       default:
         return false;
     }
   }
 
-  bool CanBeImmediate64(Node* node) {
-    switch (node->opcode()) {
-      case IrOpcode::kInt32Constant:
-        return true;
-      case IrOpcode::kNumberConstant:
-        return true;
-      case IrOpcode::kHeapConstant: {
-        // Constants in new space cannot be used as immediates in V8 because
-        // the GC does not scan code objects when collecting the new generation.
-        Unique<HeapObject> value = OpParameter<Unique<HeapObject> >(node);
-        return !isolate()->heap()->InNewSpace(*value.handle());
+  AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent,
+                                             Node* base, Node* displacement,
+                                             InstructionOperand* inputs[],
+                                             size_t* input_count) {
+    AddressingMode mode = kMode_MRI;
+    if (base != NULL) {
+      inputs[(*input_count)++] = UseRegister(base);
+      if (index != NULL) {
+        DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
+        inputs[(*input_count)++] = UseRegister(index);
+        if (displacement != NULL) {
+          inputs[(*input_count)++] = UseImmediate(displacement);
+          static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
+                                                       kMode_MR4I, kMode_MR8I};
+          mode = kMRnI_modes[scale_exponent];
+        } else {
+          static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
+                                                      kMode_MR4, kMode_MR8};
+          mode = kMRn_modes[scale_exponent];
+        }
+      } else {
+        if (displacement == NULL) {
+          mode = kMode_MR;
+        } else {
+          inputs[(*input_count)++] = UseImmediate(displacement);
+          mode = kMode_MRI;
+        }
       }
-      default:
-        return false;
+    } else {
+      DCHECK(index != NULL);
+      DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
+      inputs[(*input_count)++] = UseRegister(index);
+      if (displacement != NULL) {
+        inputs[(*input_count)++] = UseImmediate(displacement);
+        static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
+                                                    kMode_M4I, kMode_M8I};
+        mode = kMnI_modes[scale_exponent];
+      } else {
+        static const AddressingMode kMn_modes[] = {kMode_MR, kMode_MR1,
+                                                   kMode_M4, kMode_M8};
+        mode = kMn_modes[scale_exponent];
+        if (mode == kMode_MR1) {
+          // [%r1 + %r1*1] has a smaller encoding than [%r1*2+0]
+          inputs[(*input_count)++] = UseRegister(index);
+        }
+      }
     }
+    return mode;
+  }
+
+  AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
+                                                  InstructionOperand* inputs[],
+                                                  size_t* input_count) {
+    BaseWithIndexAndDisplacement64Matcher m(operand, true);
+    DCHECK(m.matches());
+    if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) {
+      return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
+                                         m.displacement(), inputs, input_count);
+    } else {
+      inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
+      inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
+      return kMode_MR1;
+    }
+  }
+
+  bool CanBeBetterLeftOperand(Node* node) const {
+    return !selector()->IsLive(node);
   }
 };
 
@@ -59,11 +108,8 @@
   MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
   MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
   X64OperandGenerator g(this);
-  Node* base = node->InputAt(0);
-  Node* index = node->InputAt(1);
 
   ArchOpcode opcode;
-  // TODO(titzer): signed/unsigned small loads
   switch (rep) {
     case kRepFloat32:
       opcode = kX64Movss;
@@ -89,18 +135,15 @@
       UNREACHABLE();
       return;
   }
-  if (g.CanBeImmediate(base)) {
-    // load [#base + %index]
-    Emit(opcode | AddressingModeField::encode(kMode_MRI),
-         g.DefineAsRegister(node), g.UseRegister(index), g.UseImmediate(base));
-  } else if (g.CanBeImmediate(index)) {  // load [%base + #index]
-    Emit(opcode | AddressingModeField::encode(kMode_MRI),
-         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
-  } else {  // load [%base + %index + K]
-    Emit(opcode | AddressingModeField::encode(kMode_MR1I),
-         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
-  }
-  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+
+  InstructionOperand* outputs[1];
+  outputs[0] = g.DefineAsRegister(node);
+  InstructionOperand* inputs[3];
+  size_t input_count = 0;
+  AddressingMode mode =
+      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+  InstructionCode code = opcode | AddressingModeField::encode(mode);
+  Emit(code, 1, outputs, input_count, inputs);
 }
 
 
@@ -124,14 +167,6 @@
     return;
   }
   DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
-  InstructionOperand* val;
-  if (g.CanBeImmediate(value)) {
-    val = g.UseImmediate(value);
-  } else if (rep == kRepWord8 || rep == kRepBit) {
-    val = g.UseByteRegister(value);
-  } else {
-    val = g.UseRegister(value);
-  }
   ArchOpcode opcode;
   switch (rep) {
     case kRepFloat32:
@@ -158,18 +193,112 @@
       UNREACHABLE();
       return;
   }
-  if (g.CanBeImmediate(base)) {
-    // store [#base + %index], %|#value
-    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
-         g.UseRegister(index), g.UseImmediate(base), val);
-  } else if (g.CanBeImmediate(index)) {  // store [%base + #index], %|#value
-    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
-         g.UseRegister(base), g.UseImmediate(index), val);
-  } else {  // store [%base + %index], %|#value
-    Emit(opcode | AddressingModeField::encode(kMode_MR1I), NULL,
-         g.UseRegister(base), g.UseRegister(index), val);
+  InstructionOperand* inputs[4];
+  size_t input_count = 0;
+  AddressingMode mode =
+      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+  InstructionCode code = opcode | AddressingModeField::encode(mode);
+  InstructionOperand* value_operand =
+      g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
+  inputs[input_count++] = value_operand;
+  Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs);
+}
+
+
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  MachineType typ = TypeOf(OpParameter<MachineType>(node));
+  X64OperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedLoadWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedLoadFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedLoadFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
   }
-  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+  if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
+    Int32Matcher mlength(length);
+    Int32BinopMatcher moffset(offset);
+    if (mlength.HasValue() && moffset.right().HasValue() &&
+        moffset.right().Value() >= 0 &&
+        mlength.Value() >= moffset.right().Value()) {
+      Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
+           g.UseRegister(moffset.left().node()),
+           g.UseImmediate(moffset.right().node()), g.UseImmediate(length));
+      return;
+    }
+  }
+  InstructionOperand* length_operand =
+      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
+  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
+       g.UseRegister(offset), g.TempImmediate(0), length_operand);
+}
+
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  X64OperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  Node* const value = node->InputAt(3);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = kCheckedStoreWord8;
+      break;
+    case kRepWord16:
+      opcode = kCheckedStoreWord16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedStoreWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedStoreFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedStoreFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  InstructionOperand* value_operand =
+      g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
+  if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
+    Int32Matcher mlength(length);
+    Int32BinopMatcher moffset(offset);
+    if (mlength.HasValue() && moffset.right().HasValue() &&
+        moffset.right().Value() >= 0 &&
+        mlength.Value() >= moffset.right().Value()) {
+      Emit(opcode, nullptr, g.UseRegister(buffer),
+           g.UseRegister(moffset.left().node()),
+           g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
+           value_operand);
+      return;
+    }
+  }
+  InstructionOperand* length_operand =
+      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
+  Emit(opcode, nullptr, g.UseRegister(buffer), g.UseRegister(offset),
+       g.TempImmediate(0), length_operand, value_operand);
 }
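
Note on the Int32Add folding in VisitCheckedLoad and VisitCheckedStore: the guard is purely static, requiring the added constant to be known, non-negative, and at most the constant length, so the remaining dynamic check on the variable part of the offset still covers the access. The same condition isolated as plain arithmetic, outside V8's matcher API:

    #include <cstdint>

    // offset = index + k; k may become an immediate displacement only if
    // 0 <= k <= length, mirroring the HasValue()/Value() checks above.
    bool CanFoldCheckedAccessOffset(int32_t k, int32_t length) {
      return k >= 0 && length >= k;
    }
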
 
 
@@ -178,20 +307,35 @@
                        InstructionCode opcode, FlagsContinuation* cont) {
   X64OperandGenerator g(selector);
   Int32BinopMatcher m(node);
+  Node* left = m.left().node();
+  Node* right = m.right().node();
   InstructionOperand* inputs[4];
   size_t input_count = 0;
   InstructionOperand* outputs[2];
   size_t output_count = 0;
 
   // TODO(turbofan): match complex addressing modes.
-  // TODO(turbofan): if commutative, pick the non-live-in operand as the left as
-  // this might be the last use and therefore its register can be reused.
-  if (g.CanBeImmediate(m.right().node())) {
-    inputs[input_count++] = g.Use(m.left().node());
-    inputs[input_count++] = g.UseImmediate(m.right().node());
+  if (left == right) {
+    // If both inputs refer to the same operand, enforce allocating a register
+    // for both of them to ensure that we don't end up generating code like
+    // this:
+    //
+    //   mov rax, [rbp-0x10]
+    //   add rax, [rbp-0x10]
+    //   jo label
+    InstructionOperand* const input = g.UseRegister(left);
+    inputs[input_count++] = input;
+    inputs[input_count++] = input;
+  } else if (g.CanBeImmediate(right)) {
+    inputs[input_count++] = g.UseRegister(left);
+    inputs[input_count++] = g.UseImmediate(right);
   } else {
-    inputs[input_count++] = g.UseRegister(m.left().node());
-    inputs[input_count++] = g.Use(m.right().node());
+    if (node->op()->HasProperty(Operator::kCommutative) &&
+        g.CanBeBetterLeftOperand(right)) {
+      std::swap(left, right);
+    }
+    inputs[input_count++] = g.UseRegister(left);
+    inputs[input_count++] = g.Use(right);
   }
 
   if (cont->IsBranch()) {
@@ -204,8 +348,8 @@
     outputs[output_count++] = g.DefineAsRegister(cont->result());
   }
 
-  DCHECK_NE(0, input_count);
-  DCHECK_NE(0, output_count);
+  DCHECK_NE(0, static_cast<int>(input_count));
+  DCHECK_NE(0, static_cast<int>(output_count));
   DCHECK_GE(arraysize(inputs), input_count);
   DCHECK_GE(arraysize(outputs), output_count);
 
@@ -247,7 +391,7 @@
   X64OperandGenerator g(this);
   Uint32BinopMatcher m(node);
   if (m.right().Is(-1)) {
-    Emit(kX64Not32, g.DefineSameAsFirst(node), g.Use(m.left().node()));
+    Emit(kX64Not32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
   } else {
     VisitBinop(this, node, kX64Xor32);
   }
@@ -258,33 +402,28 @@
   X64OperandGenerator g(this);
   Uint64BinopMatcher m(node);
   if (m.right().Is(-1)) {
-    Emit(kX64Not, g.DefineSameAsFirst(node), g.Use(m.left().node()));
+    Emit(kX64Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
   } else {
     VisitBinop(this, node, kX64Xor);
   }
 }
 
 
+namespace {
+
 // Shared routine for multiple 32-bit shift operations.
 // TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
-static void VisitWord32Shift(InstructionSelector* selector, Node* node,
-                             ArchOpcode opcode) {
+void VisitWord32Shift(InstructionSelector* selector, Node* node,
+                      ArchOpcode opcode) {
   X64OperandGenerator g(selector);
-  Node* left = node->InputAt(0);
-  Node* right = node->InputAt(1);
+  Int32BinopMatcher m(node);
+  Node* left = m.left().node();
+  Node* right = m.right().node();
 
-  // TODO(turbofan): assembler only supports some addressing modes for shifts.
   if (g.CanBeImmediate(right)) {
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseImmediate(right));
   } else {
-    Int32BinopMatcher m(node);
-    if (m.right().IsWord32And()) {
-      Int32BinopMatcher mright(right);
-      if (mright.right().Is(0x1F)) {
-        right = mright.left().node();
-      }
-    }
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseFixed(right, rcx));
   }
@@ -293,18 +432,17 @@
 
 // Shared routine for multiple 64-bit shift operations.
 // TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
-static void VisitWord64Shift(InstructionSelector* selector, Node* node,
-                             ArchOpcode opcode) {
+void VisitWord64Shift(InstructionSelector* selector, Node* node,
+                      ArchOpcode opcode) {
   X64OperandGenerator g(selector);
-  Node* left = node->InputAt(0);
-  Node* right = node->InputAt(1);
+  Int64BinopMatcher m(node);
+  Node* left = m.left().node();
+  Node* right = m.right().node();
 
-  // TODO(turbofan): assembler only supports some addressing modes for shifts.
   if (g.CanBeImmediate(right)) {
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseImmediate(right));
   } else {
-    Int64BinopMatcher m(node);
     if (m.right().IsWord64And()) {
       Int64BinopMatcher mright(right);
       if (mright.right().Is(0x3F)) {
@@ -317,12 +455,54 @@
 }
 
 
+void EmitLea(InstructionSelector* selector, InstructionCode opcode,
+             Node* result, Node* index, int scale, Node* base,
+             Node* displacement) {
+  X64OperandGenerator g(selector);
+
+  InstructionOperand* inputs[4];
+  size_t input_count = 0;
+  AddressingMode mode = g.GenerateMemoryOperandInputs(
+      index, scale, base, displacement, inputs, &input_count);
+
+  DCHECK_NE(0, static_cast<int>(input_count));
+  DCHECK_GE(arraysize(inputs), input_count);
+
+  InstructionOperand* outputs[1];
+  outputs[0] = g.DefineAsRegister(result);
+
+  opcode = AddressingModeField::encode(mode) | opcode;
+
+  selector->Emit(opcode, 1, outputs, input_count, inputs);
+}
+
+}  // namespace
+
+
 void InstructionSelector::VisitWord32Shl(Node* node) {
+  Int32ScaleMatcher m(node, true);
+  if (m.matches()) {
+    Node* index = node->InputAt(0);
+    Node* base = m.power_of_two_plus_one() ? index : NULL;
+    EmitLea(this, kX64Lea32, node, index, m.scale(), base, NULL);
+    return;
+  }
   VisitWord32Shift(this, node, kX64Shl32);
 }
 
 
 void InstructionSelector::VisitWord64Shl(Node* node) {
+  X64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
+      m.right().IsInRange(32, 63)) {
+    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
+    // 32 bits anyway.
+    Emit(kX64Shl, g.DefineSameAsFirst(node),
+         g.UseRegister(m.left().node()->InputAt(0)),
+         g.UseImmediate(m.right().node()));
+    return;
+  }
   VisitWord64Shift(this, node, kX64Shl);
 }
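
Note on the VisitWord64Shl early-out: a left shift by 32 to 63 moves the operand's low 32 bits into the high half and discards everything a preceding ChangeInt32ToInt64 or ChangeUint32ToUint64 contributed, so the extension can be skipped. A self-contained check of that equivalence:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t x = 0x80000001u;  // negative when viewed as int32_t
      for (int shift = 32; shift <= 63; ++shift) {
        const uint64_t zext = static_cast<uint64_t>(x);
        const uint64_t sext =
            static_cast<uint64_t>(static_cast<int64_t>(static_cast<int32_t>(x)));
        // The differing (sign) bits live in positions 32..63 and are all
        // shifted out, so both extensions give the same shifted value.
        assert((zext << shift) == (sext << shift));
      }
      return 0;
    }
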
 
@@ -338,6 +518,18 @@
 
 
 void InstructionSelector::VisitWord32Sar(Node* node) {
+  X64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().Is(16) && m.right().Is(16)) {
+      Emit(kX64Movsxwl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
+      return;
+    } else if (mleft.right().Is(24) && m.right().Is(24)) {
+      Emit(kX64Movsxbl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
+      return;
+    }
+  }
   VisitWord32Shift(this, node, kX64Sar32);
 }
 
@@ -358,6 +550,18 @@
 
 
 void InstructionSelector::VisitInt32Add(Node* node) {
+  X64OperandGenerator g(this);
+
+  // Try to match the Add to a leal pattern
+  BaseWithIndexAndDisplacement32Matcher m(node);
+  if (m.matches() &&
+      (m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
+    EmitLea(this, kX64Lea32, node, m.index(), m.scale(), m.base(),
+            m.displacement());
+    return;
+  }
+
+  // No leal pattern match, use addl
   VisitBinop(this, node, kX64Add32);
 }
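
Note on the leal matching: when BaseWithIndexAndDisplacement32Matcher decomposes the add, the whole expression collapses into one address computation. An illustrative selection, assuming a hypothetical register assignment with x in rbx, y in rcx and the result in eax:

    // Int32Add(x, Int32Add(Word32Shl(y, 2), 8))  ==  x + y*4 + 8
    //
    //   leal eax, [rbx + rcx*4 + 8]    ; one kX64Lea32, mode MR4I
    //
    // instead of a shll plus two addl instructions, and without clobbering
    // the flags or the source registers.
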
 
@@ -371,8 +575,16 @@
   X64OperandGenerator g(this);
   Int32BinopMatcher m(node);
   if (m.left().Is(0)) {
-    Emit(kX64Neg32, g.DefineSameAsFirst(node), g.Use(m.right().node()));
+    Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
   } else {
+    if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
+      // Turn subtractions of constant values into immediate "leal" instructions
+      // by negating the value.
+      Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.TempImmediate(-m.right().Value()));
+      return;
+    }
     VisitBinop(this, node, kX64Sub32);
   }
 }
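
Note on the constant-subtraction rewrite: x - K equals x + (-K) in 32-bit arithmetic, and leal encodes the negated constant as a displacement; unlike subl, it also leaves the flags untouched and may target a fresh destination register. For example, with x in rbx (hypothetical allocation):

    // Int32Sub(x, 5):
    //   leal eax, [rbx - 5]    ; kX64Lea32, mode MRI, displacement -5
    // rbx is preserved and no condition codes change.
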
@@ -382,33 +594,75 @@
   X64OperandGenerator g(this);
   Int64BinopMatcher m(node);
   if (m.left().Is(0)) {
-    Emit(kX64Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
+    Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
   } else {
     VisitBinop(this, node, kX64Sub);
   }
 }
 
 
-static void VisitMul(InstructionSelector* selector, Node* node,
-                     ArchOpcode opcode) {
+namespace {
+
+void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
   X64OperandGenerator g(selector);
-  Node* left = node->InputAt(0);
-  Node* right = node->InputAt(1);
+  Int32BinopMatcher m(node);
+  Node* left = m.left().node();
+  Node* right = m.right().node();
   if (g.CanBeImmediate(right)) {
     selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
                    g.UseImmediate(right));
-  } else if (g.CanBeImmediate(left)) {
-    selector->Emit(opcode, g.DefineAsRegister(node), g.Use(right),
-                   g.UseImmediate(left));
   } else {
-    // TODO(turbofan): select better left operand.
+    if (g.CanBeBetterLeftOperand(right)) {
+      std::swap(left, right);
+    }
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.Use(right));
   }
 }
 
 
+void VisitMulHigh(InstructionSelector* selector, Node* node,
+                  ArchOpcode opcode) {
+  X64OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  if (selector->IsLive(left) && !selector->IsLive(right)) {
+    std::swap(left, right);
+  }
+  // TODO(turbofan): We use UseUniqueRegister here to improve register
+  // allocation.
+  selector->Emit(opcode, g.DefineAsFixed(node, rdx), g.UseFixed(left, rax),
+                 g.UseUniqueRegister(right));
+}
+
+
+void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
+  X64OperandGenerator g(selector);
+  InstructionOperand* temps[] = {g.TempRegister(rdx)};
+  selector->Emit(
+      opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
+      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
+}
+
+
+void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
+  X64OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsFixed(node, rdx),
+                 g.UseFixed(node->InputAt(0), rax),
+                 g.UseUniqueRegister(node->InputAt(1)));
+}
+
+}  // namespace
+
+
 void InstructionSelector::VisitInt32Mul(Node* node) {
+  Int32ScaleMatcher m(node, true);
+  if (m.matches()) {
+    Node* index = node->InputAt(0);
+    Node* base = m.power_of_two_plus_one() ? index : NULL;
+    EmitLea(this, kX64Lea32, node, index, m.scale(), base, NULL);
+    return;
+  }
   VisitMul(this, node, kX64Imul32);
 }
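
Note on the Int32ScaleMatcher use: it recognizes multiplications by 2, 4 and 8 directly and, through power_of_two_plus_one, by 3, 5 and 9, reusing the operand as both base and index. Illustrative selections, assuming x sits in rcx:

    // Int32Mul(x, 5):  x*5 = x + x*4
    //   leal eax, [rcx + rcx*4]   ; mode MR4, base == index
    // Int32Mul(x, 8):
    //   leal eax, [rcx*8]         ; mode M8
    // Other multipliers fall through to VisitMul and imull.
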
 
@@ -418,13 +672,8 @@
 }
 
 
-static void VisitDiv(InstructionSelector* selector, Node* node,
-                     ArchOpcode opcode) {
-  X64OperandGenerator g(selector);
-  InstructionOperand* temps[] = {g.TempRegister(rdx)};
-  selector->Emit(
-      opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
-      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+  VisitMulHigh(this, node, kX64ImulHigh32);
 }
 
 
@@ -438,26 +687,16 @@
 }
 
 
-void InstructionSelector::VisitInt32UDiv(Node* node) {
+void InstructionSelector::VisitUint32Div(Node* node) {
   VisitDiv(this, node, kX64Udiv32);
 }
 
 
-void InstructionSelector::VisitInt64UDiv(Node* node) {
+void InstructionSelector::VisitUint64Div(Node* node) {
   VisitDiv(this, node, kX64Udiv);
 }
 
 
-static void VisitMod(InstructionSelector* selector, Node* node,
-                     ArchOpcode opcode) {
-  X64OperandGenerator g(selector);
-  InstructionOperand* temps[] = {g.TempRegister(rax), g.TempRegister(rdx)};
-  selector->Emit(
-      opcode, g.DefineAsFixed(node, rdx), g.UseFixed(node->InputAt(0), rax),
-      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
-}
-
-
 void InstructionSelector::VisitInt32Mod(Node* node) {
   VisitMod(this, node, kX64Idiv32);
 }
@@ -468,16 +707,27 @@
 }
 
 
-void InstructionSelector::VisitInt32UMod(Node* node) {
+void InstructionSelector::VisitUint32Mod(Node* node) {
   VisitMod(this, node, kX64Udiv32);
 }
 
 
-void InstructionSelector::VisitInt64UMod(Node* node) {
+void InstructionSelector::VisitUint64Mod(Node* node) {
   VisitMod(this, node, kX64Udiv);
 }
 
 
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+  VisitMulHigh(this, node, kX64UmulHigh32);
+}
+
+
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
   X64OperandGenerator g(this);
   Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -486,9 +736,7 @@
 
 void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
   X64OperandGenerator g(this);
-  // TODO(turbofan): X64 SSE cvtqsi2sd should support operands.
-  Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)));
+  Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
 }
 
 
@@ -512,41 +760,115 @@
 
 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+  Node* value = node->InputAt(0);
+  switch (value->opcode()) {
+    case IrOpcode::kWord32And:
+    case IrOpcode::kWord32Or:
+    case IrOpcode::kWord32Xor:
+    case IrOpcode::kWord32Shl:
+    case IrOpcode::kWord32Shr:
+    case IrOpcode::kWord32Sar:
+    case IrOpcode::kWord32Ror:
+    case IrOpcode::kWord32Equal:
+    case IrOpcode::kInt32Add:
+    case IrOpcode::kInt32Sub:
+    case IrOpcode::kInt32Mul:
+    case IrOpcode::kInt32MulHigh:
+    case IrOpcode::kInt32Div:
+    case IrOpcode::kInt32LessThan:
+    case IrOpcode::kInt32LessThanOrEqual:
+    case IrOpcode::kInt32Mod:
+    case IrOpcode::kUint32Div:
+    case IrOpcode::kUint32LessThan:
+    case IrOpcode::kUint32LessThanOrEqual:
+    case IrOpcode::kUint32Mod:
+    case IrOpcode::kUint32MulHigh: {
+      // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
+      // zero-extension is a no-op.
+      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+      return;
+    }
+    default:
+      break;
+  }
+  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
 }
 
 
 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+  Node* value = node->InputAt(0);
+  if (CanCover(node, value)) {
+    switch (value->opcode()) {
+      case IrOpcode::kWord64Sar:
+      case IrOpcode::kWord64Shr: {
+        Int64BinopMatcher m(value);
+        if (m.right().Is(32)) {
+          Emit(kX64Shr, g.DefineSameAsFirst(node),
+               g.UseRegister(m.left().node()), g.TempImmediate(32));
+          return;
+        }
+        break;
+      }
+      default:
+        break;
+    }
+  }
+  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
 }
 
 
 void InstructionSelector::VisitFloat64Add(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Add, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
 }
 
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Sub, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
 }
 
 
 void InstructionSelector::VisitFloat64Mul(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Mul, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
 }
 
 
 void InstructionSelector::VisitFloat64Div(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Div, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
 }
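
Two notes on this hunk. The kArchNop path in VisitChangeUint32ToUint64 relies on the x86-64 rule that writing a 32-bit register clears bits 63:32 of the full register, so the listed 32-bit operations already leave a zero-extended value behind. The AVX variants use the three-operand VEX encoding, which is why their results can be DefineAsRegister rather than the SSE DefineSameAsFirst constraint. The zero-extension invariant, modelled in plain C++:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Analogue of e.g. addl writing eax inside rax: a 32-bit result
      // re-enters the 64-bit world zero-extended, never with stale bits.
      uint64_t rax = 0xFFFFFFFF00000000ull;            // stale upper half
      uint32_t eax = static_cast<uint32_t>(rax) + 1u;  // 32-bit operation
      rax = eax;                                       // implicit zero-extension
      assert((rax >> 32) == 0);
      return 0;
    }
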
 
 
@@ -565,123 +887,56 @@
 }
 
 
-void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
-                                                    FlagsContinuation* cont) {
-  VisitBinop(this, node, kX64Add32, cont);
-}
+namespace {
 
-
-void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
-                                                    FlagsContinuation* cont) {
-  VisitBinop(this, node, kX64Sub32, cont);
-}
-
-
-// Shared routine for multiple compare operations.
-static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
-                         InstructionOperand* left, InstructionOperand* right,
-                         FlagsContinuation* cont) {
+void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+                    Node* node) {
   X64OperandGenerator g(selector);
-  opcode = cont->Encode(opcode);
-  if (cont->IsBranch()) {
-    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
-                   g.Label(cont->false_block()))->MarkAsControl();
-  } else {
-    DCHECK(cont->IsSet());
-    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
-  }
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)));
+}
+
+}  // namespace
+
+
+void InstructionSelector::VisitFloat64Floor(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(SSE4_1));
+  VisitRRFloat64(this, kSSEFloat64Floor, node);
 }
 
 
-// Shared routine for multiple word compare operations.
-static void VisitWordCompare(InstructionSelector* selector, Node* node,
-                             InstructionCode opcode, FlagsContinuation* cont,
-                             bool commutative) {
-  X64OperandGenerator g(selector);
-  Node* left = node->InputAt(0);
-  Node* right = node->InputAt(1);
-
-  // Match immediates on left or right side of comparison.
-  if (g.CanBeImmediate(right)) {
-    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
-  } else if (g.CanBeImmediate(left)) {
-    if (!commutative) cont->Commute();
-    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
-  } else {
-    VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
-  }
+void InstructionSelector::VisitFloat64Ceil(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(SSE4_1));
+  VisitRRFloat64(this, kSSEFloat64Ceil, node);
 }
 
 
-void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
-  switch (node->opcode()) {
-    case IrOpcode::kInt32Sub:
-      return VisitWordCompare(this, node, kX64Cmp32, cont, false);
-    case IrOpcode::kWord32And:
-      return VisitWordCompare(this, node, kX64Test32, cont, true);
-    default:
-      break;
-  }
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(SSE4_1));
+  VisitRRFloat64(this, kSSEFloat64RoundTruncate, node);
+}
 
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+  UNREACHABLE();
+}
+
+
+void InstructionSelector::VisitCall(Node* node) {
   X64OperandGenerator g(this);
-  VisitCompare(this, kX64Test32, g.Use(node), g.TempImmediate(-1), cont);
-}
-
-
-void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
-  switch (node->opcode()) {
-    case IrOpcode::kInt64Sub:
-      return VisitWordCompare(this, node, kX64Cmp, cont, false);
-    case IrOpcode::kWord64And:
-      return VisitWordCompare(this, node, kX64Test, cont, true);
-    default:
-      break;
-  }
-
-  X64OperandGenerator g(this);
-  VisitCompare(this, kX64Test, g.Use(node), g.TempImmediate(-1), cont);
-}
-
-
-void InstructionSelector::VisitWord32Compare(Node* node,
-                                             FlagsContinuation* cont) {
-  VisitWordCompare(this, node, kX64Cmp32, cont, false);
-}
-
-
-void InstructionSelector::VisitWord64Compare(Node* node,
-                                             FlagsContinuation* cont) {
-  VisitWordCompare(this, node, kX64Cmp, cont, false);
-}
-
-
-void InstructionSelector::VisitFloat64Compare(Node* node,
-                                              FlagsContinuation* cont) {
-  X64OperandGenerator g(this);
-  Node* left = node->InputAt(0);
-  Node* right = node->InputAt(1);
-  VisitCompare(this, kSSEFloat64Cmp, g.UseRegister(left), g.Use(right), cont);
-}
-
-
-void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
-                                    BasicBlock* deoptimization) {
-  X64OperandGenerator g(this);
-  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+  const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
 
   FrameStateDescriptor* frame_state_descriptor = NULL;
   if (descriptor->NeedsFrameState()) {
     frame_state_descriptor = GetFrameStateDescriptor(
-        call->InputAt(static_cast<int>(descriptor->InputCount())));
+        node->InputAt(static_cast<int>(descriptor->InputCount())));
   }
 
   CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
 
   // Compute InstructionOperands for inputs and outputs.
-  InitializeCallBuffer(call, &buffer, true, true);
+  InitializeCallBuffer(node, &buffer, true, true);
 
-  // TODO(dcarney): stack alignment for c calls.
-  // TODO(dcarney): shadow space on window for c calls.
   // Push any stack arguments.
   for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
        input != buffer.pushed_nodes.rend(); input++) {
@@ -707,17 +962,358 @@
   opcode |= MiscField::encode(descriptor->flags());
 
   // Emit the call instruction.
+  InstructionOperand** first_output =
+      buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
   Instruction* call_instr =
-      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+      Emit(opcode, buffer.outputs.size(), first_output,
            buffer.instruction_args.size(), &buffer.instruction_args.front());
-
   call_instr->MarkAsCall();
-  if (deoptimization != NULL) {
-    DCHECK(continuation != NULL);
-    call_instr->MarkAsControl();
+}
+
+
+// Shared routine for multiple compare operations.
+static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+                         InstructionOperand* left, InstructionOperand* right,
+                         FlagsContinuation* cont) {
+  X64OperandGenerator g(selector);
+  opcode = cont->Encode(opcode);
+  if (cont->IsBranch()) {
+    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
+                   g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    DCHECK(cont->IsSet());
+    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
   }
 }
 
+
+// Shared routine for multiple compare operations.
+static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+                         Node* left, Node* right, FlagsContinuation* cont,
+                         bool commutative) {
+  X64OperandGenerator g(selector);
+  if (commutative && g.CanBeBetterLeftOperand(right)) {
+    std::swap(left, right);
+  }
+  VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
+}
+
+
+// Shared routine for multiple word compare operations.
+static void VisitWordCompare(InstructionSelector* selector, Node* node,
+                             InstructionCode opcode, FlagsContinuation* cont) {
+  X64OperandGenerator g(selector);
+  Node* const left = node->InputAt(0);
+  Node* const right = node->InputAt(1);
+
+  // Match immediates on left or right side of comparison.
+  if (g.CanBeImmediate(right)) {
+    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
+  } else if (g.CanBeImmediate(left)) {
+    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
+  } else {
+    VisitCompare(selector, opcode, left, right, cont,
+                 node->op()->HasProperty(Operator::kCommutative));
+  }
+}
+
+
+// Shared routine for comparison with zero.
+static void VisitCompareZero(InstructionSelector* selector, Node* node,
+                             InstructionCode opcode, FlagsContinuation* cont) {
+  X64OperandGenerator g(selector);
+  VisitCompare(selector, opcode, g.Use(node), g.TempImmediate(0), cont);
+}
+
+
+// Shared routine for multiple float64 compare operations.
+static void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+                                FlagsContinuation* cont) {
+  VisitCompare(selector, kSSEFloat64Cmp, node->InputAt(0), node->InputAt(1),
+               cont, node->op()->HasProperty(Operator::kCommutative));
+}
+
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+                                      BasicBlock* fbranch) {
+  X64OperandGenerator g(this);
+  Node* user = branch;
+  Node* value = branch->InputAt(0);
+
+  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+
+  // Try to combine with comparisons against 0 by simply inverting the branch.
+  while (CanCover(user, value)) {
+    if (value->opcode() == IrOpcode::kWord32Equal) {
+      Int32BinopMatcher m(value);
+      if (m.right().Is(0)) {
+        user = value;
+        value = m.left().node();
+        cont.Negate();
+      } else {
+        break;
+      }
+    } else if (value->opcode() == IrOpcode::kWord64Equal) {
+      Int64BinopMatcher m(value);
+      if (m.right().Is(0)) {
+        user = value;
+        value = m.left().node();
+        cont.Negate();
+      } else {
+        break;
+      }
+    } else {
+      break;
+    }
+  }
+
+  // Try to combine the branch with a comparison.
+  if (CanCover(user, value)) {
+    switch (value->opcode()) {
+      case IrOpcode::kWord32Equal:
+        cont.OverwriteAndNegateIfEqual(kEqual);
+        return VisitWordCompare(this, value, kX64Cmp32, &cont);
+      case IrOpcode::kInt32LessThan:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWordCompare(this, value, kX64Cmp32, &cont);
+      case IrOpcode::kInt32LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWordCompare(this, value, kX64Cmp32, &cont);
+      case IrOpcode::kUint32LessThan:
+        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWordCompare(this, value, kX64Cmp32, &cont);
+      case IrOpcode::kUint32LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWordCompare(this, value, kX64Cmp32, &cont);
+      case IrOpcode::kWord64Equal:
+        cont.OverwriteAndNegateIfEqual(kEqual);
+        return VisitWordCompare(this, value, kX64Cmp, &cont);
+      case IrOpcode::kInt64LessThan:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWordCompare(this, value, kX64Cmp, &cont);
+      case IrOpcode::kInt64LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWordCompare(this, value, kX64Cmp, &cont);
+      case IrOpcode::kUint64LessThan:
+        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWordCompare(this, value, kX64Cmp, &cont);
+      case IrOpcode::kFloat64Equal:
+        cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
+        return VisitFloat64Compare(this, value, &cont);
+      case IrOpcode::kFloat64LessThan:
+        cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
+        return VisitFloat64Compare(this, value, &cont);
+      case IrOpcode::kFloat64LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+        return VisitFloat64Compare(this, value, &cont);
+      case IrOpcode::kProjection:
+        // Check if this is the overflow output projection of an
+        // <Operation>WithOverflow node.
+        if (OpParameter<size_t>(value) == 1u) {
+          // We cannot combine the <Operation>WithOverflow with this branch
+          // unless the 0th projection (the use of the actual value of the
+          // <Operation>) is either NULL, which means there's no use of the
+          // actual value, or was already defined, which means it is scheduled
+          // *AFTER* this branch.
+          Node* node = value->InputAt(0);
+          Node* result = node->FindProjection(0);
+          if (result == NULL || IsDefined(result)) {
+            switch (node->opcode()) {
+              case IrOpcode::kInt32AddWithOverflow:
+                cont.OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(this, node, kX64Add32, &cont);
+              case IrOpcode::kInt32SubWithOverflow:
+                cont.OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(this, node, kX64Sub32, &cont);
+              default:
+                break;
+            }
+          }
+        }
+        break;
+      case IrOpcode::kInt32Sub:
+        return VisitWordCompare(this, value, kX64Cmp32, &cont);
+      case IrOpcode::kInt64Sub:
+        return VisitWordCompare(this, value, kX64Cmp, &cont);
+      case IrOpcode::kWord32And:
+        return VisitWordCompare(this, value, kX64Test32, &cont);
+      case IrOpcode::kWord64And:
+        return VisitWordCompare(this, value, kX64Test, &cont);
+      default:
+        break;
+    }
+  }
+
+  // Branch could not be combined with a compare, emit compare against 0.
+  VisitCompareZero(this, value, kX64Cmp32, &cont);
+}
+
+
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+  Node* user = node;
+  FlagsContinuation cont(kEqual, node);
+  Int32BinopMatcher m(user);
+  if (m.right().Is(0)) {
+    Node* value = m.left().node();
+
+    // Try to combine with comparisons against 0 by simply inverting the branch.
+    while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
+      Int32BinopMatcher m(value);
+      if (m.right().Is(0)) {
+        user = value;
+        value = m.left().node();
+        cont.Negate();
+      } else {
+        break;
+      }
+    }
+
+    // Try to combine the branch with a comparison.
+    if (CanCover(user, value)) {
+      switch (value->opcode()) {
+        case IrOpcode::kInt32Sub:
+          return VisitWordCompare(this, value, kX64Cmp32, &cont);
+        case IrOpcode::kWord32And:
+          return VisitWordCompare(this, value, kX64Test32, &cont);
+        default:
+          break;
+      }
+    }
+    return VisitCompareZero(this, value, kX64Cmp32, &cont);
+  }
+  VisitWordCompare(this, node, kX64Cmp32, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+  FlagsContinuation cont(kSignedLessThan, node);
+  VisitWordCompare(this, node, kX64Cmp32, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  VisitWordCompare(this, node, kX64Cmp32, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThan, node);
+  VisitWordCompare(this, node, kX64Cmp32, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  VisitWordCompare(this, node, kX64Cmp32, &cont);
+}
+
+
+void InstructionSelector::VisitWord64Equal(Node* const node) {
+  Node* user = node;
+  FlagsContinuation cont(kEqual, node);
+  Int64BinopMatcher m(user);
+  if (m.right().Is(0)) {
+    Node* value = m.left().node();
+
+    // Try to combine with comparisons against 0 by inverting the continuation.
+    while (CanCover(user, value) && value->opcode() == IrOpcode::kWord64Equal) {
+      Int64BinopMatcher m(value);
+      if (m.right().Is(0)) {
+        user = value;
+        value = m.left().node();
+        cont.Negate();
+      } else {
+        break;
+      }
+    }
+
+    // Try to combine the continuation with a comparison.
+    if (CanCover(user, value)) {
+      switch (value->opcode()) {
+        case IrOpcode::kInt64Sub:
+          return VisitWordCompare(this, value, kX64Cmp, &cont);
+        case IrOpcode::kWord64And:
+          return VisitWordCompare(this, value, kX64Test, &cont);
+        default:
+          break;
+      }
+    }
+    return VisitCompareZero(this, value, kX64Cmp, &cont);
+  }
+  VisitWordCompare(this, node, kX64Cmp, &cont);
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
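+  // Emit the add with an overflow continuation only when the overflow
+  // projection is actually used; otherwise a plain add suffices.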
+  if (Node* ovf = node->FindProjection(1)) {
+    FlagsContinuation cont(kOverflow, ovf);
+    return VisitBinop(this, node, kX64Add32, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop(this, node, kX64Add32, &cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+  if (Node* ovf = node->FindProjection(1)) {
+    FlagsContinuation cont(kOverflow, ovf);
+    return VisitBinop(this, node, kX64Sub32, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop(this, node, kX64Sub32, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+  FlagsContinuation cont(kSignedLessThan, node);
+  VisitWordCompare(this, node, kX64Cmp, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  VisitWordCompare(this, node, kX64Cmp, &cont);
+}
+
+
+void InstructionSelector::VisitUint64LessThan(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThan, node);
+  VisitWordCompare(this, node, kX64Cmp, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
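+  // kUnorderedEqual accounts for NaN operands, which compare unordered
+  // and must not be reported as equal.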
+  FlagsContinuation cont(kUnorderedEqual, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+  FlagsContinuation cont(kUnorderedLessThan, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+  if (CpuFeatures::IsSupported(SSE4_1)) {
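+    // SSE4.1 provides ROUNDSD, which directly implements the float64
+    // floor/ceil/truncate operators advertised here.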
+    return MachineOperatorBuilder::kFloat64Floor |
+           MachineOperatorBuilder::kFloat64Ceil |
+           MachineOperatorBuilder::kFloat64RoundTruncate |
+           MachineOperatorBuilder::kWord32ShiftIsSafe;
+  }
+  return MachineOperatorBuilder::kNoFlags;
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/x64/linkage-x64.cc b/src/compiler/x64/linkage-x64.cc
index 8175bc6..0b76cc7 100644
--- a/src/compiler/x64/linkage-x64.cc
+++ b/src/compiler/x64/linkage-x64.cc
@@ -49,8 +49,9 @@
 
 typedef LinkageHelper<X64LinkageHelperTraits> LH;
 
-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
-  return LH::GetJSCallDescriptor(zone, parameter_count);
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
+                                             CallDescriptor::Flags flags) {
+  return LH::GetJSCallDescriptor(zone, parameter_count, flags);
 }
 
 
@@ -63,10 +64,10 @@
 
 
 CallDescriptor* Linkage::GetStubCallDescriptor(
-    CallInterfaceDescriptor descriptor, int stack_parameter_count,
-    CallDescriptor::Flags flags, Zone* zone) {
+    const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
+    CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
   return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
-                                   flags);
+                                   flags, properties);
 }
 
 
diff --git a/src/compiler/zone-pool.cc b/src/compiler/zone-pool.cc
new file mode 100644
index 0000000..179988d
--- /dev/null
+++ b/src/compiler/zone-pool.cc
@@ -0,0 +1,144 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/zone-pool.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+ZonePool::StatsScope::StatsScope(ZonePool* zone_pool)
+    : zone_pool_(zone_pool),
+      total_allocated_bytes_at_start_(zone_pool->GetTotalAllocatedBytes()),
+      max_allocated_bytes_(0) {
+  zone_pool_->stats_.push_back(this);
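+  // Record the allocation size of every live zone at scope entry so that
+  // later queries report only bytes allocated after this point.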
+  for (auto zone : zone_pool_->used_) {
+    size_t size = static_cast<size_t>(zone->allocation_size());
+    std::pair<InitialValues::iterator, bool> res =
+        initial_values_.insert(std::make_pair(zone, size));
+    USE(res);
+    DCHECK(res.second);
+  }
+}
+
+
+ZonePool::StatsScope::~StatsScope() {
+  DCHECK_EQ(zone_pool_->stats_.back(), this);
+  zone_pool_->stats_.pop_back();
+}
+
+
+size_t ZonePool::StatsScope::GetMaxAllocatedBytes() {
+  return std::max(max_allocated_bytes_, GetCurrentAllocatedBytes());
+}
+
+
+size_t ZonePool::StatsScope::GetCurrentAllocatedBytes() {
+  size_t total = 0;
+  for (Zone* zone : zone_pool_->used_) {
+    total += static_cast<size_t>(zone->allocation_size());
+    // Adjust for initial values.
+    InitialValues::iterator it = initial_values_.find(zone);
+    if (it != initial_values_.end()) {
+      total -= it->second;
+    }
+  }
+  return total;
+}
+
+
+size_t ZonePool::StatsScope::GetTotalAllocatedBytes() {
+  return zone_pool_->GetTotalAllocatedBytes() - total_allocated_bytes_at_start_;
+}
+
+
+void ZonePool::StatsScope::ZoneReturned(Zone* zone) {
+  size_t current_total = GetCurrentAllocatedBytes();
+  // Update max.
+  max_allocated_bytes_ = std::max(max_allocated_bytes_, current_total);
+  // Drop zone from initial value map.
+  InitialValues::iterator it = initial_values_.find(zone);
+  if (it != initial_values_.end()) {
+    initial_values_.erase(it);
+  }
+}
+
+
+ZonePool::ZonePool(Isolate* isolate)
+    : isolate_(isolate), max_allocated_bytes_(0), total_deleted_bytes_(0) {}
+
+
+ZonePool::~ZonePool() {
+  DCHECK(used_.empty());
+  DCHECK(stats_.empty());
+  for (Zone* zone : unused_) {
+    delete zone;
+  }
+}
+
+
+size_t ZonePool::GetMaxAllocatedBytes() {
+  return std::max(max_allocated_bytes_, GetCurrentAllocatedBytes());
+}
+
+
+size_t ZonePool::GetCurrentAllocatedBytes() {
+  size_t total = 0;
+  for (Zone* zone : used_) {
+    total += static_cast<size_t>(zone->allocation_size());
+  }
+  return total;
+}
+
+
+size_t ZonePool::GetTotalAllocatedBytes() {
+  return total_deleted_bytes_ + GetCurrentAllocatedBytes();
+}
+
+
+Zone* ZonePool::NewEmptyZone() {
+  Zone* zone;
+  // Grab a zone from pool if possible.
+  if (!unused_.empty()) {
+    zone = unused_.back();
+    unused_.pop_back();
+  } else {
+    zone = new Zone(isolate_);
+  }
+  used_.push_back(zone);
+  DCHECK_EQ(0, zone->allocation_size());
+  return zone;
+}
+
+
+void ZonePool::ReturnZone(Zone* zone) {
+  size_t current_total = GetCurrentAllocatedBytes();
+  // Update max.
+  max_allocated_bytes_ = std::max(max_allocated_bytes_, current_total);
+  // Update stats.
+  for (auto stat_scope : stats_) {
+    stat_scope->ZoneReturned(zone);
+  }
+  // Remove from used.
+  Used::iterator it = std::find(used_.begin(), used_.end(), zone);
+  DCHECK(it != used_.end());
+  used_.erase(it);
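+  // Credit the zone's bytes to the running total before its memory goes
+  // away, so GetTotalAllocatedBytes() stays monotonic.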
+  total_deleted_bytes_ += static_cast<size_t>(zone->allocation_size());
+  // Delete zone or clear and stash on unused_.
+  if (unused_.size() >= kMaxUnusedSize) {
+    delete zone;
+  } else {
+    zone->DeleteAll();
+    DCHECK_EQ(0, zone->allocation_size());
+    unused_.push_back(zone);
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/zone-pool.h b/src/compiler/zone-pool.h
new file mode 100644
index 0000000..8b43265
--- /dev/null
+++ b/src/compiler/zone-pool.h
@@ -0,0 +1,100 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ZONE_POOL_H_
+#define V8_COMPILER_ZONE_POOL_H_
+
+#include <map>
+#include <set>
+#include <vector>
+
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
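+// Recycles Zone objects across compilation phases so their backing memory
+// can be reused instead of repeatedly allocated and freed.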
+class ZonePool FINAL {
+ public:
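+  // RAII helper that lazily allocates a Zone from the pool on first use
+  // and returns it when destroyed (or on an explicit Destroy()).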
+  class Scope FINAL {
+   public:
+    explicit Scope(ZonePool* zone_pool) : zone_pool_(zone_pool), zone_(NULL) {}
+    ~Scope() { Destroy(); }
+
+    Zone* zone() {
+      if (zone_ == NULL) zone_ = zone_pool_->NewEmptyZone();
+      return zone_;
+    }
+    void Destroy() {
+      if (zone_ != NULL) zone_pool_->ReturnZone(zone_);
+      zone_ = NULL;
+    }
+
+   private:
+    ZonePool* const zone_pool_;
+    Zone* zone_;
+    DISALLOW_COPY_AND_ASSIGN(Scope);
+  };
+
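+  // Measures zone memory usage between construction and destruction,
+  // relative to the allocation state when the scope was opened.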
+  class StatsScope FINAL {
+   public:
+    explicit StatsScope(ZonePool* zone_pool);
+    ~StatsScope();
+
+    size_t GetMaxAllocatedBytes();
+    size_t GetCurrentAllocatedBytes();
+    size_t GetTotalAllocatedBytes();
+
+   private:
+    friend class ZonePool;
+    void ZoneReturned(Zone* zone);
+
+    typedef std::map<Zone*, size_t> InitialValues;
+
+    ZonePool* const zone_pool_;
+    InitialValues initial_values_;
+    size_t total_allocated_bytes_at_start_;
+    size_t max_allocated_bytes_;
+
+    DISALLOW_COPY_AND_ASSIGN(StatsScope);
+  };
+
+  explicit ZonePool(Isolate* isolate);
+  ~ZonePool();
+
+  size_t GetMaxAllocatedBytes();
+  size_t GetTotalAllocatedBytes();
+  size_t GetCurrentAllocatedBytes();
+
+ private:
+  Zone* NewEmptyZone();
+  void ReturnZone(Zone* zone);
+
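+  // Maximum number of cleared zones kept for reuse; extra zones are deleted.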
+  static const size_t kMaxUnusedSize = 3;
+  typedef std::vector<Zone*> Unused;
+  typedef std::vector<Zone*> Used;
+  typedef std::vector<StatsScope*> Stats;
+
+  Isolate* const isolate_;
+  Unused unused_;
+  Used used_;
+  Stats stats_;
+  size_t max_allocated_bytes_;
+  size_t total_deleted_bytes_;
+
+  DISALLOW_COPY_AND_ASSIGN(ZonePool);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_ZONE_POOL_H_