Upgrade to 3.29

Update V8 to 3.29.88.17 and update makefiles to support building on
all the relevant platforms.

Bug: 17370214

Change-Id: Ia3407c157fd8d72a93e23d8318ccaf6ecf77fa4e
diff --git a/src/compiler/access-builder.cc b/src/compiler/access-builder.cc
new file mode 100644
index 0000000..ac9cfa8
--- /dev/null
+++ b/src/compiler/access-builder.cc
@@ -0,0 +1,97 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/access-builder.h"
+#include "src/types-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// static
+FieldAccess AccessBuilder::ForMap() {
+  return {kTaggedBase, HeapObject::kMapOffset, Handle<Name>(), Type::Any(),
+          kMachAnyTagged};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSObjectProperties() {
+  return {kTaggedBase, JSObject::kPropertiesOffset, Handle<Name>(), Type::Any(),
+          kMachAnyTagged};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSObjectElements() {
+  return {kTaggedBase, JSObject::kElementsOffset, Handle<Name>(),
+          Type::Internal(), kMachAnyTagged};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSFunctionContext() {
+  return {kTaggedBase, JSFunction::kContextOffset, Handle<Name>(),
+          Type::Internal(), kMachAnyTagged};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
+  return {kTaggedBase, JSArrayBuffer::kBackingStoreOffset, Handle<Name>(),
+          Type::UntaggedPtr(), kMachPtr};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForExternalArrayPointer() {
+  return {kTaggedBase, ExternalArray::kExternalPointerOffset, Handle<Name>(),
+          Type::UntaggedPtr(), kMachPtr};
+}
+
+
+// static
+ElementAccess AccessBuilder::ForFixedArrayElement() {
+  return {kTaggedBase, FixedArray::kHeaderSize, Type::Any(), kMachAnyTagged};
+}
+
+
+// static
+ElementAccess AccessBuilder::ForBackingStoreElement(MachineType rep) {
+  return {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
+          rep};
+}
+
+
+// static
+ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
+                                                  bool is_external) {
+  BaseTaggedness taggedness = is_external ? kUntaggedBase : kTaggedBase;
+  int header_size = is_external ? 0 : FixedTypedArrayBase::kDataOffset;
+  switch (type) {
+    case kExternalInt8Array:
+      return {taggedness, header_size, Type::Signed32(), kMachInt8};
+    case kExternalUint8Array:
+    case kExternalUint8ClampedArray:
+      return {taggedness, header_size, Type::Unsigned32(), kMachUint8};
+    case kExternalInt16Array:
+      return {taggedness, header_size, Type::Signed32(), kMachInt16};
+    case kExternalUint16Array:
+      return {taggedness, header_size, Type::Unsigned32(), kMachUint16};
+    case kExternalInt32Array:
+      return {taggedness, header_size, Type::Signed32(), kMachInt32};
+    case kExternalUint32Array:
+      return {taggedness, header_size, Type::Unsigned32(), kMachUint32};
+    case kExternalFloat32Array:
+      return {taggedness, header_size, Type::Number(), kRepFloat32};
+    case kExternalFloat64Array:
+      return {taggedness, header_size, Type::Number(), kRepFloat64};
+  }
+  UNREACHABLE();
+  return {kUntaggedBase, 0, Type::None(), kMachNone};
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/access-builder.h b/src/compiler/access-builder.h
new file mode 100644
index 0000000..7d0bda1
--- /dev/null
+++ b/src/compiler/access-builder.h
@@ -0,0 +1,55 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ACCESS_BUILDER_H_
+#define V8_COMPILER_ACCESS_BUILDER_H_
+
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// This access builder provides a set of static methods constructing commonly
+// used FieldAccess and ElementAccess descriptors. These descriptors serve as
+// parameters to simplified load/store operators.
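+//
+// A minimal usage sketch (illustrative only; `graph` and `simplified` are
+// assumed to be the caller's Graph* and SimplifiedOperatorBuilder*, and the
+// effect/control inputs depend on the surrounding graph):
+//
+//   FieldAccess access = AccessBuilder::ForJSObjectElements();
+//   Node* elements =
+//       graph->NewNode(simplified->LoadField(access), object, effect, control);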
+class AccessBuilder FINAL : public AllStatic {
+ public:
+  // Provides access to HeapObject::map() field.
+  static FieldAccess ForMap();
+
+  // Provides access to JSObject::properties() field.
+  static FieldAccess ForJSObjectProperties();
+
+  // Provides access to JSObject::elements() field.
+  static FieldAccess ForJSObjectElements();
+
+  // Provides access to JSFunction::context() field.
+  static FieldAccess ForJSFunctionContext();
+
+  // Provides access to JSArrayBuffer::backing_store() field.
+  static FieldAccess ForJSArrayBufferBackingStore();
+
+  // Provides access to ExternalArray::external_pointer() field.
+  static FieldAccess ForExternalArrayPointer();
+
+  // Provides access to FixedArray elements.
+  static ElementAccess ForFixedArrayElement();
+
+  // TODO(mstarzinger): Raw access only for testing, drop me.
+  static ElementAccess ForBackingStoreElement(MachineType rep);
+
+  // Provides access to Fixed{type}TypedArray and External{type}Array elements.
+  static ElementAccess ForTypedArrayElement(ExternalArrayType type,
+                                            bool is_external);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(AccessBuilder);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_ACCESS_BUILDER_H_
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
new file mode 100644
index 0000000..1ec174d
--- /dev/null
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -0,0 +1,876 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/arm/macro-assembler-arm.h"
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+#define kScratchReg r9
+
+
+// Adds Arm-specific methods to convert InstructionOperands.
+class ArmOperandConverter : public InstructionOperandConverter {
+ public:
+  ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
+      : InstructionOperandConverter(gen, instr) {}
+
+  SBit OutputSBit() const {
+    switch (instr_->flags_mode()) {
+      case kFlags_branch:
+      case kFlags_set:
+        return SetCC;
+      case kFlags_none:
+        return LeaveCC;
+    }
+    UNREACHABLE();
+    return LeaveCC;
+  }
+
+  Operand InputImmediate(int index) {
+    Constant constant = ToConstant(instr_->InputAt(index));
+    switch (constant.type()) {
+      case Constant::kInt32:
+        return Operand(constant.ToInt32());
+      case Constant::kFloat64:
+        return Operand(
+            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+      case Constant::kInt64:
+      case Constant::kExternalReference:
+      case Constant::kHeapObject:
+        break;
+    }
+    UNREACHABLE();
+    return Operand::Zero();
+  }
+
+  Operand InputOperand2(int first_index) {
+    const int index = first_index;
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_None:
+      case kMode_Offset_RI:
+      case kMode_Offset_RR:
+        break;
+      case kMode_Operand2_I:
+        return InputImmediate(index + 0);
+      case kMode_Operand2_R:
+        return Operand(InputRegister(index + 0));
+      case kMode_Operand2_R_ASR_I:
+        return Operand(InputRegister(index + 0), ASR, InputInt5(index + 1));
+      case kMode_Operand2_R_ASR_R:
+        return Operand(InputRegister(index + 0), ASR, InputRegister(index + 1));
+      case kMode_Operand2_R_LSL_I:
+        return Operand(InputRegister(index + 0), LSL, InputInt5(index + 1));
+      case kMode_Operand2_R_LSL_R:
+        return Operand(InputRegister(index + 0), LSL, InputRegister(index + 1));
+      case kMode_Operand2_R_LSR_I:
+        return Operand(InputRegister(index + 0), LSR, InputInt5(index + 1));
+      case kMode_Operand2_R_LSR_R:
+        return Operand(InputRegister(index + 0), LSR, InputRegister(index + 1));
+      case kMode_Operand2_R_ROR_I:
+        return Operand(InputRegister(index + 0), ROR, InputInt5(index + 1));
+      case kMode_Operand2_R_ROR_R:
+        return Operand(InputRegister(index + 0), ROR, InputRegister(index + 1));
+    }
+    UNREACHABLE();
+    return Operand::Zero();
+  }
+
+  MemOperand InputOffset(int* first_index) {
+    const int index = *first_index;
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_None:
+      case kMode_Operand2_I:
+      case kMode_Operand2_R:
+      case kMode_Operand2_R_ASR_I:
+      case kMode_Operand2_R_ASR_R:
+      case kMode_Operand2_R_LSL_I:
+      case kMode_Operand2_R_LSL_R:
+      case kMode_Operand2_R_LSR_I:
+      case kMode_Operand2_R_LSR_R:
+      case kMode_Operand2_R_ROR_I:
+      case kMode_Operand2_R_ROR_R:
+        break;
+      case kMode_Offset_RI:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+      case kMode_Offset_RR:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
+    }
+    UNREACHABLE();
+    return MemOperand(r0);
+  }
+
+  MemOperand InputOffset() {
+    int index = 0;
+    return InputOffset(&index);
+  }
+
+  MemOperand ToMemOperand(InstructionOperand* op) const {
+    DCHECK(op != NULL);
+    DCHECK(!op->IsRegister());
+    DCHECK(!op->IsDoubleRegister());
+    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+    // The linkage computes where all spill slots are located.
+    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+  }
+};
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+  ArmOperandConverter i(this, instr);
+
+  switch (ArchOpcodeField::decode(instr->opcode())) {
+    case kArchCallCodeObject: {
+      EnsureSpaceForLazyDeopt();
+      if (instr->InputAt(0)->IsImmediate()) {
+        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
+                RelocInfo::CODE_TARGET);
+      } else {
+        __ add(ip, i.InputRegister(0),
+               Operand(Code::kHeaderSize - kHeapObjectTag));
+        __ Call(ip);
+      }
+      AddSafepointAndDeopt(instr);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArchCallJSFunction: {
+      EnsureSpaceForLazyDeopt();
+      Register func = i.InputRegister(0);
+      if (FLAG_debug_code) {
+        // Check the function's context matches the context argument.
+        __ ldr(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+        __ cmp(cp, kScratchReg);
+        __ Assert(eq, kWrongFunctionContext);
+      }
+      __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+      __ Call(ip);
+      AddSafepointAndDeopt(instr);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArchJmp:
+      __ b(code_->GetLabel(i.InputBlock(0)));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArchNop:
+      // Don't emit code for nops.
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArchRet:
+      AssembleReturn();
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArchTruncateDoubleToI:
+      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmAdd:
+      __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    case kArmAnd:
+      __ and_(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+              i.OutputSBit());
+      break;
+    case kArmBic:
+      __ bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    case kArmMul:
+      __ mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+             i.OutputSBit());
+      break;
+    case kArmMla:
+      __ mla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+             i.InputRegister(2), i.OutputSBit());
+      break;
+    case kArmMls: {
+      CpuFeatureScope scope(masm(), MLS);
+      __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+             i.InputRegister(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmSdiv: {
+      CpuFeatureScope scope(masm(), SUDIV);
+      __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmUdiv: {
+      CpuFeatureScope scope(masm(), SUDIV);
+      __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmMov:
+      __ Move(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
+      break;
+    case kArmMvn:
+      __ mvn(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
+      break;
+    case kArmOrr:
+      __ orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    case kArmEor:
+      __ eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    case kArmSub:
+      __ sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    case kArmRsb:
+      __ rsb(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    case kArmBfc: {
+      CpuFeatureScope scope(masm(), ARMv7);
+      __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmUbfx: {
+      CpuFeatureScope scope(masm(), ARMv7);
+      __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+              i.InputInt8(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmCmp:
+      __ cmp(i.InputRegister(0), i.InputOperand2(1));
+      DCHECK_EQ(SetCC, i.OutputSBit());
+      break;
+    case kArmCmn:
+      __ cmn(i.InputRegister(0), i.InputOperand2(1));
+      DCHECK_EQ(SetCC, i.OutputSBit());
+      break;
+    case kArmTst:
+      __ tst(i.InputRegister(0), i.InputOperand2(1));
+      DCHECK_EQ(SetCC, i.OutputSBit());
+      break;
+    case kArmTeq:
+      __ teq(i.InputRegister(0), i.InputOperand2(1));
+      DCHECK_EQ(SetCC, i.OutputSBit());
+      break;
+    case kArmVcmpF64:
+      __ VFPCompareAndSetFlags(i.InputDoubleRegister(0),
+                               i.InputDoubleRegister(1));
+      DCHECK_EQ(SetCC, i.OutputSBit());
+      break;
+    case kArmVaddF64:
+      __ vadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVsubF64:
+      __ vsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVmulF64:
+      __ vmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVmlaF64:
+      __ vmla(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+              i.InputDoubleRegister(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVmlsF64:
+      __ vmls(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+              i.InputDoubleRegister(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVdivF64:
+      __ vdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVmodF64: {
+      // TODO(bmeurer): We should really get rid of this special instruction,
+      // and generate a CallAddress instruction instead.
+      FrameScope scope(masm(), StackFrame::MANUAL);
+      __ PrepareCallCFunction(0, 2, kScratchReg);
+      __ MovToFloatParameters(i.InputDoubleRegister(0),
+                              i.InputDoubleRegister(1));
+      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+                       0, 2);
+      // Move the result into the double result register.
+      __ MovFromFloatResult(i.OutputDoubleRegister());
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVnegF64:
+      __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kArmVsqrtF64:
+      __ vsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kArmVcvtF64S32: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vmov(scratch, i.InputRegister(0));
+      __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVcvtF64U32: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vmov(scratch, i.InputRegister(0));
+      __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVcvtS32F64: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
+      __ vmov(i.OutputRegister(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVcvtU32F64: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
+      __ vmov(i.OutputRegister(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmLdrb:
+      __ ldrb(i.OutputRegister(), i.InputOffset());
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmLdrsb:
+      __ ldrsb(i.OutputRegister(), i.InputOffset());
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmStrb: {
+      int index = 0;
+      MemOperand operand = i.InputOffset(&index);
+      __ strb(i.InputRegister(index), operand);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmLdrh:
+      __ ldrh(i.OutputRegister(), i.InputOffset());
+      break;
+    case kArmLdrsh:
+      __ ldrsh(i.OutputRegister(), i.InputOffset());
+      break;
+    case kArmStrh: {
+      int index = 0;
+      MemOperand operand = i.InputOffset(&index);
+      __ strh(i.InputRegister(index), operand);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmLdr:
+      __ ldr(i.OutputRegister(), i.InputOffset());
+      break;
+    case kArmStr: {
+      int index = 0;
+      MemOperand operand = i.InputOffset(&index);
+      __ str(i.InputRegister(index), operand);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVldr32: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vldr(scratch, i.InputOffset());
+      __ vcvt_f64_f32(i.OutputDoubleRegister(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVstr32: {
+      int index = 0;
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      MemOperand operand = i.InputOffset(&index);
+      __ vcvt_f32_f64(scratch, i.InputDoubleRegister(index));
+      __ vstr(scratch, operand);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVldr64:
+      __ vldr(i.OutputDoubleRegister(), i.InputOffset());
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVstr64: {
+      int index = 0;
+      MemOperand operand = i.InputOffset(&index);
+      __ vstr(i.InputDoubleRegister(index), operand);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmPush:
+      __ Push(i.InputRegister(0));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmStoreWriteBarrier: {
+      Register object = i.InputRegister(0);
+      Register index = i.InputRegister(1);
+      Register value = i.InputRegister(2);
+      __ add(index, object, index);
+      __ str(value, MemOperand(index));
+      SaveFPRegsMode mode =
+          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+      LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
+      __ RecordWrite(object, index, value, lr_status, mode);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+  }
+}
+
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+                                       FlagsCondition condition) {
+  ArmOperandConverter i(this, instr);
+  Label done;
+
+  // Emit a branch. The true and false targets are always the last two inputs
+  // to the instruction.
+  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
+  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+  bool fallthru = IsNextInAssemblyOrder(fblock);
+  Label* tlabel = code()->GetLabel(tblock);
+  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
+  switch (condition) {
+    case kUnorderedEqual:
+      __ b(vs, flabel);
+    // Fall through.
+    case kEqual:
+      __ b(eq, tlabel);
+      break;
+    case kUnorderedNotEqual:
+      __ b(vs, tlabel);
+    // Fall through.
+    case kNotEqual:
+      __ b(ne, tlabel);
+      break;
+    case kSignedLessThan:
+      __ b(lt, tlabel);
+      break;
+    case kSignedGreaterThanOrEqual:
+      __ b(ge, tlabel);
+      break;
+    case kSignedLessThanOrEqual:
+      __ b(le, tlabel);
+      break;
+    case kSignedGreaterThan:
+      __ b(gt, tlabel);
+      break;
+    case kUnorderedLessThan:
+      __ b(vs, flabel);
+    // Fall through.
+    case kUnsignedLessThan:
+      __ b(lo, tlabel);
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ b(vs, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      __ b(hs, tlabel);
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ b(vs, flabel);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      __ b(ls, tlabel);
+      break;
+    case kUnorderedGreaterThan:
+      __ b(vs, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      __ b(hi, tlabel);
+      break;
+    case kOverflow:
+      __ b(vs, tlabel);
+      break;
+    case kNotOverflow:
+      __ b(vc, tlabel);
+      break;
+  }
+  if (!fallthru) __ b(flabel);  // no fallthru to flabel.
+  __ bind(&done);
+}
+
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+                                        FlagsCondition condition) {
+  ArmOperandConverter i(this, instr);
+  Label done;
+
+  // Materialize a full 32-bit 1 or 0 value. The result register is always the
+  // last output of the instruction.
+  Label check;
+  DCHECK_NE(0, instr->OutputCount());
+  Register reg = i.OutputRegister(instr->OutputCount() - 1);
+  Condition cc = kNoCondition;
+  switch (condition) {
+    case kUnorderedEqual:
+      __ b(vc, &check);
+      __ mov(reg, Operand(0));
+      __ b(&done);
+    // Fall through.
+    case kEqual:
+      cc = eq;
+      break;
+    case kUnorderedNotEqual:
+      __ b(vc, &check);
+      __ mov(reg, Operand(1));
+      __ b(&done);
+    // Fall through.
+    case kNotEqual:
+      cc = ne;
+      break;
+    case kSignedLessThan:
+      cc = lt;
+      break;
+    case kSignedGreaterThanOrEqual:
+      cc = ge;
+      break;
+    case kSignedLessThanOrEqual:
+      cc = le;
+      break;
+    case kSignedGreaterThan:
+      cc = gt;
+      break;
+    case kUnorderedLessThan:
+      __ b(vc, &check);
+      __ mov(reg, Operand(0));
+      __ b(&done);
+    // Fall through.
+    case kUnsignedLessThan:
+      cc = lo;
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ b(vc, &check);
+      __ mov(reg, Operand(1));
+      __ b(&done);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      cc = hs;
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ b(vc, &check);
+      __ mov(reg, Operand(0));
+      __ b(&done);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      cc = ls;
+      break;
+    case kUnorderedGreaterThan:
+      __ b(vc, &check);
+      __ mov(reg, Operand(1));
+      __ b(&done);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      cc = hi;
+      break;
+    case kOverflow:
+      cc = vs;
+      break;
+    case kNotOverflow:
+      cc = vc;
+      break;
+  }
+  __ bind(&check);
+  __ mov(reg, Operand(0));
+  __ mov(reg, Operand(1), LeaveCC, cc);
+  __ bind(&done);
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+      isolate(), deoptimization_id, Deoptimizer::LAZY);
+  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
+void CodeGenerator::AssemblePrologue() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    bool saved_pp;
+    if (FLAG_enable_ool_constant_pool) {
+      __ Push(lr, fp, pp);
+      // Adjust FP to point to saved FP.
+      __ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+      saved_pp = true;
+    } else {
+      __ Push(lr, fp);
+      __ mov(fp, sp);
+      saved_pp = false;
+    }
+    const RegList saves = descriptor->CalleeSavedRegisters();
+    if (saves != 0 || saved_pp) {
+      // Save callee-saved registers.
+      int register_save_area_size = saved_pp ? kPointerSize : 0;
+      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+        if (!((1 << i) & saves)) continue;
+        register_save_area_size += kPointerSize;
+      }
+      frame()->SetRegisterSaveAreaSize(register_save_area_size);
+      __ stm(db_w, sp, saves);
+    }
+  } else if (descriptor->IsJSFunctionCall()) {
+    CompilationInfo* info = linkage()->info();
+    __ Prologue(info->IsCodePreAgingActive());
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+
+    // Sloppy mode functions and builtins need to replace the receiver with the
+    // global proxy when called as functions (without an explicit receiver
+    // object).
+    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+    if (info->strict_mode() == SLOPPY && !info->is_native()) {
+      Label ok;
+      // +2 for return address and saved frame pointer.
+      int receiver_slot = info->scope()->num_parameters() + 2;
+      __ ldr(r2, MemOperand(fp, receiver_slot * kPointerSize));
+      __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
+      __ b(ne, &ok);
+      __ ldr(r2, GlobalObjectOperand());
+      __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
+      __ str(r2, MemOperand(fp, receiver_slot * kPointerSize));
+      __ bind(&ok);
+    }
+
+  } else {
+    __ StubPrologue();
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+  }
+  int stack_slots = frame()->GetSpillSlotCount();
+  if (stack_slots > 0) {
+    __ sub(sp, sp, Operand(stack_slots * kPointerSize));
+  }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    if (frame()->GetRegisterSaveAreaSize() > 0) {
+      // Remove this frame's spill slots first.
+      int stack_slots = frame()->GetSpillSlotCount();
+      if (stack_slots > 0) {
+        __ add(sp, sp, Operand(stack_slots * kPointerSize));
+      }
+      // Restore registers.
+      const RegList saves = descriptor->CalleeSavedRegisters();
+      if (saves != 0) {
+        __ ldm(ia_w, sp, saves);
+      }
+    }
+    __ LeaveFrame(StackFrame::MANUAL);
+    __ Ret();
+  } else {
+    __ LeaveFrame(StackFrame::MANUAL);
+    int pop_count = descriptor->IsJSFunctionCall()
+                        ? static_cast<int>(descriptor->JSParameterCount())
+                        : 0;
+    __ Drop(pop_count);
+    __ Ret();
+  }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  ArmOperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      __ mov(g.ToRegister(destination), src);
+    } else {
+      __ str(src, g.ToMemOperand(destination));
+    }
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    MemOperand src = g.ToMemOperand(source);
+    if (destination->IsRegister()) {
+      __ ldr(g.ToRegister(destination), src);
+    } else {
+      Register temp = kScratchReg;
+      __ ldr(temp, src);
+      __ str(temp, g.ToMemOperand(destination));
+    }
+  } else if (source->IsConstant()) {
+    if (destination->IsRegister() || destination->IsStackSlot()) {
+      Register dst =
+          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
+      Constant src = g.ToConstant(source);
+      switch (src.type()) {
+        case Constant::kInt32:
+          __ mov(dst, Operand(src.ToInt32()));
+          break;
+        case Constant::kInt64:
+          UNREACHABLE();
+          break;
+        case Constant::kFloat64:
+          __ Move(dst,
+                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+          break;
+        case Constant::kExternalReference:
+          __ mov(dst, Operand(src.ToExternalReference()));
+          break;
+        case Constant::kHeapObject:
+          __ Move(dst, src.ToHeapObject());
+          break;
+      }
+      if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
+    } else if (destination->IsDoubleRegister()) {
+      DwVfpRegister result = g.ToDoubleRegister(destination);
+      __ vmov(result, g.ToDouble(source));
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      DwVfpRegister temp = kScratchDoubleReg;
+      __ vmov(temp, g.ToDouble(source));
+      __ vstr(temp, g.ToMemOperand(destination));
+    }
+  } else if (source->IsDoubleRegister()) {
+    DwVfpRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      DwVfpRegister dst = g.ToDoubleRegister(destination);
+      __ Move(dst, src);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      __ vstr(src, g.ToMemOperand(destination));
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+    MemOperand src = g.ToMemOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ vldr(g.ToDoubleRegister(destination), src);
+    } else {
+      DwVfpRegister temp = kScratchDoubleReg;
+      __ vldr(temp, src);
+      __ vstr(temp, g.ToMemOperand(destination));
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  ArmOperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    // Register-register.
+    Register temp = kScratchReg;
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      Register dst = g.ToRegister(destination);
+      __ Move(temp, src);
+      __ Move(src, dst);
+      __ Move(dst, temp);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      MemOperand dst = g.ToMemOperand(destination);
+      __ mov(temp, src);
+      __ ldr(src, dst);
+      __ str(temp, dst);
+    }
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsStackSlot());
+    Register temp_0 = kScratchReg;
+    SwVfpRegister temp_1 = kScratchDoubleReg.low();
+    MemOperand src = g.ToMemOperand(source);
+    MemOperand dst = g.ToMemOperand(destination);
+    __ ldr(temp_0, src);
+    __ vldr(temp_1, dst);
+    __ str(temp_0, dst);
+    __ vstr(temp_1, src);
+  } else if (source->IsDoubleRegister()) {
+    DwVfpRegister temp = kScratchDoubleReg;
+    DwVfpRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      DwVfpRegister dst = g.ToDoubleRegister(destination);
+      __ Move(temp, src);
+      __ Move(src, dst);
+      __ Move(dst, temp);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      MemOperand dst = g.ToMemOperand(destination);
+      __ Move(temp, src);
+      __ vldr(src, dst);
+      __ vstr(temp, dst);
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleStackSlot());
+    Register temp_0 = kScratchReg;
+    DwVfpRegister temp_1 = kScratchDoubleReg;
+    MemOperand src0 = g.ToMemOperand(source);
+    MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
+    MemOperand dst0 = g.ToMemOperand(destination);
+    MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
+    __ vldr(temp_1, dst0);  // Save destination in temp_1.
+    __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
+    __ str(temp_0, dst0);
+    __ ldr(temp_0, src1);
+    __ str(temp_0, dst1);
+    __ vstr(temp_1, src0);
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() {
+  // On 32-bit ARM we do not insert nops for inlined Smi code.
+}
+
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+  int space_needed = Deoptimizer::patch_size();
+  if (!linkage()->info()->IsStub()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      // Block literal pool emission for the duration of the padding.
+      v8::internal::Assembler::BlockConstPoolScope block_const_pool(masm());
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+      while (padding_size > 0) {
+        __ nop();
+        padding_size -= v8::internal::Assembler::kInstrSize;
+      }
+    }
+  }
+  MarkLazyDeoptSite();
+}
+
+#undef __
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/arm/instruction-codes-arm.h b/src/compiler/arm/instruction-codes-arm.h
new file mode 100644
index 0000000..7849ca9
--- /dev/null
+++ b/src/compiler/arm/instruction-codes-arm.h
@@ -0,0 +1,87 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
+#define V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// ARM-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+  V(ArmAdd)                        \
+  V(ArmAnd)                        \
+  V(ArmBic)                        \
+  V(ArmCmp)                        \
+  V(ArmCmn)                        \
+  V(ArmTst)                        \
+  V(ArmTeq)                        \
+  V(ArmOrr)                        \
+  V(ArmEor)                        \
+  V(ArmSub)                        \
+  V(ArmRsb)                        \
+  V(ArmMul)                        \
+  V(ArmMla)                        \
+  V(ArmMls)                        \
+  V(ArmSdiv)                       \
+  V(ArmUdiv)                       \
+  V(ArmMov)                        \
+  V(ArmMvn)                        \
+  V(ArmBfc)                        \
+  V(ArmUbfx)                       \
+  V(ArmVcmpF64)                    \
+  V(ArmVaddF64)                    \
+  V(ArmVsubF64)                    \
+  V(ArmVmulF64)                    \
+  V(ArmVmlaF64)                    \
+  V(ArmVmlsF64)                    \
+  V(ArmVdivF64)                    \
+  V(ArmVmodF64)                    \
+  V(ArmVnegF64)                    \
+  V(ArmVsqrtF64)                   \
+  V(ArmVcvtF64S32)                 \
+  V(ArmVcvtF64U32)                 \
+  V(ArmVcvtS32F64)                 \
+  V(ArmVcvtU32F64)                 \
+  V(ArmVldr32)                     \
+  V(ArmVstr32)                     \
+  V(ArmVldr64)                     \
+  V(ArmVstr64)                     \
+  V(ArmLdrb)                       \
+  V(ArmLdrsb)                      \
+  V(ArmStrb)                       \
+  V(ArmLdrh)                       \
+  V(ArmLdrsh)                      \
+  V(ArmStrh)                       \
+  V(ArmLdr)                        \
+  V(ArmStr)                        \
+  V(ArmPush)                       \
+  V(ArmStoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
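+//
+// Sketch (assuming an Instruction* `instr`): the code generator in
+// code-generator-arm.cc recovers the mode from the opcode bitfield, e.g.
+//
+//   switch (AddressingModeField::decode(instr->opcode())) {
+//     case kMode_Offset_RI:  // [%r0 + K] -> MemOperand(register, immediate)
+//       ...
+//   }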
+#define TARGET_ADDRESSING_MODE_LIST(V)  \
+  V(Offset_RI)        /* [%r0 + K] */   \
+  V(Offset_RR)        /* [%r0 + %r1] */ \
+  V(Operand2_I)       /* K */           \
+  V(Operand2_R)       /* %r0 */         \
+  V(Operand2_R_ASR_I) /* %r0 ASR K */   \
+  V(Operand2_R_LSL_I) /* %r0 LSL K */   \
+  V(Operand2_R_LSR_I) /* %r0 LSR K */   \
+  V(Operand2_R_ROR_I) /* %r0 ROR K */   \
+  V(Operand2_R_ASR_R) /* %r0 ASR %r1 */ \
+  V(Operand2_R_LSL_R) /* %r0 LSL %r1 */ \
+  V(Operand2_R_LSR_R) /* %r0 LSR %r1 */ \
+  V(Operand2_R_ROR_R) /* %r0 ROR %r1 */
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
diff --git a/src/compiler/arm/instruction-selector-arm-unittest.cc b/src/compiler/arm/instruction-selector-arm-unittest.cc
new file mode 100644
index 0000000..208d2e9
--- /dev/null
+++ b/src/compiler/arm/instruction-selector-arm-unittest.cc
@@ -0,0 +1,1900 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+typedef RawMachineAssembler::Label MLabel;
+typedef Node* (RawMachineAssembler::*Constructor)(Node*, Node*);
+
+
+// Data processing instructions.
+struct DPI {
+  Constructor constructor;
+  const char* constructor_name;
+  ArchOpcode arch_opcode;
+  ArchOpcode reverse_arch_opcode;
+  ArchOpcode test_arch_opcode;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const DPI& dpi) {
+  return os << dpi.constructor_name;
+}
+
+
+static const DPI kDPIs[] = {
+    {&RawMachineAssembler::Word32And, "Word32And", kArmAnd, kArmAnd, kArmTst},
+    {&RawMachineAssembler::Word32Or, "Word32Or", kArmOrr, kArmOrr, kArmOrr},
+    {&RawMachineAssembler::Word32Xor, "Word32Xor", kArmEor, kArmEor, kArmTeq},
+    {&RawMachineAssembler::Int32Add, "Int32Add", kArmAdd, kArmAdd, kArmCmn},
+    {&RawMachineAssembler::Int32Sub, "Int32Sub", kArmSub, kArmRsb, kArmCmp}};
+
+
+// Data processing instructions with overflow.
+struct ODPI {
+  Constructor constructor;
+  const char* constructor_name;
+  ArchOpcode arch_opcode;
+  ArchOpcode reverse_arch_opcode;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const ODPI& odpi) {
+  return os << odpi.constructor_name;
+}
+
+
+static const ODPI kODPIs[] = {{&RawMachineAssembler::Int32AddWithOverflow,
+                               "Int32AddWithOverflow", kArmAdd, kArmAdd},
+                              {&RawMachineAssembler::Int32SubWithOverflow,
+                               "Int32SubWithOverflow", kArmSub, kArmRsb}};
+
+
+// Shifts.
+struct Shift {
+  Constructor constructor;
+  const char* constructor_name;
+  int32_t i_low;          // lowest possible immediate
+  int32_t i_high;         // highest possible immediate
+  AddressingMode i_mode;  // Operand2_R_<shift>_I
+  AddressingMode r_mode;  // Operand2_R_<shift>_R
+};
+
+
+std::ostream& operator<<(std::ostream& os, const Shift& shift) {
+  return os << shift.constructor_name;
+}
+
+
+static const Shift kShifts[] = {
+    {&RawMachineAssembler::Word32Sar, "Word32Sar", 1, 32,
+     kMode_Operand2_R_ASR_I, kMode_Operand2_R_ASR_R},
+    {&RawMachineAssembler::Word32Shl, "Word32Shl", 0, 31,
+     kMode_Operand2_R_LSL_I, kMode_Operand2_R_LSL_R},
+    {&RawMachineAssembler::Word32Shr, "Word32Shr", 1, 32,
+     kMode_Operand2_R_LSR_I, kMode_Operand2_R_LSR_R},
+    {&RawMachineAssembler::Word32Ror, "Word32Ror", 1, 31,
+     kMode_Operand2_R_ROR_I, kMode_Operand2_R_ROR_R}};
+
+
+// Immediates (random subset).
+static const int32_t kImmediates[] = {
+    -2147483617, -2147483606, -2113929216, -2080374784, -1996488704,
+    -1879048192, -1459617792, -1358954496, -1342177265, -1275068414,
+    -1073741818, -1073741777, -855638016,  -805306368,  -402653184,
+    -268435444,  -16777216,   0,           35,          61,
+    105,         116,         171,         245,         255,
+    692,         1216,        1248,        1520,        1600,
+    1888,        3744,        4080,        5888,        8384,
+    9344,        9472,        9792,        13312,       15040,
+    15360,       20736,       22272,       23296,       32000,
+    33536,       37120,       45824,       47872,       56320,
+    59392,       65280,       72704,       101376,      147456,
+    161792,      164864,      167936,      173056,      195584,
+    209920,      212992,      356352,      655360,      704512,
+    716800,      851968,      901120,      1044480,     1523712,
+    2572288,     3211264,     3588096,     3833856,     3866624,
+    4325376,     5177344,     6488064,     7012352,     7471104,
+    14090240,    16711680,    19398656,    22282240,    28573696,
+    30408704,    30670848,    43253760,    54525952,    55312384,
+    56623104,    68157440,    115343360,   131072000,   187695104,
+    188743680,   195035136,   197132288,   203423744,   218103808,
+    267386880,   268435470,   285212672,   402653185,   415236096,
+    595591168,   603979776,   603979778,   629145600,   1073741835,
+    1073741855,  1073741861,  1073741884,  1157627904,  1476395008,
+    1476395010,  1610612741,  2030043136,  2080374785,  2097152000};
+
+}  // namespace
+
+
+// -----------------------------------------------------------------------------
+// Data processing instructions.
+
+
+typedef InstructionSelectorTestWithParam<DPI> InstructionSelectorDPITest;
+
+
+TEST_P(InstructionSelectorDPITest, Parameters) {
+  const DPI dpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorDPITest, Immediate) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+TEST_P(InstructionSelectorDPITest, ShiftByParameter) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return((m.*dpi.constructor)(
+        m.Parameter(0),
+        (m.*shift.constructor)(m.Parameter(1), m.Parameter(2))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return((m.*dpi.constructor)(
+        (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
+        m.Parameter(2)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+TEST_P(InstructionSelectorDPITest, ShiftByImmediate) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      m.Return((m.*dpi.constructor)(
+          m.Parameter(0),
+          (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm))));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      m.Return((m.*dpi.constructor)(
+          (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+          m.Parameter(1)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.reverse_arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+  }
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchWithParameters) {
+  const DPI dpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  MLabel a, b;
+  m.Branch((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)), &a, &b);
+  m.Bind(&a);
+  m.Return(m.Int32Constant(1));
+  m.Bind(&b);
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchWithImmediate) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)), &a,
+             &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)), &a,
+             &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchWithShiftByParameter) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch((m.*dpi.constructor)(
+                 m.Parameter(0),
+                 (m.*shift.constructor)(m.Parameter(1), m.Parameter(2))),
+             &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch((m.*dpi.constructor)(
+                 (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
+                 m.Parameter(2)),
+             &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchWithShiftByImmediate) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      MLabel a, b;
+      m.Branch((m.*dpi.constructor)(m.Parameter(0),
+                                    (m.*shift.constructor)(
+                                        m.Parameter(1), m.Int32Constant(imm))),
+               &a, &b);
+      m.Bind(&a);
+      m.Return(m.Int32Constant(1));
+      m.Bind(&b);
+      m.Return(m.Int32Constant(0));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(5U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+      EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+    }
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      MLabel a, b;
+      m.Branch((m.*dpi.constructor)(
+                   (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+                   m.Parameter(1)),
+               &a, &b);
+      m.Bind(&a);
+      m.Return(m.Int32Constant(1));
+      m.Bind(&b);
+      m.Return(m.Int32Constant(0));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(5U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+      EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+    }
+  }
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchIfZeroWithParameters) {
+  const DPI dpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  MLabel a, b;
+  m.Branch(m.Word32Equal((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)),
+                         m.Int32Constant(0)),
+           &a, &b);
+  m.Bind(&a);
+  m.Return(m.Int32Constant(1));
+  m.Bind(&b);
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  EXPECT_EQ(kEqual, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchIfNotZeroWithParameters) {
+  const DPI dpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  MLabel a, b;
+  m.Branch(
+      m.Word32NotEqual((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)),
+                       m.Int32Constant(0)),
+      &a, &b);
+  m.Bind(&a);
+  m.Return(m.Int32Constant(1));
+  m.Bind(&b);
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchIfZeroWithImmediate) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Word32Equal(
+                 (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+                 m.Int32Constant(0)),
+             &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Word32Equal(
+                 (m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)),
+                 m.Int32Constant(0)),
+             &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchIfNotZeroWithImmediate) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Word32NotEqual(
+                 (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+                 m.Int32Constant(0)),
+             &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Word32NotEqual(
+                 (m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)),
+                 m.Int32Constant(0)),
+             &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorDPITest,
+                        ::testing::ValuesIn(kDPIs));
+
+
+// -----------------------------------------------------------------------------
+// Data processing instructions with overflow.
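+//
+// These operations produce two projections: projection 0 is the result value
+// and projection 1 is the overflow flag. The tests cover using either
+// projection on its own, both together, and the flag as a branch condition.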
+
+
+typedef InstructionSelectorTestWithParam<ODPI> InstructionSelectorODPITest;
+
+
+TEST_P(InstructionSelectorODPITest, OvfWithParameters) {
+  const ODPI odpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(
+      m.Projection(1, (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1))));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_LE(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorODPITest, OvfWithImmediate) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        1, (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        1, (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorODPITest, OvfWithShiftByParameter) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        1, (m.*odpi.constructor)(
+               m.Parameter(0),
+               (m.*shift.constructor)(m.Parameter(1), m.Parameter(2)))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        1, (m.*odpi.constructor)(
+               (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
+               m.Parameter(0))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorODPITest, OvfWithShiftByImmediate) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      m.Return(m.Projection(
+          1, (m.*odpi.constructor)(m.Parameter(0),
+                                   (m.*shift.constructor)(
+                                       m.Parameter(1), m.Int32Constant(imm)))));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_LE(1U, s[0]->OutputCount());
+      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+      EXPECT_EQ(kOverflow, s[0]->flags_condition());
+    }
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      m.Return(m.Projection(
+          1, (m.*odpi.constructor)(
+                 (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
+                 m.Parameter(0))));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_LE(1U, s[0]->OutputCount());
+      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+      EXPECT_EQ(kOverflow, s[0]->flags_condition());
+    }
+  }
+}
+
+
+TEST_P(InstructionSelectorODPITest, ValWithParameters) {
+  const ODPI odpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(
+      m.Projection(0, (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1))));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_LE(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
+TEST_P(InstructionSelectorODPITest, ValWithImmediate) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        0, (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        0, (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+  }
+}
+
+
+TEST_P(InstructionSelectorODPITest, ValWithShiftByParameter) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        0, (m.*odpi.constructor)(
+               m.Parameter(0),
+               (m.*shift.constructor)(m.Parameter(1), m.Parameter(2)))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        0, (m.*odpi.constructor)(
+               (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
+               m.Parameter(0))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+  }
+}
+
+
+TEST_P(InstructionSelectorODPITest, ValWithShiftByImmediate) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      m.Return(m.Projection(
+          0, (m.*odpi.constructor)(m.Parameter(0),
+                                   (m.*shift.constructor)(
+                                       m.Parameter(1), m.Int32Constant(imm)))));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_LE(1U, s[0]->OutputCount());
+      EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+    }
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      m.Return(m.Projection(
+          0, (m.*odpi.constructor)(
+                 (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
+                 m.Parameter(0))));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_LE(1U, s[0]->OutputCount());
+      EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+    }
+  }
+}
+
+
+TEST_P(InstructionSelectorODPITest, BothWithParameters) {
+  const ODPI odpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
+  m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+  Stream s = m.Build();
+  ASSERT_LE(1U, s.size());
+  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(2U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorODPITest, BothWithImmediate) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
+    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+    Stream s = m.Build();
+    ASSERT_LE(1U, s.size());
+    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(2U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    Node* n = (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0));
+    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+    Stream s = m.Build();
+    ASSERT_LE(1U, s.size());
+    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(2U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorODPITest, BothWithShiftByParameter) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    Node* n = (m.*odpi.constructor)(
+        m.Parameter(0), (m.*shift.constructor)(m.Parameter(1), m.Parameter(2)));
+    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+    Stream s = m.Build();
+    ASSERT_LE(1U, s.size());
+    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(2U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    Node* n = (m.*odpi.constructor)(
+        (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)), m.Parameter(2));
+    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+    Stream s = m.Build();
+    ASSERT_LE(1U, s.size());
+    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(2U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorODPITest, BothWithShiftByImmediate) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      Node* n = (m.*odpi.constructor)(
+          m.Parameter(0),
+          (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)));
+      m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+      Stream s = m.Build();
+      ASSERT_LE(1U, s.size());
+      EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_EQ(2U, s[0]->OutputCount());
+      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+      EXPECT_EQ(kOverflow, s[0]->flags_condition());
+    }
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      Node* n = (m.*odpi.constructor)(
+          (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+          m.Parameter(1));
+      m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+      Stream s = m.Build();
+      ASSERT_LE(1U, s.size());
+      EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_EQ(2U, s[0]->OutputCount());
+      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+      EXPECT_EQ(kOverflow, s[0]->flags_condition());
+    }
+  }
+}
+
+
+TEST_P(InstructionSelectorODPITest, BranchWithParameters) {
+  const ODPI odpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  MLabel a, b;
+  Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
+  m.Branch(m.Projection(1, n), &a, &b);
+  m.Bind(&a);
+  m.Return(m.Int32Constant(0));
+  m.Bind(&b);
+  m.Return(m.Projection(0, n));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(4U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorODPITest, BranchWithImmediate) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
+    m.Branch(m.Projection(1, n), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(0));
+    m.Bind(&b);
+    m.Return(m.Projection(0, n));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(4U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    Node* n = (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0));
+    m.Branch(m.Projection(1, n), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(0));
+    m.Bind(&b);
+    m.Return(m.Projection(0, n));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(4U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorODPITest, BranchIfZeroWithParameters) {
+  const ODPI odpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  MLabel a, b;
+  Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
+  m.Branch(m.Word32Equal(m.Projection(1, n), m.Int32Constant(0)), &a, &b);
+  m.Bind(&a);
+  m.Return(m.Projection(0, n));
+  m.Bind(&b);
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(4U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  EXPECT_EQ(kNotOverflow, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorODPITest, BranchIfNotZeroWithParameters) {
+  const ODPI odpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  MLabel a, b;
+  Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
+  m.Branch(m.Word32NotEqual(m.Projection(1, n), m.Int32Constant(0)), &a, &b);
+  m.Bind(&a);
+  m.Return(m.Projection(0, n));
+  m.Bind(&b);
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(4U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorODPITest,
+                        ::testing::ValuesIn(kODPIs));
+
+
+// -----------------------------------------------------------------------------
+// Shifts.
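+//
+// A shift that is not folded into another instruction is selected as a mov
+// with a shifted second operand.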
+
+
+typedef InstructionSelectorTestWithParam<Shift> InstructionSelectorShiftTest;
+
+
+TEST_P(InstructionSelectorShiftTest, Parameters) {
+  const Shift shift = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return((m.*shift.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmMov, s[0]->arch_opcode());
+  EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Immediate) {
+  const Shift shift = GetParam();
+  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return((m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmMov, s[0]->arch_opcode());
+    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32EqualWithParameter) {
+  const Shift shift = GetParam();
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(
+        m.Word32Equal(m.Parameter(0),
+                      (m.*shift.constructor)(m.Parameter(1), m.Parameter(2))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(
+        m.Word32Equal((m.*shift.constructor)(m.Parameter(1), m.Parameter(2)),
+                      m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32EqualWithParameterAndImmediate) {
+  const Shift shift = GetParam();
+  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(
+        (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
+        m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(
+        m.Parameter(0),
+        (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32EqualToZeroWithParameters) {
+  const Shift shift = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(
+      m.Word32Equal(m.Int32Constant(0),
+                    (m.*shift.constructor)(m.Parameter(0), m.Parameter(1))));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmMov, s[0]->arch_opcode());
+  EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(2U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(kEqual, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32EqualToZeroWithImmediate) {
+  const Shift shift = GetParam();
+  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(
+        m.Int32Constant(0),
+        (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmMov, s[0]->arch_opcode());
+    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(2U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32NotWithParameters) {
+  const Shift shift = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Word32Not((m.*shift.constructor)(m.Parameter(0), m.Parameter(1))));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmMvn, s[0]->arch_opcode());
+  EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32NotWithImmediate) {
+  const Shift shift = GetParam();
+  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Not(
+        (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmMvn, s[0]->arch_opcode());
+    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32AndWithWord32NotWithParameters) {
+  const Shift shift = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Word32And(m.Parameter(0), m.Word32Not((m.*shift.constructor)(
+                                           m.Parameter(1), m.Parameter(2)))));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmBic, s[0]->arch_opcode());
+  EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+  EXPECT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32AndWithWord32NotWithImmediate) {
+  const Shift shift = GetParam();
+  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Word32And(m.Parameter(0),
+                         m.Word32Not((m.*shift.constructor)(
+                             m.Parameter(1), m.Int32Constant(imm)))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmBic, s[0]->arch_opcode());
+    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
+                        ::testing::ValuesIn(kShifts));
+
+
+// -----------------------------------------------------------------------------
+// Memory access instructions.
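+//
+// Each MemoryAccess entry pairs a machine type with the expected load and
+// store opcodes and a sample of immediate offsets that are valid for that
+// access size.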
+
+
+namespace {
+
+struct MemoryAccess {
+  MachineType type;
+  ArchOpcode ldr_opcode;
+  ArchOpcode str_opcode;
+  bool (InstructionSelectorTest::Stream::*val_predicate)(
+      const InstructionOperand*) const;
+  const int32_t immediates[40];
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
+  OStringStream ost;
+  ost << memacc.type;
+  return os << ost.c_str();
+}
+
+
+static const MemoryAccess kMemoryAccesses[] = {
+    {kMachInt8,
+     kArmLdrsb,
+     kArmStrb,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+      -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+      115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+    {kMachUint8,
+     kArmLdrb,
+     kArmStrb,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-4095, -3914, -3536, -3234, -3185, -3169, -1073, -990, -859, -720, -434,
+      -127, -124, -122, -105, -91, -86, -64, -55, -53, -30, -10, -3, 0, 20, 28,
+      39, 58, 64, 73, 75, 100, 108, 121, 686, 963, 1363, 2759, 3449, 4095}},
+    {kMachInt16,
+     kArmLdrsh,
+     kArmStrh,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-255, -251, -232, -220, -144, -138, -130, -126, -116, -115, -102, -101,
+      -98, -69, -59, -56, -39, -35, -23, -19, -7, 0, 22, 26, 37, 68, 83, 87, 98,
+      102, 108, 111, 117, 171, 195, 203, 204, 245, 246, 255}},
+    {kMachUint16,
+     kArmLdrh,
+     kArmStrh,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-255, -230, -201, -172, -125, -119, -118, -105, -98, -79, -54, -42, -41,
+      -32, -12, -11, -5, -4, 0, 5, 9, 25, 28, 51, 58, 60, 89, 104, 108, 109,
+      114, 116, 120, 138, 150, 161, 166, 172, 228, 255}},
+    {kMachInt32,
+     kArmLdr,
+     kArmStr,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-4095, -1898, -1685, -1562, -1408, -1313, -344, -128, -116, -100, -92,
+      -80, -72, -71, -56, -25, -21, -11, -9, 0, 3, 5, 27, 28, 42, 52, 63, 88,
+      93, 97, 125, 846, 1037, 2102, 2403, 2597, 2632, 2997, 3935, 4095}},
+    {kMachFloat32,
+     kArmVldr32,
+     kArmVstr32,
+     &InstructionSelectorTest::Stream::IsDouble,
+     {-1020, -928, -896, -772, -728, -680, -660, -488, -372, -112, -100, -92,
+      -84, -80, -72, -64, -60, -56, -52, -48, -36, -32, -20, -8, -4, 0, 8, 20,
+      24, 40, 64, 112, 204, 388, 516, 852, 856, 976, 988, 1020}},
+    {kMachFloat64,
+     kArmVldr64,
+     kArmVstr64,
+     &InstructionSelectorTest::Stream::IsDouble,
+     {-1020, -948, -796, -696, -612, -364, -320, -308, -128, -112, -108, -104,
+      -96, -84, -80, -56, -48, -40, -20, 0, 24, 28, 36, 48, 64, 84, 96, 100,
+      108, 116, 120, 140, 156, 408, 432, 444, 772, 832, 940, 1020}}};
+
+}  // namespace
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccess>
+    InstructionSelectorMemoryAccessTest;
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+  m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Offset_RR, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output()));
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
+  const MemoryAccess memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, memacc.immediates) {
+    StreamBuilder m(this, memacc.type, kMachPtr);
+    m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Offset_RI, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output()));
+  }
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
+  m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Offset_RR, s[0]->addressing_mode());
+  EXPECT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(0U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
+  const MemoryAccess memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, memacc.immediates) {
+    StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
+    m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
+            m.Parameter(1));
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Offset_RI, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(0U, s[0]->OutputCount());
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorMemoryAccessTest,
+                        ::testing::ValuesIn(kMemoryAccesses));
+
+
+// -----------------------------------------------------------------------------
+// Miscellaneous.
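+//
+// Covers combined operations (e.g. a multiply folded into an add as mla),
+// division and modulus lowering, and selections that depend on the available
+// features (SUDIV, MLS, ARMv7 bitfield instructions).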
+
+
+TEST_F(InstructionSelectorTest, Int32AddWithInt32Mul) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(
+        m.Int32Add(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmMla, s[0]->arch_opcode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(
+        m.Int32Add(m.Int32Mul(m.Parameter(1), m.Parameter(2)), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmMla, s[0]->arch_opcode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
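+// Without SUDIV support, Int32Div is lowered to a float64 division: both
+// operands are converted to float64, divided, and the quotient is converted
+// back to int32.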
+TEST_F(InstructionSelectorTest, Int32DivWithParameters) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32Div(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(4U, s.size());
+  EXPECT_EQ(kArmVcvtF64S32, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kArmVcvtF64S32, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
+  ASSERT_EQ(2U, s[2]->InputCount());
+  ASSERT_EQ(1U, s[2]->OutputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+  EXPECT_EQ(kArmVcvtS32F64, s[3]->arch_opcode());
+  ASSERT_EQ(1U, s[3]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32DivWithParametersForSUDIV) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32Div(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build(SUDIV);
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmSdiv, s[0]->arch_opcode());
+}
+
+
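+// Without SUDIV support, Int32Mod reuses the float64 division sequence above
+// and then multiplies the truncated quotient back and subtracts it from the
+// dividend.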
+TEST_F(InstructionSelectorTest, Int32ModWithParameters) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(6U, s.size());
+  EXPECT_EQ(kArmVcvtF64S32, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kArmVcvtF64S32, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
+  ASSERT_EQ(2U, s[2]->InputCount());
+  ASSERT_EQ(1U, s[2]->OutputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+  EXPECT_EQ(kArmVcvtS32F64, s[3]->arch_opcode());
+  ASSERT_EQ(1U, s[3]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
+  EXPECT_EQ(kArmMul, s[4]->arch_opcode());
+  ASSERT_EQ(1U, s[4]->OutputCount());
+  ASSERT_EQ(2U, s[4]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[3]->Output()), s.ToVreg(s[4]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->InputAt(0)), s.ToVreg(s[4]->InputAt(1)));
+  EXPECT_EQ(kArmSub, s[5]->arch_opcode());
+  ASSERT_EQ(1U, s[5]->OutputCount());
+  ASSERT_EQ(2U, s[5]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[5]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[4]->Output()), s.ToVreg(s[5]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32ModWithParametersForSUDIV) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build(SUDIV);
+  ASSERT_EQ(3U, s.size());
+  EXPECT_EQ(kArmSdiv, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(kArmMul, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  ASSERT_EQ(2U, s[1]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
+  EXPECT_EQ(kArmSub, s[2]->arch_opcode());
+  ASSERT_EQ(1U, s[2]->OutputCount());
+  ASSERT_EQ(2U, s[2]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[2]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32ModWithParametersForSUDIVAndMLS) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build(MLS, SUDIV);
+  ASSERT_EQ(2U, s.size());
+  EXPECT_EQ(kArmSdiv, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(kArmMls, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  ASSERT_EQ(3U, s[1]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[1]->InputAt(2)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32MulWithParameters) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32Mul(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmMul, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
+  // x * (2^k + 1) -> x + (x << k)
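+  // For example, with k == 2 this is selected as "add Rd, Rx, Rx, lsl #2",
+  // i.e. x + 4 * x == 5 * x.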
+  TRACED_FORRANGE(int32_t, k, 1, 30) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) + 1)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmAdd, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  // x * (2^k - 1) -> -x + (x << k)
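+  // For example, with k == 2 this is selected as "rsb Rd, Rx, Rx, lsl #2",
+  // i.e. 4 * x - x == 3 * x.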
+  TRACED_FORRANGE(int32_t, k, 3, 30) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) - 1)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmRsb, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  // (2^k + 1) * x -> x + (x << k)
+  TRACED_FORRANGE(int32_t, k, 1, 30) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Int32Mul(m.Int32Constant((1 << k) + 1), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmAdd, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  // (2^k - 1) * x -> -x + (x << k)
+  TRACED_FORRANGE(int32_t, k, 3, 30) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Int32Mul(m.Int32Constant((1 << k) - 1), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmRsb, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Int32SubWithInt32Mul) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(
+      m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
+  Stream s = m.Build();
+  ASSERT_EQ(2U, s.size());
+  EXPECT_EQ(kArmMul, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kArmSub, s[1]->arch_opcode());
+  ASSERT_EQ(2U, s[1]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32SubWithInt32MulForMLS) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(
+      m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
+  Stream s = m.Build(MLS);
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmMls, s[0]->arch_opcode());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(3U, s[0]->InputCount());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32UDivWithParameters) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32UDiv(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(4U, s.size());
+  EXPECT_EQ(kArmVcvtF64U32, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kArmVcvtF64U32, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
+  ASSERT_EQ(2U, s[2]->InputCount());
+  ASSERT_EQ(1U, s[2]->OutputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+  EXPECT_EQ(kArmVcvtU32F64, s[3]->arch_opcode());
+  ASSERT_EQ(1U, s[3]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32UDivWithParametersForSUDIV) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32UDiv(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build(SUDIV);
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmUdiv, s[0]->arch_opcode());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32UModWithParameters) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(6U, s.size());
+  EXPECT_EQ(kArmVcvtF64U32, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kArmVcvtF64U32, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
+  ASSERT_EQ(2U, s[2]->InputCount());
+  ASSERT_EQ(1U, s[2]->OutputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+  EXPECT_EQ(kArmVcvtU32F64, s[3]->arch_opcode());
+  ASSERT_EQ(1U, s[3]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
+  EXPECT_EQ(kArmMul, s[4]->arch_opcode());
+  ASSERT_EQ(1U, s[4]->OutputCount());
+  ASSERT_EQ(2U, s[4]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[3]->Output()), s.ToVreg(s[4]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->InputAt(0)), s.ToVreg(s[4]->InputAt(1)));
+  EXPECT_EQ(kArmSub, s[5]->arch_opcode());
+  ASSERT_EQ(1U, s[5]->OutputCount());
+  ASSERT_EQ(2U, s[5]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[5]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[4]->Output()), s.ToVreg(s[5]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32UModWithParametersForSUDIV) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build(SUDIV);
+  ASSERT_EQ(3U, s.size());
+  EXPECT_EQ(kArmUdiv, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(kArmMul, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  ASSERT_EQ(2U, s[1]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
+  EXPECT_EQ(kArmSub, s[2]->arch_opcode());
+  ASSERT_EQ(1U, s[2]->OutputCount());
+  ASSERT_EQ(2U, s[2]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[2]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32UModWithParametersForSUDIVAndMLS) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build(MLS, SUDIV);
+  ASSERT_EQ(2U, s.size());
+  EXPECT_EQ(kArmUdiv, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(kArmMls, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  ASSERT_EQ(3U, s[1]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[1]->InputAt(2)));
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWithUbfxImmediateForARMv7) {
+  TRACED_FORRANGE(int32_t, width, 1, 32) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32And(m.Parameter(0),
+                         m.Int32Constant(0xffffffffu >> (32 - width))));
+    Stream s = m.Build(ARMv7);
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+  }
+  TRACED_FORRANGE(int32_t, width, 1, 32) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32And(m.Int32Constant(0xffffffffu >> (32 - width)),
+                         m.Parameter(0)));
+    Stream s = m.Build(ARMv7);
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWithBfcImmediateForARMv7) {
+  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, (32 - lsb) - 1) {
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(m.Word32And(
+          m.Parameter(0),
+          m.Int32Constant(~((0xffffffffu >> (32 - width)) << lsb))));
+      Stream s = m.Build(ARMv7);
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArmBfc, s[0]->arch_opcode());
+      ASSERT_EQ(1U, s[0]->OutputCount());
+      EXPECT_TRUE(
+          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+    }
+  }
+  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, (32 - lsb) - 1) {
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(
+          m.Word32And(m.Int32Constant(~((0xffffffffu >> (32 - width)) << lsb)),
+                      m.Parameter(0)));
+      Stream s = m.Build(ARMv7);
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArmBfc, s[0]->arch_opcode());
+      ASSERT_EQ(1U, s[0]->OutputCount());
+      EXPECT_TRUE(
+          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+    }
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediateForARMv7) {
+  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+      uint32_t max = 1 << lsb;
+      if (max > static_cast<uint32_t>(kMaxInt)) max -= 1;
+      uint32_t jnk = rng()->NextInt(max);
+      uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
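+      // The junk bits below the mask are shifted out, so the combination must
+      // still be selected as a single ubfx.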
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
+                           m.Int32Constant(lsb)));
+      Stream s = m.Build(ARMv7);
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+    }
+  }
+  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+      uint32_t max = 1 << lsb;
+      if (max > static_cast<uint32_t>(kMaxInt)) max -= 1;
+      uint32_t jnk = rng()->NextInt(max);
+      uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
+                           m.Int32Constant(lsb)));
+      Stream s = m.Build(ARMv7);
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+    }
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWithWord32Not) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Word32And(m.Parameter(0), m.Word32Not(m.Parameter(1))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmBic, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Word32And(m.Word32Not(m.Parameter(0)), m.Parameter(1)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmBic, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32EqualWithParameters) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Word32Equal(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(kEqual, s[0]->flags_condition());
+}
+
+
+TEST_F(InstructionSelectorTest, Word32EqualWithImmediate) {
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    if (imm == 0) continue;
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(imm)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    if (imm == 0) continue;
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(m.Int32Constant(imm), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmTst, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmTst, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32NotWithParameter) {
+  StreamBuilder m(this, kMachInt32, kMachInt32);
+  m.Return(m.Word32Not(m.Parameter(0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmMvn, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(1U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrWithImmediateForARMv7) {
+  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb)),
+                           m.Int32Constant(0xffffffffu >> (32 - width))));
+      Stream s = m.Build(ARMv7);
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+    }
+  }
+  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(m.Word32And(m.Int32Constant(0xffffffffu >> (32 - width)),
+                           m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb))));
+      Stream s = m.Build(ARMv7);
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+    }
+  }
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
new file mode 100644
index 0000000..ae93b27
--- /dev/null
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -0,0 +1,950 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bits.h"
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Adds Arm-specific methods for generating InstructionOperands.
+class ArmOperandGenerator FINAL : public OperandGenerator {
+ public:
+  explicit ArmOperandGenerator(InstructionSelector* selector)
+      : OperandGenerator(selector) {}
+
+  InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
+    if (CanBeImmediate(node, opcode)) {
+      return UseImmediate(node);
+    }
+    return UseRegister(node);
+  }
+
+  bool CanBeImmediate(Node* node, InstructionCode opcode) {
+    Int32Matcher m(node);
+    if (!m.HasValue()) return false;
+    int32_t value = m.Value();
+    switch (ArchOpcodeField::decode(opcode)) {
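+      // These take an addressing mode 1 immediate (an 8-bit value rotated
+      // right by an even amount). The complemented immediate is also accepted
+      // because MOV/MVN and AND/BIC are interchangeable forms.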
+      case kArmAnd:
+      case kArmMov:
+      case kArmMvn:
+      case kArmBic:
+        return ImmediateFitsAddrMode1Instruction(value) ||
+               ImmediateFitsAddrMode1Instruction(~value);
+
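+      // ADD/SUB and CMP/CMN are likewise interchangeable, so the negated
+      // immediate is also accepted.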
+      case kArmAdd:
+      case kArmSub:
+      case kArmCmp:
+      case kArmCmn:
+        return ImmediateFitsAddrMode1Instruction(value) ||
+               ImmediateFitsAddrMode1Instruction(-value);
+
+      case kArmTst:
+      case kArmTeq:
+      case kArmOrr:
+      case kArmEor:
+      case kArmRsb:
+        return ImmediateFitsAddrMode1Instruction(value);
+
+      case kArmVldr32:
+      case kArmVstr32:
+      case kArmVldr64:
+      case kArmVstr64:
+        return value >= -1020 && value <= 1020 && (value % 4) == 0;
+
+      case kArmLdrb:
+      case kArmLdrsb:
+      case kArmStrb:
+      case kArmLdr:
+      case kArmStr:
+      case kArmStoreWriteBarrier:
+        return value >= -4095 && value <= 4095;
+
+      case kArmLdrh:
+      case kArmLdrsh:
+      case kArmStrh:
+        return value >= -255 && value <= 255;
+
+      case kArchCallCodeObject:
+      case kArchCallJSFunction:
+      case kArchJmp:
+      case kArchNop:
+      case kArchRet:
+      case kArchTruncateDoubleToI:
+      case kArmMul:
+      case kArmMla:
+      case kArmMls:
+      case kArmSdiv:
+      case kArmUdiv:
+      case kArmBfc:
+      case kArmUbfx:
+      case kArmVcmpF64:
+      case kArmVaddF64:
+      case kArmVsubF64:
+      case kArmVmulF64:
+      case kArmVmlaF64:
+      case kArmVmlsF64:
+      case kArmVdivF64:
+      case kArmVmodF64:
+      case kArmVnegF64:
+      case kArmVsqrtF64:
+      case kArmVcvtF64S32:
+      case kArmVcvtF64U32:
+      case kArmVcvtS32F64:
+      case kArmVcvtU32F64:
+      case kArmPush:
+        return false;
+    }
+    UNREACHABLE();
+    return false;
+  }
+
+ private:
+  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
+    return Assembler::ImmediateFitsAddrMode1Instruction(imm);
+  }
+};
+
+
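+// Shared routine for double-precision binary operations with a register
+// result and two register inputs.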
+static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+                            Node* node) {
+  ArmOperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseRegister(node->InputAt(1)));
+}
+
+
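+// Tries to match a Word32Ror node as an Operand2 rotate, using the immediate
+// form when the rotate count is a constant in the range [1, 31] and the
+// register form otherwise.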
+static bool TryMatchROR(InstructionSelector* selector,
+                        InstructionCode* opcode_return, Node* node,
+                        InstructionOperand** value_return,
+                        InstructionOperand** shift_return) {
+  ArmOperandGenerator g(selector);
+  if (node->opcode() != IrOpcode::kWord32Ror) return false;
+  Int32BinopMatcher m(node);
+  *value_return = g.UseRegister(m.left().node());
+  if (m.right().IsInRange(1, 31)) {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ROR_I);
+    *shift_return = g.UseImmediate(m.right().node());
+  } else {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ROR_R);
+    *shift_return = g.UseRegister(m.right().node());
+  }
+  return true;
+}
+
+
+static inline bool TryMatchASR(InstructionSelector* selector,
+                               InstructionCode* opcode_return, Node* node,
+                               InstructionOperand** value_return,
+                               InstructionOperand** shift_return) {
+  ArmOperandGenerator g(selector);
+  if (node->opcode() != IrOpcode::kWord32Sar) return false;
+  Int32BinopMatcher m(node);
+  *value_return = g.UseRegister(m.left().node());
+  if (m.right().IsInRange(1, 32)) {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
+    *shift_return = g.UseImmediate(m.right().node());
+  } else {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ASR_R);
+    *shift_return = g.UseRegister(m.right().node());
+  }
+  return true;
+}
+
+
+static inline bool TryMatchLSL(InstructionSelector* selector,
+                               InstructionCode* opcode_return, Node* node,
+                               InstructionOperand** value_return,
+                               InstructionOperand** shift_return) {
+  ArmOperandGenerator g(selector);
+  if (node->opcode() != IrOpcode::kWord32Shl) return false;
+  Int32BinopMatcher m(node);
+  *value_return = g.UseRegister(m.left().node());
+  if (m.right().IsInRange(0, 31)) {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
+    *shift_return = g.UseImmediate(m.right().node());
+  } else {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSL_R);
+    *shift_return = g.UseRegister(m.right().node());
+  }
+  return true;
+}
+
+
+static inline bool TryMatchLSR(InstructionSelector* selector,
+                               InstructionCode* opcode_return, Node* node,
+                               InstructionOperand** value_return,
+                               InstructionOperand** shift_return) {
+  ArmOperandGenerator g(selector);
+  if (node->opcode() != IrOpcode::kWord32Shr) return false;
+  Int32BinopMatcher m(node);
+  *value_return = g.UseRegister(m.left().node());
+  if (m.right().IsInRange(1, 32)) {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
+    *shift_return = g.UseImmediate(m.right().node());
+  } else {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSR_R);
+    *shift_return = g.UseRegister(m.right().node());
+  }
+  return true;
+}
+
+
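+// Tries to match the node as any of the Operand2 shift forms (ASR, LSL, LSR
+// or ROR).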
+static inline bool TryMatchShift(InstructionSelector* selector,
+                                 InstructionCode* opcode_return, Node* node,
+                                 InstructionOperand** value_return,
+                                 InstructionOperand** shift_return) {
+  return (
+      TryMatchASR(selector, opcode_return, node, value_return, shift_return) ||
+      TryMatchLSL(selector, opcode_return, node, value_return, shift_return) ||
+      TryMatchLSR(selector, opcode_return, node, value_return, shift_return) ||
+      TryMatchROR(selector, opcode_return, node, value_return, shift_return));
+}
+
+
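+// Tries to encode the node as either an Operand2 immediate or a shifted
+// register, filling in the matched inputs and their count on success.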
+static inline bool TryMatchImmediateOrShift(InstructionSelector* selector,
+                                            InstructionCode* opcode_return,
+                                            Node* node,
+                                            size_t* input_count_return,
+                                            InstructionOperand** inputs) {
+  ArmOperandGenerator g(selector);
+  if (g.CanBeImmediate(node, *opcode_return)) {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_I);
+    inputs[0] = g.UseImmediate(node);
+    *input_count_return = 1;
+    return true;
+  }
+  if (TryMatchShift(selector, opcode_return, node, &inputs[0], &inputs[1])) {
+    *input_count_return = 2;
+    return true;
+  }
+  return false;
+}
+
+
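+// Shared routine for binary operations with a reversed variant (e.g.
+// sub/rsb): prefers an immediate or shifted right operand, then the reversed
+// opcode with swapped operands, and finally falls back to two registers.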
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode, InstructionCode reverse_opcode,
+                       FlagsContinuation* cont) {
+  ArmOperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  InstructionOperand* inputs[5];
+  size_t input_count = 0;
+  InstructionOperand* outputs[2];
+  size_t output_count = 0;
+
+  if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
+                               &input_count, &inputs[1])) {
+    inputs[0] = g.UseRegister(m.left().node());
+    input_count++;
+  } else if (TryMatchImmediateOrShift(selector, &reverse_opcode,
+                                      m.left().node(), &input_count,
+                                      &inputs[1])) {
+    inputs[0] = g.UseRegister(m.right().node());
+    opcode = reverse_opcode;
+    input_count++;
+  } else {
+    opcode |= AddressingModeField::encode(kMode_Operand2_R);
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.UseRegister(m.right().node());
+  }
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  }
+
+  outputs[output_count++] = g.DefineAsRegister(node);
+  if (cont->IsSet()) {
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0, input_count);
+  DCHECK_NE(0, output_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+  DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
+
+  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+                                      outputs, input_count, inputs);
+  if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode, InstructionCode reverse_opcode) {
+  FlagsContinuation cont;
+  VisitBinop(selector, node, opcode, reverse_opcode, &cont);
+}
+
+
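+// Selects a load based on the load representation, using an immediate offset
+// when the index fits the addressing mode and a register offset otherwise.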
+void InstructionSelector::VisitLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
+  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+  ArmOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kArmVldr32;
+      break;
+    case kRepFloat64:
+      opcode = kArmVldr64;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = typ == kTypeUint32 ? kArmLdrb : kArmLdrsb;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeUint32 ? kArmLdrh : kArmLdrsh;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord32:
+      opcode = kArmLdr;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_Offset_RI),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+  } else {
+    Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+  }
+}
+
+
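+// Selects a store, emitting a write barrier sequence with fixed registers for
+// tagged stores that require it and a plain store instruction otherwise.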
+void InstructionSelector::VisitStore(Node* node) {
+  ArmOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+  MachineType rep = RepresentationOf(store_rep.machine_type());
+  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+    DCHECK(rep == kRepTagged);
+    // TODO(dcarney): refactor RecordWrite function to take temp registers
+    //                and pass them here instead of using fixed regs
+    // TODO(dcarney): handle immediate indices.
+    InstructionOperand* temps[] = {g.TempRegister(r5), g.TempRegister(r6)};
+    Emit(kArmStoreWriteBarrier, NULL, g.UseFixed(base, r4),
+         g.UseFixed(index, r5), g.UseFixed(value, r6), arraysize(temps),
+         temps);
+    return;
+  }
+  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kArmVstr32;
+      break;
+    case kRepFloat64:
+      opcode = kArmVstr64;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = kArmStrb;
+      break;
+    case kRepWord16:
+      opcode = kArmStrh;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord32:
+      opcode = kArmStr;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), NULL,
+         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+  } else {
+    Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), NULL,
+         g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+  }
+}
+
+
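+// Emits a BIC (bit clear), folding a shift of the cleared operand into the
+// instruction when possible.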
+static inline void EmitBic(InstructionSelector* selector, Node* node,
+                           Node* left, Node* right) {
+  ArmOperandGenerator g(selector);
+  InstructionCode opcode = kArmBic;
+  InstructionOperand* value_operand;
+  InstructionOperand* shift_operand;
+  if (TryMatchShift(selector, &opcode, right, &value_operand, &shift_operand)) {
+    selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
+                   value_operand, shift_operand);
+    return;
+  }
+  selector->Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
+                 g.DefineAsRegister(node), g.UseRegister(left),
+                 g.UseRegister(right));
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().IsWord32Xor() && CanCover(node, m.left().node())) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().Is(-1)) {
+      EmitBic(this, node, m.right().node(), mleft.left().node());
+      return;
+    }
+  }
+  if (m.right().IsWord32Xor() && CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+    if (mright.right().Is(-1)) {
+      EmitBic(this, node, m.left().node(), mright.left().node());
+      return;
+    }
+  }
+  if (IsSupported(ARMv7) && m.right().HasValue()) {
+    uint32_t value = m.right().Value();
+    uint32_t width = base::bits::CountPopulation32(value);
+    uint32_t msb = base::bits::CountLeadingZeros32(value);
+    if (width != 0 && msb + width == 32) {
+      DCHECK_EQ(0, base::bits::CountTrailingZeros32(value));
+      if (m.left().IsWord32Shr()) {
+        Int32BinopMatcher mleft(m.left().node());
+        if (mleft.right().IsInRange(0, 31)) {
+          Emit(kArmUbfx, g.DefineAsRegister(node),
+               g.UseRegister(mleft.left().node()),
+               g.UseImmediate(mleft.right().node()), g.TempImmediate(width));
+          return;
+        }
+      }
+      Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.TempImmediate(0), g.TempImmediate(width));
+      return;
+    }
+    // Try to interpret this AND as BFC.
+    width = 32 - width;
+    msb = base::bits::CountLeadingZeros32(~value);
+    uint32_t lsb = base::bits::CountTrailingZeros32(~value);
+    if (msb + width + lsb == 32) {
+      Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+           g.TempImmediate(lsb), g.TempImmediate(width));
+      return;
+    }
+  }
+  VisitBinop(this, node, kArmAnd, kArmAnd);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+  VisitBinop(this, node, kArmOrr, kArmOrr);
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.right().Is(-1)) {
+    InstructionCode opcode = kArmMvn;
+    InstructionOperand* value_operand;
+    InstructionOperand* shift_operand;
+    if (TryMatchShift(this, &opcode, m.left().node(), &value_operand,
+                      &shift_operand)) {
+      Emit(opcode, g.DefineAsRegister(node), value_operand, shift_operand);
+      return;
+    }
+    Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
+         g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+    return;
+  }
+  VisitBinop(this, node, kArmEor, kArmEor);
+}
+
+
+template <typename TryMatchShift>
+static inline void VisitShift(InstructionSelector* selector, Node* node,
+                              TryMatchShift try_match_shift,
+                              FlagsContinuation* cont) {
+  ArmOperandGenerator g(selector);
+  InstructionCode opcode = kArmMov;
+  InstructionOperand* inputs[4];
+  size_t input_count = 2;
+  InstructionOperand* outputs[2];
+  size_t output_count = 0;
+
+  CHECK(try_match_shift(selector, &opcode, node, &inputs[0], &inputs[1]));
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  }
+
+  outputs[output_count++] = g.DefineAsRegister(node);
+  if (cont->IsSet()) {
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0, input_count);
+  DCHECK_NE(0, output_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+  DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
+
+  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+                                      outputs, input_count, inputs);
+  if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+template <typename TryMatchShift>
+static inline void VisitShift(InstructionSelector* selector, Node* node,
+                              TryMatchShift try_match_shift) {
+  FlagsContinuation cont;
+  VisitShift(selector, node, try_match_shift, &cont);
+}
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+  VisitShift(this, node, TryMatchLSL);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (IsSupported(ARMv7) && m.left().IsWord32And() &&
+      m.right().IsInRange(0, 31)) {
+    int32_t lsb = m.right().Value();
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().HasValue()) {
+      uint32_t value = (mleft.right().Value() >> lsb) << lsb;
+      uint32_t width = base::bits::CountPopulation32(value);
+      uint32_t msb = base::bits::CountLeadingZeros32(value);
+      if (msb + width + lsb == 32) {
+        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(value));
+        Emit(kArmUbfx, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+             g.TempImmediate(width));
+        return;
+      }
+    }
+  }
+  VisitShift(this, node, TryMatchLSR);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+  VisitShift(this, node, TryMatchASR);
+}
+
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+  VisitShift(this, node, TryMatchROR);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
+    Int32BinopMatcher mleft(m.left().node());
+    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mleft.left().node()),
+         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
+    return;
+  }
+  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+    return;
+  }
+  VisitBinop(this, node, kArmAdd, kArmAdd);
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (IsSupported(MLS) && m.right().IsInt32Mul() &&
+      CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+    Emit(kArmMls, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+    return;
+  }
+  VisitBinop(this, node, kArmSub, kArmRsb);
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.right().HasValue() && m.right().Value() > 0) {
+    int32_t value = m.right().Value();
+    if (base::bits::IsPowerOfTwo32(value - 1)) {
+      Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value - 1)));
+      return;
+    }
+    if (value < kMaxInt && base::bits::IsPowerOfTwo32(value + 1)) {
+      Emit(kArmRsb | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value + 1)));
+      return;
+    }
+  }
+  Emit(kArmMul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
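+// Emits an integer division. Uses SDIV/UDIV directly when the SUDIV feature
+// is available; otherwise converts the operands to doubles, divides with VDIV
+// and converts the result back to an integer.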
+static void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
+                    ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode,
+                    InstructionOperand* result_operand,
+                    InstructionOperand* left_operand,
+                    InstructionOperand* right_operand) {
+  ArmOperandGenerator g(selector);
+  if (selector->IsSupported(SUDIV)) {
+    selector->Emit(div_opcode, result_operand, left_operand, right_operand);
+    return;
+  }
+  InstructionOperand* left_double_operand = g.TempDoubleRegister();
+  InstructionOperand* right_double_operand = g.TempDoubleRegister();
+  InstructionOperand* result_double_operand = g.TempDoubleRegister();
+  selector->Emit(f64i32_opcode, left_double_operand, left_operand);
+  selector->Emit(f64i32_opcode, right_double_operand, right_operand);
+  selector->Emit(kArmVdivF64, result_double_operand, left_double_operand,
+                 right_double_operand);
+  selector->Emit(i32f64_opcode, result_operand, result_double_operand);
+}
+
+
+static void VisitDiv(InstructionSelector* selector, Node* node,
+                     ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
+                     ArchOpcode i32f64_opcode) {
+  ArmOperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode,
+          g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+          g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+  VisitDiv(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
+}
+
+
+void InstructionSelector::VisitInt32UDiv(Node* node) {
+  VisitDiv(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
+}
+
+
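+// Shared routine for integer modulus: divides, then computes the remainder
+// with MLS, or with MUL followed by SUB when MLS is not available.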
+static void VisitMod(InstructionSelector* selector, Node* node,
+                     ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
+                     ArchOpcode i32f64_opcode) {
+  ArmOperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  InstructionOperand* div_operand = g.TempRegister();
+  InstructionOperand* result_operand = g.DefineAsRegister(node);
+  InstructionOperand* left_operand = g.UseRegister(m.left().node());
+  InstructionOperand* right_operand = g.UseRegister(m.right().node());
+  EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand,
+          left_operand, right_operand);
+  if (selector->IsSupported(MLS)) {
+    selector->Emit(kArmMls, result_operand, div_operand, right_operand,
+                   left_operand);
+    return;
+  }
+  InstructionOperand* mul_operand = g.TempRegister();
+  selector->Emit(kArmMul, mul_operand, div_operand, right_operand);
+  selector->Emit(kArmSub, result_operand, left_operand, mul_operand);
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+  VisitMod(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
+}
+
+
+void InstructionSelector::VisitInt32UMod(Node* node) {
+  VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVcvtF64S32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVcvtF64U32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVcvtS32F64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVcvtU32F64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+    Int32BinopMatcher mleft(m.left().node());
+    Emit(kArmVmlaF64, g.DefineSameAsFirst(node),
+         g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+         g.UseRegister(mleft.right().node()));
+    return;
+  }
+  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+    Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+         g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()));
+    return;
+  }
+  VisitRRRFloat64(this, kArmVaddF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+    Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+         g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()));
+    return;
+  }
+  VisitRRRFloat64(this, kArmVsubF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+  ArmOperandGenerator g(this);
+  Float64BinopMatcher m(node);
+  if (m.right().Is(-1.0)) {
+    Emit(kArmVnegF64, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+  } else {
+    VisitRRRFloat64(this, kArmVmulF64, node);
+  }
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+  VisitRRRFloat64(this, kArmVdivF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVmodF64, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
+       g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVsqrtF64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+                                    BasicBlock* deoptimization) {
+  ArmOperandGenerator g(this);
+  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+
+  FrameStateDescriptor* frame_state_descriptor = NULL;
+  if (descriptor->NeedsFrameState()) {
+    frame_state_descriptor =
+        GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
+  }
+
+  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+  // Compute InstructionOperands for inputs and outputs.
+  // TODO(turbofan): on ARM it's probably better to use the code object in a
+  // register if there are multiple uses of it. Improve constant pool and the
+  // heuristics in the register allocator for where to emit constants.
+  InitializeCallBuffer(call, &buffer, true, false);
+
+  // TODO(dcarney): might be possible to use claim/poke instead
+  // Push any stack arguments.
+  for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
+       input != buffer.pushed_nodes.rend(); input++) {
+    Emit(kArmPush, NULL, g.UseRegister(*input));
+  }
+
+  // Select the appropriate opcode based on the call type.
+  InstructionCode opcode;
+  switch (descriptor->kind()) {
+    case CallDescriptor::kCallCodeObject: {
+      opcode = kArchCallCodeObject;
+      break;
+    }
+    case CallDescriptor::kCallJSFunction:
+      opcode = kArchCallJSFunction;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  opcode |= MiscField::encode(descriptor->flags());
+
+  // Emit the call instruction.
+  Instruction* call_instr =
+      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+           buffer.instruction_args.size(), &buffer.instruction_args.front());
+
+  call_instr->MarkAsCall();
+  if (deoptimization != NULL) {
+    DCHECK(continuation != NULL);
+    call_instr->MarkAsControl();
+  }
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  VisitBinop(this, node, kArmAdd, kArmAdd, cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  VisitBinop(this, node, kArmSub, kArmRsb, cont);
+}
+
+
+// Shared routine for multiple compare operations.
+static void VisitWordCompare(InstructionSelector* selector, Node* node,
+                             InstructionCode opcode, FlagsContinuation* cont,
+                             bool commutative) {
+  ArmOperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  InstructionOperand* inputs[5];
+  size_t input_count = 0;
+  InstructionOperand* outputs[1];
+  size_t output_count = 0;
+
+  if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
+                               &input_count, &inputs[1])) {
+    inputs[0] = g.UseRegister(m.left().node());
+    input_count++;
+  } else if (TryMatchImmediateOrShift(selector, &opcode, m.left().node(),
+                                      &input_count, &inputs[1])) {
+    if (!commutative) cont->Commute();
+    inputs[0] = g.UseRegister(m.right().node());
+    input_count++;
+  } else {
+    opcode |= AddressingModeField::encode(kMode_Operand2_R);
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.UseRegister(m.right().node());
+  }
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  } else {
+    DCHECK(cont->IsSet());
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0, input_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+
+  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+                                      outputs, input_count, inputs);
+  if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+  switch (node->opcode()) {
+    case IrOpcode::kInt32Add:
+      return VisitWordCompare(this, node, kArmCmn, cont, true);
+    case IrOpcode::kInt32Sub:
+      return VisitWordCompare(this, node, kArmCmp, cont, false);
+    case IrOpcode::kWord32And:
+      return VisitWordCompare(this, node, kArmTst, cont, true);
+    case IrOpcode::kWord32Or:
+      return VisitBinop(this, node, kArmOrr, kArmOrr, cont);
+    case IrOpcode::kWord32Xor:
+      return VisitWordCompare(this, node, kArmTeq, cont, true);
+    case IrOpcode::kWord32Sar:
+      return VisitShift(this, node, TryMatchASR, cont);
+    case IrOpcode::kWord32Shl:
+      return VisitShift(this, node, TryMatchLSL, cont);
+    case IrOpcode::kWord32Shr:
+      return VisitShift(this, node, TryMatchLSR, cont);
+    case IrOpcode::kWord32Ror:
+      return VisitShift(this, node, TryMatchROR, cont);
+    default:
+      break;
+  }
+
+  ArmOperandGenerator g(this);
+  InstructionCode opcode =
+      cont->Encode(kArmTst) | AddressingModeField::encode(kMode_Operand2_R);
+  if (cont->IsBranch()) {
+    Emit(opcode, NULL, g.UseRegister(node), g.UseRegister(node),
+         g.Label(cont->true_block()),
+         g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    Emit(opcode, g.DefineAsRegister(cont->result()), g.UseRegister(node),
+         g.UseRegister(node));
+  }
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  VisitWordCompare(this, node, kArmCmp, cont, false);
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+                                              FlagsContinuation* cont) {
+  ArmOperandGenerator g(this);
+  Float64BinopMatcher m(node);
+  if (cont->IsBranch()) {
+    Emit(cont->Encode(kArmVcmpF64), NULL, g.UseRegister(m.left().node()),
+         g.UseRegister(m.right().node()), g.Label(cont->true_block()),
+         g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    DCHECK(cont->IsSet());
+    Emit(cont->Encode(kArmVcmpF64), g.DefineAsRegister(cont->result()),
+         g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/arm/linkage-arm.cc b/src/compiler/arm/linkage-arm.cc
new file mode 100644
index 0000000..6673a47
--- /dev/null
+++ b/src/compiler/arm/linkage-arm.cc
@@ -0,0 +1,66 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct ArmLinkageHelperTraits {
+  static Register ReturnValueReg() { return r0; }
+  static Register ReturnValue2Reg() { return r1; }
+  static Register JSCallFunctionReg() { return r1; }
+  static Register ContextReg() { return cp; }
+  static Register RuntimeCallFunctionReg() { return r1; }
+  static Register RuntimeCallArgCountReg() { return r0; }
+  static RegList CCalleeSaveRegisters() {
+    return r4.bit() | r5.bit() | r6.bit() | r7.bit() | r8.bit() | r9.bit() |
+           r10.bit();
+  }
+  static Register CRegisterParameter(int i) {
+    static Register register_parameters[] = {r0, r1, r2, r3};
+    return register_parameters[i];
+  }
+  static int CRegisterParametersLength() { return 4; }
+};
+
+
+typedef LinkageHelper<ArmLinkageHelperTraits> LH;
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+  return LH::GetJSCallDescriptor(zone, parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+    Runtime::FunctionId function, int parameter_count,
+    Operator::Properties properties, Zone* zone) {
+  return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
+                                      properties);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    CallDescriptor::Flags flags, Zone* zone) {
+  return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
+                                   flags);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+                                                  MachineSignature* sig) {
+  return LH::GetSimplifiedCDescriptor(zone, sig);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
new file mode 100644
index 0000000..31c53d3
--- /dev/null
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -0,0 +1,879 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/arm64/macro-assembler-arm64.h"
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+// Adds Arm64-specific methods to convert InstructionOperands.
+class Arm64OperandConverter FINAL : public InstructionOperandConverter {
+ public:
+  Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
+      : InstructionOperandConverter(gen, instr) {}
+
+  Register InputRegister32(int index) {
+    return ToRegister(instr_->InputAt(index)).W();
+  }
+
+  Register InputRegister64(int index) { return InputRegister(index); }
+
+  Operand InputImmediate(int index) {
+    return ToImmediate(instr_->InputAt(index));
+  }
+
+  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
+
+  Operand InputOperand64(int index) { return InputOperand(index); }
+
+  Operand InputOperand32(int index) {
+    return ToOperand32(instr_->InputAt(index));
+  }
+
+  Register OutputRegister64() { return OutputRegister(); }
+
+  Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }
+
+  MemOperand MemoryOperand(int* first_index) {
+    const int index = *first_index;
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_None:
+        break;
+      case kMode_MRI:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+      case kMode_MRR:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
+                          SXTW);
+    }
+    UNREACHABLE();
+    return MemOperand(no_reg);
+  }
+
+  MemOperand MemoryOperand() {
+    int index = 0;
+    return MemoryOperand(&index);
+  }
+
+  Operand ToOperand(InstructionOperand* op) {
+    if (op->IsRegister()) {
+      return Operand(ToRegister(op));
+    }
+    return ToImmediate(op);
+  }
+
+  Operand ToOperand32(InstructionOperand* op) {
+    if (op->IsRegister()) {
+      return Operand(ToRegister(op).W());
+    }
+    return ToImmediate(op);
+  }
+
+  Operand ToImmediate(InstructionOperand* operand) {
+    Constant constant = ToConstant(operand);
+    switch (constant.type()) {
+      case Constant::kInt32:
+        return Operand(constant.ToInt32());
+      case Constant::kInt64:
+        return Operand(constant.ToInt64());
+      case Constant::kFloat64:
+        return Operand(
+            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+      case Constant::kExternalReference:
+        return Operand(constant.ToExternalReference());
+      case Constant::kHeapObject:
+        return Operand(constant.ToHeapObject());
+    }
+    UNREACHABLE();
+    return Operand(-1);
+  }
+
+  MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
+    DCHECK(op != NULL);
+    DCHECK(!op->IsRegister());
+    DCHECK(!op->IsDoubleRegister());
+    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+    // The linkage computes where all spill slots are located.
+    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+    return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
+                      offset.offset());
+  }
+};
+
+
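+// Assembles a shift of the given register width, using the register form when
+// the shift amount is in a register and the immediate form otherwise.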
+#define ASSEMBLE_SHIFT(asm_instr, width)                                       \
+  do {                                                                         \
+    if (instr->InputAt(1)->IsRegister()) {                                     \
+      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0),       \
+                   i.InputRegister##width(1));                                 \
+    } else {                                                                   \
+      int64_t imm = i.InputOperand##width(1).immediate().value();              \
+      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), imm); \
+    }                                                                          \
+  } while (0);
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+  Arm64OperandConverter i(this, instr);
+  InstructionCode opcode = instr->opcode();
+  switch (ArchOpcodeField::decode(opcode)) {
+    case kArchCallCodeObject: {
+      EnsureSpaceForLazyDeopt();
+      if (instr->InputAt(0)->IsImmediate()) {
+        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
+                RelocInfo::CODE_TARGET);
+      } else {
+        Register target = i.InputRegister(0);
+        __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
+        __ Call(target);
+      }
+      AddSafepointAndDeopt(instr);
+      break;
+    }
+    case kArchCallJSFunction: {
+      EnsureSpaceForLazyDeopt();
+      Register func = i.InputRegister(0);
+      if (FLAG_debug_code) {
+        // Check the function's context matches the context argument.
+        UseScratchRegisterScope scope(masm());
+        Register temp = scope.AcquireX();
+        __ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
+        __ cmp(cp, temp);
+        __ Assert(eq, kWrongFunctionContext);
+      }
+      __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+      __ Call(x10);
+      AddSafepointAndDeopt(instr);
+      break;
+    }
+    case kArchJmp:
+      __ B(code_->GetLabel(i.InputBlock(0)));
+      break;
+    case kArchNop:
+      // don't emit code for nops.
+      break;
+    case kArchRet:
+      AssembleReturn();
+      break;
+    case kArchTruncateDoubleToI:
+      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+      break;
+    case kArm64Add:
+      __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kArm64Add32:
+      if (FlagsModeField::decode(opcode) != kFlags_none) {
+        __ Adds(i.OutputRegister32(), i.InputRegister32(0),
+                i.InputOperand32(1));
+      } else {
+        __ Add(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      }
+      break;
+    case kArm64And:
+      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kArm64And32:
+      __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      break;
+    case kArm64Mul:
+      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      break;
+    case kArm64Mul32:
+      __ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
+      break;
+    case kArm64Idiv:
+      __ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      break;
+    case kArm64Idiv32:
+      __ Sdiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
+      break;
+    case kArm64Udiv:
+      __ Udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      break;
+    case kArm64Udiv32:
+      __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
+      break;
+    case kArm64Imod: {
+      UseScratchRegisterScope scope(masm());
+      Register temp = scope.AcquireX();
+      __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
+      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
+      break;
+    }
+    case kArm64Imod32: {
+      UseScratchRegisterScope scope(masm());
+      Register temp = scope.AcquireW();
+      __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
+      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
+              i.InputRegister32(0));
+      break;
+    }
+    case kArm64Umod: {
+      UseScratchRegisterScope scope(masm());
+      Register temp = scope.AcquireX();
+      __ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
+      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
+      break;
+    }
+    case kArm64Umod32: {
+      UseScratchRegisterScope scope(masm());
+      Register temp = scope.AcquireW();
+      __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
+      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
+              i.InputRegister32(0));
+      break;
+    }
+    // TODO(dcarney): use mvn instr??
+    case kArm64Not:
+      __ Orn(i.OutputRegister(), xzr, i.InputOperand(0));
+      break;
+    case kArm64Not32:
+      __ Orn(i.OutputRegister32(), wzr, i.InputOperand32(0));
+      break;
+    case kArm64Neg:
+      __ Neg(i.OutputRegister(), i.InputOperand(0));
+      break;
+    case kArm64Neg32:
+      __ Neg(i.OutputRegister32(), i.InputOperand32(0));
+      break;
+    case kArm64Or:
+      __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kArm64Or32:
+      __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      break;
+    case kArm64Xor:
+      __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kArm64Xor32:
+      __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      break;
+    case kArm64Sub:
+      __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kArm64Sub32:
+      if (FlagsModeField::decode(opcode) != kFlags_none) {
+        __ Subs(i.OutputRegister32(), i.InputRegister32(0),
+                i.InputOperand32(1));
+      } else {
+        __ Sub(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      }
+      break;
+    case kArm64Shl:
+      ASSEMBLE_SHIFT(Lsl, 64);
+      break;
+    case kArm64Shl32:
+      ASSEMBLE_SHIFT(Lsl, 32);
+      break;
+    case kArm64Shr:
+      ASSEMBLE_SHIFT(Lsr, 64);
+      break;
+    case kArm64Shr32:
+      ASSEMBLE_SHIFT(Lsr, 32);
+      break;
+    case kArm64Sar:
+      ASSEMBLE_SHIFT(Asr, 64);
+      break;
+    case kArm64Sar32:
+      ASSEMBLE_SHIFT(Asr, 32);
+      break;
+    case kArm64Ror:
+      ASSEMBLE_SHIFT(Ror, 64);
+      break;
+    case kArm64Ror32:
+      ASSEMBLE_SHIFT(Ror, 32);
+      break;
+    case kArm64Mov32:
+      __ Mov(i.OutputRegister32(), i.InputRegister32(0));
+      break;
+    case kArm64Sxtw:
+      __ Sxtw(i.OutputRegister(), i.InputRegister32(0));
+      break;
+    case kArm64Claim: {
+      int words = MiscField::decode(instr->opcode());
+      __ Claim(words);
+      break;
+    }
+    case kArm64Poke: {
+      int slot = MiscField::decode(instr->opcode());
+      Operand operand(slot * kPointerSize);
+      __ Poke(i.InputRegister(0), operand);
+      break;
+    }
+    case kArm64PokePairZero: {
+      // TODO(dcarney): test slot offset and register order.
+      int slot = MiscField::decode(instr->opcode()) - 1;
+      __ PokePair(i.InputRegister(0), xzr, slot * kPointerSize);
+      break;
+    }
+    case kArm64PokePair: {
+      int slot = MiscField::decode(instr->opcode()) - 1;
+      __ PokePair(i.InputRegister(1), i.InputRegister(0), slot * kPointerSize);
+      break;
+    }
+    case kArm64Cmp:
+      __ Cmp(i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kArm64Cmp32:
+      __ Cmp(i.InputRegister32(0), i.InputOperand32(1));
+      break;
+    case kArm64Cmn:
+      __ Cmn(i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kArm64Cmn32:
+      __ Cmn(i.InputRegister32(0), i.InputOperand32(1));
+      break;
+    case kArm64Tst:
+      __ Tst(i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kArm64Tst32:
+      __ Tst(i.InputRegister32(0), i.InputOperand32(1));
+      break;
+    case kArm64Float64Cmp:
+      __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kArm64Float64Add:
+      __ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      break;
+    case kArm64Float64Sub:
+      __ Fsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      break;
+    case kArm64Float64Mul:
+      __ Fmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      break;
+    case kArm64Float64Div:
+      __ Fdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      break;
+    case kArm64Float64Mod: {
+      // TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc
+      FrameScope scope(masm(), StackFrame::MANUAL);
+      DCHECK(d0.is(i.InputDoubleRegister(0)));
+      DCHECK(d1.is(i.InputDoubleRegister(1)));
+      DCHECK(d0.is(i.OutputDoubleRegister()));
+      // TODO(dcarney): make sure this saves all relevant registers.
+      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+                       0, 2);
+      break;
+    }
+    case kArm64Float64Sqrt:
+      __ Fsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kArm64Float64ToInt32:
+      __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
+      break;
+    case kArm64Float64ToUint32:
+      __ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
+      break;
+    case kArm64Int32ToFloat64:
+      __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
+      break;
+    case kArm64Uint32ToFloat64:
+      __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
+      break;
+    case kArm64Ldrb:
+      __ Ldrb(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kArm64Ldrsb:
+      __ Ldrsb(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kArm64Strb:
+      __ Strb(i.InputRegister(2), i.MemoryOperand());
+      break;
+    case kArm64Ldrh:
+      __ Ldrh(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kArm64Ldrsh:
+      __ Ldrsh(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kArm64Strh:
+      __ Strh(i.InputRegister(2), i.MemoryOperand());
+      break;
+    case kArm64LdrW:
+      __ Ldr(i.OutputRegister32(), i.MemoryOperand());
+      break;
+    case kArm64StrW:
+      __ Str(i.InputRegister32(2), i.MemoryOperand());
+      break;
+    case kArm64Ldr:
+      __ Ldr(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kArm64Str:
+      __ Str(i.InputRegister(2), i.MemoryOperand());
+      break;
+    case kArm64LdrS: {
+      UseScratchRegisterScope scope(masm());
+      FPRegister scratch = scope.AcquireS();
+      __ Ldr(scratch, i.MemoryOperand());
+      __ Fcvt(i.OutputDoubleRegister(), scratch);
+      break;
+    }
+    case kArm64StrS: {
+      UseScratchRegisterScope scope(masm());
+      FPRegister scratch = scope.AcquireS();
+      __ Fcvt(scratch, i.InputDoubleRegister(2));
+      __ Str(scratch, i.MemoryOperand());
+      break;
+    }
+    case kArm64LdrD:
+      __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
+      break;
+    case kArm64StrD:
+      __ Str(i.InputDoubleRegister(2), i.MemoryOperand());
+      break;
+    case kArm64StoreWriteBarrier: {
+      Register object = i.InputRegister(0);
+      Register index = i.InputRegister(1);
+      Register value = i.InputRegister(2);
+      __ Add(index, object, Operand(index, SXTW));
+      __ Str(value, MemOperand(index));
+      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
+                                ? kSaveFPRegs
+                                : kDontSaveFPRegs;
+      // TODO(dcarney): we shouldn't test write barriers from c calls.
+      LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
+      UseScratchRegisterScope scope(masm());
+      Register temp = no_reg;
+      if (csp.is(masm()->StackPointer())) {
+        temp = scope.AcquireX();
+        lr_status = kLRHasBeenSaved;
+        __ Push(lr, temp);  // Need to push a pair
+      }
+      __ RecordWrite(object, index, value, lr_status, mode);
+      if (csp.is(masm()->StackPointer())) {
+        __ Pop(temp, lr);
+      }
+      break;
+    }
+  }
+}
+
+
+// Assemble branches after this instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+                                       FlagsCondition condition) {
+  Arm64OperandConverter i(this, instr);
+  Label done;
+
+  // Emit a branch. The true and false targets are always the last two inputs
+  // to the instruction.
+  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
+  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+  bool fallthru = IsNextInAssemblyOrder(fblock);
+  Label* tlabel = code()->GetLabel(tblock);
+  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
+  switch (condition) {
+    case kUnorderedEqual:
+      __ B(vs, flabel);
+    // Fall through.
+    case kEqual:
+      __ B(eq, tlabel);
+      break;
+    case kUnorderedNotEqual:
+      __ B(vs, tlabel);
+    // Fall through.
+    case kNotEqual:
+      __ B(ne, tlabel);
+      break;
+    case kSignedLessThan:
+      __ B(lt, tlabel);
+      break;
+    case kSignedGreaterThanOrEqual:
+      __ B(ge, tlabel);
+      break;
+    case kSignedLessThanOrEqual:
+      __ B(le, tlabel);
+      break;
+    case kSignedGreaterThan:
+      __ B(gt, tlabel);
+      break;
+    case kUnorderedLessThan:
+      __ B(vs, flabel);
+    // Fall through.
+    case kUnsignedLessThan:
+      __ B(lo, tlabel);
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ B(vs, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      __ B(hs, tlabel);
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ B(vs, flabel);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      __ B(ls, tlabel);
+      break;
+    case kUnorderedGreaterThan:
+      __ B(vs, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      __ B(hi, tlabel);
+      break;
+    case kOverflow:
+      __ B(vs, tlabel);
+      break;
+    case kNotOverflow:
+      __ B(vc, tlabel);
+      break;
+  }
+  if (!fallthru) __ B(flabel);  // no fallthru to flabel.
+  __ Bind(&done);
+}
+
+
+// Assemble boolean materializations after this instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+                                        FlagsCondition condition) {
+  Arm64OperandConverter i(this, instr);
+  Label done;
+
+  // Materialize a full 64-bit 1 or 0 value. The result register is always the
+  // last output of the instruction.
+  Label check;
+  DCHECK_NE(0, instr->OutputCount());
+  Register reg = i.OutputRegister(instr->OutputCount() - 1);
+  Condition cc = nv;
+  switch (condition) {
+    case kUnorderedEqual:
+      __ B(vc, &check);
+      __ Mov(reg, 0);
+      __ B(&done);
+    // Fall through.
+    case kEqual:
+      cc = eq;
+      break;
+    case kUnorderedNotEqual:
+      __ B(vc, &check);
+      __ Mov(reg, 1);
+      __ B(&done);
+    // Fall through.
+    case kNotEqual:
+      cc = ne;
+      break;
+    case kSignedLessThan:
+      cc = lt;
+      break;
+    case kSignedGreaterThanOrEqual:
+      cc = ge;
+      break;
+    case kSignedLessThanOrEqual:
+      cc = le;
+      break;
+    case kSignedGreaterThan:
+      cc = gt;
+      break;
+    case kUnorderedLessThan:
+      __ B(vc, &check);
+      __ Mov(reg, 0);
+      __ B(&done);
+    // Fall through.
+    case kUnsignedLessThan:
+      cc = lo;
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ B(vc, &check);
+      __ Mov(reg, 1);
+      __ B(&done);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      cc = hs;
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ B(vc, &check);
+      __ Mov(reg, 0);
+      __ B(&done);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      cc = ls;
+      break;
+    case kUnorderedGreaterThan:
+      __ B(vc, &check);
+      __ Mov(reg, 1);
+      __ B(&done);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      cc = hi;
+      break;
+    case kOverflow:
+      cc = vs;
+      break;
+    case kNotOverflow:
+      cc = vc;
+      break;
+  }
+  __ bind(&check);
+  __ Cset(reg, cc);
+  __ Bind(&done);
+}
+
+
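+// Emits a call to the lazy deoptimization entry for the given deoptimization
+// id.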
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+      isolate(), deoptimization_id, Deoptimizer::LAZY);
+  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
+// TODO(dcarney): increase stack slots in frame once before first use.
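+// Rounds the slot count up to an even number so that csp stays 16-byte
+// aligned.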
+static int AlignedStackSlots(int stack_slots) {
+  if (stack_slots & 1) stack_slots++;
+  return stack_slots;
+}
+
+
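+// Builds the stack frame: C entry frames push lr/fp and the callee-saved
+// registers on csp, JS function frames use the standard prologue on jssp, and
+// everything else uses the stub prologue; spill slots are reserved below.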
+void CodeGenerator::AssemblePrologue() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    __ SetStackPointer(csp);
+    __ Push(lr, fp);
+    __ Mov(fp, csp);
+    // TODO(dcarney): correct callee saved registers.
+    __ PushCalleeSavedRegisters();
+    frame()->SetRegisterSaveAreaSize(20 * kPointerSize);
+  } else if (descriptor->IsJSFunctionCall()) {
+    CompilationInfo* info = linkage()->info();
+    __ SetStackPointer(jssp);
+    __ Prologue(info->IsCodePreAgingActive());
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+
+    // Sloppy mode functions and builtins need to replace the receiver with the
+    // global proxy when called as functions (without an explicit receiver
+    // object).
+    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+    if (info->strict_mode() == SLOPPY && !info->is_native()) {
+      Label ok;
+      // +2 for return address and saved frame pointer.
+      int receiver_slot = info->scope()->num_parameters() + 2;
+      __ Ldr(x10, MemOperand(fp, receiver_slot * kXRegSize));
+      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
+      __ Ldr(x10, GlobalObjectMemOperand());
+      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
+      __ Str(x10, MemOperand(fp, receiver_slot * kXRegSize));
+      __ Bind(&ok);
+    }
+
+  } else {
+    __ SetStackPointer(jssp);
+    __ StubPrologue();
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+  }
+  int stack_slots = frame()->GetSpillSlotCount();
+  if (stack_slots > 0) {
+    Register sp = __ StackPointer();
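+    // Claim the spill area on the frame's stack pointer; csp is always
+    // lowered as well, by an aligned amount, so the system stack pointer
+    // keeps covering everything the frame uses.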
+    if (!sp.Is(csp)) {
+      __ Sub(sp, sp, stack_slots * kPointerSize);
+    }
+    __ Sub(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
+  }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    if (frame()->GetRegisterSaveAreaSize() > 0) {
+      // Remove this frame's spill slots first.
+      int stack_slots = frame()->GetSpillSlotCount();
+      if (stack_slots > 0) {
+        __ Add(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
+      }
+      // Restore registers.
+      // TODO(dcarney): correct callee saved registers.
+      __ PopCalleeSavedRegisters();
+    }
+    __ Mov(csp, fp);
+    __ Pop(fp, lr);
+    __ Ret();
+  } else {
+    __ Mov(jssp, fp);
+    __ Pop(fp, lr);
+    int pop_count = descriptor->IsJSFunctionCall()
+                        ? static_cast<int>(descriptor->JSParameterCount())
+                        : 0;
+    __ Drop(pop_count);
+    __ Ret();
+  }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  Arm64OperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      __ Mov(g.ToRegister(destination), src);
+    } else {
+      __ Str(src, g.ToMemOperand(destination, masm()));
+    }
+  } else if (source->IsStackSlot()) {
+    MemOperand src = g.ToMemOperand(source, masm());
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    if (destination->IsRegister()) {
+      __ Ldr(g.ToRegister(destination), src);
+    } else {
+      UseScratchRegisterScope scope(masm());
+      Register temp = scope.AcquireX();
+      __ Ldr(temp, src);
+      __ Str(temp, g.ToMemOperand(destination, masm()));
+    }
+  } else if (source->IsConstant()) {
+    ConstantOperand* constant_source = ConstantOperand::cast(source);
+    if (destination->IsRegister() || destination->IsStackSlot()) {
+      UseScratchRegisterScope scope(masm());
+      Register dst = destination->IsRegister() ? g.ToRegister(destination)
+                                               : scope.AcquireX();
+      Constant src = g.ToConstant(source);
+      if (src.type() == Constant::kHeapObject) {
+        __ LoadObject(dst, src.ToHeapObject());
+      } else {
+        __ Mov(dst, g.ToImmediate(source));
+      }
+      if (destination->IsStackSlot()) {
+        __ Str(dst, g.ToMemOperand(destination, masm()));
+      }
+    } else if (destination->IsDoubleRegister()) {
+      FPRegister result = g.ToDoubleRegister(destination);
+      __ Fmov(result, g.ToDouble(constant_source));
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      UseScratchRegisterScope scope(masm());
+      FPRegister temp = scope.AcquireD();
+      __ Fmov(temp, g.ToDouble(constant_source));
+      __ Str(temp, g.ToMemOperand(destination, masm()));
+    }
+  } else if (source->IsDoubleRegister()) {
+    FPRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      FPRegister dst = g.ToDoubleRegister(destination);
+      __ Fmov(dst, src);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      __ Str(src, g.ToMemOperand(destination, masm()));
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+    MemOperand src = g.ToMemOperand(source, masm());
+    if (destination->IsDoubleRegister()) {
+      __ Ldr(g.ToDoubleRegister(destination), src);
+    } else {
+      UseScratchRegisterScope scope(masm());
+      FPRegister temp = scope.AcquireD();
+      __ Ldr(temp, src);
+      __ Str(temp, g.ToMemOperand(destination, masm()));
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  Arm64OperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    // Register-register.
+    UseScratchRegisterScope scope(masm());
+    Register temp = scope.AcquireX();
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      Register dst = g.ToRegister(destination);
+      __ Mov(temp, src);
+      __ Mov(src, dst);
+      __ Mov(dst, temp);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      MemOperand dst = g.ToMemOperand(destination, masm());
+      __ Mov(temp, src);
+      __ Ldr(src, dst);
+      __ Str(temp, dst);
+    }
+  } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+    UseScratchRegisterScope scope(masm());
+    CPURegister temp_0 = scope.AcquireX();
+    CPURegister temp_1 = scope.AcquireX();
+    MemOperand src = g.ToMemOperand(source, masm());
+    MemOperand dst = g.ToMemOperand(destination, masm());
+    __ Ldr(temp_0, src);
+    __ Ldr(temp_1, dst);
+    __ Str(temp_0, dst);
+    __ Str(temp_1, src);
+  } else if (source->IsDoubleRegister()) {
+    UseScratchRegisterScope scope(masm());
+    FPRegister temp = scope.AcquireD();
+    FPRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      FPRegister dst = g.ToDoubleRegister(destination);
+      __ Fmov(temp, src);
+      __ Fmov(src, dst);
+      __ Fmov(dst, temp);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      MemOperand dst = g.ToMemOperand(destination, masm());
+      __ Fmov(temp, src);
+      __ Ldr(src, dst);
+      __ Str(temp, dst);
+    }
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }
+
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+  int space_needed = Deoptimizer::patch_size();
+  if (!linkage()->info()->IsStub()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    intptr_t current_pc = masm()->pc_offset();
+
+    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
+      intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      DCHECK((padding_size % kInstructionSize) == 0);
+      InstructionAccurateScope instruction_accurate(
+          masm(), padding_size / kInstructionSize);
+
+      while (padding_size > 0) {
+        __ nop();
+        padding_size -= kInstructionSize;
+      }
+    }
+  }
+  MarkLazyDeoptSite();
+}
+
+#undef __
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/arm64/instruction-codes-arm64.h b/src/compiler/arm64/instruction-codes-arm64.h
new file mode 100644
index 0000000..0a9a2ed
--- /dev/null
+++ b/src/compiler/arm64/instruction-codes-arm64.h
@@ -0,0 +1,108 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
+#define V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// ARM64-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+  V(Arm64Add)                      \
+  V(Arm64Add32)                    \
+  V(Arm64And)                      \
+  V(Arm64And32)                    \
+  V(Arm64Cmp)                      \
+  V(Arm64Cmp32)                    \
+  V(Arm64Cmn)                      \
+  V(Arm64Cmn32)                    \
+  V(Arm64Tst)                      \
+  V(Arm64Tst32)                    \
+  V(Arm64Or)                       \
+  V(Arm64Or32)                     \
+  V(Arm64Xor)                      \
+  V(Arm64Xor32)                    \
+  V(Arm64Sub)                      \
+  V(Arm64Sub32)                    \
+  V(Arm64Mul)                      \
+  V(Arm64Mul32)                    \
+  V(Arm64Idiv)                     \
+  V(Arm64Idiv32)                   \
+  V(Arm64Udiv)                     \
+  V(Arm64Udiv32)                   \
+  V(Arm64Imod)                     \
+  V(Arm64Imod32)                   \
+  V(Arm64Umod)                     \
+  V(Arm64Umod32)                   \
+  V(Arm64Not)                      \
+  V(Arm64Not32)                    \
+  V(Arm64Neg)                      \
+  V(Arm64Neg32)                    \
+  V(Arm64Shl)                      \
+  V(Arm64Shl32)                    \
+  V(Arm64Shr)                      \
+  V(Arm64Shr32)                    \
+  V(Arm64Sar)                      \
+  V(Arm64Sar32)                    \
+  V(Arm64Ror)                      \
+  V(Arm64Ror32)                    \
+  V(Arm64Mov32)                    \
+  V(Arm64Sxtw)                     \
+  V(Arm64Claim)                    \
+  V(Arm64Poke)                     \
+  V(Arm64PokePairZero)             \
+  V(Arm64PokePair)                 \
+  V(Arm64Float64Cmp)               \
+  V(Arm64Float64Add)               \
+  V(Arm64Float64Sub)               \
+  V(Arm64Float64Mul)               \
+  V(Arm64Float64Div)               \
+  V(Arm64Float64Mod)               \
+  V(Arm64Float64Sqrt)              \
+  V(Arm64Float64ToInt32)           \
+  V(Arm64Float64ToUint32)          \
+  V(Arm64Int32ToFloat64)           \
+  V(Arm64Uint32ToFloat64)          \
+  V(Arm64LdrS)                     \
+  V(Arm64StrS)                     \
+  V(Arm64LdrD)                     \
+  V(Arm64StrD)                     \
+  V(Arm64Ldrb)                     \
+  V(Arm64Ldrsb)                    \
+  V(Arm64Strb)                     \
+  V(Arm64Ldrh)                     \
+  V(Arm64Ldrsh)                    \
+  V(Arm64Strh)                     \
+  V(Arm64LdrW)                     \
+  V(Arm64StrW)                     \
+  V(Arm64Ldr)                      \
+  V(Arm64Str)                      \
+  V(Arm64StoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+  V(MRI) /* [%r0 + K] */               \
+  V(MRR) /* [%r0 + %r1] */
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
diff --git a/src/compiler/arm64/instruction-selector-arm64-unittest.cc b/src/compiler/arm64/instruction-selector-arm64-unittest.cc
new file mode 100644
index 0000000..b5562c2
--- /dev/null
+++ b/src/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -0,0 +1,1121 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <list>
+
+#include "src/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+typedef RawMachineAssembler::Label MLabel;
+
+template <typename T>
+struct MachInst {
+  T constructor;
+  const char* constructor_name;
+  ArchOpcode arch_opcode;
+  MachineType machine_type;
+};
+
+typedef MachInst<Node* (RawMachineAssembler::*)(Node*)> MachInst1;
+typedef MachInst<Node* (RawMachineAssembler::*)(Node*, Node*)> MachInst2;
+
+
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const MachInst<T>& mi) {
+  return os << mi.constructor_name;
+}
+
+
+// Helper to build Int32Constant or Int64Constant depending on the given
+// machine type.
+Node* BuildConstant(InstructionSelectorTest::StreamBuilder& m, MachineType type,
+                    int64_t value) {
+  switch (type) {
+    case kMachInt32:
+      return m.Int32Constant(value);
+      break;
+
+    case kMachInt64:
+      return m.Int64Constant(value);
+      break;
+
+    default:
+      UNIMPLEMENTED();
+  }
+  return NULL;
+}
+
+
+// ARM64 logical instructions.
+static const MachInst2 kLogicalInstructions[] = {
+    {&RawMachineAssembler::Word32And, "Word32And", kArm64And32, kMachInt32},
+    {&RawMachineAssembler::Word64And, "Word64And", kArm64And, kMachInt64},
+    {&RawMachineAssembler::Word32Or, "Word32Or", kArm64Or32, kMachInt32},
+    {&RawMachineAssembler::Word64Or, "Word64Or", kArm64Or, kMachInt64},
+    {&RawMachineAssembler::Word32Xor, "Word32Xor", kArm64Xor32, kMachInt32},
+    {&RawMachineAssembler::Word64Xor, "Word64Xor", kArm64Xor, kMachInt64}};
+
+
+// ARM64 logical immediates: contiguous set bits, rotated about a power of two
+// sized block. The block is then duplicated across the word. Below is a random
+// subset of the 32-bit immediates.
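+// For example, 0x000001c0 is a run of three set bits (bits 6-8), and
+// 0x01800180 repeats a two-bit block across each 16-bit sub-block.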
+static const uint32_t kLogicalImmediates[] = {
+    0x00000002, 0x00000003, 0x00000070, 0x00000080, 0x00000100, 0x000001c0,
+    0x00000300, 0x000007e0, 0x00003ffc, 0x00007fc0, 0x0003c000, 0x0003f000,
+    0x0003ffc0, 0x0003fff8, 0x0007ff00, 0x0007ffe0, 0x000e0000, 0x001e0000,
+    0x001ffffc, 0x003f0000, 0x003f8000, 0x00780000, 0x007fc000, 0x00ff0000,
+    0x01800000, 0x01800180, 0x01f801f8, 0x03fe0000, 0x03ffffc0, 0x03fffffc,
+    0x06000000, 0x07fc0000, 0x07ffc000, 0x07ffffc0, 0x07ffffe0, 0x0ffe0ffe,
+    0x0ffff800, 0x0ffffff0, 0x0fffffff, 0x18001800, 0x1f001f00, 0x1f801f80,
+    0x30303030, 0x3ff03ff0, 0x3ff83ff8, 0x3fff0000, 0x3fff8000, 0x3fffffc0,
+    0x70007000, 0x7f7f7f7f, 0x7fc00000, 0x7fffffc0, 0x8000001f, 0x800001ff,
+    0x81818181, 0x9fff9fff, 0xc00007ff, 0xc0ffffff, 0xdddddddd, 0xe00001ff,
+    0xe00003ff, 0xe007ffff, 0xefffefff, 0xf000003f, 0xf001f001, 0xf3fff3ff,
+    0xf800001f, 0xf80fffff, 0xf87ff87f, 0xfbfbfbfb, 0xfc00001f, 0xfc0000ff,
+    0xfc0001ff, 0xfc03fc03, 0xfe0001ff, 0xff000001, 0xff03ff03, 0xff800000,
+    0xff800fff, 0xff801fff, 0xff87ffff, 0xffc0003f, 0xffc007ff, 0xffcfffcf,
+    0xffe00003, 0xffe1ffff, 0xfff0001f, 0xfff07fff, 0xfff80007, 0xfff87fff,
+    0xfffc00ff, 0xfffe07ff, 0xffff00ff, 0xffffc001, 0xfffff007, 0xfffff3ff,
+    0xfffff807, 0xfffff9ff, 0xfffffc0f, 0xfffffeff};
+
+
+// ARM64 arithmetic instructions.
+static const MachInst2 kAddSubInstructions[] = {
+    {&RawMachineAssembler::Int32Add, "Int32Add", kArm64Add32, kMachInt32},
+    {&RawMachineAssembler::Int64Add, "Int64Add", kArm64Add, kMachInt64},
+    {&RawMachineAssembler::Int32Sub, "Int32Sub", kArm64Sub32, kMachInt32},
+    {&RawMachineAssembler::Int64Sub, "Int64Sub", kArm64Sub, kMachInt64}};
+
+
+// ARM64 Add/Sub immediates: 12-bit immediate optionally shifted by 12.
+// Below is a combination of a random subset and some edge values.
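+// For example, 4095 (0xfff) is the largest unshifted value and
+// 16773120 (0xfff << 12) the largest shifted one.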
+static const int32_t kAddSubImmediates[] = {
+    0,        1,        69,       493,      599,      701,      719,
+    768,      818,      842,      945,      1246,     1286,     1429,
+    1669,     2171,     2179,     2182,     2254,     2334,     2338,
+    2343,     2396,     2449,     2610,     2732,     2855,     2876,
+    2944,     3377,     3458,     3475,     3476,     3540,     3574,
+    3601,     3813,     3871,     3917,     4095,     4096,     16384,
+    364544,   462848,   970752,   1523712,  1863680,  2363392,  3219456,
+    3280896,  4247552,  4526080,  4575232,  4960256,  5505024,  5894144,
+    6004736,  6193152,  6385664,  6795264,  7114752,  7233536,  7348224,
+    7499776,  7573504,  7729152,  8634368,  8937472,  9465856,  10354688,
+    10682368, 11059200, 11460608, 13168640, 13176832, 14336000, 15028224,
+    15597568, 15892480, 16773120};
+
+
+// ARM64 flag setting data processing instructions.
+static const MachInst2 kDPFlagSetInstructions[] = {
+    {&RawMachineAssembler::Word32And, "Word32And", kArm64Tst32, kMachInt32},
+    {&RawMachineAssembler::Int32Add, "Int32Add", kArm64Cmn32, kMachInt32},
+    {&RawMachineAssembler::Int32Sub, "Int32Sub", kArm64Cmp32, kMachInt32}};
+
+
+// ARM64 arithmetic with overflow instructions.
+static const MachInst2 kOvfAddSubInstructions[] = {
+    {&RawMachineAssembler::Int32AddWithOverflow, "Int32AddWithOverflow",
+     kArm64Add32, kMachInt32},
+    {&RawMachineAssembler::Int32SubWithOverflow, "Int32SubWithOverflow",
+     kArm64Sub32, kMachInt32}};
+
+
+// ARM64 shift instructions.
+static const MachInst2 kShiftInstructions[] = {
+    {&RawMachineAssembler::Word32Shl, "Word32Shl", kArm64Shl32, kMachInt32},
+    {&RawMachineAssembler::Word64Shl, "Word64Shl", kArm64Shl, kMachInt64},
+    {&RawMachineAssembler::Word32Shr, "Word32Shr", kArm64Shr32, kMachInt32},
+    {&RawMachineAssembler::Word64Shr, "Word64Shr", kArm64Shr, kMachInt64},
+    {&RawMachineAssembler::Word32Sar, "Word32Sar", kArm64Sar32, kMachInt32},
+    {&RawMachineAssembler::Word64Sar, "Word64Sar", kArm64Sar, kMachInt64},
+    {&RawMachineAssembler::Word32Ror, "Word32Ror", kArm64Ror32, kMachInt32},
+    {&RawMachineAssembler::Word64Ror, "Word64Ror", kArm64Ror, kMachInt64}};
+
+
+// ARM64 Mul/Div instructions.
+static const MachInst2 kMulDivInstructions[] = {
+    {&RawMachineAssembler::Int32Mul, "Int32Mul", kArm64Mul32, kMachInt32},
+    {&RawMachineAssembler::Int64Mul, "Int64Mul", kArm64Mul, kMachInt64},
+    {&RawMachineAssembler::Int32Div, "Int32Div", kArm64Idiv32, kMachInt32},
+    {&RawMachineAssembler::Int64Div, "Int64Div", kArm64Idiv, kMachInt64},
+    {&RawMachineAssembler::Int32UDiv, "Int32UDiv", kArm64Udiv32, kMachInt32},
+    {&RawMachineAssembler::Int64UDiv, "Int64UDiv", kArm64Udiv, kMachInt64}};
+
+
+// ARM64 FP arithmetic instructions.
+static const MachInst2 kFPArithInstructions[] = {
+    {&RawMachineAssembler::Float64Add, "Float64Add", kArm64Float64Add,
+     kMachFloat64},
+    {&RawMachineAssembler::Float64Sub, "Float64Sub", kArm64Float64Sub,
+     kMachFloat64},
+    {&RawMachineAssembler::Float64Mul, "Float64Mul", kArm64Float64Mul,
+     kMachFloat64},
+    {&RawMachineAssembler::Float64Div, "Float64Div", kArm64Float64Div,
+     kMachFloat64}};
+
+
+struct FPCmp {
+  MachInst2 mi;
+  FlagsCondition cond;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const FPCmp& cmp) {
+  return os << cmp.mi;
+}
+
+
+// ARM64 FP comparison instructions.
+static const FPCmp kFPCmpInstructions[] = {
+    {{&RawMachineAssembler::Float64Equal, "Float64Equal", kArm64Float64Cmp,
+      kMachFloat64},
+     kUnorderedEqual},
+    {{&RawMachineAssembler::Float64LessThan, "Float64LessThan",
+      kArm64Float64Cmp, kMachFloat64},
+     kUnorderedLessThan},
+    {{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
+      kArm64Float64Cmp, kMachFloat64},
+     kUnorderedLessThanOrEqual}};
+
+
+struct Conversion {
+  // The machine_type field in MachInst1 represents the destination type.
+  MachInst1 mi;
+  MachineType src_machine_type;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const Conversion& conv) {
+  return os << conv.mi;
+}
+
+
+// ARM64 type conversion instructions.
+static const Conversion kConversionInstructions[] = {
+    {{&RawMachineAssembler::ChangeInt32ToInt64, "ChangeInt32ToInt64",
+      kArm64Sxtw, kMachInt64},
+     kMachInt32},
+    {{&RawMachineAssembler::ChangeUint32ToUint64, "ChangeUint32ToUint64",
+      kArm64Mov32, kMachUint64},
+     kMachUint32},
+    {{&RawMachineAssembler::TruncateInt64ToInt32, "TruncateInt64ToInt32",
+      kArm64Mov32, kMachInt32},
+     kMachInt64},
+    {{&RawMachineAssembler::ChangeInt32ToFloat64, "ChangeInt32ToFloat64",
+      kArm64Int32ToFloat64, kMachFloat64},
+     kMachInt32},
+    {{&RawMachineAssembler::ChangeUint32ToFloat64, "ChangeUint32ToFloat64",
+      kArm64Uint32ToFloat64, kMachFloat64},
+     kMachUint32},
+    {{&RawMachineAssembler::ChangeFloat64ToInt32, "ChangeFloat64ToInt32",
+      kArm64Float64ToInt32, kMachInt32},
+     kMachFloat64},
+    {{&RawMachineAssembler::ChangeFloat64ToUint32, "ChangeFloat64ToUint32",
+      kArm64Float64ToUint32, kMachUint32},
+     kMachFloat64}};
+
+}  // namespace
+
+
+// -----------------------------------------------------------------------------
+// Logical instructions.
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorLogicalTest;
+
+
+TEST_P(InstructionSelectorLogicalTest, Parameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorLogicalTest, Immediate) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  // TODO(all): Add support for testing 64-bit immediates.
+  if (type == kMachInt32) {
+    // Immediate on the right.
+    TRACED_FOREACH(int32_t, imm, kLogicalImmediates) {
+      StreamBuilder m(this, type, type);
+      m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+      ASSERT_EQ(2U, s[0]->InputCount());
+      EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+
+    // Immediate on the left; all logical ops should commute.
+    TRACED_FOREACH(int32_t, imm, kLogicalImmediates) {
+      StreamBuilder m(this, type, type);
+      m.Return((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+      ASSERT_EQ(2U, s[0]->InputCount());
+      EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorLogicalTest,
+                        ::testing::ValuesIn(kLogicalInstructions));
+
+
+// -----------------------------------------------------------------------------
+// Add and Sub instructions.
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorAddSubTest;
+
+
+TEST_P(InstructionSelectorAddSubTest, Parameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorAddSubTest, ImmediateOnRight) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, type, type);
+    m.Return((m.*dpi.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+    EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+TEST_P(InstructionSelectorAddSubTest, ImmediateOnLeft) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, type, type);
+    m.Return((m.*dpi.constructor)(BuildConstant(m, type, imm), m.Parameter(0)));
+    Stream s = m.Build();
+
+    // Add can support an immediate on the left by commuting, but Sub can't
+    // commute. We test zero-on-left Sub later.
+    if (strstr(dpi.constructor_name, "Add") != NULL) {
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+      ASSERT_EQ(2U, s[0]->InputCount());
+      EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+      EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorAddSubTest,
+                        ::testing::ValuesIn(kAddSubInstructions));
+
+
+TEST_F(InstructionSelectorTest, SubZeroOnLeft) {
+  // Subtraction with zero on the left maps to Neg.
+  {
+    // 32-bit subtract.
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Int32Sub(m.Int32Constant(0), m.Parameter(0)));
+    Stream s = m.Build();
+
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Neg32, s[0]->arch_opcode());
+    EXPECT_EQ(1U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  {
+    // 64-bit subtract.
+    StreamBuilder m(this, kMachInt64, kMachInt64, kMachInt64);
+    m.Return(m.Int64Sub(m.Int64Constant(0), m.Parameter(0)));
+    Stream s = m.Build();
+
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Neg, s[0]->arch_opcode());
+    EXPECT_EQ(1U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Data processing controlled branches.
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorDPFlagSetTest;
+
+
+TEST_P(InstructionSelectorDPFlagSetTest, BranchWithParameters) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  MLabel a, b;
+  m.Branch((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)), &a, &b);
+  m.Bind(&a);
+  m.Return(m.Int32Constant(1));
+  m.Bind(&b);
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorDPFlagSetTest,
+                        ::testing::ValuesIn(kDPFlagSetInstructions));
+
+
+TEST_F(InstructionSelectorTest, AndBranchWithImmediateOnRight) {
+  TRACED_FOREACH(int32_t, imm, kLogicalImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Word32And(m.Parameter(0), m.Int32Constant(imm)), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, AddBranchWithImmediateOnRight) {
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Int32Add(m.Parameter(0), m.Int32Constant(imm)), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, SubBranchWithImmediateOnRight) {
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Int32Sub(m.Parameter(0), m.Int32Constant(imm)), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, AndBranchWithImmediateOnLeft) {
+  TRACED_FOREACH(int32_t, imm, kLogicalImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Word32And(m.Int32Constant(imm), m.Parameter(0)), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+    ASSERT_LE(1U, s[0]->InputCount());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, AddBranchWithImmediateOnLeft) {
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
+    ASSERT_LE(1U, s[0]->InputCount());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Add and subtract instructions with overflow.
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorOvfAddSubTest;
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, OvfParameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return(
+      m.Projection(1, (m.*dpi.constructor)(m.Parameter(0), m.Parameter(1))));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_LE(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, OvfImmediateOnRight) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, type, type);
+    m.Return(m.Projection(
+        1, (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, ValParameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return(
+      m.Projection(0, (m.*dpi.constructor)(m.Parameter(0), m.Parameter(1))));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_LE(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, ValImmediateOnRight) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, type, type);
+    m.Return(m.Projection(
+        0, (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+  }
+}
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, BothParameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Parameter(1));
+  m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+  Stream s = m.Build();
+  ASSERT_LE(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(2U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, BothImmediateOnRight) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, type, type);
+    Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
+    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+    Stream s = m.Build();
+    ASSERT_LE(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(2U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, BranchWithParameters) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  MLabel a, b;
+  Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Parameter(1));
+  m.Branch(m.Projection(1, n), &a, &b);
+  m.Bind(&a);
+  m.Return(m.Int32Constant(0));
+  m.Bind(&b);
+  m.Return(m.Projection(0, n));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(4U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, BranchWithImmediateOnRight) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, type, type);
+    MLabel a, b;
+    Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
+    m.Branch(m.Projection(1, n), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(0));
+    m.Bind(&b);
+    m.Return(m.Projection(0, n));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(4U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorOvfAddSubTest,
+                        ::testing::ValuesIn(kOvfAddSubInstructions));
+
+
+TEST_F(InstructionSelectorTest, OvfFlagAddImmediateOnLeft) {
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        1, m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0))));
+    Stream s = m.Build();
+
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, OvfValAddImmediateOnLeft) {
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        0, m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0))));
+    Stream s = m.Build();
+
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, OvfBothAddImmediateOnLeft) {
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    Node* n = m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0));
+    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+    Stream s = m.Build();
+
+    ASSERT_LE(1U, s.size());
+    EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(2U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, OvfBranchWithImmediateOnLeft) {
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    Node* n = m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0));
+    m.Branch(m.Projection(1, n), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(0));
+    m.Bind(&b);
+    m.Return(m.Projection(0, n));
+    Stream s = m.Build();
+
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+    ASSERT_EQ(4U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Shift instructions.
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorShiftTest;
+
+
+TEST_P(InstructionSelectorShiftTest, Parameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Immediate) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  TRACED_FORRANGE(int32_t, imm, 0, (ElementSizeOf(type) * 8) - 1) {
+    StreamBuilder m(this, type, type);
+    m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
+                        ::testing::ValuesIn(kShiftInstructions));
+
+
+// -----------------------------------------------------------------------------
+// Mul and Div instructions.
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorMulDivTest;
+
+
+TEST_P(InstructionSelectorMulDivTest, Parameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorMulDivTest,
+                        ::testing::ValuesIn(kMulDivInstructions));
+
+
+// -----------------------------------------------------------------------------
+// Floating point instructions.
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorFPArithTest;
+
+
+TEST_P(InstructionSelectorFPArithTest, Parameter) {
+  const MachInst2 fpa = GetParam();
+  StreamBuilder m(this, fpa.machine_type, fpa.machine_type, fpa.machine_type);
+  m.Return((m.*fpa.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(fpa.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPArithTest,
+                        ::testing::ValuesIn(kFPArithInstructions));
+
+
+typedef InstructionSelectorTestWithParam<FPCmp> InstructionSelectorFPCmpTest;
+
+
+TEST_P(InstructionSelectorFPCmpTest, Parameter) {
+  const FPCmp cmp = GetParam();
+  StreamBuilder m(this, kMachInt32, cmp.mi.machine_type, cmp.mi.machine_type);
+  m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
+                        ::testing::ValuesIn(kFPCmpInstructions));
+
+
+// -----------------------------------------------------------------------------
+// Conversions.
+
+typedef InstructionSelectorTestWithParam<Conversion>
+    InstructionSelectorConversionTest;
+
+
+TEST_P(InstructionSelectorConversionTest, Parameter) {
+  const Conversion conv = GetParam();
+  StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
+  m.Return((m.*conv.mi.constructor)(m.Parameter(0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(1U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorConversionTest,
+                        ::testing::ValuesIn(kConversionInstructions));
+
+
+// -----------------------------------------------------------------------------
+// Memory access instructions.
+
+
+namespace {
+
+struct MemoryAccess {
+  MachineType type;
+  ArchOpcode ldr_opcode;
+  ArchOpcode str_opcode;
+  const int32_t immediates[20];
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
+  OStringStream ost;
+  ost << memacc.type;
+  return os << ost.c_str();
+}
+
+}  // namespace
+
+
+static const MemoryAccess kMemoryAccesses[] = {
+    {kMachInt8, kArm64Ldrsb, kArm64Strb,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 257, 258, 1000, 1001,
+      2121, 2442, 4093, 4094, 4095}},
+    {kMachUint8, kArm64Ldrb, kArm64Strb,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 257, 258, 1000, 1001,
+      2121, 2442, 4093, 4094, 4095}},
+    {kMachInt16, kArm64Ldrsh, kArm64Strh,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 258, 260, 4096, 4098,
+      4100, 4242, 6786, 8188, 8190}},
+    {kMachUint16, kArm64Ldrh, kArm64Strh,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 258, 260, 4096, 4098,
+      4100, 4242, 6786, 8188, 8190}},
+    {kMachInt32, kArm64LdrW, kArm64StrW,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 260, 4096, 4100, 8192,
+      8196, 3276, 3280, 16376, 16380}},
+    {kMachUint32, kArm64LdrW, kArm64StrW,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 260, 4096, 4100, 8192,
+      8196, 3276, 3280, 16376, 16380}},
+    {kMachInt64, kArm64Ldr, kArm64Str,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 264, 4096, 4104, 8192,
+      8200, 16384, 16392, 32752, 32760}},
+    {kMachUint64, kArm64Ldr, kArm64Str,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 264, 4096, 4104, 8192,
+      8200, 16384, 16392, 32752, 32760}},
+    {kMachFloat32, kArm64LdrS, kArm64StrS,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 260, 4096, 4100, 8192,
+      8196, 3276, 3280, 16376, 16380}},
+    {kMachFloat64, kArm64LdrD, kArm64StrD,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 264, 4096, 4104, 8192,
+      8200, 16384, 16392, 32752, 32760}}};
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccess>
+    InstructionSelectorMemoryAccessTest;
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+  m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
+  const MemoryAccess memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, memacc.immediates) {
+    StreamBuilder m(this, memacc.type, kMachPtr);
+    m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
+  m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+  EXPECT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(0U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
+  const MemoryAccess memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, memacc.immediates) {
+    StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
+    m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
+            m.Parameter(1));
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(0U, s[0]->OutputCount());
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorMemoryAccessTest,
+                        ::testing::ValuesIn(kMemoryAccesses));
+
+
+// -----------------------------------------------------------------------------
+// Comparison instructions.
+
+static const MachInst2 kComparisonInstructions[] = {
+    {&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32, kMachInt32},
+    {&RawMachineAssembler::Word64Equal, "Word64Equal", kArm64Cmp, kMachInt64},
+};
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorComparisonTest;
+
+
+TEST_P(InstructionSelectorComparisonTest, WithParameters) {
+  const MachInst2 cmp = GetParam();
+  const MachineType type = cmp.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return((m.*cmp.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(kEqual, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorComparisonTest, WithImmediate) {
+  const MachInst2 cmp = GetParam();
+  const MachineType type = cmp.machine_type;
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    // Comparisons with 0 are turned into tst instructions.
+    if (imm == 0) continue;
+    StreamBuilder m(this, type, type);
+    m.Return((m.*cmp.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    // Comparisons with 0 are turned into tst instructions.
+    if (imm == 0) continue;
+    StreamBuilder m(this, type, type);
+    // Immediate on the left; equality comparisons commute.
+    m.Return((m.*cmp.constructor)(BuildConstant(m, type, imm), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorComparisonTest,
+                        ::testing::ValuesIn(kComparisonInstructions));
+
+
+TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64EqualWithZero) {
+  {
+    StreamBuilder m(this, kMachInt64, kMachInt64);
+    m.Return(m.Word64Equal(m.Parameter(0), m.Int64Constant(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Tst, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  {
+    StreamBuilder m(this, kMachInt64, kMachInt64);
+    m.Return(m.Word64Equal(m.Int64Constant(0), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Tst, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
new file mode 100644
index 0000000..472ce6f
--- /dev/null
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -0,0 +1,697 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+enum ImmediateMode {
+  kArithmeticImm,  // 12 bit unsigned immediate shifted left 0 or 12 bits
+  kShift32Imm,     // 0 - 31
+  kShift64Imm,     // 0 - 63
+  kLogical32Imm,
+  kLogical64Imm,
+  kLoadStoreImm8,   // signed 9 bit unscaled or 12 bit unsigned scaled by size
+  kLoadStoreImm16,
+  kLoadStoreImm32,
+  kLoadStoreImm64,
+  kNoImmediate
+};
+
+
+// Adds Arm64-specific methods for generating operands.
+class Arm64OperandGenerator FINAL : public OperandGenerator {
+ public:
+  explicit Arm64OperandGenerator(InstructionSelector* selector)
+      : OperandGenerator(selector) {}
+
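+  // Uses an immediate operand when |node| is a constant that is encodable in
+  // the given immediate |mode|, and a register operand otherwise.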
+  InstructionOperand* UseOperand(Node* node, ImmediateMode mode) {
+    if (CanBeImmediate(node, mode)) {
+      return UseImmediate(node);
+    }
+    return UseRegister(node);
+  }
+
+  bool CanBeImmediate(Node* node, ImmediateMode mode) {
+    int64_t value;
+    if (node->opcode() == IrOpcode::kInt32Constant)
+      value = OpParameter<int32_t>(node);
+    else if (node->opcode() == IrOpcode::kInt64Constant)
+      value = OpParameter<int64_t>(node);
+    else
+      return false;
+    unsigned ignored;
+    switch (mode) {
+      case kLogical32Imm:
+        // TODO(dcarney): some unencodable values can be handled by
+        // switching instructions.
+        return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32,
+                                       &ignored, &ignored, &ignored);
+      case kLogical64Imm:
+        return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
+                                       &ignored, &ignored, &ignored);
+      case kArithmeticImm:
+        // TODO(dcarney): -values can be handled by instruction swapping
+        return Assembler::IsImmAddSub(value);
+      case kShift32Imm:
+        return 0 <= value && value < 32;
+      case kShift64Imm:
+        return 0 <= value && value < 64;
+      case kLoadStoreImm8:
+        return IsLoadStoreImmediate(value, LSByte);
+      case kLoadStoreImm16:
+        return IsLoadStoreImmediate(value, LSHalfword);
+      case kLoadStoreImm32:
+        return IsLoadStoreImmediate(value, LSWord);
+      case kLoadStoreImm64:
+        return IsLoadStoreImmediate(value, LSDoubleWord);
+      case kNoImmediate:
+        return false;
+    }
+    return false;
+  }
+
+ private:
+  bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
+    return Assembler::IsImmLSScaled(value, size) ||
+           Assembler::IsImmLSUnscaled(value);
+  }
+};
+
+
+static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
+                     Node* node) {
+  Arm64OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseRegister(node->InputAt(1)));
+}
+
+
+static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+                            Node* node) {
+  Arm64OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseRegister(node->InputAt(1)));
+}
+
+
+static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
+                     Node* node, ImmediateMode operand_mode) {
+  Arm64OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseOperand(node->InputAt(1), operand_mode));
+}
+
+
+// Shared routine for multiple binary operations.
+template <typename Matcher>
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode, ImmediateMode operand_mode,
+                       FlagsContinuation* cont) {
+  Arm64OperandGenerator g(selector);
+  Matcher m(node);
+  InstructionOperand* inputs[4];
+  size_t input_count = 0;
+  InstructionOperand* outputs[2];
+  size_t output_count = 0;
+
+  inputs[input_count++] = g.UseRegister(m.left().node());
+  inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);
+
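+  // When the flags feed a branch, the true and false block labels are
+  // appended as extra inputs so a single instruction both computes the
+  // result and branches.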
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  }
+
+  outputs[output_count++] = g.DefineAsRegister(node);
+  if (cont->IsSet()) {
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0, input_count);
+  DCHECK_NE(0, output_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+
+  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+                                      outputs, input_count, inputs);
+  if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+// Shared routine for multiple binary operations.
+template <typename Matcher>
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       ArchOpcode opcode, ImmediateMode operand_mode) {
+  FlagsContinuation cont;
+  VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
+}
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
+  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+  Arm64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  ArchOpcode opcode;
+  ImmediateMode immediate_mode = kNoImmediate;
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kArm64LdrS;
+      immediate_mode = kLoadStoreImm32;
+      break;
+    case kRepFloat64:
+      opcode = kArm64LdrD;
+      immediate_mode = kLoadStoreImm64;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = typ == kTypeInt32 ? kArm64Ldrsb : kArm64Ldrb;
+      immediate_mode = kLoadStoreImm8;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeInt32 ? kArm64Ldrsh : kArm64Ldrh;
+      immediate_mode = kLoadStoreImm16;
+      break;
+    case kRepWord32:
+      opcode = kArm64LdrW;
+      immediate_mode = kLoadStoreImm32;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord64:
+      opcode = kArm64Ldr;
+      immediate_mode = kLoadStoreImm64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  if (g.CanBeImmediate(index, immediate_mode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+  } else {
+    Emit(opcode | AddressingModeField::encode(kMode_MRR),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+  }
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+  Arm64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+  MachineType rep = RepresentationOf(store_rep.machine_type());
+  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+    DCHECK(rep == kRepTagged);
+    // TODO(dcarney): refactor the RecordWrite function to take temp registers
+    //                and pass them here instead of using fixed regs.
+    // TODO(dcarney): handle immediate indices.
+    InstructionOperand* temps[] = {g.TempRegister(x11), g.TempRegister(x12)};
+    Emit(kArm64StoreWriteBarrier, NULL, g.UseFixed(base, x10),
+         g.UseFixed(index, x11), g.UseFixed(value, x12), arraysize(temps),
+         temps);
+    return;
+  }
+  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+  ArchOpcode opcode;
+  ImmediateMode immediate_mode = kNoImmediate;
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kArm64StrS;
+      immediate_mode = kLoadStoreImm32;
+      break;
+    case kRepFloat64:
+      opcode = kArm64StrD;
+      immediate_mode = kLoadStoreImm64;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = kArm64Strb;
+      immediate_mode = kLoadStoreImm8;
+      break;
+    case kRepWord16:
+      opcode = kArm64Strh;
+      immediate_mode = kLoadStoreImm16;
+      break;
+    case kRepWord32:
+      opcode = kArm64StrW;
+      immediate_mode = kLoadStoreImm32;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord64:
+      opcode = kArm64Str;
+      immediate_mode = kLoadStoreImm64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  if (g.CanBeImmediate(index, immediate_mode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+  } else {
+    Emit(opcode | AddressingModeField::encode(kMode_MRR), NULL,
+         g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+  }
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+  VisitBinop<Int32BinopMatcher>(this, node, kArm64And32, kLogical32Imm);
+}
+
+
+void InstructionSelector::VisitWord64And(Node* node) {
+  VisitBinop<Int64BinopMatcher>(this, node, kArm64And, kLogical64Imm);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+  VisitBinop<Int32BinopMatcher>(this, node, kArm64Or32, kLogical32Imm);
+}
+
+
+void InstructionSelector::VisitWord64Or(Node* node) {
+  VisitBinop<Int64BinopMatcher>(this, node, kArm64Or, kLogical64Imm);
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+  Arm64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.right().Is(-1)) {
+    Emit(kArm64Not32, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+  } else {
+    VisitBinop<Int32BinopMatcher>(this, node, kArm64Xor32, kLogical32Imm);
+  }
+}
+
+
+void InstructionSelector::VisitWord64Xor(Node* node) {
+  Arm64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if (m.right().Is(-1)) {
+    Emit(kArm64Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+  } else {
+    VisitBinop<Int64BinopMatcher>(this, node, kArm64Xor, kLogical64Imm);
+  }
+}
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+  VisitRRO(this, kArm64Shl32, node, kShift32Imm);
+}
+
+
+void InstructionSelector::VisitWord64Shl(Node* node) {
+  VisitRRO(this, kArm64Shl, node, kShift64Imm);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+  VisitRRO(this, kArm64Shr32, node, kShift32Imm);
+}
+
+
+void InstructionSelector::VisitWord64Shr(Node* node) {
+  VisitRRO(this, kArm64Shr, node, kShift64Imm);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+  VisitRRO(this, kArm64Sar32, node, kShift32Imm);
+}
+
+
+void InstructionSelector::VisitWord64Sar(Node* node) {
+  VisitRRO(this, kArm64Sar, node, kShift64Imm);
+}
+
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+  VisitRRO(this, kArm64Ror32, node, kShift32Imm);
+}
+
+
+void InstructionSelector::VisitWord64Ror(Node* node) {
+  VisitRRO(this, kArm64Ror, node, kShift64Imm);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm);
+}
+
+
+void InstructionSelector::VisitInt64Add(Node* node) {
+  VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm);
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+  Arm64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().Is(0)) {
+    Emit(kArm64Neg32, g.DefineAsRegister(node),
+         g.UseRegister(m.right().node()));
+  } else {
+    VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm);
+  }
+}
+
+
+void InstructionSelector::VisitInt64Sub(Node* node) {
+  Arm64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if (m.left().Is(0)) {
+    Emit(kArm64Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
+  } else {
+    VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm);
+  }
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+  VisitRRR(this, kArm64Mul32, node);
+}
+
+
+void InstructionSelector::VisitInt64Mul(Node* node) {
+  VisitRRR(this, kArm64Mul, node);
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+  VisitRRR(this, kArm64Idiv32, node);
+}
+
+
+void InstructionSelector::VisitInt64Div(Node* node) {
+  VisitRRR(this, kArm64Idiv, node);
+}
+
+
+void InstructionSelector::VisitInt32UDiv(Node* node) {
+  VisitRRR(this, kArm64Udiv32, node);
+}
+
+
+void InstructionSelector::VisitInt64UDiv(Node* node) {
+  VisitRRR(this, kArm64Udiv, node);
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+  VisitRRR(this, kArm64Imod32, node);
+}
+
+
+void InstructionSelector::VisitInt64Mod(Node* node) {
+  VisitRRR(this, kArm64Imod, node);
+}
+
+
+void InstructionSelector::VisitInt32UMod(Node* node) {
+  VisitRRR(this, kArm64Umod32, node);
+}
+
+
+void InstructionSelector::VisitInt64UMod(Node* node) {
+  VisitRRR(this, kArm64Umod, node);
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+  Arm64OperandGenerator g(this);
+  Emit(kArm64Int32ToFloat64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+  Arm64OperandGenerator g(this);
+  Emit(kArm64Uint32ToFloat64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+  Arm64OperandGenerator g(this);
+  Emit(kArm64Float64ToInt32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+  Arm64OperandGenerator g(this);
+  Emit(kArm64Float64ToUint32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+  Arm64OperandGenerator g(this);
+  Emit(kArm64Sxtw, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+  Arm64OperandGenerator g(this);
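+  // On ARM64 a write to a W register zero-extends into the full X register,
+  // so a 32-bit move performs the required zero extension.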
+  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+  Arm64OperandGenerator g(this);
+  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+  VisitRRRFloat64(this, kArm64Float64Add, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+  VisitRRRFloat64(this, kArm64Float64Sub, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+  VisitRRRFloat64(this, kArm64Float64Mul, node);
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+  VisitRRRFloat64(this, kArm64Float64Div, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+  Arm64OperandGenerator g(this);
+  Emit(kArm64Float64Mod, g.DefineAsFixed(node, d0),
+       g.UseFixed(node->InputAt(0), d0),
+       g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+  Arm64OperandGenerator g(this);
+  Emit(kArm64Float64Sqrt, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, cont);
+}
+
+
+// Shared routine for multiple compare operations.
+static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+                         InstructionOperand* left, InstructionOperand* right,
+                         FlagsContinuation* cont) {
+  Arm64OperandGenerator g(selector);
+  opcode = cont->Encode(opcode);
+  if (cont->IsBranch()) {
+    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
+                   g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    DCHECK(cont->IsSet());
+    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  }
+}
+
+
+// Shared routine for multiple word compare operations.
+static void VisitWordCompare(InstructionSelector* selector, Node* node,
+                             InstructionCode opcode, FlagsContinuation* cont,
+                             bool commutative) {
+  Arm64OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // Match immediates on left or right side of comparison.
+  if (g.CanBeImmediate(right, kArithmeticImm)) {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+                 cont);
+  } else if (g.CanBeImmediate(left, kArithmeticImm)) {
+    if (!commutative) cont->Commute();
+    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+                 cont);
+  } else {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+                 cont);
+  }
+}
+
+
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+  switch (node->opcode()) {
+    case IrOpcode::kInt32Add:
+      return VisitWordCompare(this, node, kArm64Cmn32, cont, true);
+    case IrOpcode::kInt32Sub:
+      return VisitWordCompare(this, node, kArm64Cmp32, cont, false);
+    case IrOpcode::kWord32And:
+      return VisitWordCompare(this, node, kArm64Tst32, cont, true);
+    default:
+      break;
+  }
+
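+  // Otherwise test the value against itself; the zero flag is set exactly
+  // when the value is zero.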
+  Arm64OperandGenerator g(this);
+  VisitCompare(this, kArm64Tst32, g.UseRegister(node), g.UseRegister(node),
+               cont);
+}
+
+
+void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
+  switch (node->opcode()) {
+    case IrOpcode::kWord64And:
+      return VisitWordCompare(this, node, kArm64Tst, cont, true);
+    default:
+      break;
+  }
+
+  Arm64OperandGenerator g(this);
+  VisitCompare(this, kArm64Tst, g.UseRegister(node), g.UseRegister(node), cont);
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  VisitWordCompare(this, node, kArm64Cmp32, cont, false);
+}
+
+
+void InstructionSelector::VisitWord64Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  VisitWordCompare(this, node, kArm64Cmp, cont, false);
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+                                              FlagsContinuation* cont) {
+  Arm64OperandGenerator g(this);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  VisitCompare(this, kArm64Float64Cmp, g.UseRegister(left),
+               g.UseRegister(right), cont);
+}
+
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+                                    BasicBlock* deoptimization) {
+  Arm64OperandGenerator g(this);
+  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+
+  FrameStateDescriptor* frame_state_descriptor = NULL;
+  if (descriptor->NeedsFrameState()) {
+    frame_state_descriptor =
+        GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
+  }
+
+  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+  // Compute InstructionOperands for inputs and outputs.
+  // TODO(turbofan): on ARM64 it's probably better to use the code object in a
+  // register if there are multiple uses of it. Improve constant pool and the
+  // heuristics in the register allocator for where to emit constants.
+  InitializeCallBuffer(call, &buffer, true, false);
+
+  // Push the arguments to the stack.
+  bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
+  int aligned_push_count = buffer.pushed_nodes.size();
+  // TODO(dcarney): claim and poke probably take only small immediates;
+  //                loop here if larger counts are needed.
+  // Bump the stack pointer(s).
+  if (aligned_push_count > 0) {
+    // TODO(dcarney): it would be better to bump the csp only here and emit
+    //                paired stores with increment for non-C frames.
+    Emit(kArm64Claim | MiscField::encode(aligned_push_count), NULL);
+  }
+  // Move arguments to the stack.
+  {
+    int slot = buffer.pushed_nodes.size() - 1;
+    // Emit the uneven pushes.
+    if (pushed_count_uneven) {
+      Node* input = buffer.pushed_nodes[slot];
+      Emit(kArm64Poke | MiscField::encode(slot), NULL, g.UseRegister(input));
+      slot--;
+    }
+    // Now all pushes can be done in pairs.
+    for (; slot >= 0; slot -= 2) {
+      Emit(kArm64PokePair | MiscField::encode(slot), NULL,
+           g.UseRegister(buffer.pushed_nodes[slot]),
+           g.UseRegister(buffer.pushed_nodes[slot - 1]));
+    }
+  }
+
+  // Select the appropriate opcode based on the call type.
+  InstructionCode opcode;
+  switch (descriptor->kind()) {
+    case CallDescriptor::kCallCodeObject: {
+      opcode = kArchCallCodeObject;
+      break;
+    }
+    case CallDescriptor::kCallJSFunction:
+      opcode = kArchCallJSFunction;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  opcode |= MiscField::encode(descriptor->flags());
+
+  // Emit the call instruction.
+  Instruction* call_instr =
+      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+           buffer.instruction_args.size(), &buffer.instruction_args.front());
+
+  call_instr->MarkAsCall();
+  if (deoptimization != NULL) {
+    DCHECK(continuation != NULL);
+    call_instr->MarkAsControl();
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/arm64/linkage-arm64.cc b/src/compiler/arm64/linkage-arm64.cc
new file mode 100644
index 0000000..2be2cb1
--- /dev/null
+++ b/src/compiler/arm64/linkage-arm64.cc
@@ -0,0 +1,66 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct Arm64LinkageHelperTraits {
+  static Register ReturnValueReg() { return x0; }
+  static Register ReturnValue2Reg() { return x1; }
+  static Register JSCallFunctionReg() { return x1; }
+  static Register ContextReg() { return cp; }
+  static Register RuntimeCallFunctionReg() { return x1; }
+  static Register RuntimeCallArgCountReg() { return x0; }
+  static RegList CCalleeSaveRegisters() {
+    // TODO(dcarney): correct callee-saved registers.
+    return 0;
+  }
+  static Register CRegisterParameter(int i) {
+    static Register register_parameters[] = {x0, x1, x2, x3, x4, x5, x6, x7};
+    return register_parameters[i];
+  }
+  static int CRegisterParametersLength() { return 8; }
+};
+
+
+typedef LinkageHelper<Arm64LinkageHelperTraits> LH;
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+  return LH::GetJSCallDescriptor(zone, parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+    Runtime::FunctionId function, int parameter_count,
+    Operator::Properties properties, Zone* zone) {
+  return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
+                                      properties);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    CallDescriptor::Flags flags, Zone* zone) {
+  return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
+                                   flags);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+                                                  MachineSignature* sig) {
+  return LH::GetSimplifiedCDescriptor(zone, sig);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/ast-graph-builder.cc b/src/compiler/ast-graph-builder.cc
new file mode 100644
index 0000000..74fb0ae
--- /dev/null
+++ b/src/compiler/ast-graph-builder.cc
@@ -0,0 +1,2034 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/ast-graph-builder.h"
+
+#include "src/compiler.h"
+#include "src/compiler/control-builders.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/full-codegen.h"
+#include "src/parser.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+AstGraphBuilder::AstGraphBuilder(CompilationInfo* info, JSGraph* jsgraph)
+    : StructuredGraphBuilder(jsgraph->graph(), jsgraph->common()),
+      info_(info),
+      jsgraph_(jsgraph),
+      globals_(0, info->zone()),
+      breakable_(NULL),
+      execution_context_(NULL) {
+  InitializeAstVisitor(info->zone());
+}
+
+
+Node* AstGraphBuilder::GetFunctionClosure() {
+  if (!function_closure_.is_set()) {
+    // Parameter -1 is special for the function closure.
+    const Operator* op = common()->Parameter(-1);
+    Node* node = NewNode(op, graph()->start());
+    function_closure_.set(node);
+  }
+  return function_closure_.get();
+}
+
+
+Node* AstGraphBuilder::GetFunctionContext() {
+  if (!function_context_.is_set()) {
+    // Parameter (arity + 1) is special for the outer context of the function.
+    const Operator* op = common()->Parameter(info()->num_parameters() + 1);
+    Node* node = NewNode(op, graph()->start());
+    function_context_.set(node);
+  }
+  return function_context_.get();
+}
+
+
+bool AstGraphBuilder::CreateGraph() {
+  Scope* scope = info()->scope();
+  DCHECK(graph() != NULL);
+
+  // Set up the basic structure of the graph.
+  int parameter_count = info()->num_parameters();
+  graph()->SetStart(graph()->NewNode(common()->Start(parameter_count)));
+
+  // Initialize the top-level environment.
+  Environment env(this, scope, graph()->start());
+  set_environment(&env);
+
+  // Build node to initialize local function context.
+  Node* closure = GetFunctionClosure();
+  Node* outer = GetFunctionContext();
+  Node* inner = BuildLocalFunctionContext(outer, closure);
+
+  // Push top-level function scope for the function body.
+  ContextScope top_context(this, scope, inner);
+
+  // Build the arguments object if it is used.
+  BuildArgumentsObject(scope->arguments());
+
+  // Emit tracing call if requested to do so.
+  if (FLAG_trace) {
+    NewNode(javascript()->Runtime(Runtime::kTraceEnter, 0));
+  }
+
+  // Visit implicit declaration of the function name.
+  if (scope->is_function_scope() && scope->function() != NULL) {
+    VisitVariableDeclaration(scope->function());
+  }
+
+  // Visit declarations within the function scope.
+  VisitDeclarations(scope->declarations());
+
+  // TODO(mstarzinger): This should do an inlined stack check.
+  Node* node = NewNode(javascript()->Runtime(Runtime::kStackGuard, 0));
+  PrepareFrameState(node, BailoutId::FunctionEntry());
+
+  // Visit statements in the function body.
+  VisitStatements(info()->function()->body());
+  if (HasStackOverflow()) return false;
+
+  // Emit tracing call if requested to do so.
+  if (FLAG_trace) {
+    // TODO(mstarzinger): Only traces implicit return.
+    Node* return_value = jsgraph()->UndefinedConstant();
+    NewNode(javascript()->Runtime(Runtime::kTraceExit, 1), return_value);
+  }
+
+  // Return 'undefined' in case we can fall off the end.
+  Node* control = NewNode(common()->Return(), jsgraph()->UndefinedConstant());
+  UpdateControlDependencyToLeaveFunction(control);
+
+  // Finish the basic structure of the graph.
+  environment()->UpdateControlDependency(exit_control());
+  graph()->SetEnd(NewNode(common()->End()));
+
+  return true;
+}
+
+
+// Left-hand side can only be a property, a global or a variable slot.
+enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+
+
+// Determine the left-hand side kind of an assignment.
+static LhsKind DetermineLhsKind(Expression* expr) {
+  Property* property = expr->AsProperty();
+  DCHECK(expr->IsValidReferenceExpression());
+  LhsKind lhs_kind = (property == NULL)
+                         ? VARIABLE
+                         : (property->key()->IsPropertyName() ? NAMED_PROPERTY
+                                                              : KEYED_PROPERTY);
+  return lhs_kind;
+}
+
+
+// Helper to find an existing shared function info in the baseline code for the
+// given function literal. Used to canonicalize SharedFunctionInfo objects.
+static Handle<SharedFunctionInfo> SearchSharedFunctionInfo(
+    Code* unoptimized_code, FunctionLiteral* expr) {
+  int start_position = expr->start_position();
+  for (RelocIterator it(unoptimized_code); !it.done(); it.next()) {
+    RelocInfo* rinfo = it.rinfo();
+    if (rinfo->rmode() != RelocInfo::EMBEDDED_OBJECT) continue;
+    Object* obj = rinfo->target_object();
+    if (obj->IsSharedFunctionInfo()) {
+      SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
+      if (shared->start_position() == start_position) {
+        return Handle<SharedFunctionInfo>(shared);
+      }
+    }
+  }
+  return Handle<SharedFunctionInfo>();
+}
+
+
+StructuredGraphBuilder::Environment* AstGraphBuilder::CopyEnvironment(
+    StructuredGraphBuilder::Environment* env) {
+  return new (zone()) Environment(*reinterpret_cast<Environment*>(env));
+}
+
+
+AstGraphBuilder::Environment::Environment(AstGraphBuilder* builder,
+                                          Scope* scope,
+                                          Node* control_dependency)
+    : StructuredGraphBuilder::Environment(builder, control_dependency),
+      parameters_count_(scope->num_parameters() + 1),
+      locals_count_(scope->num_stack_slots()),
+      parameters_node_(NULL),
+      locals_node_(NULL),
+      stack_node_(NULL) {
+  DCHECK_EQ(scope->num_parameters() + 1, parameters_count());
+
+  // Bind the receiver variable.
+  Node* receiver = builder->graph()->NewNode(common()->Parameter(0),
+                                             builder->graph()->start());
+  values()->push_back(receiver);
+
+  // Bind all parameter variables. The parameter indices are shifted by 1
+  // (receiver is parameter index -1 but environment index 0).
+  for (int i = 0; i < scope->num_parameters(); ++i) {
+    Node* parameter = builder->graph()->NewNode(common()->Parameter(i + 1),
+                                                builder->graph()->start());
+    values()->push_back(parameter);
+  }
+
+  // Bind all local variables to undefined.
+  Node* undefined_constant = builder->jsgraph()->UndefinedConstant();
+  values()->insert(values()->end(), locals_count(), undefined_constant);
+}
+
+
+AstGraphBuilder::Environment::Environment(const Environment& copy)
+    : StructuredGraphBuilder::Environment(
+          static_cast<StructuredGraphBuilder::Environment>(copy)),
+      parameters_count_(copy.parameters_count_),
+      locals_count_(copy.locals_count_),
+      parameters_node_(copy.parameters_node_),
+      locals_node_(copy.locals_node_),
+      stack_node_(copy.stack_node_) {}
+
+
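+// Recreates the cached StateValues node when the corresponding slice of the
+// environment has changed since it was last built.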
+void AstGraphBuilder::Environment::UpdateStateValues(Node** state_values,
+                                                     int offset, int count) {
+  bool should_update = false;
+  Node** env_values = (count == 0) ? NULL : &values()->at(offset);
+  if (*state_values == NULL || (*state_values)->InputCount() != count) {
+    should_update = true;
+  } else {
+    DCHECK(static_cast<size_t>(offset + count) <= values()->size());
+    for (int i = 0; i < count; i++) {
+      if ((*state_values)->InputAt(i) != env_values[i]) {
+        should_update = true;
+        break;
+      }
+    }
+  }
+  if (should_update) {
+    const Operator* op = common()->StateValues(count);
+    (*state_values) = graph()->NewNode(op, count, env_values);
+  }
+}
+
+
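+// Builds a FrameState node that captures the parameters, locals and operand
+// stack for the given bailout point.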
+Node* AstGraphBuilder::Environment::Checkpoint(
+    BailoutId ast_id, OutputFrameStateCombine combine) {
+  UpdateStateValues(&parameters_node_, 0, parameters_count());
+  UpdateStateValues(&locals_node_, parameters_count(), locals_count());
+  UpdateStateValues(&stack_node_, parameters_count() + locals_count(),
+                    stack_height());
+
+  const Operator* op = common()->FrameState(JS_FRAME, ast_id, combine);
+
+  return graph()->NewNode(op, parameters_node_, locals_node_, stack_node_,
+                          GetContext(),
+                          builder()->jsgraph()->UndefinedConstant());
+}
+
+
+AstGraphBuilder::AstContext::AstContext(AstGraphBuilder* own,
+                                        Expression::Context kind)
+    : kind_(kind), owner_(own), outer_(own->ast_context()) {
+  owner()->set_ast_context(this);  // Push.
+#ifdef DEBUG
+  original_height_ = environment()->stack_height();
+#endif
+}
+
+
+AstGraphBuilder::AstContext::~AstContext() {
+  owner()->set_ast_context(outer_);  // Pop.
+}
+
+
+AstGraphBuilder::AstEffectContext::~AstEffectContext() {
+  DCHECK(environment()->stack_height() == original_height_);
+}
+
+
+AstGraphBuilder::AstValueContext::~AstValueContext() {
+  DCHECK(environment()->stack_height() == original_height_ + 1);
+}
+
+
+AstGraphBuilder::AstTestContext::~AstTestContext() {
+  DCHECK(environment()->stack_height() == original_height_ + 1);
+}
+
+
+void AstGraphBuilder::AstEffectContext::ProduceValue(Node* value) {
+  // The value is ignored.
+}
+
+
+void AstGraphBuilder::AstValueContext::ProduceValue(Node* value) {
+  environment()->Push(value);
+}
+
+
+void AstGraphBuilder::AstTestContext::ProduceValue(Node* value) {
+  environment()->Push(owner()->BuildToBoolean(value));
+}
+
+
+Node* AstGraphBuilder::AstEffectContext::ConsumeValue() { return NULL; }
+
+
+Node* AstGraphBuilder::AstValueContext::ConsumeValue() {
+  return environment()->Pop();
+}
+
+
+Node* AstGraphBuilder::AstTestContext::ConsumeValue() {
+  return environment()->Pop();
+}
+
+
+AstGraphBuilder::BreakableScope* AstGraphBuilder::BreakableScope::FindBreakable(
+    BreakableStatement* target) {
+  BreakableScope* current = this;
+  while (current != NULL && current->target_ != target) {
+    owner_->environment()->Drop(current->drop_extra_);
+    current = current->next_;
+  }
+  DCHECK(current != NULL);  // Always found (unless stack is malformed).
+  return current;
+}
+
+
+void AstGraphBuilder::BreakableScope::BreakTarget(BreakableStatement* stmt) {
+  FindBreakable(stmt)->control_->Break();
+}
+
+
+void AstGraphBuilder::BreakableScope::ContinueTarget(BreakableStatement* stmt) {
+  FindBreakable(stmt)->control_->Continue();
+}
+
+
+void AstGraphBuilder::VisitForValueOrNull(Expression* expr) {
+  if (expr == NULL) {
+    return environment()->Push(jsgraph()->NullConstant());
+  }
+  VisitForValue(expr);
+}
+
+
+void AstGraphBuilder::VisitForValues(ZoneList<Expression*>* exprs) {
+  for (int i = 0; i < exprs->length(); ++i) {
+    VisitForValue(exprs->at(i));
+  }
+}
+
+
+void AstGraphBuilder::VisitForValue(Expression* expr) {
+  AstValueContext for_value(this);
+  if (!HasStackOverflow()) {
+    expr->Accept(this);
+  }
+}
+
+
+void AstGraphBuilder::VisitForEffect(Expression* expr) {
+  AstEffectContext for_effect(this);
+  if (!HasStackOverflow()) {
+    expr->Accept(this);
+  }
+}
+
+
+void AstGraphBuilder::VisitForTest(Expression* expr) {
+  AstTestContext for_condition(this);
+  if (!HasStackOverflow()) {
+    expr->Accept(this);
+  }
+}
+
+
+void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
+  Variable* variable = decl->proxy()->var();
+  VariableMode mode = decl->mode();
+  bool hole_init = mode == CONST || mode == CONST_LEGACY || mode == LET;
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      Handle<Oddball> value = variable->binding_needs_init()
+                                  ? isolate()->factory()->the_hole_value()
+                                  : isolate()->factory()->undefined_value();
+      globals()->Add(variable->name(), zone());
+      globals()->Add(value, zone());
+      break;
+    }
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+      if (hole_init) {
+        Node* value = jsgraph()->TheHoleConstant();
+        environment()->Bind(variable, value);
+      }
+      break;
+    case Variable::CONTEXT:
+      if (hole_init) {
+        Node* value = jsgraph()->TheHoleConstant();
+        const Operator* op = javascript()->StoreContext(0, variable->index());
+        NewNode(op, current_context(), value);
+      }
+      break;
+    case Variable::LOOKUP:
+      UNIMPLEMENTED();
+  }
+}
+
+
+void AstGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) {
+  Variable* variable = decl->proxy()->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      Handle<SharedFunctionInfo> function =
+          Compiler::BuildFunctionInfo(decl->fun(), info()->script(), info());
+      // Check for stack-overflow exception.
+      if (function.is_null()) return SetStackOverflow();
+      globals()->Add(variable->name(), zone());
+      globals()->Add(function, zone());
+      break;
+    }
+    case Variable::PARAMETER:
+    case Variable::LOCAL: {
+      VisitForValue(decl->fun());
+      Node* value = environment()->Pop();
+      environment()->Bind(variable, value);
+      break;
+    }
+    case Variable::CONTEXT: {
+      VisitForValue(decl->fun());
+      Node* value = environment()->Pop();
+      const Operator* op = javascript()->StoreContext(0, variable->index());
+      NewNode(op, current_context(), value);
+      break;
+    }
+    case Variable::LOOKUP:
+      UNIMPLEMENTED();
+  }
+}
+
+
+void AstGraphBuilder::VisitModuleDeclaration(ModuleDeclaration* decl) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitImportDeclaration(ImportDeclaration* decl) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitExportDeclaration(ExportDeclaration* decl) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitModuleLiteral(ModuleLiteral* modl) { UNREACHABLE(); }
+
+
+void AstGraphBuilder::VisitModuleVariable(ModuleVariable* modl) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitModulePath(ModulePath* modl) { UNREACHABLE(); }
+
+
+void AstGraphBuilder::VisitModuleUrl(ModuleUrl* modl) { UNREACHABLE(); }
+
+
+void AstGraphBuilder::VisitBlock(Block* stmt) {
+  BlockBuilder block(this);
+  BreakableScope scope(this, stmt, &block, 0);
+  if (stmt->labels() != NULL) block.BeginBlock();
+  if (stmt->scope() == NULL) {
+    // Visit statements in the same scope, no declarations.
+    VisitStatements(stmt->statements());
+  } else {
+    const Operator* op = javascript()->CreateBlockContext();
+    Node* scope_info = jsgraph()->Constant(stmt->scope()->GetScopeInfo());
+    Node* context = NewNode(op, scope_info, GetFunctionClosure());
+    ContextScope scope(this, stmt->scope(), context);
+
+    // Visit declarations and statements in a block scope.
+    VisitDeclarations(stmt->scope()->declarations());
+    VisitStatements(stmt->statements());
+  }
+  if (stmt->labels() != NULL) block.EndBlock();
+}
+
+
+void AstGraphBuilder::VisitModuleStatement(ModuleStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+  VisitForEffect(stmt->expression());
+}
+
+
+void AstGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+  // Do nothing.
+}
+
+
+void AstGraphBuilder::VisitIfStatement(IfStatement* stmt) {
+  IfBuilder compare_if(this);
+  VisitForTest(stmt->condition());
+  Node* condition = environment()->Pop();
+  compare_if.If(condition);
+  compare_if.Then();
+  Visit(stmt->then_statement());
+  compare_if.Else();
+  Visit(stmt->else_statement());
+  compare_if.End();
+}
+
+
+void AstGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+  StructuredGraphBuilder::Environment* env = environment()->CopyAsUnreachable();
+  breakable()->ContinueTarget(stmt->target());
+  set_environment(env);
+}
+
+
+void AstGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
+  StructuredGraphBuilder::Environment* env = environment()->CopyAsUnreachable();
+  breakable()->BreakTarget(stmt->target());
+  set_environment(env);
+}
+
+
+void AstGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+  VisitForValue(stmt->expression());
+  Node* result = environment()->Pop();
+  Node* control = NewNode(common()->Return(), result);
+  UpdateControlDependencyToLeaveFunction(control);
+}
+
+
+void AstGraphBuilder::VisitWithStatement(WithStatement* stmt) {
+  VisitForValue(stmt->expression());
+  Node* value = environment()->Pop();
+  const Operator* op = javascript()->CreateWithContext();
+  Node* context = NewNode(op, value, GetFunctionClosure());
+  ContextScope scope(this, stmt->scope(), context);
+  Visit(stmt->statement());
+}
+
+
+void AstGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+  ZoneList<CaseClause*>* clauses = stmt->cases();
+  SwitchBuilder compare_switch(this, clauses->length());
+  BreakableScope scope(this, stmt, &compare_switch, 0);
+  compare_switch.BeginSwitch();
+  int default_index = -1;
+
+  // Keep the switch value on the stack until a case matches.
+  VisitForValue(stmt->tag());
+  Node* tag = environment()->Top();
+
+  // Iterate over all cases and create nodes for label comparison.
+  for (int i = 0; i < clauses->length(); i++) {
+    CaseClause* clause = clauses->at(i);
+
+    // The default is not a test; remember its index.
+    if (clause->is_default()) {
+      default_index = i;
+      continue;
+    }
+
+    // Create nodes to perform label comparison as if via '==='. The switch
+    // value is still on the operand stack while the label is evaluated.
+    VisitForValue(clause->label());
+    Node* label = environment()->Pop();
+    const Operator* op = javascript()->StrictEqual();
+    Node* condition = NewNode(op, tag, label);
+    compare_switch.BeginLabel(i, condition);
+
+    // Discard the switch value at label match.
+    environment()->Pop();
+    compare_switch.EndLabel();
+  }
+
+  // Discard the switch value and mark the default case.
+  environment()->Pop();
+  if (default_index >= 0) {
+    compare_switch.DefaultAt(default_index);
+  }
+
+  // Iterate over all cases and create nodes for case bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    CaseClause* clause = clauses->at(i);
+    compare_switch.BeginCase(i);
+    VisitStatements(clause->statements());
+    compare_switch.EndCase();
+  }
+
+  compare_switch.EndSwitch();
+}
+
+
+void AstGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  LoopBuilder while_loop(this);
+  while_loop.BeginLoop();
+  VisitIterationBody(stmt, &while_loop, 0);
+  while_loop.EndBody();
+  VisitForTest(stmt->cond());
+  Node* condition = environment()->Pop();
+  while_loop.BreakUnless(condition);
+  while_loop.EndLoop();
+}
+
+
+void AstGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
+  LoopBuilder while_loop(this);
+  while_loop.BeginLoop();
+  VisitForTest(stmt->cond());
+  Node* condition = environment()->Pop();
+  while_loop.BreakUnless(condition);
+  VisitIterationBody(stmt, &while_loop, 0);
+  while_loop.EndBody();
+  while_loop.EndLoop();
+}
+
+
+void AstGraphBuilder::VisitForStatement(ForStatement* stmt) {
+  LoopBuilder for_loop(this);
+  VisitIfNotNull(stmt->init());
+  for_loop.BeginLoop();
+  if (stmt->cond() != NULL) {
+    VisitForTest(stmt->cond());
+    Node* condition = environment()->Pop();
+    for_loop.BreakUnless(condition);
+  }
+  VisitIterationBody(stmt, &for_loop, 0);
+  for_loop.EndBody();
+  VisitIfNotNull(stmt->next());
+  for_loop.EndLoop();
+}
+
+
+// TODO(dcarney): this is a big function; try to clean up parts of it.
+void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
+  VisitForValue(stmt->subject());
+  Node* obj = environment()->Pop();
+  // Check for undefined or null before entering loop.
+  IfBuilder is_undefined(this);
+  Node* is_undefined_cond =
+      NewNode(javascript()->StrictEqual(), obj, jsgraph()->UndefinedConstant());
+  is_undefined.If(is_undefined_cond);
+  is_undefined.Then();
+  is_undefined.Else();
+  {
+    IfBuilder is_null(this);
+    Node* is_null_cond =
+        NewNode(javascript()->StrictEqual(), obj, jsgraph()->NullConstant());
+    is_null.If(is_null_cond);
+    is_null.Then();
+    is_null.Else();
+    // Convert the object to a JSObject.
+    // PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+    obj = NewNode(javascript()->ToObject(), obj);
+    environment()->Push(obj);
+    // TODO(dcarney): should do a fast enum cache check here to skip runtime.
+    environment()->Push(obj);
+    Node* cache_type = ProcessArguments(
+        javascript()->Runtime(Runtime::kGetPropertyNamesFast, 1), 1);
+    // TODO(dcarney): these next runtime calls should be removed in favour of
+    //                a few simplified instructions.
+    environment()->Push(obj);
+    environment()->Push(cache_type);
+    Node* cache_pair =
+        ProcessArguments(javascript()->Runtime(Runtime::kForInInit, 2), 2);
+    // cache_type may have been replaced.
+    Node* cache_array = NewNode(common()->Projection(0), cache_pair);
+    cache_type = NewNode(common()->Projection(1), cache_pair);
+    environment()->Push(cache_type);
+    environment()->Push(cache_array);
+    Node* cache_length = ProcessArguments(
+        javascript()->Runtime(Runtime::kForInCacheArrayLength, 2), 2);
+    {
+      // TODO(dcarney): this check is actually supposed to be for the
+      //                empty enum case only.
+      IfBuilder have_no_properties(this);
+      Node* empty_array_cond = NewNode(javascript()->StrictEqual(),
+                                       cache_length, jsgraph()->ZeroConstant());
+      have_no_properties.If(empty_array_cond);
+      have_no_properties.Then();
+      // Pop obj and skip loop.
+      environment()->Pop();
+      have_no_properties.Else();
+      {
+        // Construct the rest of the environment.
+        environment()->Push(cache_type);
+        environment()->Push(cache_array);
+        environment()->Push(cache_length);
+        environment()->Push(jsgraph()->ZeroConstant());
+        // PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+        LoopBuilder for_loop(this);
+        for_loop.BeginLoop();
+        // Check loop termination condition.
+        Node* index = environment()->Peek(0);
+        Node* exit_cond =
+            NewNode(javascript()->LessThan(), index, cache_length);
+        // TODO(jarin): provide real bailout id.
+        PrepareFrameState(exit_cond, BailoutId::None());
+        for_loop.BreakUnless(exit_cond);
+        // TODO(dcarney): this runtime call should be a handful of
+        //                simplified instructions that
+        //                basically produce
+        //                    value = array[index]
+        environment()->Push(obj);
+        environment()->Push(cache_array);
+        environment()->Push(cache_type);
+        environment()->Push(index);
+        Node* pair =
+            ProcessArguments(javascript()->Runtime(Runtime::kForInNext, 4), 4);
+        Node* value = NewNode(common()->Projection(0), pair);
+        Node* should_filter = NewNode(common()->Projection(1), pair);
+        environment()->Push(value);
+        {
+          // Test if FILTER_KEY needs to be called.
+          IfBuilder test_should_filter(this);
+          Node* should_filter_cond =
+              NewNode(javascript()->StrictEqual(), should_filter,
+                      jsgraph()->TrueConstant());
+          test_should_filter.If(should_filter_cond);
+          test_should_filter.Then();
+          value = environment()->Pop();
+          Node* builtins = BuildLoadBuiltinsObject();
+          Node* function = BuildLoadObjectField(
+              builtins,
+              JSBuiltinsObject::OffsetOfFunctionWithId(Builtins::FILTER_KEY));
+          // Callee.
+          environment()->Push(function);
+          // Receiver.
+          environment()->Push(obj);
+          // Args.
+          environment()->Push(value);
+          // The result is either the string key or Smi(0) indicating that the
+          // property is gone.
+          Node* res = ProcessArguments(
+              javascript()->Call(3, NO_CALL_FUNCTION_FLAGS), 3);
+          // TODO(jarin): provide real bailout id.
+          PrepareFrameState(res, BailoutId::None());
+          Node* property_missing = NewNode(javascript()->StrictEqual(), res,
+                                           jsgraph()->ZeroConstant());
+          {
+            IfBuilder is_property_missing(this);
+            is_property_missing.If(property_missing);
+            is_property_missing.Then();
+            // Inc counter and continue.
+            Node* index_inc =
+                NewNode(javascript()->Add(), index, jsgraph()->OneConstant());
+            // TODO(jarin): provide real bailout id.
+            PrepareFrameState(index_inc, BailoutId::None());
+            environment()->Poke(0, index_inc);
+            for_loop.Continue();
+            is_property_missing.Else();
+            is_property_missing.End();
+          }
+          // Replace 'value' in environment.
+          environment()->Push(res);
+          test_should_filter.Else();
+          test_should_filter.End();
+        }
+        value = environment()->Pop();
+        // Bind value and do loop body.
+        VisitForInAssignment(stmt->each(), value);
+        VisitIterationBody(stmt, &for_loop, 5);
+        for_loop.EndBody();
+        // Inc counter and continue.
+        Node* index_inc =
+            NewNode(javascript()->Add(), index, jsgraph()->OneConstant());
+        // TODO(jarin): provide real bailout id.
+        PrepareFrameState(index_inc, BailoutId::None());
+        environment()->Poke(0, index_inc);
+        for_loop.EndLoop();
+        environment()->Drop(5);
+        // PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+      }
+      have_no_properties.End();
+    }
+    is_null.End();
+  }
+  is_undefined.End();
+}
+
+
+void AstGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
+  VisitForValue(stmt->subject());
+  environment()->Pop();
+  // TODO(turbofan): create and use loop builder.
+}
+
+
+void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
+  // TODO(turbofan): Do we really need a separate reloc-info for this?
+  Node* node = NewNode(javascript()->Runtime(Runtime::kDebugBreak, 0));
+  PrepareFrameState(node, stmt->DebugBreakId());
+}
+
+
+void AstGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+  Node* context = current_context();
+
+  // Build a new shared function info if we cannot find one in the baseline
+  // code. We also have a stack overflow if the recursive compilation hit one.
+  Handle<SharedFunctionInfo> shared_info =
+      SearchSharedFunctionInfo(info()->shared_info()->code(), expr);
+  if (shared_info.is_null()) {
+    shared_info = Compiler::BuildFunctionInfo(expr, info()->script(), info());
+    CHECK(!shared_info.is_null());  // TODO(mstarzinger): Set stack overflow?
+  }
+
+  // Create node to instantiate a new closure.
+  Node* info = jsgraph()->Constant(shared_info);
+  Node* pretenure = expr->pretenure() ? jsgraph()->TrueConstant()
+                                      : jsgraph()->FalseConstant();
+  const Operator* op = javascript()->Runtime(Runtime::kNewClosure, 3);
+  Node* value = NewNode(op, context, info, pretenure);
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitClassLiteral(ClassLiteral* expr) {
+  // TODO(arv): Implement.
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitConditional(Conditional* expr) {
+  IfBuilder compare_if(this);
+  VisitForTest(expr->condition());
+  Node* condition = environment()->Pop();
+  compare_if.If(condition);
+  compare_if.Then();
+  Visit(expr->then_expression());
+  compare_if.Else();
+  Visit(expr->else_expression());
+  compare_if.End();
+  ast_context()->ReplaceValue();
+}
+
+
+void AstGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
+  Node* value = BuildVariableLoad(expr->var(), expr->id());
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitLiteral(Literal* expr) {
+  Node* value = jsgraph()->Constant(expr->value());
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+  Node* closure = GetFunctionClosure();
+
+  // Create node to materialize a regular expression literal.
+  Node* literals_array =
+      BuildLoadObjectField(closure, JSFunction::kLiteralsOffset);
+  Node* literal_index = jsgraph()->Constant(expr->literal_index());
+  Node* pattern = jsgraph()->Constant(expr->pattern());
+  Node* flags = jsgraph()->Constant(expr->flags());
+  const Operator* op =
+      javascript()->Runtime(Runtime::kMaterializeRegExpLiteral, 4);
+  Node* literal = NewNode(op, literals_array, literal_index, pattern, flags);
+  ast_context()->ProduceValue(literal);
+}
+
+
+void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+  Node* closure = GetFunctionClosure();
+
+  // Create node to deep-copy the literal boilerplate.
+  expr->BuildConstantProperties(isolate());
+  Node* literals_array =
+      BuildLoadObjectField(closure, JSFunction::kLiteralsOffset);
+  Node* literal_index = jsgraph()->Constant(expr->literal_index());
+  Node* constants = jsgraph()->Constant(expr->constant_properties());
+  Node* flags = jsgraph()->Constant(expr->ComputeFlags());
+  const Operator* op = javascript()->Runtime(Runtime::kCreateObjectLiteral, 4);
+  Node* literal = NewNode(op, literals_array, literal_index, constants, flags);
+
+  // The object is expected on the operand stack during computation of the
+  // property values and is the value of the entire expression.
+  environment()->Push(literal);
+
+  // Mark all computed expressions that are bound to a key that is shadowed by
+  // a later occurrence of the same key. For the marked expressions, no store
+  // code is emitted.
+  expr->CalculateEmitStore(zone());
+
+  // Create nodes to store computed values into the literal.
+  AccessorTable accessor_table(zone());
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    if (property->IsCompileTimeValue()) continue;
+
+    Literal* key = property->key();
+    switch (property->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+        UNREACHABLE();
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
+      // Fall through.
+      case ObjectLiteral::Property::COMPUTED: {
+        // It is safe to use [[Put]] here because the boilerplate already
+        // contains computed properties with an uninitialized value.
+        if (key->value()->IsInternalizedString()) {
+          if (property->emit_store()) {
+            VisitForValue(property->value());
+            Node* value = environment()->Pop();
+            Unique<Name> name = MakeUnique(key->AsPropertyName());
+            Node* store = NewNode(javascript()->StoreNamed(strict_mode(), name),
+                                  literal, value);
+            PrepareFrameState(store, key->id());
+          } else {
+            VisitForEffect(property->value());
+          }
+          break;
+        }
+        environment()->Push(literal);  // Duplicate receiver.
+        VisitForValue(property->key());
+        VisitForValue(property->value());
+        Node* value = environment()->Pop();
+        Node* key = environment()->Pop();
+        Node* receiver = environment()->Pop();
+        if (property->emit_store()) {
+          Node* strict = jsgraph()->Constant(SLOPPY);
+          const Operator* op = javascript()->Runtime(Runtime::kSetProperty, 4);
+          NewNode(op, receiver, key, value, strict);
+        }
+        break;
+      }
+      case ObjectLiteral::Property::PROTOTYPE: {
+        environment()->Push(literal);  // Duplicate receiver.
+        VisitForValue(property->value());
+        Node* value = environment()->Pop();
+        Node* receiver = environment()->Pop();
+        if (property->emit_store()) {
+          const Operator* op = javascript()->Runtime(Runtime::kSetPrototype, 2);
+          NewNode(op, receiver, value);
+        }
+        break;
+      }
+      case ObjectLiteral::Property::GETTER:
+        accessor_table.lookup(key)->second->getter = property->value();
+        break;
+      case ObjectLiteral::Property::SETTER:
+        accessor_table.lookup(key)->second->setter = property->value();
+        break;
+    }
+  }
+
+  // Create nodes to define accessors, using only a single call to the runtime
+  // for each pair of corresponding getters and setters.
+  for (AccessorTable::Iterator it = accessor_table.begin();
+       it != accessor_table.end(); ++it) {
+    VisitForValue(it->first);
+    VisitForValueOrNull(it->second->getter);
+    VisitForValueOrNull(it->second->setter);
+    Node* setter = environment()->Pop();
+    Node* getter = environment()->Pop();
+    Node* name = environment()->Pop();
+    Node* attr = jsgraph()->Constant(NONE);
+    const Operator* op =
+        javascript()->Runtime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+    Node* call = NewNode(op, literal, name, getter, setter, attr);
+    PrepareFrameState(call, it->first->id());
+  }
+
+  // Transform literals that contain functions to fast properties.
+  if (expr->has_function()) {
+    const Operator* op = javascript()->Runtime(Runtime::kToFastProperties, 1);
+    NewNode(op, literal);
+  }
+
+  ast_context()->ProduceValue(environment()->Pop());
+}
+
+
+void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+  Node* closure = GetFunctionClosure();
+
+  // Create node to deep-copy the literal boilerplate.
+  expr->BuildConstantElements(isolate());
+  Node* literals_array =
+      BuildLoadObjectField(closure, JSFunction::kLiteralsOffset);
+  Node* literal_index = jsgraph()->Constant(expr->literal_index());
+  Node* constants = jsgraph()->Constant(expr->constant_elements());
+  Node* flags = jsgraph()->Constant(expr->ComputeFlags());
+  const Operator* op = javascript()->Runtime(Runtime::kCreateArrayLiteral, 4);
+  Node* literal = NewNode(op, literals_array, literal_index, constants, flags);
+
+  // The array and the literal index are both expected on the operand stack
+  // during computation of the element values.
+  environment()->Push(literal);
+  environment()->Push(literal_index);
+
+  // Create nodes to evaluate all the non-constant subexpressions and to store
+  // them into the newly cloned array.
+  for (int i = 0; i < expr->values()->length(); i++) {
+    Expression* subexpr = expr->values()->at(i);
+    if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+    VisitForValue(subexpr);
+    Node* value = environment()->Pop();
+    Node* index = jsgraph()->Constant(i);
+    Node* store = NewNode(javascript()->StoreProperty(strict_mode()), literal,
+                          index, value);
+    PrepareFrameState(store, expr->GetIdForElement(i));
+  }
+
+  environment()->Pop();  // Array literal index.
+  ast_context()->ProduceValue(environment()->Pop());
+}
+
+
+void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value) {
+  DCHECK(expr->IsValidReferenceExpression());
+
+  // Left-hand side can only be a property, a global or a variable slot.
+  Property* property = expr->AsProperty();
+  LhsKind assign_type = DetermineLhsKind(expr);
+
+  // Evaluate LHS expression and store the value.
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* var = expr->AsVariableProxy()->var();
+      // TODO(jarin): Fill in the correct bailout id.
+      BuildVariableAssignment(var, value, Token::ASSIGN, BailoutId::None());
+      break;
+    }
+    case NAMED_PROPERTY: {
+      environment()->Push(value);
+      VisitForValue(property->obj());
+      Node* object = environment()->Pop();
+      value = environment()->Pop();
+      Unique<Name> name =
+          MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+      Node* store =
+          NewNode(javascript()->StoreNamed(strict_mode(), name), object, value);
+      // TODO(jarin): Fill in the correct bailout id.
+      PrepareFrameState(store, BailoutId::None());
+      break;
+    }
+    case KEYED_PROPERTY: {
+      environment()->Push(value);
+      VisitForValue(property->obj());
+      VisitForValue(property->key());
+      Node* key = environment()->Pop();
+      Node* object = environment()->Pop();
+      value = environment()->Pop();
+      Node* store = NewNode(javascript()->StoreProperty(strict_mode()), object,
+                            key, value);
+      // TODO(jarin) Fill in the correct bailout id.
+      PrepareFrameState(store, BailoutId::None());
+      break;
+    }
+  }
+}
+
+
+void AstGraphBuilder::VisitAssignment(Assignment* expr) {
+  DCHECK(expr->target()->IsValidReferenceExpression());
+
+  // Left-hand side can only be a property, a global or a variable slot.
+  Property* property = expr->target()->AsProperty();
+  LhsKind assign_type = DetermineLhsKind(expr->target());
+
+  // Evaluate LHS expression.
+  switch (assign_type) {
+    case VARIABLE:
+      // Nothing to do here.
+      break;
+    case NAMED_PROPERTY:
+      VisitForValue(property->obj());
+      break;
+    case KEYED_PROPERTY: {
+      VisitForValue(property->obj());
+      VisitForValue(property->key());
+      break;
+    }
+  }
+
+  // Evaluate the value and potentially handle compound assignments by loading
+  // the left-hand side value and performing a binary operation.
+  if (expr->is_compound()) {
+    Node* old_value = NULL;
+    switch (assign_type) {
+      case VARIABLE: {
+        Variable* variable = expr->target()->AsVariableProxy()->var();
+        old_value = BuildVariableLoad(variable, expr->target()->id());
+        break;
+      }
+      case NAMED_PROPERTY: {
+        Node* object = environment()->Top();
+        Unique<Name> name =
+            MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+        old_value = NewNode(javascript()->LoadNamed(name), object);
+        PrepareFrameState(old_value, property->LoadId(), kPushOutput);
+        break;
+      }
+      case KEYED_PROPERTY: {
+        Node* key = environment()->Top();
+        Node* object = environment()->Peek(1);
+        old_value = NewNode(javascript()->LoadProperty(), object, key);
+        PrepareFrameState(old_value, property->LoadId(), kPushOutput);
+        break;
+      }
+    }
+    environment()->Push(old_value);
+    VisitForValue(expr->value());
+    Node* right = environment()->Pop();
+    Node* left = environment()->Pop();
+    Node* value = BuildBinaryOp(left, right, expr->binary_op());
+    PrepareFrameState(value, expr->binary_operation()->id(), kPushOutput);
+    environment()->Push(value);
+  } else {
+    VisitForValue(expr->value());
+  }
+
+  // Store the value.
+  Node* value = environment()->Pop();
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* variable = expr->target()->AsVariableProxy()->var();
+      BuildVariableAssignment(variable, value, expr->op(),
+                              expr->AssignmentId());
+      break;
+    }
+    case NAMED_PROPERTY: {
+      Node* object = environment()->Pop();
+      Unique<Name> name =
+          MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+      Node* store =
+          NewNode(javascript()->StoreNamed(strict_mode(), name), object, value);
+      PrepareFrameState(store, expr->AssignmentId());
+      break;
+    }
+    case KEYED_PROPERTY: {
+      Node* key = environment()->Pop();
+      Node* object = environment()->Pop();
+      Node* store = NewNode(javascript()->StoreProperty(strict_mode()), object,
+                            key, value);
+      PrepareFrameState(store, expr->AssignmentId());
+      break;
+    }
+  }
+
+  ast_context()->ProduceValue(value);
+}
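+
+// Note on VisitAssignment above: for a compound assignment such as "o.x += 1"
+// the object is kept on the operand stack, the old value is loaded with
+// LoadNamed, the addition is built with BuildBinaryOp, and the result is then
+// written back with StoreNamed.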
+
+
+void AstGraphBuilder::VisitYield(Yield* expr) {
+  VisitForValue(expr->generator_object());
+  VisitForValue(expr->expression());
+  environment()->Pop();
+  environment()->Pop();
+  // TODO(turbofan): VisitYield
+  ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
+}
+
+
+void AstGraphBuilder::VisitThrow(Throw* expr) {
+  VisitForValue(expr->exception());
+  Node* exception = environment()->Pop();
+  const Operator* op = javascript()->Runtime(Runtime::kThrow, 1);
+  Node* value = NewNode(op, exception);
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitProperty(Property* expr) {
+  Node* value;
+  if (expr->key()->IsPropertyName()) {
+    VisitForValue(expr->obj());
+    Node* object = environment()->Pop();
+    Unique<Name> name = MakeUnique(expr->key()->AsLiteral()->AsPropertyName());
+    value = NewNode(javascript()->LoadNamed(name), object);
+  } else {
+    VisitForValue(expr->obj());
+    VisitForValue(expr->key());
+    Node* key = environment()->Pop();
+    Node* object = environment()->Pop();
+    value = NewNode(javascript()->LoadProperty(), object, key);
+  }
+  PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitCall(Call* expr) {
+  Expression* callee = expr->expression();
+  Call::CallType call_type = expr->GetCallType(isolate());
+
+  // Prepare the callee and the receiver for the function call. This depends
+  // on the semantics of the underlying call type.
+  CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS;
+  Node* receiver_value = NULL;
+  Node* callee_value = NULL;
+  bool possibly_eval = false;
+  switch (call_type) {
+    case Call::GLOBAL_CALL: {
+      Variable* variable = callee->AsVariableProxy()->var();
+      callee_value = BuildVariableLoad(variable, expr->expression()->id());
+      receiver_value = jsgraph()->UndefinedConstant();
+      break;
+    }
+    case Call::LOOKUP_SLOT_CALL: {
+      Variable* variable = callee->AsVariableProxy()->var();
+      DCHECK(variable->location() == Variable::LOOKUP);
+      Node* name = jsgraph()->Constant(variable->name());
+      const Operator* op = javascript()->Runtime(Runtime::kLoadLookupSlot, 2);
+      Node* pair = NewNode(op, current_context(), name);
+      callee_value = NewNode(common()->Projection(0), pair);
+      receiver_value = NewNode(common()->Projection(1), pair);
+      break;
+    }
+    case Call::PROPERTY_CALL: {
+      Property* property = callee->AsProperty();
+      VisitForValue(property->obj());
+      Node* object = environment()->Top();
+      if (property->key()->IsPropertyName()) {
+        Unique<Name> name =
+            MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+        callee_value = NewNode(javascript()->LoadNamed(name), object);
+      } else {
+        VisitForValue(property->key());
+        Node* key = environment()->Pop();
+        callee_value = NewNode(javascript()->LoadProperty(), object, key);
+      }
+      PrepareFrameState(callee_value, property->LoadId(), kPushOutput);
+      receiver_value = environment()->Pop();
+      // Note that a PROPERTY_CALL requires the receiver to be wrapped into an
+      // object for sloppy callees. This could also be modeled explicitly here,
+      // thereby obsoleting the need for a flag to the call operator.
+      flags = CALL_AS_METHOD;
+      break;
+    }
+    case Call::POSSIBLY_EVAL_CALL:
+      possibly_eval = true;
+    // Fall through.
+    case Call::OTHER_CALL:
+      VisitForValue(callee);
+      callee_value = environment()->Pop();
+      receiver_value = jsgraph()->UndefinedConstant();
+      break;
+  }
+
+  // The callee and the receiver both have to be pushed onto the operand stack
+  // before the arguments are evaluated.
+  environment()->Push(callee_value);
+  environment()->Push(receiver_value);
+
+  // Evaluate all arguments to the function call.
+  ZoneList<Expression*>* args = expr->arguments();
+  VisitForValues(args);
+
+  // Resolve callee and receiver for a potential direct eval call. This block
+  // will mutate the callee and receiver values pushed onto the environment.
+  if (possibly_eval && args->length() > 0) {
+    int arg_count = args->length();
+
+    // Extract callee and source string from the environment.
+    Node* callee = environment()->Peek(arg_count + 1);
+    Node* source = environment()->Peek(arg_count - 1);
+
+    // Create node to ask for help resolving potential eval call. This will
+    // provide a fully resolved callee and the corresponding receiver.
+    Node* function = GetFunctionClosure();
+    Node* receiver = environment()->Lookup(info()->scope()->receiver());
+    Node* strict = jsgraph()->Constant(strict_mode());
+    Node* position = jsgraph()->Constant(info()->scope()->start_position());
+    const Operator* op =
+        javascript()->Runtime(Runtime::kResolvePossiblyDirectEval, 6);
+    Node* pair =
+        NewNode(op, callee, source, function, receiver, strict, position);
+    Node* new_callee = NewNode(common()->Projection(0), pair);
+    Node* new_receiver = NewNode(common()->Projection(1), pair);
+
+    // Patch callee and receiver on the environment.
+    environment()->Poke(arg_count + 1, new_callee);
+    environment()->Poke(arg_count + 0, new_receiver);
+  }
+
+  // Create node to perform the function call.
+  const Operator* call = javascript()->Call(args->length() + 2, flags);
+  Node* value = ProcessArguments(call, args->length() + 2);
+  PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+  ast_context()->ProduceValue(value);
+}
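+
+// Note on VisitCall above: for a potential direct eval such as "eval(src)"
+// the callee and receiver pushed before the arguments are patched afterwards
+// via Runtime::kResolvePossiblyDirectEval, so the call node always consumes
+// the resolved callee/receiver pair.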
+
+
+void AstGraphBuilder::VisitCallNew(CallNew* expr) {
+  VisitForValue(expr->expression());
+
+  // Evaluate all arguments to the construct call.
+  ZoneList<Expression*>* args = expr->arguments();
+  VisitForValues(args);
+
+  // Create node to perform the construct call.
+  const Operator* call = javascript()->CallNew(args->length() + 1);
+  Node* value = ProcessArguments(call, args->length() + 1);
+  PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitCallJSRuntime(CallRuntime* expr) {
+  Handle<String> name = expr->name();
+
+  // The callee and the receiver both have to be pushed onto the operand stack
+  // before the arguments are evaluated.
+  CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS;
+  Node* receiver_value = BuildLoadBuiltinsObject();
+  Unique<String> unique = MakeUnique(name);
+  Node* callee_value = NewNode(javascript()->LoadNamed(unique), receiver_value);
+  // TODO(jarin): Find/create a bailout id to deoptimize to (crankshaft
+  // refuses to optimize functions with jsruntime calls).
+  PrepareFrameState(callee_value, BailoutId::None(), kPushOutput);
+  environment()->Push(callee_value);
+  environment()->Push(receiver_value);
+
+  // Evaluate all arguments to the JS runtime call.
+  ZoneList<Expression*>* args = expr->arguments();
+  VisitForValues(args);
+
+  // Create node to perform the JS runtime call.
+  const Operator* call = javascript()->Call(args->length() + 2, flags);
+  Node* value = ProcessArguments(call, args->length() + 2);
+  PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
+  const Runtime::Function* function = expr->function();
+
+  // Handle calls to runtime functions implemented in JavaScript separately as
+  // the call follows the JavaScript ABI and the callee is statically unknown.
+  if (expr->is_jsruntime()) {
+    DCHECK(function == NULL && expr->name()->length() > 0);
+    return VisitCallJSRuntime(expr);
+  }
+
+  // Evaluate all arguments to the runtime call.
+  ZoneList<Expression*>* args = expr->arguments();
+  VisitForValues(args);
+
+  // Create node to perform the runtime call.
+  Runtime::FunctionId functionId = function->function_id;
+  const Operator* call = javascript()->Runtime(functionId, args->length());
+  Node* value = ProcessArguments(call, args->length());
+  PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
+  switch (expr->op()) {
+    case Token::DELETE:
+      return VisitDelete(expr);
+    case Token::VOID:
+      return VisitVoid(expr);
+    case Token::TYPEOF:
+      return VisitTypeof(expr);
+    case Token::NOT:
+      return VisitNot(expr);
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
+  DCHECK(expr->expression()->IsValidReferenceExpression());
+
+  // Left-hand side can only be a property, a global or a variable slot.
+  Property* property = expr->expression()->AsProperty();
+  LhsKind assign_type = DetermineLhsKind(expr->expression());
+
+  // Reserve space for result of postfix operation.
+  bool is_postfix = expr->is_postfix() && !ast_context()->IsEffect();
+  if (is_postfix) environment()->Push(jsgraph()->UndefinedConstant());
+
+  // Evaluate LHS expression and get old value.
+  Node* old_value = NULL;
+  int stack_depth = -1;
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* variable = expr->expression()->AsVariableProxy()->var();
+      old_value = BuildVariableLoad(variable, expr->expression()->id());
+      stack_depth = 0;
+      break;
+    }
+    case NAMED_PROPERTY: {
+      VisitForValue(property->obj());
+      Node* object = environment()->Top();
+      Unique<Name> name =
+          MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+      old_value = NewNode(javascript()->LoadNamed(name), object);
+      PrepareFrameState(old_value, property->LoadId(), kPushOutput);
+      stack_depth = 1;
+      break;
+    }
+    case KEYED_PROPERTY: {
+      VisitForValue(property->obj());
+      VisitForValue(property->key());
+      Node* key = environment()->Top();
+      Node* object = environment()->Peek(1);
+      old_value = NewNode(javascript()->LoadProperty(), object, key);
+      PrepareFrameState(old_value, property->LoadId(), kPushOutput);
+      stack_depth = 2;
+      break;
+    }
+  }
+
+  // Convert old value into a number.
+  old_value = NewNode(javascript()->ToNumber(), old_value);
+
+  // Save result for postfix expressions at correct stack depth.
+  if (is_postfix) environment()->Poke(stack_depth, old_value);
+
+  // Create node to perform +1/-1 operation.
+  Node* value =
+      BuildBinaryOp(old_value, jsgraph()->OneConstant(), expr->binary_op());
+  // TODO(jarin) Insert proper bailout id here (will need to change
+  // full code generator).
+  PrepareFrameState(value, BailoutId::None());
+
+  // Store the value.
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* variable = expr->expression()->AsVariableProxy()->var();
+      environment()->Push(value);
+      BuildVariableAssignment(variable, value, expr->op(),
+                              expr->AssignmentId());
+      environment()->Pop();
+      break;
+    }
+    case NAMED_PROPERTY: {
+      Node* object = environment()->Pop();
+      Unique<Name> name =
+          MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+      Node* store =
+          NewNode(javascript()->StoreNamed(strict_mode(), name), object, value);
+      environment()->Push(value);
+      PrepareFrameState(store, expr->AssignmentId());
+      environment()->Pop();
+      break;
+    }
+    case KEYED_PROPERTY: {
+      Node* key = environment()->Pop();
+      Node* object = environment()->Pop();
+      Node* store = NewNode(javascript()->StoreProperty(strict_mode()), object,
+                            key, value);
+      environment()->Push(value);
+      PrepareFrameState(store, expr->AssignmentId());
+      environment()->Pop();
+      break;
+    }
+  }
+
+  // Restore old value for postfix expressions.
+  if (is_postfix) value = environment()->Pop();
+
+  ast_context()->ProduceValue(value);
+}
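+
+// Example (illustrative): for a postfix "x++" in a value context the stack
+// slot reserved above ends up holding the ToNumber-converted old value of x,
+// the store writes back old + 1, and the reserved slot is what the whole
+// expression produces.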
+
+
+void AstGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
+  switch (expr->op()) {
+    case Token::COMMA:
+      return VisitComma(expr);
+    case Token::OR:
+    case Token::AND:
+      return VisitLogicalExpression(expr);
+    default: {
+      VisitForValue(expr->left());
+      VisitForValue(expr->right());
+      Node* right = environment()->Pop();
+      Node* left = environment()->Pop();
+      Node* value = BuildBinaryOp(left, right, expr->op());
+      PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+      ast_context()->ProduceValue(value);
+    }
+  }
+}
+
+
+void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+  const Operator* op;
+  switch (expr->op()) {
+    case Token::EQ:
+      op = javascript()->Equal();
+      break;
+    case Token::NE:
+      op = javascript()->NotEqual();
+      break;
+    case Token::EQ_STRICT:
+      op = javascript()->StrictEqual();
+      break;
+    case Token::NE_STRICT:
+      op = javascript()->StrictNotEqual();
+      break;
+    case Token::LT:
+      op = javascript()->LessThan();
+      break;
+    case Token::GT:
+      op = javascript()->GreaterThan();
+      break;
+    case Token::LTE:
+      op = javascript()->LessThanOrEqual();
+      break;
+    case Token::GTE:
+      op = javascript()->GreaterThanOrEqual();
+      break;
+    case Token::INSTANCEOF:
+      op = javascript()->InstanceOf();
+      break;
+    case Token::IN:
+      op = javascript()->HasProperty();
+      break;
+    default:
+      op = NULL;
+      UNREACHABLE();
+  }
+  VisitForValue(expr->left());
+  VisitForValue(expr->right());
+  Node* right = environment()->Pop();
+  Node* left = environment()->Pop();
+  Node* value = NewNode(op, left, right);
+  PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitThisFunction(ThisFunction* expr) {
+  Node* value = GetFunctionClosure();
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitSuperReference(SuperReference* expr) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitCaseClause(CaseClause* expr) { UNREACHABLE(); }
+
+
+void AstGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
+  DCHECK(globals()->is_empty());
+  AstVisitor::VisitDeclarations(declarations);
+  if (globals()->is_empty()) return;
+  Handle<FixedArray> data =
+      isolate()->factory()->NewFixedArray(globals()->length(), TENURED);
+  for (int i = 0; i < globals()->length(); ++i) data->set(i, *globals()->at(i));
+  int encoded_flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
+                      DeclareGlobalsNativeFlag::encode(info()->is_native()) |
+                      DeclareGlobalsStrictMode::encode(strict_mode());
+  Node* flags = jsgraph()->Constant(encoded_flags);
+  Node* pairs = jsgraph()->Constant(data);
+  const Operator* op = javascript()->Runtime(Runtime::kDeclareGlobals, 3);
+  NewNode(op, current_context(), pairs, flags);
+  globals()->Rewind(0);
+}
+
+
+void AstGraphBuilder::VisitIfNotNull(Statement* stmt) {
+  if (stmt == NULL) return;
+  Visit(stmt);
+}
+
+
+void AstGraphBuilder::VisitIterationBody(IterationStatement* stmt,
+                                         LoopBuilder* loop, int drop_extra) {
+  BreakableScope scope(this, stmt, loop, drop_extra);
+  Visit(stmt->body());
+}
+
+
+void AstGraphBuilder::VisitDelete(UnaryOperation* expr) {
+  Node* value;
+  if (expr->expression()->IsVariableProxy()) {
+    // Delete of an unqualified identifier is only allowed in sloppy mode but
+    // deleting "this" is allowed in all language modes.
+    Variable* variable = expr->expression()->AsVariableProxy()->var();
+    DCHECK(strict_mode() == SLOPPY || variable->is_this());
+    value = BuildVariableDelete(variable);
+  } else if (expr->expression()->IsProperty()) {
+    Property* property = expr->expression()->AsProperty();
+    VisitForValue(property->obj());
+    VisitForValue(property->key());
+    Node* key = environment()->Pop();
+    Node* object = environment()->Pop();
+    value = NewNode(javascript()->DeleteProperty(strict_mode()), object, key);
+  } else {
+    VisitForEffect(expr->expression());
+    value = jsgraph()->TrueConstant();
+  }
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitVoid(UnaryOperation* expr) {
+  VisitForEffect(expr->expression());
+  Node* value = jsgraph()->UndefinedConstant();
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitTypeof(UnaryOperation* expr) {
+  Node* operand;
+  if (expr->expression()->IsVariableProxy()) {
+    // Typeof does not throw a reference error on global variables, hence we
+    // perform a non-contextual load in case the operand is a variable proxy.
+    Variable* variable = expr->expression()->AsVariableProxy()->var();
+    operand =
+        BuildVariableLoad(variable, expr->expression()->id(), NOT_CONTEXTUAL);
+  } else {
+    VisitForValue(expr->expression());
+    operand = environment()->Pop();
+  }
+  Node* value = NewNode(javascript()->TypeOf(), operand);
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitNot(UnaryOperation* expr) {
+  VisitForValue(expr->expression());
+  Node* operand = environment()->Pop();
+  // TODO(mstarzinger): Possible optimization when we are in effect context.
+  Node* value = NewNode(javascript()->UnaryNot(), operand);
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitComma(BinaryOperation* expr) {
+  VisitForEffect(expr->left());
+  Visit(expr->right());
+  ast_context()->ReplaceValue();
+}
+
+
+void AstGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
+  bool is_logical_and = expr->op() == Token::AND;
+  IfBuilder compare_if(this);
+  VisitForValue(expr->left());
+  Node* condition = environment()->Top();
+  compare_if.If(BuildToBoolean(condition));
+  compare_if.Then();
+  if (is_logical_and) {
+    environment()->Pop();
+    Visit(expr->right());
+  } else if (ast_context()->IsEffect()) {
+    environment()->Pop();
+  }
+  compare_if.Else();
+  if (!is_logical_and) {
+    environment()->Pop();
+    Visit(expr->right());
+  } else if (ast_context()->IsEffect()) {
+    environment()->Pop();
+  }
+  compare_if.End();
+  ast_context()->ReplaceValue();
+}
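+
+// Example (illustrative): for "a && b" the value of 'a' stays on the operand
+// stack; if it converts to true it is popped and 'b' is evaluated in the same
+// expression context, otherwise 'a' itself is the result (or is simply
+// dropped in an effect context).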
+
+
+Node* AstGraphBuilder::ProcessArguments(const Operator* op, int arity) {
+  DCHECK(environment()->stack_height() >= arity);
+  Node** all = info()->zone()->NewArray<Node*>(arity);
+  for (int i = arity - 1; i >= 0; --i) {
+    all[i] = environment()->Pop();
+  }
+  Node* value = NewNode(op, arity, all);
+  return value;
+}
+
+
+Node* AstGraphBuilder::BuildLocalFunctionContext(Node* context, Node* closure) {
+  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  if (heap_slots <= 0) return context;
+  set_current_context(context);
+
+  // Allocate a new local context.
+  const Operator* op = javascript()->CreateFunctionContext();
+  Node* local_context = NewNode(op, closure);
+  set_current_context(local_context);
+
+  // Copy parameters into context if necessary.
+  int num_parameters = info()->scope()->num_parameters();
+  for (int i = 0; i < num_parameters; i++) {
+    Variable* variable = info()->scope()->parameter(i);
+    if (!variable->IsContextSlot()) continue;
+    // Temporary parameter node. The parameter indices are shifted by 1
+    // (receiver is parameter index -1 but environment index 0).
+    Node* parameter = NewNode(common()->Parameter(i + 1), graph()->start());
+    // Context variable (at bottom of the context chain).
+    DCHECK_EQ(0, info()->scope()->ContextChainLength(variable->scope()));
+    const Operator* op = javascript()->StoreContext(0, variable->index());
+    NewNode(op, local_context, parameter);
+  }
+
+  return local_context;
+}
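+
+// Note: only parameters whose variables are context-allocated (e.g. because
+// an inner closure references them) are copied into the freshly allocated
+// function context above; all other parameters stay in the environment as
+// plain values.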
+
+
+Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
+  if (arguments == NULL) return NULL;
+
+  // Allocate and initialize a new arguments object.
+  Node* callee = GetFunctionClosure();
+  const Operator* op = javascript()->Runtime(Runtime::kNewArguments, 1);
+  Node* object = NewNode(op, callee);
+
+  // Assign the object to the arguments variable.
+  DCHECK(arguments->IsContextSlot() || arguments->IsStackAllocated());
+  // This should never lazy deopt, so passing an invalid bailout id is fine.
+  BuildVariableAssignment(arguments, object, Token::ASSIGN, BailoutId::None());
+
+  return object;
+}
+
+
+Node* AstGraphBuilder::BuildHoleCheckSilent(Node* value, Node* for_hole,
+                                            Node* not_hole) {
+  IfBuilder hole_check(this);
+  Node* the_hole = jsgraph()->TheHoleConstant();
+  Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
+  hole_check.If(check);
+  hole_check.Then();
+  environment()->Push(for_hole);
+  hole_check.Else();
+  environment()->Push(not_hole);
+  hole_check.End();
+  return environment()->Pop();
+}
+
+
+Node* AstGraphBuilder::BuildHoleCheckThrow(Node* value, Variable* variable,
+                                           Node* not_hole) {
+  IfBuilder hole_check(this);
+  Node* the_hole = jsgraph()->TheHoleConstant();
+  Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
+  hole_check.If(check);
+  hole_check.Then();
+  environment()->Push(BuildThrowReferenceError(variable));
+  hole_check.Else();
+  environment()->Push(not_hole);
+  hole_check.End();
+  return environment()->Pop();
+}
+
+
+Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
+                                         BailoutId bailout_id,
+                                         ContextualMode contextual_mode) {
+  Node* the_hole = jsgraph()->TheHoleConstant();
+  VariableMode mode = variable->mode();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      // Global var, const, or let variable.
+      Node* global = BuildLoadGlobalObject();
+      Unique<Name> name = MakeUnique(variable->name());
+      const Operator* op = javascript()->LoadNamed(name, contextual_mode);
+      Node* node = NewNode(op, global);
+      PrepareFrameState(node, bailout_id, kPushOutput);
+      return node;
+    }
+    case Variable::PARAMETER:
+    case Variable::LOCAL: {
+      // Local var, const, or let variable.
+      Node* value = environment()->Lookup(variable);
+      if (mode == CONST_LEGACY) {
+        // Perform check for uninitialized legacy const variables.
+        if (value->op() == the_hole->op()) {
+          value = jsgraph()->UndefinedConstant();
+        } else if (value->opcode() == IrOpcode::kPhi) {
+          Node* undefined = jsgraph()->UndefinedConstant();
+          value = BuildHoleCheckSilent(value, undefined, value);
+        }
+      } else if (mode == LET || mode == CONST) {
+        // Perform check for uninitialized let/const variables.
+        if (value->op() == the_hole->op()) {
+          value = BuildThrowReferenceError(variable);
+        } else if (value->opcode() == IrOpcode::kPhi) {
+          value = BuildHoleCheckThrow(value, variable, value);
+        }
+      }
+      return value;
+    }
+    case Variable::CONTEXT: {
+      // Context variable (potentially up the context chain).
+      int depth = current_scope()->ContextChainLength(variable->scope());
+      bool immutable = variable->maybe_assigned() == kNotAssigned;
+      const Operator* op =
+          javascript()->LoadContext(depth, variable->index(), immutable);
+      Node* value = NewNode(op, current_context());
+      // TODO(titzer): initialization checks are redundant for already
+      // initialized immutable context loads, but only specialization knows.
+      // Maybe specializer should be a parameter to the graph builder?
+      if (mode == CONST_LEGACY) {
+        // Perform check for uninitialized legacy const variables.
+        Node* undefined = jsgraph()->UndefinedConstant();
+        value = BuildHoleCheckSilent(value, undefined, value);
+      } else if (mode == LET || mode == CONST) {
+        // Perform check for uninitialized let/const variables.
+        value = BuildHoleCheckThrow(value, variable, value);
+      }
+      return value;
+    }
+    case Variable::LOOKUP: {
+      // Dynamic lookup of context variable (anywhere in the chain).
+      Node* name = jsgraph()->Constant(variable->name());
+      Runtime::FunctionId function_id =
+          (contextual_mode == CONTEXTUAL)
+              ? Runtime::kLoadLookupSlot
+              : Runtime::kLoadLookupSlotNoReferenceError;
+      const Operator* op = javascript()->Runtime(function_id, 2);
+      Node* pair = NewNode(op, current_context(), name);
+      return NewNode(common()->Projection(0), pair);
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
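+
+// Note on BuildVariableLoad above: loads of let/const locals that statically
+// see the hole constant (uses inside the temporal dead zone) are replaced by
+// a throwing reference-error fragment, uninitialized legacy 'const' silently
+// yields undefined, and phi values get a dynamic hole check instead.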
+
+
+Node* AstGraphBuilder::BuildVariableDelete(Variable* variable) {
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      // Global var, const, or let variable.
+      Node* global = BuildLoadGlobalObject();
+      Node* name = jsgraph()->Constant(variable->name());
+      const Operator* op = javascript()->DeleteProperty(strict_mode());
+      return NewNode(op, global, name);
+    }
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::CONTEXT:
+      // Local var, const, or let variable or context variable.
+      return variable->is_this() ? jsgraph()->TrueConstant()
+                                 : jsgraph()->FalseConstant();
+    case Variable::LOOKUP: {
+      // Dynamic lookup of context variable (anywhere in the chain).
+      Node* name = jsgraph()->Constant(variable->name());
+      const Operator* op = javascript()->Runtime(Runtime::kDeleteLookupSlot, 2);
+      return NewNode(op, current_context(), name);
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+Node* AstGraphBuilder::BuildVariableAssignment(Variable* variable, Node* value,
+                                               Token::Value op,
+                                               BailoutId bailout_id) {
+  Node* the_hole = jsgraph()->TheHoleConstant();
+  VariableMode mode = variable->mode();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      // Global var, const, or let variable.
+      Node* global = BuildLoadGlobalObject();
+      Unique<Name> name = MakeUnique(variable->name());
+      const Operator* op = javascript()->StoreNamed(strict_mode(), name);
+      Node* store = NewNode(op, global, value);
+      PrepareFrameState(store, bailout_id);
+      return store;
+    }
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+      // Local var, const, or let variable.
+      if (mode == CONST_LEGACY && op == Token::INIT_CONST_LEGACY) {
+        // Perform an initialization check for legacy const variables.
+        Node* current = environment()->Lookup(variable);
+        if (current->op() != the_hole->op()) {
+          value = BuildHoleCheckSilent(current, value, current);
+        }
+      } else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) {
+        // Non-initializing assignments to legacy const are ignored.
+        return value;
+      } else if (mode == LET && op != Token::INIT_LET) {
+        // Perform an initialization check for let declared variables.
+        // Also note that the dynamic hole-check is only done to ensure that
+        // this does not break in the presence of do-expressions within the
+        // temporal dead zone of a let declared variable.
+        Node* current = environment()->Lookup(variable);
+        if (current->op() == the_hole->op()) {
+          value = BuildThrowReferenceError(variable);
+        } else if (value->opcode() == IrOpcode::kPhi) {
+          value = BuildHoleCheckThrow(current, variable, value);
+        }
+      } else if (mode == CONST && op != Token::INIT_CONST) {
+        // All assignments to const variables are early errors.
+        UNREACHABLE();
+      }
+      environment()->Bind(variable, value);
+      return value;
+    case Variable::CONTEXT: {
+      // Context variable (potentially up the context chain).
+      int depth = current_scope()->ContextChainLength(variable->scope());
+      if (mode == CONST_LEGACY && op == Token::INIT_CONST_LEGACY) {
+        // Perform an initialization check for legacy const variables.
+        const Operator* op =
+            javascript()->LoadContext(depth, variable->index(), false);
+        Node* current = NewNode(op, current_context());
+        value = BuildHoleCheckSilent(current, value, current);
+      } else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) {
+        // Non-initializing assignments to legacy const are ignored.
+        return value;
+      } else if (mode == LET && op != Token::INIT_LET) {
+        // Perform an initialization check for let declared variables.
+        const Operator* op =
+            javascript()->LoadContext(depth, variable->index(), false);
+        Node* current = NewNode(op, current_context());
+        value = BuildHoleCheckThrow(current, variable, value);
+      } else if (mode == CONST && op != Token::INIT_CONST) {
+        // All assignments to const variables are early errors.
+        UNREACHABLE();
+      }
+      const Operator* op = javascript()->StoreContext(depth, variable->index());
+      return NewNode(op, current_context(), value);
+    }
+    case Variable::LOOKUP: {
+      // Dynamic lookup of context variable (anywhere in the chain).
+      Node* name = jsgraph()->Constant(variable->name());
+      Node* strict = jsgraph()->Constant(strict_mode());
+      // TODO(mstarzinger): Use Runtime::kInitializeLegacyConstLookupSlot for
+      // initializations of const declarations.
+      const Operator* op = javascript()->Runtime(Runtime::kStoreLookupSlot, 4);
+      return NewNode(op, value, current_context(), name, strict);
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+Node* AstGraphBuilder::BuildLoadObjectField(Node* object, int offset) {
+  // TODO(sigurds) Use simplified load here once it is ready.
+  Node* field_load = NewNode(jsgraph()->machine()->Load(kMachAnyTagged), object,
+                             jsgraph()->Int32Constant(offset - kHeapObjectTag));
+  return field_load;
+}
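+
+// Note: the "- kHeapObjectTag" above compensates for the tag encoded in
+// tagged heap object pointers, so the machine-level Load uses the real field
+// offset.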
+
+
+Node* AstGraphBuilder::BuildLoadBuiltinsObject() {
+  Node* global = BuildLoadGlobalObject();
+  Node* builtins =
+      BuildLoadObjectField(global, JSGlobalObject::kBuiltinsOffset);
+  return builtins;
+}
+
+
+Node* AstGraphBuilder::BuildLoadGlobalObject() {
+  Node* context = GetFunctionContext();
+  const Operator* load_op =
+      javascript()->LoadContext(0, Context::GLOBAL_OBJECT_INDEX, true);
+  return NewNode(load_op, context);
+}
+
+
+Node* AstGraphBuilder::BuildToBoolean(Node* value) {
+  // TODO(mstarzinger): Possible optimization is to NOP for boolean values.
+  return NewNode(javascript()->ToBoolean(), value);
+}
+
+
+Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable) {
+  // TODO(mstarzinger): Should be unified with the VisitThrow implementation.
+  Node* variable_name = jsgraph()->Constant(variable->name());
+  const Operator* op = javascript()->Runtime(Runtime::kThrowReferenceError, 1);
+  return NewNode(op, variable_name);
+}
+
+
+Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op) {
+  const Operator* js_op;
+  switch (op) {
+    case Token::BIT_OR:
+      js_op = javascript()->BitwiseOr();
+      break;
+    case Token::BIT_AND:
+      js_op = javascript()->BitwiseAnd();
+      break;
+    case Token::BIT_XOR:
+      js_op = javascript()->BitwiseXor();
+      break;
+    case Token::SHL:
+      js_op = javascript()->ShiftLeft();
+      break;
+    case Token::SAR:
+      js_op = javascript()->ShiftRight();
+      break;
+    case Token::SHR:
+      js_op = javascript()->ShiftRightLogical();
+      break;
+    case Token::ADD:
+      js_op = javascript()->Add();
+      break;
+    case Token::SUB:
+      js_op = javascript()->Subtract();
+      break;
+    case Token::MUL:
+      js_op = javascript()->Multiply();
+      break;
+    case Token::DIV:
+      js_op = javascript()->Divide();
+      break;
+    case Token::MOD:
+      js_op = javascript()->Modulus();
+      break;
+    default:
+      UNREACHABLE();
+      js_op = NULL;
+  }
+  return NewNode(js_op, left, right);
+}
+
+
+void AstGraphBuilder::PrepareFrameState(Node* node, BailoutId ast_id,
+                                        OutputFrameStateCombine combine) {
+  if (OperatorProperties::HasFrameStateInput(node->op())) {
+    DCHECK(NodeProperties::GetFrameStateInput(node)->opcode() ==
+           IrOpcode::kDead);
+    NodeProperties::ReplaceFrameStateInput(
+        node, environment()->Checkpoint(ast_id, combine));
+  }
+}
+
+}
+}
+}  // namespace v8::internal::compiler
diff --git a/src/compiler/ast-graph-builder.h b/src/compiler/ast-graph-builder.h
new file mode 100644
index 0000000..6a7e3db
--- /dev/null
+++ b/src/compiler/ast-graph-builder.h
@@ -0,0 +1,430 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_AST_GRAPH_BUILDER_H_
+#define V8_COMPILER_AST_GRAPH_BUILDER_H_
+
+#include "src/v8.h"
+
+#include "src/ast.h"
+#include "src/compiler/graph-builder.h"
+#include "src/compiler/js-graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class ControlBuilder;
+class LoopBuilder;
+class Graph;
+
+// The AstGraphBuilder produces a high-level IR graph, based on an
+// underlying AST. The produced graph can either be compiled into a
+// stand-alone function or be wired into another graph for the purposes
+// of function inlining.
+class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
+ public:
+  AstGraphBuilder(CompilationInfo* info, JSGraph* jsgraph);
+
+  // Creates a graph by visiting the entire AST.
+  bool CreateGraph();
+
+ protected:
+  class AstContext;
+  class AstEffectContext;
+  class AstValueContext;
+  class AstTestContext;
+  class BreakableScope;
+  class ContextScope;
+  class Environment;
+
+  Environment* environment() {
+    return reinterpret_cast<Environment*>(
+        StructuredGraphBuilder::environment());
+  }
+
+  AstContext* ast_context() const { return ast_context_; }
+  BreakableScope* breakable() const { return breakable_; }
+  ContextScope* execution_context() const { return execution_context_; }
+
+  void set_ast_context(AstContext* ctx) { ast_context_ = ctx; }
+  void set_breakable(BreakableScope* brk) { breakable_ = brk; }
+  void set_execution_context(ContextScope* ctx) { execution_context_ = ctx; }
+
+  // Support for control flow builders. The concrete type of the environment
+  // depends on the graph builder, but environments themselves are not virtual.
+  typedef StructuredGraphBuilder::Environment BaseEnvironment;
+  virtual BaseEnvironment* CopyEnvironment(BaseEnvironment* env);
+
+  // TODO(mstarzinger): The pipeline only needs to be a friend to access the
+  // function context. Remove as soon as the context is a parameter.
+  friend class Pipeline;
+
+  // Getters for values in the activation record.
+  Node* GetFunctionClosure();
+  Node* GetFunctionContext();
+
+  //
+  // The following build methods all generate graph fragments and return one
+  // resulting node. The operand stack height remains the same, variables and
+  // other dependencies tracked by the environment might be mutated though.
+  //
+
+  // Builder to create a local function context.
+  Node* BuildLocalFunctionContext(Node* context, Node* closure);
+
+  // Builder to create an arguments object if it is used.
+  Node* BuildArgumentsObject(Variable* arguments);
+
+  // Builders for variable load and assignment.
+  Node* BuildVariableAssignment(Variable* var, Node* value, Token::Value op,
+                                BailoutId bailout_id);
+  Node* BuildVariableDelete(Variable* var);
+  Node* BuildVariableLoad(Variable* var, BailoutId bailout_id,
+                          ContextualMode mode = CONTEXTUAL);
+
+  // Builders for accessing the function context.
+  Node* BuildLoadBuiltinsObject();
+  Node* BuildLoadGlobalObject();
+  Node* BuildLoadClosure();
+  Node* BuildLoadObjectField(Node* object, int offset);
+
+  // Builders for automatic type conversion.
+  Node* BuildToBoolean(Node* value);
+
+  // Builders for error reporting at runtime.
+  Node* BuildThrowReferenceError(Variable* var);
+
+  // Builders for dynamic hole-checks at runtime.
+  Node* BuildHoleCheckSilent(Node* value, Node* for_hole, Node* not_hole);
+  Node* BuildHoleCheckThrow(Node* value, Variable* var, Node* not_hole);
+
+  // Builders for binary operations.
+  Node* BuildBinaryOp(Node* left, Node* right, Token::Value op);
+
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  // Visiting functions for AST nodes make this an AstVisitor.
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  // Visiting function for declarations list is overridden.
+  virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
+
+ private:
+  CompilationInfo* info_;
+  AstContext* ast_context_;
+  JSGraph* jsgraph_;
+
+  // List of global declarations for functions and variables.
+  ZoneList<Handle<Object> > globals_;
+
+  // Stack of breakable statements entered by the visitor.
+  BreakableScope* breakable_;
+
+  // Stack of context objects pushed onto the chain by the visitor.
+  ContextScope* execution_context_;
+
+  // Nodes representing values in the activation record.
+  SetOncePointer<Node> function_closure_;
+  SetOncePointer<Node> function_context_;
+
+  CompilationInfo* info() { return info_; }
+  StrictMode strict_mode() { return info()->strict_mode(); }
+  JSGraph* jsgraph() { return jsgraph_; }
+  JSOperatorBuilder* javascript() { return jsgraph_->javascript(); }
+  ZoneList<Handle<Object> >* globals() { return &globals_; }
+
+  // Current scope during visitation.
+  inline Scope* current_scope() const;
+
+  // Process arguments to a call by popping {arity} elements off the operand
+  // stack and building a call node using the given call operator.
+  Node* ProcessArguments(const Operator* op, int arity);
+
+  // Visit statements.
+  void VisitIfNotNull(Statement* stmt);
+
+  // Visit expressions.
+  void VisitForTest(Expression* expr);
+  void VisitForEffect(Expression* expr);
+  void VisitForValue(Expression* expr);
+  void VisitForValueOrNull(Expression* expr);
+  void VisitForValues(ZoneList<Expression*>* exprs);
+
+  // Common for all IterationStatement bodies.
+  void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop, int);
+
+  // Dispatched from VisitCallRuntime.
+  void VisitCallJSRuntime(CallRuntime* expr);
+
+  // Dispatched from VisitUnaryOperation.
+  void VisitDelete(UnaryOperation* expr);
+  void VisitVoid(UnaryOperation* expr);
+  void VisitTypeof(UnaryOperation* expr);
+  void VisitNot(UnaryOperation* expr);
+
+  // Dispatched from VisitBinaryOperation.
+  void VisitComma(BinaryOperation* expr);
+  void VisitLogicalExpression(BinaryOperation* expr);
+  void VisitArithmeticExpression(BinaryOperation* expr);
+
+  // Dispatched from VisitForInStatement.
+  void VisitForInAssignment(Expression* expr, Node* value);
+
+  // Builds a deoptimization frame state for the given node.
+  void PrepareFrameState(Node* node, BailoutId ast_id,
+                         OutputFrameStateCombine combine = kIgnoreOutput);
+
+  OutputFrameStateCombine StateCombineFromAstContext();
+
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+  DISALLOW_COPY_AND_ASSIGN(AstGraphBuilder);
+};
+
+
+// The abstract execution environment for generated code consists of
+// parameter variables, local variables and the operand stack. The
+// environment will perform proper SSA-renaming of all tracked nodes
+// at split and merge points in the control flow. Internally all the
+// values are stored in one list using the following layout:
+//
+//  [parameters (+receiver)] [locals] [operand stack]
+//
+class AstGraphBuilder::Environment
+    : public StructuredGraphBuilder::Environment {
+ public:
+  Environment(AstGraphBuilder* builder, Scope* scope, Node* control_dependency);
+  Environment(const Environment& copy);
+
+  int parameters_count() const { return parameters_count_; }
+  int locals_count() const { return locals_count_; }
+  int stack_height() {
+    return static_cast<int>(values()->size()) - parameters_count_ -
+           locals_count_;
+  }
+
+  // Operations on parameter or local variables. The parameter indices are
+  // shifted by 1 (receiver is parameter index -1 but environment index 0).
+  void Bind(Variable* variable, Node* node) {
+    DCHECK(variable->IsStackAllocated());
+    if (variable->IsParameter()) {
+      values()->at(variable->index() + 1) = node;
+    } else {
+      DCHECK(variable->IsStackLocal());
+      values()->at(variable->index() + parameters_count_) = node;
+    }
+  }
+  Node* Lookup(Variable* variable) {
+    DCHECK(variable->IsStackAllocated());
+    if (variable->IsParameter()) {
+      return values()->at(variable->index() + 1);
+    } else {
+      DCHECK(variable->IsStackLocal());
+      return values()->at(variable->index() + parameters_count_);
+    }
+  }
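+
+  // Example (illustrative, assuming parameters_count_ includes the receiver):
+  // with two parameters and three stack locals the values list is laid out as
+  // [receiver, p0, p1, l0, l1, l2, <operand stack>], so parameter i lives at
+  // index i + 1 and stack local j at index j + parameters_count_.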
+
+  // Operations on the operand stack.
+  void Push(Node* node) {
+    values()->push_back(node);
+  }
+  Node* Top() {
+    DCHECK(stack_height() > 0);
+    return values()->back();
+  }
+  Node* Pop() {
+    DCHECK(stack_height() > 0);
+    Node* back = values()->back();
+    values()->pop_back();
+    return back;
+  }
+
+  // Direct mutations of the operand stack.
+  void Poke(int depth, Node* node) {
+    DCHECK(depth >= 0 && depth < stack_height());
+    int index = static_cast<int>(values()->size()) - depth - 1;
+    values()->at(index) = node;
+  }
+  Node* Peek(int depth) {
+    DCHECK(depth >= 0 && depth < stack_height());
+    int index = static_cast<int>(values()->size()) - depth - 1;
+    return values()->at(index);
+  }
+  void Drop(int depth) {
+    DCHECK(depth >= 0 && depth <= stack_height());
+    values()->erase(values()->end() - depth, values()->end());
+  }
+
+  // Preserve a checkpoint of the environment for the IR graph. Any
+  // further mutation of the environment will not affect checkpoints.
+  Node* Checkpoint(BailoutId ast_id, OutputFrameStateCombine combine);
+
+ protected:
+  AstGraphBuilder* builder() const {
+    return reinterpret_cast<AstGraphBuilder*>(
+        StructuredGraphBuilder::Environment::builder());
+  }
+
+ private:
+  void UpdateStateValues(Node** state_values, int offset, int count);
+
+  int parameters_count_;
+  int locals_count_;
+  Node* parameters_node_;
+  Node* locals_node_;
+  Node* stack_node_;
+};
+
+
+// Each expression in the AST is evaluated in a specific context. This context
+// decides how the evaluation result is passed up the visitor.
+class AstGraphBuilder::AstContext BASE_EMBEDDED {
+ public:
+  bool IsEffect() const { return kind_ == Expression::kEffect; }
+  bool IsValue() const { return kind_ == Expression::kValue; }
+  bool IsTest() const { return kind_ == Expression::kTest; }
+
+  // Determines how to combine the frame state with the value
+  // that is about to be plugged into this AstContext.
+  OutputFrameStateCombine GetStateCombine() {
+    return IsEffect() ? kIgnoreOutput : kPushOutput;
+  }
+
+  // Plug a node into this expression context.  Call this function in tail
+  // position in the Visit functions for expressions.
+  virtual void ProduceValue(Node* value) = 0;
+
+  // Unplugs a node from this expression context.  Call this to retrieve the
+  // result of another Visit function that already plugged the context.
+  virtual Node* ConsumeValue() = 0;
+
+  // Shortcut for "context->ProduceValue(context->ConsumeValue())".
+  void ReplaceValue() { ProduceValue(ConsumeValue()); }
+
+ protected:
+  AstContext(AstGraphBuilder* owner, Expression::Context kind);
+  virtual ~AstContext();
+
+  AstGraphBuilder* owner() const { return owner_; }
+  Environment* environment() const { return owner_->environment(); }
+
+// We want to be able to assert, in a context-specific way, that the stack
+// height makes sense when the context is filled.
+#ifdef DEBUG
+  int original_height_;
+#endif
+
+ private:
+  Expression::Context kind_;
+  AstGraphBuilder* owner_;
+  AstContext* outer_;
+};
+
+
+// Context to evaluate expression for its side effects only.
+class AstGraphBuilder::AstEffectContext FINAL : public AstContext {
+ public:
+  explicit AstEffectContext(AstGraphBuilder* owner)
+      : AstContext(owner, Expression::kEffect) {}
+  virtual ~AstEffectContext();
+  virtual void ProduceValue(Node* value) OVERRIDE;
+  virtual Node* ConsumeValue() OVERRIDE;
+};
+
+
+// Context to evaluate expression for its value (and side effects).
+class AstGraphBuilder::AstValueContext FINAL : public AstContext {
+ public:
+  explicit AstValueContext(AstGraphBuilder* owner)
+      : AstContext(owner, Expression::kValue) {}
+  virtual ~AstValueContext();
+  virtual void ProduceValue(Node* value) OVERRIDE;
+  virtual Node* ConsumeValue() OVERRIDE;
+};
+
+
+// Context to evaluate expression for a condition value (and side effects).
+class AstGraphBuilder::AstTestContext FINAL : public AstContext {
+ public:
+  explicit AstTestContext(AstGraphBuilder* owner)
+      : AstContext(owner, Expression::kTest) {}
+  virtual ~AstTestContext();
+  virtual void ProduceValue(Node* value) OVERRIDE;
+  virtual Node* ConsumeValue() OVERRIDE;
+};
+
+
+// Scoped class tracking breakable statements entered by the visitor. Allows
+// the visitor to properly 'break' and 'continue' iteration statements as well
+// as to 'break' out of blocks within switch statements.
+class AstGraphBuilder::BreakableScope BASE_EMBEDDED {
+ public:
+  BreakableScope(AstGraphBuilder* owner, BreakableStatement* target,
+                 ControlBuilder* control, int drop_extra)
+      : owner_(owner),
+        target_(target),
+        next_(owner->breakable()),
+        control_(control),
+        drop_extra_(drop_extra) {
+    owner_->set_breakable(this);  // Push.
+  }
+
+  ~BreakableScope() {
+    owner_->set_breakable(next_);  // Pop.
+  }
+
+  // Either 'break' or 'continue' the target statement.
+  void BreakTarget(BreakableStatement* target);
+  void ContinueTarget(BreakableStatement* target);
+
+ private:
+  AstGraphBuilder* owner_;
+  BreakableStatement* target_;
+  BreakableScope* next_;
+  ControlBuilder* control_;
+  int drop_extra_;
+
+  // Find the correct scope for the target statement. Note that this also drops
+  // extra operands from the environment for each scope skipped along the way.
+  BreakableScope* FindBreakable(BreakableStatement* target);
+};
+
+
+// Scoped class tracking context objects created by the visitor. Represents
+// mutations of the context chain within the function body and allows the
+// current {scope} and {context} to be changed during visitation.
+class AstGraphBuilder::ContextScope BASE_EMBEDDED {
+ public:
+  ContextScope(AstGraphBuilder* owner, Scope* scope, Node* context)
+      : owner_(owner),
+        next_(owner->execution_context()),
+        outer_(owner->current_context()),
+        scope_(scope) {
+    owner_->set_execution_context(this);  // Push.
+    owner_->set_current_context(context);
+  }
+
+  ~ContextScope() {
+    owner_->set_execution_context(next_);  // Pop.
+    owner_->set_current_context(outer_);
+  }
+
+  // Current scope during visitation.
+  Scope* scope() const { return scope_; }
+
+ private:
+  AstGraphBuilder* owner_;
+  ContextScope* next_;
+  Node* outer_;
+  Scope* scope_;
+};
+
+Scope* AstGraphBuilder::current_scope() const {
+  return execution_context_->scope();
+}
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_AST_GRAPH_BUILDER_H_
diff --git a/src/compiler/change-lowering-unittest.cc b/src/compiler/change-lowering-unittest.cc
new file mode 100644
index 0000000..994027a
--- /dev/null
+++ b/src/compiler/change-lowering-unittest.cc
@@ -0,0 +1,476 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/change-lowering.h"
+#include "src/compiler/compiler-test-utils.h"
+#include "src/compiler/graph-unittest.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/typer.h"
+#include "testing/gmock-support.h"
+
+using testing::_;
+using testing::AllOf;
+using testing::Capture;
+using testing::CaptureEq;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// TODO(bmeurer): Find a new home for these functions.
+inline std::ostream& operator<<(std::ostream& os, const MachineType& type) {
+  OStringStream ost;
+  ost << type;
+  return os << ost.c_str();
+}
+
+
+class ChangeLoweringTest : public GraphTest {
+ public:
+  ChangeLoweringTest() : simplified_(zone()) {}
+  virtual ~ChangeLoweringTest() {}
+
+  virtual MachineType WordRepresentation() const = 0;
+
+ protected:
+  int HeapNumberValueOffset() const {
+    STATIC_ASSERT(HeapNumber::kValueOffset % kApiPointerSize == 0);
+    return (HeapNumber::kValueOffset / kApiPointerSize) * PointerSize() -
+           kHeapObjectTag;
+  }
+  bool Is32() const { return WordRepresentation() == kRepWord32; }
+  int PointerSize() const {
+    switch (WordRepresentation()) {
+      case kRepWord32:
+        return 4;
+      case kRepWord64:
+        return 8;
+      default:
+        break;
+    }
+    UNREACHABLE();
+    return 0;
+  }
+  int SmiMaxValue() const { return -(SmiMinValue() + 1); }
+  int SmiMinValue() const {
+    return static_cast<int>(0xffffffffu << (SmiValueSize() - 1));
+  }
+  int SmiShiftAmount() const { return kSmiTagSize + SmiShiftSize(); }
+  int SmiShiftSize() const {
+    return Is32() ? SmiTagging<4>::SmiShiftSize()
+                  : SmiTagging<8>::SmiShiftSize();
+  }
+  int SmiValueSize() const {
+    return Is32() ? SmiTagging<4>::SmiValueSize()
+                  : SmiTagging<8>::SmiValueSize();
+  }
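+
+  // Illustrative values, assuming the standard V8 Smi encoding: a 32-bit word
+  // representation yields a 31-bit Smi value shifted by 1, while a 64-bit
+  // representation yields a 32-bit Smi value shifted by 32.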
+
+  Node* Parameter(int32_t index = 0) {
+    return graph()->NewNode(common()->Parameter(index), graph()->start());
+  }
+
+  Reduction Reduce(Node* node) {
+    Typer typer(zone());
+    MachineOperatorBuilder machine(WordRepresentation());
+    JSOperatorBuilder javascript(zone());
+    JSGraph jsgraph(graph(), common(), &javascript, &typer, &machine);
+    CompilationInfo info(isolate(), zone());
+    Linkage linkage(&info);
+    ChangeLowering reducer(&jsgraph, &linkage);
+    return reducer.Reduce(node);
+  }
+
+  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+  Matcher<Node*> IsAllocateHeapNumber(const Matcher<Node*>& effect_matcher,
+                                      const Matcher<Node*>& control_matcher) {
+    return IsCall(
+        _, IsHeapConstant(Unique<HeapObject>::CreateImmovable(
+               CEntryStub(isolate(), 1).GetCode())),
+        IsExternalConstant(ExternalReference(
+            Runtime::FunctionForId(Runtime::kAllocateHeapNumber), isolate())),
+        IsInt32Constant(0), IsNumberConstant(0.0), effect_matcher,
+        control_matcher);
+  }
+  Matcher<Node*> IsWordEqual(const Matcher<Node*>& lhs_matcher,
+                             const Matcher<Node*>& rhs_matcher) {
+    return Is32() ? IsWord32Equal(lhs_matcher, rhs_matcher)
+                  : IsWord64Equal(lhs_matcher, rhs_matcher);
+  }
+
+ private:
+  SimplifiedOperatorBuilder simplified_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Common.
+
+
+class ChangeLoweringCommonTest
+    : public ChangeLoweringTest,
+      public ::testing::WithParamInterface<MachineType> {
+ public:
+  virtual ~ChangeLoweringCommonTest() {}
+
+  virtual MachineType WordRepresentation() const FINAL OVERRIDE {
+    return GetParam();
+  }
+};
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeBitToBool) {
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeBitToBool(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  Node* phi = reduction.replacement();
+  Capture<Node*> branch;
+  EXPECT_THAT(phi,
+              IsPhi(static_cast<MachineType>(kTypeBool | kRepTagged),
+                    IsTrueConstant(), IsFalseConstant(),
+                    IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
+                                           IsBranch(val, graph()->start()))),
+                            IsIfFalse(CaptureEq(&branch)))));
+}
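+
+// Note: the expectation above mirrors the lowering of ChangeBitToBool into a
+// branch on the bit value whose phi selects between the true and false
+// constants.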
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeBoolToBit) {
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeBoolToBit(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  EXPECT_THAT(reduction.replacement(), IsWordEqual(val, IsTrueConstant()));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeFloat64ToTagged) {
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeFloat64ToTagged(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  Node* finish = reduction.replacement();
+  Capture<Node*> heap_number;
+  EXPECT_THAT(
+      finish,
+      IsFinish(
+          AllOf(CaptureEq(&heap_number),
+                IsAllocateHeapNumber(IsValueEffect(val), graph()->start())),
+          IsStore(kMachFloat64, kNoWriteBarrier, CaptureEq(&heap_number),
+                  IsInt32Constant(HeapNumberValueOffset()), val,
+                  CaptureEq(&heap_number), graph()->start())));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, StringAdd) {
+  Node* node =
+      graph()->NewNode(simplified()->StringAdd(), Parameter(0), Parameter(1));
+  Reduction reduction = Reduce(node);
+  EXPECT_FALSE(reduction.Changed());
+}
+
+
+INSTANTIATE_TEST_CASE_P(ChangeLoweringTest, ChangeLoweringCommonTest,
+                        ::testing::Values(kRepWord32, kRepWord64));
+
+
+// -----------------------------------------------------------------------------
+// 32-bit
+
+
+class ChangeLowering32Test : public ChangeLoweringTest {
+ public:
+  virtual ~ChangeLowering32Test() {}
+  virtual MachineType WordRepresentation() const FINAL OVERRIDE {
+    return kRepWord32;
+  }
+};
+
+
+TARGET_TEST_F(ChangeLowering32Test, ChangeInt32ToTagged) {
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  Node* phi = reduction.replacement();
+  Capture<Node*> add, branch, heap_number, if_true;
+  EXPECT_THAT(
+      phi,
+      IsPhi(kMachAnyTagged,
+            IsFinish(
+                AllOf(CaptureEq(&heap_number),
+                      IsAllocateHeapNumber(_, CaptureEq(&if_true))),
+                IsStore(kMachFloat64, kNoWriteBarrier, CaptureEq(&heap_number),
+                        IsInt32Constant(HeapNumberValueOffset()),
+                        IsChangeInt32ToFloat64(val), CaptureEq(&heap_number),
+                        CaptureEq(&if_true))),
+            IsProjection(
+                0, AllOf(CaptureEq(&add), IsInt32AddWithOverflow(val, val))),
+            IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+                    IsIfFalse(AllOf(CaptureEq(&branch),
+                                    IsBranch(IsProjection(1, CaptureEq(&add)),
+                                             graph()->start()))))));
+}
+
+
+TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToFloat64) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  Node* phi = reduction.replacement();
+  Capture<Node*> branch, if_true;
+  EXPECT_THAT(
+      phi,
+      IsPhi(
+          kMachFloat64,
+          IsLoad(kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
+                 IsControlEffect(CaptureEq(&if_true))),
+          IsChangeInt32ToFloat64(
+              IsWord32Sar(val, IsInt32Constant(SmiShiftAmount()))),
+          IsMerge(
+              AllOf(CaptureEq(&if_true),
+                    IsIfTrue(AllOf(
+                        CaptureEq(&branch),
+                        IsBranch(IsWord32And(val, IsInt32Constant(kSmiTagMask)),
+                                 graph()->start())))),
+              IsIfFalse(CaptureEq(&branch)))));
+}
+
+
+TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToInt32) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeTaggedToInt32(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  Node* phi = reduction.replacement();
+  Capture<Node*> branch, if_true;
+  EXPECT_THAT(
+      phi,
+      IsPhi(kMachInt32,
+            IsChangeFloat64ToInt32(IsLoad(
+                kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
+                IsControlEffect(CaptureEq(&if_true)))),
+            IsWord32Sar(val, IsInt32Constant(SmiShiftAmount())),
+            IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+                    IsIfFalse(AllOf(
+                        CaptureEq(&branch),
+                        IsBranch(IsWord32And(val, IsInt32Constant(kSmiTagMask)),
+                                 graph()->start()))))));
+}
+
+
+TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToUint32) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeTaggedToUint32(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  Node* phi = reduction.replacement();
+  Capture<Node*> branch, if_true;
+  EXPECT_THAT(
+      phi,
+      IsPhi(kMachUint32,
+            IsChangeFloat64ToUint32(IsLoad(
+                kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
+                IsControlEffect(CaptureEq(&if_true)))),
+            IsWord32Sar(val, IsInt32Constant(SmiShiftAmount())),
+            IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+                    IsIfFalse(AllOf(
+                        CaptureEq(&branch),
+                        IsBranch(IsWord32And(val, IsInt32Constant(kSmiTagMask)),
+                                 graph()->start()))))));
+}
+
+
+TARGET_TEST_F(ChangeLowering32Test, ChangeUint32ToTagged) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeUint32ToTagged(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  Node* phi = reduction.replacement();
+  Capture<Node*> branch, heap_number, if_false;
+  EXPECT_THAT(
+      phi,
+      IsPhi(
+          kMachAnyTagged, IsWord32Shl(val, IsInt32Constant(SmiShiftAmount())),
+          IsFinish(
+              AllOf(CaptureEq(&heap_number),
+                    IsAllocateHeapNumber(_, CaptureEq(&if_false))),
+              IsStore(kMachFloat64, kNoWriteBarrier, CaptureEq(&heap_number),
+                      IsInt32Constant(HeapNumberValueOffset()),
+                      IsChangeUint32ToFloat64(val), CaptureEq(&heap_number),
+                      CaptureEq(&if_false))),
+          IsMerge(
+              IsIfTrue(AllOf(CaptureEq(&branch),
+                             IsBranch(IsUint32LessThanOrEqual(
+                                          val, IsInt32Constant(SmiMaxValue())),
+                                      graph()->start()))),
+              AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
+}
+
+
+// -----------------------------------------------------------------------------
+// 64-bit
+
+
+class ChangeLowering64Test : public ChangeLoweringTest {
+ public:
+  virtual ~ChangeLowering64Test() {}
+  virtual MachineType WordRepresentation() const FINAL OVERRIDE {
+    return kRepWord64;
+  }
+};
+
+
+TARGET_TEST_F(ChangeLowering64Test, ChangeInt32ToTagged) {
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  EXPECT_THAT(reduction.replacement(),
+              IsWord64Shl(IsChangeInt32ToInt64(val),
+                          IsInt32Constant(SmiShiftAmount())));
+}
+
+
+TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToFloat64) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  Node* phi = reduction.replacement();
+  Capture<Node*> branch, if_true;
+  EXPECT_THAT(
+      phi,
+      IsPhi(
+          kMachFloat64,
+          IsLoad(kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
+                 IsControlEffect(CaptureEq(&if_true))),
+          IsChangeInt32ToFloat64(IsTruncateInt64ToInt32(
+              IsWord64Sar(val, IsInt32Constant(SmiShiftAmount())))),
+          IsMerge(
+              AllOf(CaptureEq(&if_true),
+                    IsIfTrue(AllOf(
+                        CaptureEq(&branch),
+                        IsBranch(IsWord64And(val, IsInt32Constant(kSmiTagMask)),
+                                 graph()->start())))),
+              IsIfFalse(CaptureEq(&branch)))));
+}
+
+
+TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToInt32) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeTaggedToInt32(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  Node* phi = reduction.replacement();
+  Capture<Node*> branch, if_true;
+  EXPECT_THAT(
+      phi,
+      IsPhi(kMachInt32,
+            IsChangeFloat64ToInt32(IsLoad(
+                kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
+                IsControlEffect(CaptureEq(&if_true)))),
+            IsTruncateInt64ToInt32(
+                IsWord64Sar(val, IsInt32Constant(SmiShiftAmount()))),
+            IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+                    IsIfFalse(AllOf(
+                        CaptureEq(&branch),
+                        IsBranch(IsWord64And(val, IsInt32Constant(kSmiTagMask)),
+                                 graph()->start()))))));
+}
+
+
+TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToUint32) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeTaggedToUint32(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  Node* phi = reduction.replacement();
+  Capture<Node*> branch, if_true;
+  EXPECT_THAT(
+      phi,
+      IsPhi(kMachUint32,
+            IsChangeFloat64ToUint32(IsLoad(
+                kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
+                IsControlEffect(CaptureEq(&if_true)))),
+            IsTruncateInt64ToInt32(
+                IsWord64Sar(val, IsInt32Constant(SmiShiftAmount()))),
+            IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+                    IsIfFalse(AllOf(
+                        CaptureEq(&branch),
+                        IsBranch(IsWord64And(val, IsInt32Constant(kSmiTagMask)),
+                                 graph()->start()))))));
+}
+
+
+TARGET_TEST_F(ChangeLowering64Test, ChangeUint32ToTagged) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeUint32ToTagged(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  Node* phi = reduction.replacement();
+  Capture<Node*> branch, heap_number, if_false;
+  EXPECT_THAT(
+      phi,
+      IsPhi(
+          kMachAnyTagged, IsWord64Shl(IsChangeUint32ToUint64(val),
+                                      IsInt32Constant(SmiShiftAmount())),
+          IsFinish(
+              AllOf(CaptureEq(&heap_number),
+                    IsAllocateHeapNumber(_, CaptureEq(&if_false))),
+              IsStore(kMachFloat64, kNoWriteBarrier, CaptureEq(&heap_number),
+                      IsInt32Constant(HeapNumberValueOffset()),
+                      IsChangeUint32ToFloat64(val), CaptureEq(&heap_number),
+                      CaptureEq(&if_false))),
+          IsMerge(
+              IsIfTrue(AllOf(CaptureEq(&branch),
+                             IsBranch(IsUint32LessThanOrEqual(
+                                          val, IsInt32Constant(SmiMaxValue())),
+                                      graph()->start()))),
+              AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/change-lowering.cc b/src/compiler/change-lowering.cc
new file mode 100644
index 0000000..b13db4c
--- /dev/null
+++ b/src/compiler/change-lowering.cc
@@ -0,0 +1,256 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/change-lowering.h"
+#include "src/compiler/machine-operator.h"
+
+#include "src/compiler/js-graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+ChangeLowering::~ChangeLowering() {}
+
+
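+// Dispatches on the Change* opcode and lowers the node to machine-level
+// operations; all newly created control flow is anchored at the graph's
+// start node.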
+Reduction ChangeLowering::Reduce(Node* node) {
+  Node* control = graph()->start();
+  switch (node->opcode()) {
+    case IrOpcode::kChangeBitToBool:
+      return ChangeBitToBool(node->InputAt(0), control);
+    case IrOpcode::kChangeBoolToBit:
+      return ChangeBoolToBit(node->InputAt(0));
+    case IrOpcode::kChangeFloat64ToTagged:
+      return ChangeFloat64ToTagged(node->InputAt(0), control);
+    case IrOpcode::kChangeInt32ToTagged:
+      return ChangeInt32ToTagged(node->InputAt(0), control);
+    case IrOpcode::kChangeTaggedToFloat64:
+      return ChangeTaggedToFloat64(node->InputAt(0), control);
+    case IrOpcode::kChangeTaggedToInt32:
+      return ChangeTaggedToUI32(node->InputAt(0), control, kSigned);
+    case IrOpcode::kChangeTaggedToUint32:
+      return ChangeTaggedToUI32(node->InputAt(0), control, kUnsigned);
+    case IrOpcode::kChangeUint32ToTagged:
+      return ChangeUint32ToTagged(node->InputAt(0), control);
+    default:
+      return NoChange();
+  }
+  UNREACHABLE();
+  return NoChange();
+}
+
+
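+// Byte offset of the float64 payload inside a HeapNumber, with the heap
+// object tag subtracted so it can be used directly by untagged machine
+// loads and stores.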
+Node* ChangeLowering::HeapNumberValueIndexConstant() {
+  STATIC_ASSERT(HeapNumber::kValueOffset % kPointerSize == 0);
+  const int heap_number_value_offset =
+      ((HeapNumber::kValueOffset / kPointerSize) * (machine()->Is64() ? 8 : 4));
+  return jsgraph()->Int32Constant(heap_number_value_offset - kHeapObjectTag);
+}
+
+
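+// Largest value representable as a Smi for the current Smi encoding,
+// i.e. 2^(SmiValueSize - 1) - 1.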
+Node* ChangeLowering::SmiMaxValueConstant() {
+  const int smi_value_size = machine()->Is32() ? SmiTagging<4>::SmiValueSize()
+                                               : SmiTagging<8>::SmiValueSize();
+  return jsgraph()->Int32Constant(
+      -(static_cast<int>(0xffffffffu << (smi_value_size - 1)) + 1));
+}
+
+
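+// Total number of bits a value is shifted left when it is tagged as a Smi
+// (the Smi shift size plus the tag size).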
+Node* ChangeLowering::SmiShiftBitsConstant() {
+  const int smi_shift_size = machine()->Is32() ? SmiTagging<4>::SmiShiftSize()
+                                               : SmiTagging<8>::SmiShiftSize();
+  return jsgraph()->Int32Constant(smi_shift_size + kSmiTagSize);
+}
+
+
+Node* ChangeLowering::AllocateHeapNumberWithValue(Node* value, Node* control) {
+  // The AllocateHeapNumber() runtime function does not use the context, so we
+  // can safely pass in Smi zero here.
+  Node* context = jsgraph()->ZeroConstant();
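+  // Wrap the value in a ValueEffect node so the runtime call gets an effect
+  // input that depends on the value being available.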
+  Node* effect = graph()->NewNode(common()->ValueEffect(1), value);
+  const Runtime::Function* function =
+      Runtime::FunctionForId(Runtime::kAllocateHeapNumber);
+  DCHECK_EQ(0, function->nargs);
+  CallDescriptor* desc = linkage()->GetRuntimeCallDescriptor(
+      function->function_id, 0, Operator::kNoProperties);
+  Node* heap_number = graph()->NewNode(
+      common()->Call(desc), jsgraph()->CEntryStubConstant(),
+      jsgraph()->ExternalConstant(ExternalReference(function, isolate())),
+      jsgraph()->Int32Constant(function->nargs), context, effect, control);
+  Node* store = graph()->NewNode(
+      machine()->Store(StoreRepresentation(kMachFloat64, kNoWriteBarrier)),
+      heap_number, HeapNumberValueIndexConstant(), value, heap_number, control);
+  return graph()->NewNode(common()->Finish(1), heap_number, store);
+}
+
+
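+// Untags a Smi by shifting out the tag bits; on 64-bit targets the result is
+// additionally truncated to 32 bits.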
+Node* ChangeLowering::ChangeSmiToInt32(Node* value) {
+  value = graph()->NewNode(machine()->WordSar(), value, SmiShiftBitsConstant());
+  if (machine()->Is64()) {
+    value = graph()->NewNode(machine()->TruncateInt64ToInt32(), value);
+  }
+  return value;
+}
+
+
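+// Loads the float64 payload of a HeapNumber; the load is chained to the
+// given control input through a ControlEffect node.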
+Node* ChangeLowering::LoadHeapNumberValue(Node* value, Node* control) {
+  return graph()->NewNode(machine()->Load(kMachFloat64), value,
+                          HeapNumberValueIndexConstant(),
+                          graph()->NewNode(common()->ControlEffect(), control));
+}
+
+
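+// Lowers ChangeBitToBool to a branch on the bit value with a phi that
+// selects either the true or the false heap constant.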
+Reduction ChangeLowering::ChangeBitToBool(Node* val, Node* control) {
+  Node* branch = graph()->NewNode(common()->Branch(), val, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* true_value = jsgraph()->TrueConstant();
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* false_value = jsgraph()->FalseConstant();
+
+  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  Node* phi = graph()->NewNode(
+      common()->Phi(static_cast<MachineType>(kTypeBool | kRepTagged), 2),
+      true_value, false_value, merge);
+
+  return Replace(phi);
+}
+
+
+Reduction ChangeLowering::ChangeBoolToBit(Node* val) {
+  return Replace(
+      graph()->NewNode(machine()->WordEqual(), val, jsgraph()->TrueConstant()));
+}
+
+
+Reduction ChangeLowering::ChangeFloat64ToTagged(Node* val, Node* control) {
+  return Replace(AllocateHeapNumberWithValue(val, control));
+}
+
+
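+// Lowers ChangeInt32ToTagged. On 64-bit targets every int32 fits in a Smi,
+// so tagging is a simple shift; on 32-bit targets an overflowing Smi tagging
+// add falls back to allocating a HeapNumber.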
+Reduction ChangeLowering::ChangeInt32ToTagged(Node* val, Node* control) {
+  if (machine()->Is64()) {
+    return Replace(
+        graph()->NewNode(machine()->Word64Shl(),
+                         graph()->NewNode(machine()->ChangeInt32ToInt64(), val),
+                         SmiShiftBitsConstant()));
+  }
+
+  Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), val, val);
+  Node* ovf = graph()->NewNode(common()->Projection(1), add);
+
+  Node* branch = graph()->NewNode(common()->Branch(), ovf, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* heap_number = AllocateHeapNumberWithValue(
+      graph()->NewNode(machine()->ChangeInt32ToFloat64(), val), if_true);
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* smi = graph()->NewNode(common()->Projection(0), add);
+
+  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), heap_number,
+                               smi, merge);
+
+  return Replace(phi);
+}
+
+
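+// Lowers ChangeTaggedToInt32/Uint32. Smis are untagged directly; for heap
+// numbers the float64 payload is loaded and converted to the requested
+// integer representation.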
+Reduction ChangeLowering::ChangeTaggedToUI32(Node* val, Node* control,
+                                             Signedness signedness) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagMask == 1);
+
+  Node* tag = graph()->NewNode(machine()->WordAnd(), val,
+                               jsgraph()->Int32Constant(kSmiTagMask));
+  Node* branch = graph()->NewNode(common()->Branch(), tag, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  const Operator* op = (signedness == kSigned)
+                           ? machine()->ChangeFloat64ToInt32()
+                           : machine()->ChangeFloat64ToUint32();
+  Node* change = graph()->NewNode(op, LoadHeapNumberValue(val, if_true));
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* number = ChangeSmiToInt32(val);
+
+  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  Node* phi = graph()->NewNode(
+      common()->Phi((signedness == kSigned) ? kMachInt32 : kMachUint32, 2),
+      change, number, merge);
+
+  return Replace(phi);
+}
+
+
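+// Lowers ChangeTaggedToFloat64 analogously: heap numbers load their float64
+// payload, while Smis are untagged and converted from int32 to float64.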
+Reduction ChangeLowering::ChangeTaggedToFloat64(Node* val, Node* control) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagMask == 1);
+
+  Node* tag = graph()->NewNode(machine()->WordAnd(), val,
+                               jsgraph()->Int32Constant(kSmiTagMask));
+  Node* branch = graph()->NewNode(common()->Branch(), tag, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* load = LoadHeapNumberValue(val, if_true);
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* number = graph()->NewNode(machine()->ChangeInt32ToFloat64(),
+                                  ChangeSmiToInt32(val));
+
+  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  Node* phi =
+      graph()->NewNode(common()->Phi(kMachFloat64, 2), load, number, merge);
+
+  return Replace(phi);
+}
+
+
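+// Lowers ChangeUint32ToTagged. Values up to the Smi maximum are tagged by
+// shifting; larger values are boxed into a freshly allocated HeapNumber.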
+Reduction ChangeLowering::ChangeUint32ToTagged(Node* val, Node* control) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagMask == 1);
+
+  Node* cmp = graph()->NewNode(machine()->Uint32LessThanOrEqual(), val,
+                               SmiMaxValueConstant());
+  Node* branch = graph()->NewNode(common()->Branch(), cmp, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* smi = graph()->NewNode(
+      machine()->WordShl(),
+      machine()->Is64()
+          ? graph()->NewNode(machine()->ChangeUint32ToUint64(), val)
+          : val,
+      SmiShiftBitsConstant());
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* heap_number = AllocateHeapNumberWithValue(
+      graph()->NewNode(machine()->ChangeUint32ToFloat64(), val), if_false);
+
+  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), smi,
+                               heap_number, merge);
+
+  return Replace(phi);
+}
+
+
+Isolate* ChangeLowering::isolate() const { return jsgraph()->isolate(); }
+
+
+Graph* ChangeLowering::graph() const { return jsgraph()->graph(); }
+
+
+CommonOperatorBuilder* ChangeLowering::common() const {
+  return jsgraph()->common();
+}
+
+
+MachineOperatorBuilder* ChangeLowering::machine() const {
+  return jsgraph()->machine();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/change-lowering.h b/src/compiler/change-lowering.h
new file mode 100644
index 0000000..5d7ab41
--- /dev/null
+++ b/src/compiler/change-lowering.h
@@ -0,0 +1,60 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CHANGE_LOWERING_H_
+#define V8_COMPILER_CHANGE_LOWERING_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class JSGraph;
+class Linkage;
+class MachineOperatorBuilder;
+
+class ChangeLowering FINAL : public Reducer {
+ public:
+  ChangeLowering(JSGraph* jsgraph, Linkage* linkage)
+      : jsgraph_(jsgraph), linkage_(linkage) {}
+  virtual ~ChangeLowering();
+
+  virtual Reduction Reduce(Node* node) OVERRIDE;
+
+ private:
+  Node* HeapNumberValueIndexConstant();
+  Node* SmiMaxValueConstant();
+  Node* SmiShiftBitsConstant();
+
+  Node* AllocateHeapNumberWithValue(Node* value, Node* control);
+  Node* ChangeSmiToInt32(Node* value);
+  Node* LoadHeapNumberValue(Node* value, Node* control);
+
+  Reduction ChangeBitToBool(Node* val, Node* control);
+  Reduction ChangeBoolToBit(Node* val);
+  Reduction ChangeFloat64ToTagged(Node* val, Node* control);
+  Reduction ChangeInt32ToTagged(Node* val, Node* control);
+  Reduction ChangeTaggedToFloat64(Node* val, Node* control);
+  Reduction ChangeTaggedToUI32(Node* val, Node* control, Signedness signedness);
+  Reduction ChangeUint32ToTagged(Node* val, Node* control);
+
+  Graph* graph() const;
+  Isolate* isolate() const;
+  JSGraph* jsgraph() const { return jsgraph_; }
+  Linkage* linkage() const { return linkage_; }
+  CommonOperatorBuilder* common() const;
+  MachineOperatorBuilder* machine() const;
+
+  JSGraph* jsgraph_;
+  Linkage* linkage_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_CHANGE_LOWERING_H_
diff --git a/src/compiler/code-generator-impl.h b/src/compiler/code-generator-impl.h
new file mode 100644
index 0000000..a3f7e4c
--- /dev/null
+++ b/src/compiler/code-generator-impl.h
@@ -0,0 +1,132 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CODE_GENERATOR_IMPL_H_
+#define V8_COMPILER_CODE_GENERATOR_IMPL_H_
+
+#include "src/compiler/code-generator.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/instruction.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Converts InstructionOperands from a given instruction to
+// architecture-specific registers and operands after they have been assigned
+// by the register allocator.
+class InstructionOperandConverter {
+ public:
+  InstructionOperandConverter(CodeGenerator* gen, Instruction* instr)
+      : gen_(gen), instr_(instr) {}
+
+  Register InputRegister(int index) {
+    return ToRegister(instr_->InputAt(index));
+  }
+
+  DoubleRegister InputDoubleRegister(int index) {
+    return ToDoubleRegister(instr_->InputAt(index));
+  }
+
+  double InputDouble(int index) { return ToDouble(instr_->InputAt(index)); }
+
+  int32_t InputInt32(int index) {
+    return ToConstant(instr_->InputAt(index)).ToInt32();
+  }
+
+  int8_t InputInt8(int index) { return static_cast<int8_t>(InputInt32(index)); }
+
+  int16_t InputInt16(int index) {
+    return static_cast<int16_t>(InputInt32(index));
+  }
+
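+  // Masks shift amounts down to the 5 or 6 bits that 32-bit and 64-bit
+  // shift instructions typically use.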
+  uint8_t InputInt5(int index) {
+    return static_cast<uint8_t>(InputInt32(index) & 0x1F);
+  }
+
+  uint8_t InputInt6(int index) {
+    return static_cast<uint8_t>(InputInt32(index) & 0x3F);
+  }
+
+  Handle<HeapObject> InputHeapObject(int index) {
+    return ToHeapObject(instr_->InputAt(index));
+  }
+
+  Label* InputLabel(int index) {
+    return gen_->code()->GetLabel(InputBlock(index));
+  }
+
+  BasicBlock* InputBlock(int index) {
+    NodeId block_id = static_cast<NodeId>(InputInt32(index));
+    // The operand should be a block id.
+    DCHECK(block_id >= 0);
+    DCHECK(block_id < gen_->schedule()->BasicBlockCount());
+    return gen_->schedule()->GetBlockById(block_id);
+  }
+
+  Register OutputRegister(int index = 0) {
+    return ToRegister(instr_->OutputAt(index));
+  }
+
+  DoubleRegister OutputDoubleRegister() {
+    return ToDoubleRegister(instr_->Output());
+  }
+
+  Register TempRegister(int index) { return ToRegister(instr_->TempAt(index)); }
+
+  Register ToRegister(InstructionOperand* op) {
+    DCHECK(op->IsRegister());
+    return Register::FromAllocationIndex(op->index());
+  }
+
+  DoubleRegister ToDoubleRegister(InstructionOperand* op) {
+    DCHECK(op->IsDoubleRegister());
+    return DoubleRegister::FromAllocationIndex(op->index());
+  }
+
+  Constant ToConstant(InstructionOperand* operand) {
+    if (operand->IsImmediate()) {
+      return gen_->code()->GetImmediate(operand->index());
+    }
+    return gen_->code()->GetConstant(operand->index());
+  }
+
+  double ToDouble(InstructionOperand* operand) {
+    return ToConstant(operand).ToFloat64();
+  }
+
+  Handle<HeapObject> ToHeapObject(InstructionOperand* operand) {
+    return ToConstant(operand).ToHeapObject();
+  }
+
+  Frame* frame() const { return gen_->frame(); }
+  Isolate* isolate() const { return gen_->isolate(); }
+  Linkage* linkage() const { return gen_->linkage(); }
+
+ protected:
+  CodeGenerator* gen_;
+  Instruction* instr_;
+};
+
+
+// TODO(dcarney): generify this on bleeding_edge and replace this call
+// when merged.
+static inline void FinishCode(MacroAssembler* masm) {
+#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
+  masm->CheckConstPool(true, false);
+#endif
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_CODE_GENERATOR_IMPL_H_
diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc
new file mode 100644
index 0000000..f22c479
--- /dev/null
+++ b/src/compiler/code-generator.cc
@@ -0,0 +1,460 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/pipeline.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+CodeGenerator::CodeGenerator(InstructionSequence* code)
+    : code_(code),
+      current_block_(NULL),
+      current_source_position_(SourcePosition::Invalid()),
+      masm_(code->zone()->isolate(), NULL, 0),
+      resolver_(this),
+      safepoints_(code->zone()),
+      deoptimization_states_(code->zone()),
+      deoptimization_literals_(code->zone()),
+      translations_(code->zone()),
+      last_lazy_deopt_pc_(0) {}
+
+
+Handle<Code> CodeGenerator::GenerateCode() {
+  CompilationInfo* info = linkage()->info();
+
+  // Emit a code line info recording start event.
+  PositionsRecorder* recorder = masm()->positions_recorder();
+  LOG_CODE_EVENT(isolate(), CodeStartLinePosInfoRecordEvent(recorder));
+
+  // Place function entry hook if requested to do so.
+  if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
+    ProfileEntryHookStub::MaybeCallEntryHook(masm());
+  }
+
+  // Architecture-specific, linkage-specific prologue.
+  info->set_prologue_offset(masm()->pc_offset());
+  AssemblePrologue();
+
+  // Assemble all instructions.
+  for (InstructionSequence::const_iterator i = code()->begin();
+       i != code()->end(); ++i) {
+    AssembleInstruction(*i);
+  }
+
+  FinishCode(masm());
+
+  // Ensure there is space for lazy deopt.
+  if (!info->IsStub()) {
+    int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
+    while (masm()->pc_offset() < target_offset) {
+      masm()->nop();
+    }
+  }
+
+  safepoints()->Emit(masm(), frame()->GetSpillSlotCount());
+
+  // TODO(titzer): what are the right code flags here?
+  Code::Kind kind = Code::STUB;
+  if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
+    kind = Code::OPTIMIZED_FUNCTION;
+  }
+  Handle<Code> result = v8::internal::CodeGenerator::MakeCodeEpilogue(
+      masm(), Code::ComputeFlags(kind), info);
+  result->set_is_turbofanned(true);
+  result->set_stack_slots(frame()->GetSpillSlotCount());
+  result->set_safepoint_table_offset(safepoints()->GetCodeOffset());
+
+  PopulateDeoptimizationData(result);
+
+  // Emit a code line info recording stop event.
+  void* line_info = recorder->DetachJITHandlerData();
+  LOG_CODE_EVENT(isolate(), CodeEndLinePosInfoRecordEvent(*result, line_info));
+
+  return result;
+}
+
+
+void CodeGenerator::RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
+                                    int arguments,
+                                    Safepoint::DeoptMode deopt_mode) {
+  const ZoneList<InstructionOperand*>* operands =
+      pointers->GetNormalizedOperands();
+  Safepoint safepoint =
+      safepoints()->DefineSafepoint(masm(), kind, arguments, deopt_mode);
+  for (int i = 0; i < operands->length(); i++) {
+    InstructionOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index(), zone());
+    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+      Register reg = Register::FromAllocationIndex(pointer->index());
+      safepoint.DefinePointerRegister(reg, zone());
+    }
+  }
+}
+
+
+void CodeGenerator::AssembleInstruction(Instruction* instr) {
+  if (instr->IsBlockStart()) {
+    // Bind a label for a block start and handle parallel moves.
+    BlockStartInstruction* block_start = BlockStartInstruction::cast(instr);
+    current_block_ = block_start->block();
+    if (FLAG_code_comments) {
+      // TODO(titzer): these code comments are a giant memory leak.
+      Vector<char> buffer = Vector<char>::New(32);
+      SNPrintF(buffer, "-- B%d start --", block_start->block()->id());
+      masm()->RecordComment(buffer.start());
+    }
+    masm()->bind(block_start->label());
+  }
+  if (instr->IsGapMoves()) {
+    // Handle parallel moves associated with the gap instruction.
+    AssembleGap(GapInstruction::cast(instr));
+  } else if (instr->IsSourcePosition()) {
+    AssembleSourcePosition(SourcePositionInstruction::cast(instr));
+  } else {
+    // Assemble architecture-specific code for the instruction.
+    AssembleArchInstruction(instr);
+
+    // Assemble branches or boolean materializations after this instruction.
+    FlagsMode mode = FlagsModeField::decode(instr->opcode());
+    FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
+    switch (mode) {
+      case kFlags_none:
+        return;
+      case kFlags_set:
+        return AssembleArchBoolean(instr, condition);
+      case kFlags_branch:
+        return AssembleArchBranch(instr, condition);
+    }
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AssembleSourcePosition(SourcePositionInstruction* instr) {
+  SourcePosition source_position = instr->source_position();
+  if (source_position == current_source_position_) return;
+  DCHECK(!source_position.IsInvalid());
+  if (!source_position.IsUnknown()) {
+    int code_pos = source_position.raw();
+    masm()->positions_recorder()->RecordPosition(source_position.raw());
+    masm()->positions_recorder()->WriteRecordedPositions();
+    if (FLAG_code_comments) {
+      Vector<char> buffer = Vector<char>::New(256);
+      CompilationInfo* info = linkage()->info();
+      int ln = Script::GetLineNumber(info->script(), code_pos);
+      int cn = Script::GetColumnNumber(info->script(), code_pos);
+      if (info->script()->name()->IsString()) {
+        Handle<String> file(String::cast(info->script()->name()));
+        base::OS::SNPrintF(buffer.start(), buffer.length(), "-- %s:%d:%d --",
+                           file->ToCString().get(), ln, cn);
+      } else {
+        base::OS::SNPrintF(buffer.start(), buffer.length(),
+                           "-- <unknown>:%d:%d --", ln, cn);
+      }
+      masm()->RecordComment(buffer.start());
+    }
+  }
+  current_source_position_ = source_position;
+}
+
+
+void CodeGenerator::AssembleGap(GapInstruction* instr) {
+  for (int i = GapInstruction::FIRST_INNER_POSITION;
+       i <= GapInstruction::LAST_INNER_POSITION; i++) {
+    GapInstruction::InnerPosition inner_pos =
+        static_cast<GapInstruction::InnerPosition>(i);
+    ParallelMove* move = instr->GetParallelMove(inner_pos);
+    if (move != NULL) resolver()->Resolve(move);
+  }
+}
+
+
+void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
+  CompilationInfo* info = linkage()->info();
+  int deopt_count = static_cast<int>(deoptimization_states_.size());
+  if (deopt_count == 0) return;
+  Handle<DeoptimizationInputData> data =
+      DeoptimizationInputData::New(isolate(), deopt_count, TENURED);
+
+  Handle<ByteArray> translation_array =
+      translations_.CreateByteArray(isolate()->factory());
+
+  data->SetTranslationByteArray(*translation_array);
+  data->SetInlinedFunctionCount(Smi::FromInt(0));
+  data->SetOptimizationId(Smi::FromInt(info->optimization_id()));
+  // TODO(jarin) The following code was copied over from Lithium; it is not
+  // clear whether the scope or the IsOptimizing condition is really needed.
+  if (info->IsOptimizing()) {
+    // Reference to shared function info does not change between phases.
+    AllowDeferredHandleDereference allow_handle_dereference;
+    data->SetSharedFunctionInfo(*info->shared_info());
+  } else {
+    data->SetSharedFunctionInfo(Smi::FromInt(0));
+  }
+
+  Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(
+      static_cast<int>(deoptimization_literals_.size()), TENURED);
+  {
+    AllowDeferredHandleDereference copy_handles;
+    for (unsigned i = 0; i < deoptimization_literals_.size(); i++) {
+      literals->set(i, *deoptimization_literals_[i]);
+    }
+    data->SetLiteralArray(*literals);
+  }
+
+  // No OSR in Turbofan yet...
+  BailoutId osr_ast_id = BailoutId::None();
+  data->SetOsrAstId(Smi::FromInt(osr_ast_id.ToInt()));
+  data->SetOsrPcOffset(Smi::FromInt(-1));
+
+  // Populate deoptimization entries.
+  for (int i = 0; i < deopt_count; i++) {
+    DeoptimizationState* deoptimization_state = deoptimization_states_[i];
+    data->SetAstId(i, deoptimization_state->bailout_id());
+    CHECK_NE(NULL, deoptimization_states_[i]);
+    data->SetTranslationIndex(
+        i, Smi::FromInt(deoptimization_states_[i]->translation_id()));
+    data->SetArgumentsStackHeight(i, Smi::FromInt(0));
+    data->SetPc(i, Smi::FromInt(deoptimization_state->pc_offset()));
+  }
+
+  code_object->set_deoptimization_data(*data);
+}
+
+
+void CodeGenerator::AddSafepointAndDeopt(Instruction* instr) {
+  CallDescriptor::Flags flags(MiscField::decode(instr->opcode()));
+
+  bool needs_frame_state = (flags & CallDescriptor::kNeedsFrameState);
+
+  RecordSafepoint(
+      instr->pointer_map(), Safepoint::kSimple, 0,
+      needs_frame_state ? Safepoint::kLazyDeopt : Safepoint::kNoLazyDeopt);
+
+  if (flags & CallDescriptor::kNeedsNopAfterCall) {
+    AddNopForSmiCodeInlining();
+  }
+
+  if (needs_frame_state) {
+    MarkLazyDeoptSite();
+    // If the frame state is present, it starts at argument 1, just after the
+    // code address.
+    InstructionOperandConverter converter(this, instr);
+    size_t frame_state_offset = 1;
+    FrameStateDescriptor* descriptor =
+        GetFrameStateDescriptor(instr, frame_state_offset);
+    int pc_offset = masm()->pc_offset();
+    int deopt_state_id = BuildTranslation(instr, pc_offset, frame_state_offset,
+                                          descriptor->state_combine());
+    // If the pre-call frame state differs from the post-call one, produce the
+    // pre-call frame state, too.
+    // TODO(jarin) We might want to avoid building the pre-call frame state
+    // because it is only used to get locals and arguments (by the debugger and
+    // f.arguments), and those are the same in the pre-call and post-call
+    // states.
+    if (descriptor->state_combine() != kIgnoreOutput) {
+      deopt_state_id =
+          BuildTranslation(instr, -1, frame_state_offset, kIgnoreOutput);
+    }
+#if DEBUG
+    // Make sure all the values live in stack slots or are immediates.
+    // (The values should not live in registers because registers are
+    // clobbered by calls.)
+    for (size_t i = 0; i < descriptor->size(); i++) {
+      InstructionOperand* op = instr->InputAt(frame_state_offset + 1 + i);
+      CHECK(op->IsStackSlot() || op->IsImmediate());
+    }
+#endif
+    safepoints()->RecordLazyDeoptimizationIndex(deopt_state_id);
+  }
+}
+
+
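+// Returns the index of {literal} in the deoptimization literal array, adding
+// it if it is not already present.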
+int CodeGenerator::DefineDeoptimizationLiteral(Handle<Object> literal) {
+  int result = static_cast<int>(deoptimization_literals_.size());
+  for (unsigned i = 0; i < deoptimization_literals_.size(); ++i) {
+    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+  }
+  deoptimization_literals_.push_back(literal);
+  return result;
+}
+
+
+FrameStateDescriptor* CodeGenerator::GetFrameStateDescriptor(
+    Instruction* instr, size_t frame_state_offset) {
+  InstructionOperandConverter i(this, instr);
+  InstructionSequence::StateId state_id = InstructionSequence::StateId::FromInt(
+      i.InputInt32(static_cast<int>(frame_state_offset)));
+  return code()->GetFrameStateDescriptor(state_id);
+}
+
+
+void CodeGenerator::BuildTranslationForFrameStateDescriptor(
+    FrameStateDescriptor* descriptor, Instruction* instr,
+    Translation* translation, size_t frame_state_offset,
+    OutputFrameStateCombine state_combine) {
+  // Outer-most state must be added to translation first.
+  if (descriptor->outer_state() != NULL) {
+    BuildTranslationForFrameStateDescriptor(descriptor->outer_state(), instr,
+                                            translation, frame_state_offset,
+                                            kIgnoreOutput);
+  }
+
+  int id = Translation::kSelfLiteralId;
+  if (!descriptor->jsfunction().is_null()) {
+    id = DefineDeoptimizationLiteral(
+        Handle<Object>::cast(descriptor->jsfunction().ToHandleChecked()));
+  }
+
+  switch (descriptor->type()) {
+    case JS_FRAME:
+      translation->BeginJSFrame(
+          descriptor->bailout_id(), id,
+          static_cast<unsigned int>(descriptor->GetHeight(state_combine)));
+      break;
+    case ARGUMENTS_ADAPTOR:
+      translation->BeginArgumentsAdaptorFrame(
+          id, static_cast<unsigned int>(descriptor->parameters_count()));
+      break;
+  }
+
+  frame_state_offset += descriptor->outer_state()->GetTotalSize();
+  for (size_t i = 0; i < descriptor->size(); i++) {
+    AddTranslationForOperand(
+        translation, instr,
+        instr->InputAt(static_cast<int>(frame_state_offset + i)));
+  }
+
+  switch (state_combine) {
+    case kPushOutput:
+      DCHECK(instr->OutputCount() == 1);
+      AddTranslationForOperand(translation, instr, instr->OutputAt(0));
+      break;
+    case kIgnoreOutput:
+      break;
+  }
+}
+
+
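+// Builds the deoptimization translation for the frame state attached to the
+// given instruction, records the resulting deoptimization state, and returns
+// its index.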
+int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
+                                    size_t frame_state_offset,
+                                    OutputFrameStateCombine state_combine) {
+  FrameStateDescriptor* descriptor =
+      GetFrameStateDescriptor(instr, frame_state_offset);
+  frame_state_offset++;
+
+  Translation translation(
+      &translations_, static_cast<int>(descriptor->GetFrameCount()),
+      static_cast<int>(descriptor->GetJSFrameCount()), zone());
+  BuildTranslationForFrameStateDescriptor(descriptor, instr, &translation,
+                                          frame_state_offset, state_combine);
+
+  int deoptimization_id = static_cast<int>(deoptimization_states_.size());
+
+  deoptimization_states_.push_back(new (zone()) DeoptimizationState(
+      descriptor->bailout_id(), translation.index(), pc_offset));
+
+  return deoptimization_id;
+}
+
+
+void CodeGenerator::AddTranslationForOperand(Translation* translation,
+                                             Instruction* instr,
+                                             InstructionOperand* op) {
+  if (op->IsStackSlot()) {
+    translation->StoreStackSlot(op->index());
+  } else if (op->IsDoubleStackSlot()) {
+    translation->StoreDoubleStackSlot(op->index());
+  } else if (op->IsRegister()) {
+    InstructionOperandConverter converter(this, instr);
+    translation->StoreRegister(converter.ToRegister(op));
+  } else if (op->IsDoubleRegister()) {
+    InstructionOperandConverter converter(this, instr);
+    translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
+  } else if (op->IsImmediate()) {
+    InstructionOperandConverter converter(this, instr);
+    Constant constant = converter.ToConstant(op);
+    Handle<Object> constant_object;
+    switch (constant.type()) {
+      case Constant::kInt32:
+        constant_object =
+            isolate()->factory()->NewNumberFromInt(constant.ToInt32());
+        break;
+      case Constant::kFloat64:
+        constant_object = isolate()->factory()->NewNumber(constant.ToFloat64());
+        break;
+      case Constant::kHeapObject:
+        constant_object = constant.ToHeapObject();
+        break;
+      default:
+        UNREACHABLE();
+    }
+    int literal_id = DefineDeoptimizationLiteral(constant_object);
+    translation->StoreLiteral(literal_id);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::MarkLazyDeoptSite() {
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+#if !V8_TURBOFAN_BACKEND
+
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+  UNIMPLEMENTED();
+}
+
+
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+                                       FlagsCondition condition) {
+  UNIMPLEMENTED();
+}
+
+
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+                                        FlagsCondition condition) {
+  UNIMPLEMENTED();
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+  UNIMPLEMENTED();
+}
+
+
+void CodeGenerator::AssemblePrologue() { UNIMPLEMENTED(); }
+
+
+void CodeGenerator::AssembleReturn() { UNIMPLEMENTED(); }
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  UNIMPLEMENTED();
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  UNIMPLEMENTED();
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() { UNIMPLEMENTED(); }
+
+#endif  // !V8_TURBOFAN_BACKEND
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/code-generator.h b/src/compiler/code-generator.h
new file mode 100644
index 0000000..ddc2f9a
--- /dev/null
+++ b/src/compiler/code-generator.h
@@ -0,0 +1,138 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CODE_GENERATOR_H_
+#define V8_COMPILER_CODE_GENERATOR_H_
+
+#include <deque>
+
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/instruction.h"
+#include "src/deoptimizer.h"
+#include "src/macro-assembler.h"
+#include "src/safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Generates native code for a sequence of instructions.
+class CodeGenerator FINAL : public GapResolver::Assembler {
+ public:
+  explicit CodeGenerator(InstructionSequence* code);
+
+  // Generate native code.
+  Handle<Code> GenerateCode();
+
+  InstructionSequence* code() const { return code_; }
+  Frame* frame() const { return code()->frame(); }
+  Graph* graph() const { return code()->graph(); }
+  Isolate* isolate() const { return zone()->isolate(); }
+  Linkage* linkage() const { return code()->linkage(); }
+  Schedule* schedule() const { return code()->schedule(); }
+
+ private:
+  MacroAssembler* masm() { return &masm_; }
+  GapResolver* resolver() { return &resolver_; }
+  SafepointTableBuilder* safepoints() { return &safepoints_; }
+  Zone* zone() const { return code()->zone(); }
+
+  // Checks if {block} will appear directly after {current_block_} when
+  // assembling code, in which case a fall-through can be used.
+  bool IsNextInAssemblyOrder(const BasicBlock* block) const {
+    return block->rpo_number_ == (current_block_->rpo_number_ + 1) &&
+           block->deferred_ == current_block_->deferred_;
+  }
+
+  // Record a safepoint with the given pointer map.
+  void RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
+                       int arguments, Safepoint::DeoptMode deopt_mode);
+
+  // Assemble code for the specified instruction.
+  void AssembleInstruction(Instruction* instr);
+  void AssembleSourcePosition(SourcePositionInstruction* instr);
+  void AssembleGap(GapInstruction* gap);
+
+  // ===========================================================================
+  // ============= Architecture-specific code generation methods. ==============
+  // ===========================================================================
+
+  void AssembleArchInstruction(Instruction* instr);
+  void AssembleArchBranch(Instruction* instr, FlagsCondition condition);
+  void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
+
+  void AssembleDeoptimizerCall(int deoptimization_id);
+
+  // Generates an architecture-specific, descriptor-specific prologue
+  // to set up a stack frame.
+  void AssemblePrologue();
+  // Generates an architecture-specific, descriptor-specific return sequence
+  // to tear down a stack frame.
+  void AssembleReturn();
+
+  // ===========================================================================
+  // ============== Architecture-specific gap resolver methods. ================
+  // ===========================================================================
+
+  // Interface used by the gap resolver to emit moves and swaps.
+  virtual void AssembleMove(InstructionOperand* source,
+                            InstructionOperand* destination) OVERRIDE;
+  virtual void AssembleSwap(InstructionOperand* source,
+                            InstructionOperand* destination) OVERRIDE;
+
+  // ===========================================================================
+  // Deoptimization table construction
+  void AddSafepointAndDeopt(Instruction* instr);
+  void PopulateDeoptimizationData(Handle<Code> code);
+  int DefineDeoptimizationLiteral(Handle<Object> literal);
+  FrameStateDescriptor* GetFrameStateDescriptor(Instruction* instr,
+                                                size_t frame_state_offset);
+  int BuildTranslation(Instruction* instr, int pc_offset,
+                       size_t frame_state_offset,
+                       OutputFrameStateCombine state_combine);
+  void BuildTranslationForFrameStateDescriptor(
+      FrameStateDescriptor* descriptor, Instruction* instr,
+      Translation* translation, size_t frame_state_offset,
+      OutputFrameStateCombine state_combine);
+  void AddTranslationForOperand(Translation* translation, Instruction* instr,
+                                InstructionOperand* op);
+  void AddNopForSmiCodeInlining();
+  void EnsureSpaceForLazyDeopt();
+  void MarkLazyDeoptSite();
+
+  // ===========================================================================
+  struct DeoptimizationState : ZoneObject {
+   public:
+    BailoutId bailout_id() const { return bailout_id_; }
+    int translation_id() const { return translation_id_; }
+    int pc_offset() const { return pc_offset_; }
+
+    DeoptimizationState(BailoutId bailout_id, int translation_id, int pc_offset)
+        : bailout_id_(bailout_id),
+          translation_id_(translation_id),
+          pc_offset_(pc_offset) {}
+
+   private:
+    BailoutId bailout_id_;
+    int translation_id_;
+    int pc_offset_;
+  };
+
+  InstructionSequence* code_;
+  BasicBlock* current_block_;
+  SourcePosition current_source_position_;
+  MacroAssembler masm_;
+  GapResolver resolver_;
+  SafepointTableBuilder safepoints_;
+  ZoneDeque<DeoptimizationState*> deoptimization_states_;
+  ZoneDeque<Handle<Object> > deoptimization_literals_;
+  TranslationBuffer translations_;
+  int last_lazy_deopt_pc_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_CODE_GENERATOR_H_
diff --git a/src/compiler/common-node-cache.h b/src/compiler/common-node-cache.h
new file mode 100644
index 0000000..1ed2b04
--- /dev/null
+++ b/src/compiler/common-node-cache.h
@@ -0,0 +1,51 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_COMMON_NODE_CACHE_H_
+#define V8_COMPILER_COMMON_NODE_CACHE_H_
+
+#include "src/assembler.h"
+#include "src/compiler/node-cache.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Bundles various caches for common nodes.
+class CommonNodeCache FINAL : public ZoneObject {
+ public:
+  explicit CommonNodeCache(Zone* zone) : zone_(zone) {}
+
+  Node** FindInt32Constant(int32_t value) {
+    return int32_constants_.Find(zone_, value);
+  }
+
+  Node** FindFloat64Constant(double value) {
+    // We canonicalize double constants at the bit representation level.
+    return float64_constants_.Find(zone_, bit_cast<int64_t>(value));
+  }
+
+  Node** FindExternalConstant(ExternalReference reference) {
+    return external_constants_.Find(zone_, reference.address());
+  }
+
+  Node** FindNumberConstant(double value) {
+    // We canonicalize double constants at the bit representation level.
+    return number_constants_.Find(zone_, bit_cast<int64_t>(value));
+  }
+
+  Zone* zone() const { return zone_; }
+
+ private:
+  Int32NodeCache int32_constants_;
+  Int64NodeCache float64_constants_;
+  PtrNodeCache external_constants_;
+  Int64NodeCache number_constants_;
+  Zone* zone_;
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_COMMON_NODE_CACHE_H_
diff --git a/src/compiler/common-operator-unittest.cc b/src/compiler/common-operator-unittest.cc
new file mode 100644
index 0000000..5001770
--- /dev/null
+++ b/src/compiler/common-operator-unittest.cc
@@ -0,0 +1,183 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+
+#include <limits>
+
+#include "src/compiler/operator-properties-inl.h"
+#include "src/test/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+// -----------------------------------------------------------------------------
+// Shared operators.
+
+
+namespace {
+
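+// Expected opcode, properties and input/output counts for each shared
+// (singleton) common operator under test.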
+struct SharedOperator {
+  const Operator* (CommonOperatorBuilder::*constructor)();
+  IrOpcode::Value opcode;
+  Operator::Properties properties;
+  int value_input_count;
+  int effect_input_count;
+  int control_input_count;
+  int effect_output_count;
+  int control_output_count;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const SharedOperator& fop) {
+  return os << IrOpcode::Mnemonic(fop.opcode);
+}
+
+
+const SharedOperator kSharedOperators[] = {
+#define SHARED(Name, properties, value_input_count, effect_input_count,        \
+               control_input_count, effect_output_count, control_output_count) \
+  {                                                                            \
+    &CommonOperatorBuilder::Name, IrOpcode::k##Name, properties,               \
+        value_input_count, effect_input_count, control_input_count,            \
+        effect_output_count, control_output_count                              \
+  }
+    SHARED(Dead, Operator::kFoldable, 0, 0, 0, 0, 1),
+    SHARED(End, Operator::kFoldable, 0, 0, 1, 0, 0),
+    SHARED(Branch, Operator::kFoldable, 1, 0, 1, 0, 2),
+    SHARED(IfTrue, Operator::kFoldable, 0, 0, 1, 0, 1),
+    SHARED(IfFalse, Operator::kFoldable, 0, 0, 1, 0, 1),
+    SHARED(Throw, Operator::kFoldable, 1, 0, 1, 0, 1),
+    SHARED(Return, Operator::kNoProperties, 1, 1, 1, 1, 1),
+    SHARED(ControlEffect, Operator::kPure, 0, 0, 1, 1, 0)
+#undef SHARED
+};
+
+
+class CommonSharedOperatorTest
+    : public TestWithZone,
+      public ::testing::WithParamInterface<SharedOperator> {};
+
+}  // namespace
+
+
+TEST_P(CommonSharedOperatorTest, InstancesAreGloballyShared) {
+  const SharedOperator& sop = GetParam();
+  CommonOperatorBuilder common1(zone());
+  CommonOperatorBuilder common2(zone());
+  EXPECT_EQ((common1.*sop.constructor)(), (common2.*sop.constructor)());
+}
+
+
+TEST_P(CommonSharedOperatorTest, NumberOfInputsAndOutputs) {
+  CommonOperatorBuilder common(zone());
+  const SharedOperator& sop = GetParam();
+  const Operator* op = (common.*sop.constructor)();
+
+  EXPECT_EQ(sop.value_input_count, OperatorProperties::GetValueInputCount(op));
+  EXPECT_EQ(sop.effect_input_count,
+            OperatorProperties::GetEffectInputCount(op));
+  EXPECT_EQ(sop.control_input_count,
+            OperatorProperties::GetControlInputCount(op));
+  EXPECT_EQ(
+      sop.value_input_count + sop.effect_input_count + sop.control_input_count,
+      OperatorProperties::GetTotalInputCount(op));
+
+  EXPECT_EQ(0, OperatorProperties::GetValueOutputCount(op));
+  EXPECT_EQ(sop.effect_output_count,
+            OperatorProperties::GetEffectOutputCount(op));
+  EXPECT_EQ(sop.control_output_count,
+            OperatorProperties::GetControlOutputCount(op));
+}
+
+
+TEST_P(CommonSharedOperatorTest, OpcodeIsCorrect) {
+  CommonOperatorBuilder common(zone());
+  const SharedOperator& sop = GetParam();
+  const Operator* op = (common.*sop.constructor)();
+  EXPECT_EQ(sop.opcode, op->opcode());
+}
+
+
+TEST_P(CommonSharedOperatorTest, Properties) {
+  CommonOperatorBuilder common(zone());
+  const SharedOperator& sop = GetParam();
+  const Operator* op = (common.*sop.constructor)();
+  EXPECT_EQ(sop.properties, op->properties());
+}
+
+
+INSTANTIATE_TEST_CASE_P(CommonOperatorTest, CommonSharedOperatorTest,
+                        ::testing::ValuesIn(kSharedOperators));
+
+
+// -----------------------------------------------------------------------------
+// Other operators.
+
+
+namespace {
+
+class CommonOperatorTest : public TestWithZone {
+ public:
+  CommonOperatorTest() : common_(zone()) {}
+  virtual ~CommonOperatorTest() {}
+
+  CommonOperatorBuilder* common() { return &common_; }
+
+ private:
+  CommonOperatorBuilder common_;
+};
+
+
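+// Sample argument counts and float32 values used to exercise the
+// parameterized operator constructors below.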
+const int kArguments[] = {1, 5, 6, 42, 100, 10000, kMaxInt};
+
+const float kFloat32Values[] = {
+    std::numeric_limits<float>::min(), -1.0f, -0.0f, 0.0f, 1.0f,
+    std::numeric_limits<float>::max()};
+
+}  // namespace
+
+
+TEST_F(CommonOperatorTest, Float32Constant) {
+  TRACED_FOREACH(float, value, kFloat32Values) {
+    const Operator* op = common()->Float32Constant(value);
+    EXPECT_FLOAT_EQ(value, OpParameter<float>(op));
+    EXPECT_EQ(0, OperatorProperties::GetValueInputCount(op));
+    EXPECT_EQ(0, OperatorProperties::GetTotalInputCount(op));
+    EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+    EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op));
+    EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
+  }
+}
+
+
+TEST_F(CommonOperatorTest, ValueEffect) {
+  TRACED_FOREACH(int, arguments, kArguments) {
+    const Operator* op = common()->ValueEffect(arguments);
+    EXPECT_EQ(arguments, OperatorProperties::GetValueInputCount(op));
+    EXPECT_EQ(arguments, OperatorProperties::GetTotalInputCount(op));
+    EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+    EXPECT_EQ(1, OperatorProperties::GetEffectOutputCount(op));
+    EXPECT_EQ(0, OperatorProperties::GetValueOutputCount(op));
+  }
+}
+
+
+TEST_F(CommonOperatorTest, Finish) {
+  TRACED_FOREACH(int, arguments, kArguments) {
+    const Operator* op = common()->Finish(arguments);
+    EXPECT_EQ(1, OperatorProperties::GetValueInputCount(op));
+    EXPECT_EQ(arguments, OperatorProperties::GetEffectInputCount(op));
+    EXPECT_EQ(arguments + 1, OperatorProperties::GetTotalInputCount(op));
+    EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+    EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op));
+    EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/common-operator.cc b/src/compiler/common-operator.cc
new file mode 100644
index 0000000..19792bd
--- /dev/null
+++ b/src/compiler/common-operator.cc
@@ -0,0 +1,252 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+
+#include "src/assembler.h"
+#include "src/base/lazy-instance.h"
+#include "src/compiler/linkage.h"
+#include "src/unique.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+// TODO(turbofan): Use size_t instead of int here.
+class ControlOperator : public Operator1<int> {
+ public:
+  ControlOperator(IrOpcode::Value opcode, Properties properties, int inputs,
+                  int outputs, int controls, const char* mnemonic)
+      : Operator1<int>(opcode, properties, inputs, outputs, mnemonic,
+                       controls) {}
+
+  virtual OStream& PrintParameter(OStream& os) const FINAL { return os; }
+};
+
+}  // namespace
+
+
+// Specialization for static parameters of type {ExternalReference}.
+template <>
+struct StaticParameterTraits<ExternalReference> {
+  static OStream& PrintTo(OStream& os, ExternalReference reference) {
+    os << reference.address();
+    // TODO(bmeurer): Move to operator<<(os, ExternalReference)
+    const Runtime::Function* function =
+        Runtime::FunctionForEntry(reference.address());
+    if (function) {
+      os << " <" << function->name << ".entry>";
+    }
+    return os;
+  }
+  static int HashCode(ExternalReference reference) {
+    return bit_cast<int>(static_cast<uint32_t>(
+        reinterpret_cast<uintptr_t>(reference.address())));
+  }
+  static bool Equals(ExternalReference lhs, ExternalReference rhs) {
+    return lhs == rhs;
+  }
+};
+
+
+#define SHARED_OP_LIST(V)               \
+  V(Dead, Operator::kFoldable, 0, 0)    \
+  V(End, Operator::kFoldable, 0, 1)     \
+  V(Branch, Operator::kFoldable, 1, 1)  \
+  V(IfTrue, Operator::kFoldable, 0, 1)  \
+  V(IfFalse, Operator::kFoldable, 0, 1) \
+  V(Throw, Operator::kFoldable, 1, 1)   \
+  V(Return, Operator::kNoProperties, 1, 1)
+
+
+struct CommonOperatorBuilderImpl FINAL {
+#define SHARED(Name, properties, value_input_count, control_input_count)       \
+  struct Name##Operator FINAL : public ControlOperator {                       \
+    Name##Operator()                                                           \
+        : ControlOperator(IrOpcode::k##Name, properties, value_input_count, 0, \
+                          control_input_count, #Name) {}                       \
+  };                                                                           \
+  Name##Operator k##Name##Operator;
+  SHARED_OP_LIST(SHARED)
+#undef SHARED
+
+  struct ControlEffectOperator FINAL : public SimpleOperator {
+    ControlEffectOperator()
+        : SimpleOperator(IrOpcode::kControlEffect, Operator::kPure, 0, 0,
+                         "ControlEffect") {}
+  };
+  ControlEffectOperator kControlEffectOperator;
+};
+
+
+static base::LazyInstance<CommonOperatorBuilderImpl>::type kImpl =
+    LAZY_INSTANCE_INITIALIZER;
+
+
+CommonOperatorBuilder::CommonOperatorBuilder(Zone* zone)
+    : impl_(kImpl.Get()), zone_(zone) {}
+
+
+#define SHARED(Name, properties, value_input_count, control_input_count) \
+  const Operator* CommonOperatorBuilder::Name() {                        \
+    return &impl_.k##Name##Operator;                                     \
+  }
+SHARED_OP_LIST(SHARED)
+#undef SHARED
+
+
+const Operator* CommonOperatorBuilder::Start(int num_formal_parameters) {
+  // Outputs are formal parameters, plus context, receiver, and JSFunction.
+  const int value_output_count = num_formal_parameters + 3;
+  return new (zone()) ControlOperator(IrOpcode::kStart, Operator::kFoldable, 0,
+                                      value_output_count, 0, "Start");
+}
+
+
+const Operator* CommonOperatorBuilder::Merge(int controls) {
+  return new (zone()) ControlOperator(IrOpcode::kMerge, Operator::kFoldable, 0,
+                                      0, controls, "Merge");
+}
+
+
+const Operator* CommonOperatorBuilder::Loop(int controls) {
+  return new (zone()) ControlOperator(IrOpcode::kLoop, Operator::kFoldable, 0,
+                                      0, controls, "Loop");
+}
+
+
+const Operator* CommonOperatorBuilder::Parameter(int index) {
+  return new (zone()) Operator1<int>(IrOpcode::kParameter, Operator::kPure, 1,
+                                     1, "Parameter", index);
+}
+
+
+const Operator* CommonOperatorBuilder::Int32Constant(int32_t value) {
+  return new (zone()) Operator1<int32_t>(
+      IrOpcode::kInt32Constant, Operator::kPure, 0, 1, "Int32Constant", value);
+}
+
+
+const Operator* CommonOperatorBuilder::Int64Constant(int64_t value) {
+  return new (zone()) Operator1<int64_t>(
+      IrOpcode::kInt64Constant, Operator::kPure, 0, 1, "Int64Constant", value);
+}
+
+
+const Operator* CommonOperatorBuilder::Float32Constant(volatile float value) {
+  return new (zone())
+      Operator1<float>(IrOpcode::kFloat32Constant, Operator::kPure, 0, 1,
+                       "Float32Constant", value);
+}
+
+
+const Operator* CommonOperatorBuilder::Float64Constant(volatile double value) {
+  return new (zone())
+      Operator1<double>(IrOpcode::kFloat64Constant, Operator::kPure, 0, 1,
+                        "Float64Constant", value);
+}
+
+
+const Operator* CommonOperatorBuilder::ExternalConstant(
+    const ExternalReference& value) {
+  return new (zone())
+      Operator1<ExternalReference>(IrOpcode::kExternalConstant, Operator::kPure,
+                                   0, 1, "ExternalConstant", value);
+}
+
+
+const Operator* CommonOperatorBuilder::NumberConstant(volatile double value) {
+  return new (zone())
+      Operator1<double>(IrOpcode::kNumberConstant, Operator::kPure, 0, 1,
+                        "NumberConstant", value);
+}
+
+
+const Operator* CommonOperatorBuilder::HeapConstant(
+    const Unique<Object>& value) {
+  return new (zone()) Operator1<Unique<Object> >(
+      IrOpcode::kHeapConstant, Operator::kPure, 0, 1, "HeapConstant", value);
+}
+
+
+const Operator* CommonOperatorBuilder::Phi(MachineType type, int arguments) {
+  DCHECK(arguments > 0);  // Disallow empty phis.
+  return new (zone()) Operator1<MachineType>(IrOpcode::kPhi, Operator::kPure,
+                                             arguments, 1, "Phi", type);
+}
+
+
+const Operator* CommonOperatorBuilder::EffectPhi(int arguments) {
+  DCHECK(arguments > 0);  // Disallow empty phis.
+  return new (zone()) Operator1<int>(IrOpcode::kEffectPhi, Operator::kPure, 0,
+                                     0, "EffectPhi", arguments);
+}
+
+
+const Operator* CommonOperatorBuilder::ControlEffect() {
+  return &impl_.kControlEffectOperator;
+}
+
+
+const Operator* CommonOperatorBuilder::ValueEffect(int arguments) {
+  DCHECK(arguments > 0);  // Disallow empty value effects.
+  return new (zone()) SimpleOperator(IrOpcode::kValueEffect, Operator::kPure,
+                                     arguments, 0, "ValueEffect");
+}
+
+
+const Operator* CommonOperatorBuilder::Finish(int arguments) {
+  DCHECK(arguments > 0);  // Disallow empty finishes.
+  return new (zone()) Operator1<int>(IrOpcode::kFinish, Operator::kPure, 1, 1,
+                                     "Finish", arguments);
+}
+
+
+const Operator* CommonOperatorBuilder::StateValues(int arguments) {
+  return new (zone()) Operator1<int>(IrOpcode::kStateValues, Operator::kPure,
+                                     arguments, 1, "StateValues", arguments);
+}
+
+
+const Operator* CommonOperatorBuilder::FrameState(
+    FrameStateType type, BailoutId bailout_id,
+    OutputFrameStateCombine state_combine, MaybeHandle<JSFunction> jsfunction) {
+  return new (zone()) Operator1<FrameStateCallInfo>(
+      IrOpcode::kFrameState, Operator::kPure, 4, 1, "FrameState",
+      FrameStateCallInfo(type, bailout_id, state_combine, jsfunction));
+}
+
+
+const Operator* CommonOperatorBuilder::Call(const CallDescriptor* descriptor) {
+  class CallOperator FINAL : public Operator1<const CallDescriptor*> {
+   public:
+    // TODO(titzer): Operator still uses int, whereas CallDescriptor uses
+    // size_t.
+    CallOperator(const CallDescriptor* descriptor, const char* mnemonic)
+        : Operator1<const CallDescriptor*>(
+              IrOpcode::kCall, descriptor->properties(),
+              static_cast<int>(descriptor->InputCount() +
+                               descriptor->FrameStateCount()),
+              static_cast<int>(descriptor->ReturnCount()), mnemonic,
+              descriptor) {}
+
+    virtual OStream& PrintParameter(OStream& os) const OVERRIDE {
+      return os << "[" << *parameter() << "]";
+    }
+  };
+  return new (zone()) CallOperator(descriptor, "Call");
+}
+
+
+const Operator* CommonOperatorBuilder::Projection(size_t index) {
+  return new (zone()) Operator1<size_t>(IrOpcode::kProjection, Operator::kPure,
+                                        1, 1, "Projection", index);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/common-operator.h b/src/compiler/common-operator.h
new file mode 100644
index 0000000..a3659ad
--- /dev/null
+++ b/src/compiler/common-operator.h
@@ -0,0 +1,117 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_COMMON_OPERATOR_H_
+#define V8_COMPILER_COMMON_OPERATOR_H_
+
+#include "src/compiler/machine-type.h"
+#include "src/unique.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class ExternalReference;
+class OStream;
+
+
+namespace compiler {
+
+// Forward declarations.
+class CallDescriptor;
+struct CommonOperatorBuilderImpl;
+class Operator;
+
+
+// Flag that describes how to combine the current environment with
+// the output of a node to obtain a framestate for lazy bailout.
+enum OutputFrameStateCombine {
+  kPushOutput,   // Push the output on the expression stack.
+  kIgnoreOutput  // Use the frame state as-is.
+};
+
+
+// The type of stack frame that a FrameState node represents.
+enum FrameStateType {
+  JS_FRAME,          // Represents an unoptimized JavaScriptFrame.
+  ARGUMENTS_ADAPTOR  // Represents an ArgumentsAdaptorFrame.
+};
+
+
+class FrameStateCallInfo FINAL {
+ public:
+  FrameStateCallInfo(
+      FrameStateType type, BailoutId bailout_id,
+      OutputFrameStateCombine state_combine,
+      MaybeHandle<JSFunction> jsfunction = MaybeHandle<JSFunction>())
+      : type_(type),
+        bailout_id_(bailout_id),
+        frame_state_combine_(state_combine),
+        jsfunction_(jsfunction) {}
+
+  FrameStateType type() const { return type_; }
+  BailoutId bailout_id() const { return bailout_id_; }
+  OutputFrameStateCombine state_combine() const { return frame_state_combine_; }
+  MaybeHandle<JSFunction> jsfunction() const { return jsfunction_; }
+
+ private:
+  FrameStateType type_;
+  BailoutId bailout_id_;
+  OutputFrameStateCombine frame_state_combine_;
+  MaybeHandle<JSFunction> jsfunction_;
+};
+
+
+// Interface for building common operators that can be used at any level of IR,
+// including JavaScript, mid-level, and low-level.
+class CommonOperatorBuilder FINAL {
+ public:
+  explicit CommonOperatorBuilder(Zone* zone);
+
+  const Operator* Dead();
+  const Operator* End();
+  const Operator* Branch();
+  const Operator* IfTrue();
+  const Operator* IfFalse();
+  const Operator* Throw();
+  const Operator* Return();
+
+  const Operator* Start(int num_formal_parameters);
+  const Operator* Merge(int controls);
+  const Operator* Loop(int controls);
+  const Operator* Parameter(int index);
+
+  const Operator* Int32Constant(int32_t);
+  const Operator* Int64Constant(int64_t);
+  const Operator* Float32Constant(volatile float);
+  const Operator* Float64Constant(volatile double);
+  const Operator* ExternalConstant(const ExternalReference&);
+  const Operator* NumberConstant(volatile double);
+  const Operator* HeapConstant(const Unique<Object>&);
+
+  const Operator* Phi(MachineType type, int arguments);
+  const Operator* EffectPhi(int arguments);
+  const Operator* ControlEffect();
+  const Operator* ValueEffect(int arguments);
+  const Operator* Finish(int arguments);
+  const Operator* StateValues(int arguments);
+  const Operator* FrameState(
+      FrameStateType type, BailoutId bailout_id,
+      OutputFrameStateCombine state_combine,
+      MaybeHandle<JSFunction> jsfunction = MaybeHandle<JSFunction>());
+  const Operator* Call(const CallDescriptor* descriptor);
+  const Operator* Projection(size_t index);
+
+ private:
+  Zone* zone() const { return zone_; }
+
+  const CommonOperatorBuilderImpl& impl_;
+  Zone* const zone_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_COMMON_OPERATOR_H_
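
A minimal usage sketch of this interface, assuming a live Zone* named zone (for example the one supplied by TestWithZone in the unit tests); building actual graph nodes from these operators is out of scope here:

  CommonOperatorBuilder common(zone);

  // Parameterless operators are globally shared: every builder hands back the
  // same lazily initialized static instance.
  const Operator* branch = common.Branch();

  // Parameterized operators are allocated fresh in the zone on each call.
  const Operator* forty_two = common.Int32Constant(42);
  const Operator* phi = common.Phi(kMachAnyTagged, 2);  // two-value phi
  const Operator* start = common.Start(2);  // 2 params + context/receiver/
                                            // JSFunction = 5 value outputs
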
diff --git a/src/compiler/compiler-test-utils.h b/src/compiler/compiler-test-utils.h
new file mode 100644
index 0000000..437abd6
--- /dev/null
+++ b/src/compiler/compiler-test-utils.h
@@ -0,0 +1,57 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_COMPILER_TEST_UTILS_H_
+#define V8_COMPILER_COMPILER_TEST_UTILS_H_
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// The TARGET_TEST(Case, Name) macro works just like
+// TEST(Case, Name), except that the test is disabled
+// if the platform is not a supported TurboFan target.
+#if V8_TURBOFAN_TARGET
+#define TARGET_TEST(Case, Name) TEST(Case, Name)
+#else
+#define TARGET_TEST(Case, Name) TEST(Case, DISABLED_##Name)
+#endif
+
+
+// The TARGET_TEST_F(Case, Name) macro works just like
+// TEST_F(Case, Name), except that the test is disabled
+// if the platform is not a supported TurboFan target.
+#if V8_TURBOFAN_TARGET
+#define TARGET_TEST_F(Case, Name) TEST_F(Case, Name)
+#else
+#define TARGET_TEST_F(Case, Name) TEST_F(Case, DISABLED_##Name)
+#endif
+
+
+// The TARGET_TEST_P(Case, Name) macro works just like
+// TEST_P(Case, Name), except that the test is disabled
+// if the platform is not a supported TurboFan target.
+#if V8_TURBOFAN_TARGET
+#define TARGET_TEST_P(Case, Name) TEST_P(Case, Name)
+#else
+#define TARGET_TEST_P(Case, Name) TEST_P(Case, DISABLED_##Name)
+#endif
+
+
+// The TARGET_TYPED_TEST(Case, Name) macro works just like
+// TYPED_TEST(Case, Name), except that the test is disabled
+// if the platform is not a supported TurboFan target.
+#if V8_TURBOFAN_TARGET
+#define TARGET_TYPED_TEST(Case, Name) TYPED_TEST(Case, Name)
+#else
+#define TARGET_TYPED_TEST(Case, Name) TYPED_TEST(Case, DISABLED_##Name)
+#endif
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_COMPILER_TEST_UTILS_H_
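
As a hypothetical illustration (the suite and test names below are made up), a test guarded by these wrappers compiles on every platform but is registered under a DISABLED_ name, and therefore skipped, wherever V8_TURBOFAN_TARGET is not set:

  TARGET_TEST(CompilerTestUtilsExample, RunsOnlyOnTurboFanTargets) {
    // This body is only executed on supported TurboFan targets.
    EXPECT_TRUE(true);
  }
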
diff --git a/src/compiler/compiler.gyp b/src/compiler/compiler.gyp
new file mode 100644
index 0000000..ec5ec28
--- /dev/null
+++ b/src/compiler/compiler.gyp
@@ -0,0 +1,60 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'variables': {
+    'v8_code': 1,
+  },
+  'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
+  'targets': [
+    {
+      'target_name': 'compiler-unittests',
+      'type': 'executable',
+      'dependencies': [
+        '../test/test.gyp:run-all-unittests',
+      ],
+      'include_dirs': [
+        '../..',
+      ],
+      'sources': [  ### gcmole(all) ###
+        'change-lowering-unittest.cc',
+        'common-operator-unittest.cc',
+        'compiler-test-utils.h',
+        'graph-reducer-unittest.cc',
+        'graph-unittest.cc',
+        'graph-unittest.h',
+        'instruction-selector-unittest.cc',
+        'instruction-selector-unittest.h',
+        'js-builtin-reducer-unittest.cc',
+        'machine-operator-reducer-unittest.cc',
+        'machine-operator-unittest.cc',
+        'simplified-operator-reducer-unittest.cc',
+        'simplified-operator-unittest.cc',
+        'value-numbering-reducer-unittest.cc',
+      ],
+      'conditions': [
+        ['v8_target_arch=="arm"', {
+          'sources': [  ### gcmole(arch:arm) ###
+            'arm/instruction-selector-arm-unittest.cc',
+          ],
+        }],
+        ['v8_target_arch=="arm64"', {
+          'sources': [  ### gcmole(arch:arm64) ###
+            'arm64/instruction-selector-arm64-unittest.cc',
+          ],
+        }],
+        ['v8_target_arch=="ia32"', {
+          'sources': [  ### gcmole(arch:ia32) ###
+            'ia32/instruction-selector-ia32-unittest.cc',
+          ],
+        }],
+        ['v8_target_arch=="x64"', {
+          'sources': [  ### gcmole(arch:x64) ###
+            'x64/instruction-selector-x64-unittest.cc',
+          ],
+        }],
+      ],
+    },
+  ],
+}
diff --git a/src/compiler/control-builders.cc b/src/compiler/control-builders.cc
new file mode 100644
index 0000000..3b7d05b
--- /dev/null
+++ b/src/compiler/control-builders.cc
@@ -0,0 +1,144 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "control-builders.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+void IfBuilder::If(Node* condition) {
+  builder_->NewBranch(condition);
+  else_environment_ = environment()->CopyForConditional();
+}
+
+
+void IfBuilder::Then() { builder_->NewIfTrue(); }
+
+
+void IfBuilder::Else() {
+  builder_->NewMerge();
+  then_environment_ = environment();
+  set_environment(else_environment_);
+  builder_->NewIfFalse();
+}
+
+
+void IfBuilder::End() {
+  then_environment_->Merge(environment());
+  set_environment(then_environment_);
+}
+
+
+void LoopBuilder::BeginLoop() {
+  builder_->NewLoop();
+  loop_environment_ = environment()->CopyForLoop();
+  continue_environment_ = environment()->CopyAsUnreachable();
+  break_environment_ = environment()->CopyAsUnreachable();
+}
+
+
+void LoopBuilder::Continue() {
+  continue_environment_->Merge(environment());
+  environment()->MarkAsUnreachable();
+}
+
+
+void LoopBuilder::Break() {
+  break_environment_->Merge(environment());
+  environment()->MarkAsUnreachable();
+}
+
+
+void LoopBuilder::EndBody() {
+  continue_environment_->Merge(environment());
+  set_environment(continue_environment_);
+}
+
+
+void LoopBuilder::EndLoop() {
+  loop_environment_->Merge(environment());
+  set_environment(break_environment_);
+}
+
+
+void LoopBuilder::BreakUnless(Node* condition) {
+  IfBuilder control_if(builder_);
+  control_if.If(condition);
+  control_if.Then();
+  control_if.Else();
+  Break();
+  control_if.End();
+}
+
+
+void SwitchBuilder::BeginSwitch() {
+  body_environment_ = environment()->CopyAsUnreachable();
+  label_environment_ = environment()->CopyAsUnreachable();
+  break_environment_ = environment()->CopyAsUnreachable();
+  body_environments_.AddBlock(NULL, case_count(), zone());
+}
+
+
+void SwitchBuilder::BeginLabel(int index, Node* condition) {
+  builder_->NewBranch(condition);
+  label_environment_ = environment()->CopyForConditional();
+  builder_->NewIfTrue();
+  body_environments_[index] = environment();
+}
+
+
+void SwitchBuilder::EndLabel() {
+  set_environment(label_environment_);
+  builder_->NewIfFalse();
+}
+
+
+void SwitchBuilder::DefaultAt(int index) {
+  label_environment_ = environment()->CopyAsUnreachable();
+  body_environments_[index] = environment();
+}
+
+
+void SwitchBuilder::BeginCase(int index) {
+  set_environment(body_environments_[index]);
+  environment()->Merge(body_environment_);
+}
+
+
+void SwitchBuilder::Break() {
+  break_environment_->Merge(environment());
+  environment()->MarkAsUnreachable();
+}
+
+
+void SwitchBuilder::EndCase() { body_environment_ = environment(); }
+
+
+void SwitchBuilder::EndSwitch() {
+  break_environment_->Merge(label_environment_);
+  break_environment_->Merge(environment());
+  set_environment(break_environment_);
+}
+
+
+void BlockBuilder::BeginBlock() {
+  break_environment_ = environment()->CopyAsUnreachable();
+}
+
+
+void BlockBuilder::Break() {
+  break_environment_->Merge(environment());
+  environment()->MarkAsUnreachable();
+}
+
+
+void BlockBuilder::EndBlock() {
+  break_environment_->Merge(environment());
+  set_environment(break_environment_);
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
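
The builders above encode a strict call protocol. A sketch of how an if/else statement would be lowered with IfBuilder, assuming a StructuredGraphBuilder* named builder and a Node* named condition:

  IfBuilder compare_if(builder);
  compare_if.If(condition);  // branch on the condition node
  compare_if.Then();         // current environment becomes the true branch
  // ... emit the then-body here ...
  compare_if.Else();         // switch to the false-branch environment
  // ... emit the else-body here ...
  compare_if.End();          // merge both environments back together
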
diff --git a/src/compiler/control-builders.h b/src/compiler/control-builders.h
new file mode 100644
index 0000000..695282b
--- /dev/null
+++ b/src/compiler/control-builders.h
@@ -0,0 +1,144 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CONTROL_BUILDERS_H_
+#define V8_COMPILER_CONTROL_BUILDERS_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/graph-builder.h"
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+// Base class for all control builders. Also provides a common interface for
+// control builders to handle 'break' and 'continue' statements when they are
+// used to model breakable statements.
+class ControlBuilder {
+ public:
+  explicit ControlBuilder(StructuredGraphBuilder* builder)
+      : builder_(builder) {}
+  virtual ~ControlBuilder() {}
+
+  // Interface for break and continue.
+  virtual void Break() { UNREACHABLE(); }
+  virtual void Continue() { UNREACHABLE(); }
+
+ protected:
+  typedef StructuredGraphBuilder Builder;
+  typedef StructuredGraphBuilder::Environment Environment;
+
+  Zone* zone() const { return builder_->zone(); }
+  Environment* environment() { return builder_->environment(); }
+  void set_environment(Environment* env) { builder_->set_environment(env); }
+
+  Builder* builder_;
+};
+
+
+// Tracks control flow for a conditional statement.
+class IfBuilder : public ControlBuilder {
+ public:
+  explicit IfBuilder(StructuredGraphBuilder* builder)
+      : ControlBuilder(builder),
+        then_environment_(NULL),
+        else_environment_(NULL) {}
+
+  // Primitive control commands.
+  void If(Node* condition);
+  void Then();
+  void Else();
+  void End();
+
+ private:
+  Environment* then_environment_;  // Environment after the 'then' body.
+  Environment* else_environment_;  // Environment for the 'else' body.
+};
+
+
+// Tracks control flow for an iteration statement.
+class LoopBuilder : public ControlBuilder {
+ public:
+  explicit LoopBuilder(StructuredGraphBuilder* builder)
+      : ControlBuilder(builder),
+        loop_environment_(NULL),
+        continue_environment_(NULL),
+        break_environment_(NULL) {}
+
+  // Primitive control commands.
+  void BeginLoop();
+  void EndBody();
+  void EndLoop();
+
+  // Primitive support for break and continue.
+  virtual void Continue();
+  virtual void Break();
+
+  // Compound control command for conditional break.
+  void BreakUnless(Node* condition);
+
+ private:
+  Environment* loop_environment_;      // Environment of the loop header.
+  Environment* continue_environment_;  // Environment after the loop body.
+  Environment* break_environment_;     // Environment after the loop exits.
+};
+
+
+// Tracks control flow for a switch statement.
+class SwitchBuilder : public ControlBuilder {
+ public:
+  explicit SwitchBuilder(StructuredGraphBuilder* builder, int case_count)
+      : ControlBuilder(builder),
+        body_environment_(NULL),
+        label_environment_(NULL),
+        break_environment_(NULL),
+        body_environments_(case_count, zone()) {}
+
+  // Primitive control commands.
+  void BeginSwitch();
+  void BeginLabel(int index, Node* condition);
+  void EndLabel();
+  void DefaultAt(int index);
+  void BeginCase(int index);
+  void EndCase();
+  void EndSwitch();
+
+  // Primitive support for break.
+  virtual void Break();
+
+  // The number of cases within a switch is statically known.
+  int case_count() const { return body_environments_.capacity(); }
+
+ private:
+  Environment* body_environment_;   // Environment after last case body.
+  Environment* label_environment_;  // Environment for next label condition.
+  Environment* break_environment_;  // Environment after the switch exits.
+  ZoneList<Environment*> body_environments_;
+};
+
+
+// Tracks control flow for a block statement.
+class BlockBuilder : public ControlBuilder {
+ public:
+  explicit BlockBuilder(StructuredGraphBuilder* builder)
+      : ControlBuilder(builder), break_environment_(NULL) {}
+
+  // Primitive control commands.
+  void BeginBlock();
+  void EndBlock();
+
+  // Primitive support for break.
+  virtual void Break();
+
+ private:
+  Environment* break_environment_;  // Environment after the block exits.
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_CONTROL_BUILDERS_H_
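
For comparison with the IfBuilder sketch above, the LoopBuilder protocol for a simple while loop, again assuming a StructuredGraphBuilder* named builder and a Node* named condition:

  LoopBuilder while_loop(builder);
  while_loop.BeginLoop();
  while_loop.BreakUnless(condition);  // leave the loop once condition is false
  // ... emit the loop body; Break() and Continue() are legal here ...
  while_loop.EndBody();               // merge the continue environment
  while_loop.EndLoop();               // resume in the break environment
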
diff --git a/src/compiler/frame.h b/src/compiler/frame.h
new file mode 100644
index 0000000..afcbc37
--- /dev/null
+++ b/src/compiler/frame.h
@@ -0,0 +1,104 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_FRAME_H_
+#define V8_COMPILER_FRAME_H_
+
+#include "src/v8.h"
+
+#include "src/data-flow.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Collects the spill slot requirements and the allocated general and double
+// registers for a compiled function. Frames are usually populated by the
+// register allocator and are used by Linkage to generate code for the prologue
+// and epilogue to compiled code.
+class Frame {
+ public:
+  Frame()
+      : register_save_area_size_(0),
+        spill_slot_count_(0),
+        double_spill_slot_count_(0),
+        allocated_registers_(NULL),
+        allocated_double_registers_(NULL) {}
+
+  inline int GetSpillSlotCount() { return spill_slot_count_; }
+  inline int GetDoubleSpillSlotCount() { return double_spill_slot_count_; }
+
+  void SetAllocatedRegisters(BitVector* regs) {
+    DCHECK(allocated_registers_ == NULL);
+    allocated_registers_ = regs;
+  }
+
+  void SetAllocatedDoubleRegisters(BitVector* regs) {
+    DCHECK(allocated_double_registers_ == NULL);
+    allocated_double_registers_ = regs;
+  }
+
+  bool DidAllocateDoubleRegisters() {
+    return !allocated_double_registers_->IsEmpty();
+  }
+
+  void SetRegisterSaveAreaSize(int size) {
+    DCHECK(IsAligned(size, kPointerSize));
+    register_save_area_size_ = size;
+  }
+
+  int GetRegisterSaveAreaSize() { return register_save_area_size_; }
+
+  int AllocateSpillSlot(bool is_double) {
+    // If 32-bit, skip one if the new slot is a double.
+    if (is_double) {
+      if (kDoubleSize > kPointerSize) {
+        DCHECK(kDoubleSize == kPointerSize * 2);
+        spill_slot_count_++;
+        spill_slot_count_ |= 1;
+      }
+      double_spill_slot_count_++;
+    }
+    return spill_slot_count_++;
+  }
+
+ private:
+  int register_save_area_size_;
+  int spill_slot_count_;
+  int double_spill_slot_count_;
+  BitVector* allocated_registers_;
+  BitVector* allocated_double_registers_;
+};
+
+
+// Represents an offset from either the stack pointer or frame pointer.
+class FrameOffset {
+ public:
+  inline bool from_stack_pointer() { return (offset_ & 1) == kFromSp; }
+  inline bool from_frame_pointer() { return (offset_ & 1) == kFromFp; }
+  inline int offset() { return offset_ & ~1; }
+
+  inline static FrameOffset FromStackPointer(int offset) {
+    DCHECK((offset & 1) == 0);
+    return FrameOffset(offset | kFromSp);
+  }
+
+  inline static FrameOffset FromFramePointer(int offset) {
+    DCHECK((offset & 1) == 0);
+    return FrameOffset(offset | kFromFp);
+  }
+
+ private:
+  explicit FrameOffset(int offset) : offset_(offset) {}
+
+  int offset_;  // Encodes SP or FP in the low order bit.
+
+  static const int kFromSp = 1;
+  static const int kFromFp = 0;
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_FRAME_H_
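
Two small worked examples of the arithmetic above: AllocateSpillSlot only forces an odd index for doubles when kDoubleSize exceeds kPointerSize (i.e. on 32-bit targets), and FrameOffset packs the SP/FP choice into the low bit of an even byte offset:

  Frame frame;
  int s0 = frame.AllocateSpillSlot(false);  // returns 0
  int d0 = frame.AllocateSpillSlot(true);   // returns 3 on 32-bit (odd index)
  int s1 = frame.AllocateSpillSlot(false);  // returns 4 on 32-bit
  // On a 64-bit target no skipping happens and the calls return 0, 1 and 2.

  FrameOffset off = FrameOffset::FromFramePointer(8);
  // off.from_frame_pointer() is true, off.offset() is 8; the offset must be
  // even because the low bit encodes whether it is SP- or FP-relative.
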
diff --git a/src/compiler/gap-resolver.cc b/src/compiler/gap-resolver.cc
new file mode 100644
index 0000000..f369607
--- /dev/null
+++ b/src/compiler/gap-resolver.cc
@@ -0,0 +1,136 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/gap-resolver.h"
+
+#include <algorithm>
+#include <functional>
+#include <set>
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef ZoneList<MoveOperands>::iterator op_iterator;
+
+#ifdef ENABLE_SLOW_DCHECKS
+// TODO(svenpanne) Brush up InstructionOperand with comparison?
+struct InstructionOperandComparator {
+  bool operator()(const InstructionOperand* x,
+                  const InstructionOperand* y) const {
+    return (x->kind() < y->kind()) ||
+           (x->kind() == y->kind() && x->index() < y->index());
+  }
+};
+#endif
+
+// No operand should be the destination for more than one move.
+static void VerifyMovesAreInjective(ZoneList<MoveOperands>* moves) {
+#ifdef ENABLE_SLOW_DCHECKS
+  std::set<InstructionOperand*, InstructionOperandComparator> seen;
+  for (op_iterator i = moves->begin(); i != moves->end(); ++i) {
+    SLOW_DCHECK(seen.find(i->destination()) == seen.end());
+    seen.insert(i->destination());
+  }
+#endif
+}
+
+
+void GapResolver::Resolve(ParallelMove* parallel_move) const {
+  ZoneList<MoveOperands>* moves = parallel_move->move_operands();
+  // TODO(svenpanne) Use the member version of remove_if when we use real lists.
+  op_iterator end =
+      std::remove_if(moves->begin(), moves->end(),
+                     std::mem_fun_ref(&MoveOperands::IsRedundant));
+  moves->Rewind(static_cast<int>(end - moves->begin()));
+
+  VerifyMovesAreInjective(moves);
+
+  for (op_iterator move = moves->begin(); move != moves->end(); ++move) {
+    if (!move->IsEliminated()) PerformMove(moves, &*move);
+  }
+}
+
+
+void GapResolver::PerformMove(ZoneList<MoveOperands>* moves,
+                              MoveOperands* move) const {
+  // Each call to this function performs a move and deletes it from the move
+  // graph.  We first recursively perform any move blocking this one.  We mark a
+  // move as "pending" on entry to PerformMove in order to detect cycles in the
+  // move graph.  We use operand swaps to resolve cycles, which means that a
+  // call to PerformMove could change any source operand in the move graph.
+  DCHECK(!move->IsPending());
+  DCHECK(!move->IsRedundant());
+
+  // Clear this move's destination to indicate a pending move.  The actual
+  // destination is saved on the side.
+  DCHECK_NOT_NULL(move->source());  // Or else it will look eliminated.
+  InstructionOperand* destination = move->destination();
+  move->set_destination(NULL);
+
+  // Perform a depth-first traversal of the move graph to resolve dependencies.
+  // Any unperformed, unpending move with a source the same as this one's
+  // destination blocks this one, so we recursively perform all such moves.
+  for (op_iterator other = moves->begin(); other != moves->end(); ++other) {
+    if (other->Blocks(destination) && !other->IsPending()) {
+      // Though PerformMove can change any source operand in the move graph,
+      // this call cannot create a blocking move via a swap (this loop does not
+      // miss any).  Assume there is a non-blocking move with source A and this
+      // move is blocked on source B and there is a swap of A and B.  Then A and
+      // B must be involved in the same cycle (or they would not be swapped).
+      // Since this move's destination is B and there is only a single incoming
+      // edge to an operand, this move must also be involved in the same cycle.
+      // In that case, the blocking move will be created but will be "pending"
+      // when we return from PerformMove.
+      PerformMove(moves, other);
+    }
+  }
+
+  // We are about to resolve this move and don't need it marked as pending, so
+  // restore its destination.
+  move->set_destination(destination);
+
+  // This move's source may have changed due to swaps to resolve cycles and so
+  // it may now be the last move in the cycle.  If so remove it.
+  InstructionOperand* source = move->source();
+  if (source->Equals(destination)) {
+    move->Eliminate();
+    return;
+  }
+
+  // The move may be blocked on a (at most one) pending move, in which case we
+  // have a cycle.  Search for such a blocking move and perform a swap to
+  // resolve it.
+  op_iterator blocker = std::find_if(
+      moves->begin(), moves->end(),
+      std::bind2nd(std::mem_fun_ref(&MoveOperands::Blocks), destination));
+  if (blocker == moves->end()) {
+    // The easy case: This move is not blocked.
+    assembler_->AssembleMove(source, destination);
+    move->Eliminate();
+    return;
+  }
+
+  DCHECK(blocker->IsPending());
+  // Ensure source is a register or both are stack slots, to limit swap cases.
+  if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+    std::swap(source, destination);
+  }
+  assembler_->AssembleSwap(source, destination);
+  move->Eliminate();
+
+  // Any unperformed (including pending) move with a source of either this
+  // move's source or destination needs to have its source changed to
+  // reflect the state of affairs after the swap.
+  for (op_iterator other = moves->begin(); other != moves->end(); ++other) {
+    if (other->Blocks(source)) {
+      other->set_source(destination);
+    } else if (other->Blocks(destination)) {
+      other->set_source(source);
+    }
+  }
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
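
A worked example of the cycle case, assuming register operands A and B and the parallel move {A -> B, B -> A}: PerformMove(A -> B) clears its destination (marking it pending) and recurses into B -> A, which blocks it. PerformMove(B -> A) finds its only blocker, A -> B, already pending, so it emits AssembleSwap(B, A), eliminates B -> A, and the fix-up loop rewrites the source of A -> B to B. Back in the outer call the source of A -> B now equals its destination, so it is eliminated as well; the whole cycle costs exactly one swap and no moves.
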
diff --git a/src/compiler/gap-resolver.h b/src/compiler/gap-resolver.h
new file mode 100644
index 0000000..98aaab2
--- /dev/null
+++ b/src/compiler/gap-resolver.h
@@ -0,0 +1,46 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GAP_RESOLVER_H_
+#define V8_COMPILER_GAP_RESOLVER_H_
+
+#include "src/compiler/instruction.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class GapResolver FINAL {
+ public:
+  // Interface used by the gap resolver to emit moves and swaps.
+  class Assembler {
+   public:
+    virtual ~Assembler() {}
+
+    // Assemble move.
+    virtual void AssembleMove(InstructionOperand* source,
+                              InstructionOperand* destination) = 0;
+    // Assemble swap.
+    virtual void AssembleSwap(InstructionOperand* source,
+                              InstructionOperand* destination) = 0;
+  };
+
+  explicit GapResolver(Assembler* assembler) : assembler_(assembler) {}
+
+  // Resolve a set of parallel moves, emitting assembler instructions.
+  void Resolve(ParallelMove* parallel_move) const;
+
+ private:
+  // Perform the given move, possibly requiring other moves to satisfy
+  // dependencies.
+  void PerformMove(ZoneList<MoveOperands>* moves, MoveOperands* move) const;
+
+  // Assembler used to emit moves and save registers.
+  Assembler* const assembler_;
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_GAP_RESOLVER_H_
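
A sketch of a client implementing the Assembler interface. This recording variant is hypothetical (the real implementations live in the per-architecture code generators) and assumes <utility> and <vector> are available:

  class RecordingGapAssembler : public GapResolver::Assembler {
   public:
    virtual void AssembleMove(InstructionOperand* source,
                              InstructionOperand* destination) OVERRIDE {
      moves_.push_back(std::make_pair(source, destination));
    }
    virtual void AssembleSwap(InstructionOperand* source,
                              InstructionOperand* destination) OVERRIDE {
      swaps_.push_back(std::make_pair(source, destination));
    }

   private:
    std::vector<std::pair<InstructionOperand*, InstructionOperand*> > moves_;
    std::vector<std::pair<InstructionOperand*, InstructionOperand*> > swaps_;
  };

  // Usage sketch: RecordingGapAssembler assembler;
  //               GapResolver(&assembler).Resolve(parallel_move);
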
diff --git a/src/compiler/generic-algorithm-inl.h b/src/compiler/generic-algorithm-inl.h
new file mode 100644
index 0000000..a25131f
--- /dev/null
+++ b/src/compiler/generic-algorithm-inl.h
@@ -0,0 +1,48 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_ALGORITHM_INL_H_
+#define V8_COMPILER_GENERIC_ALGORITHM_INL_H_
+
+#include <vector>
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <class N>
+class NodeInputIterationTraits {
+ public:
+  typedef N Node;
+  typedef typename N::Inputs::iterator Iterator;
+
+  static Iterator begin(Node* node) { return node->inputs().begin(); }
+  static Iterator end(Node* node) { return node->inputs().end(); }
+  static int max_id(GenericGraphBase* graph) { return graph->NodeCount(); }
+  static Node* to(Iterator iterator) { return *iterator; }
+  static Node* from(Iterator iterator) { return iterator.edge().from(); }
+};
+
+template <class N>
+class NodeUseIterationTraits {
+ public:
+  typedef N Node;
+  typedef typename N::Uses::iterator Iterator;
+
+  static Iterator begin(Node* node) { return node->uses().begin(); }
+  static Iterator end(Node* node) { return node->uses().end(); }
+  static int max_id(GenericGraphBase* graph) { return graph->NodeCount(); }
+  static Node* to(Iterator iterator) { return *iterator; }
+  static Node* from(Iterator iterator) { return iterator.edge().to(); }
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_GENERIC_ALGORITHM_INL_H_
diff --git a/src/compiler/generic-algorithm.h b/src/compiler/generic-algorithm.h
new file mode 100644
index 0000000..cd4984f
--- /dev/null
+++ b/src/compiler/generic-algorithm.h
@@ -0,0 +1,132 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_ALGORITHM_H_
+#define V8_COMPILER_GENERIC_ALGORITHM_H_
+
+#include <stack>
+
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// GenericGraphVisit allows visitation of graphs of nodes and edges in pre- and
+// post-order. Visitation uses an explicitly allocated stack rather than the
+// execution stack to avoid stack overflow. Although GenericGraphVisit is
+// primarily intended to traverse networks of nodes through their
+// dependencies and uses, it also can be used to visit any graph-like network
+// by specifying custom traits.
+class GenericGraphVisit {
+ public:
+  enum Control {
+    CONTINUE = 0x0,  // Continue depth-first normally
+    SKIP = 0x1,      // Skip this node and its successors
+    REENTER = 0x2,   // Allow reentering this node
+    DEFER = SKIP | REENTER
+  };
+
+  // struct Visitor {
+  //   Control Pre(Traits::Node* current);
+  //   Control Post(Traits::Node* current);
+  //   void PreEdge(Traits::Node* from, int index, Traits::Node* to);
+  //   void PostEdge(Traits::Node* from, int index, Traits::Node* to);
+  // }
+  template <class Visitor, class Traits, class RootIterator>
+  static void Visit(GenericGraphBase* graph, Zone* zone,
+                    RootIterator root_begin, RootIterator root_end,
+                    Visitor* visitor) {
+    typedef typename Traits::Node Node;
+    typedef typename Traits::Iterator Iterator;
+    typedef std::pair<Iterator, Iterator> NodeState;
+    typedef std::stack<NodeState, ZoneDeque<NodeState> > NodeStateStack;
+    NodeStateStack stack((ZoneDeque<NodeState>(zone)));
+    BoolVector visited(Traits::max_id(graph), false, zone);
+    Node* current = *root_begin;
+    while (true) {
+      DCHECK(current != NULL);
+      const int id = current->id();
+      DCHECK(id >= 0);
+      DCHECK(id < Traits::max_id(graph));  // Must be a valid id.
+      bool visit = !GetVisited(&visited, id);
+      if (visit) {
+        Control control = visitor->Pre(current);
+        visit = !IsSkip(control);
+        if (!IsReenter(control)) SetVisited(&visited, id, true);
+      }
+      Iterator begin(visit ? Traits::begin(current) : Traits::end(current));
+      Iterator end(Traits::end(current));
+      stack.push(NodeState(begin, end));
+      Node* post_order_node = current;
+      while (true) {
+        NodeState top = stack.top();
+        if (top.first == top.second) {
+          if (visit) {
+            Control control = visitor->Post(post_order_node);
+            DCHECK(!IsSkip(control));
+            SetVisited(&visited, post_order_node->id(), !IsReenter(control));
+          }
+          stack.pop();
+          if (stack.empty()) {
+            if (++root_begin == root_end) return;
+            current = *root_begin;
+            break;
+          }
+          post_order_node = Traits::from(stack.top().first);
+          visit = true;
+        } else {
+          visitor->PreEdge(Traits::from(top.first), top.first.edge().index(),
+                           Traits::to(top.first));
+          current = Traits::to(top.first);
+          if (!GetVisited(&visited, current->id())) break;
+        }
+        top = stack.top();
+        visitor->PostEdge(Traits::from(top.first), top.first.edge().index(),
+                          Traits::to(top.first));
+        ++stack.top().first;
+      }
+    }
+  }
+
+  template <class Visitor, class Traits>
+  static void Visit(GenericGraphBase* graph, Zone* zone,
+                    typename Traits::Node* current, Visitor* visitor) {
+    typename Traits::Node* array[] = {current};
+    Visit<Visitor, Traits>(graph, zone, &array[0], &array[1], visitor);
+  }
+
+  template <class B, class S>
+  struct NullNodeVisitor {
+    Control Pre(GenericNode<B, S>* node) { return CONTINUE; }
+    Control Post(GenericNode<B, S>* node) { return CONTINUE; }
+    void PreEdge(GenericNode<B, S>* from, int index, GenericNode<B, S>* to) {}
+    void PostEdge(GenericNode<B, S>* from, int index, GenericNode<B, S>* to) {}
+  };
+
+ private:
+  static bool IsSkip(Control c) { return c & SKIP; }
+  static bool IsReenter(Control c) { return c & REENTER; }
+
+  // TODO(turbofan): resizing could be optionally templatized away.
+  static void SetVisited(BoolVector* visited, int id, bool value) {
+    if (id >= static_cast<int>(visited->size())) {
+      // Resize and set all values to unvisited.
+      visited->resize((3 * id) / 2, false);
+    }
+    visited->at(id) = value;
+  }
+
+  static bool GetVisited(BoolVector* visited, int id) {
+    if (id >= static_cast<int>(visited->size())) return false;
+    return visited->at(id);
+  }
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_GENERIC_ALGORITHM_H_
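
A sketch of a visitor written against this interface: it counts the nodes reachable from a graph's end node through input edges. It assumes the concrete Node class that specializes GenericNode, the NodeInputIterationTraits helper from generic-algorithm-inl.h, and live graph and zone pointers:

  struct CountingVisitor {
    CountingVisitor() : count(0) {}
    GenericGraphVisit::Control Pre(Node* node) {
      ++count;
      return GenericGraphVisit::CONTINUE;
    }
    GenericGraphVisit::Control Post(Node* node) {
      return GenericGraphVisit::CONTINUE;
    }
    void PreEdge(Node* from, int index, Node* to) {}
    void PostEdge(Node* from, int index, Node* to) {}
    int count;
  };

  CountingVisitor visitor;
  GenericGraphVisit::Visit<CountingVisitor, NodeInputIterationTraits<Node> >(
      graph, zone, graph->end(), &visitor);
  // visitor.count now holds the number of nodes reachable from end().
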
diff --git a/src/compiler/generic-graph.h b/src/compiler/generic-graph.h
new file mode 100644
index 0000000..a555456
--- /dev/null
+++ b/src/compiler/generic-graph.h
@@ -0,0 +1,53 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_GRAPH_H_
+#define V8_COMPILER_GENERIC_GRAPH_H_
+
+#include "src/compiler/generic-node.h"
+
+namespace v8 {
+namespace internal {
+
+class Zone;
+
+namespace compiler {
+
+class GenericGraphBase : public ZoneObject {
+ public:
+  explicit GenericGraphBase(Zone* zone) : zone_(zone), next_node_id_(0) {}
+
+  Zone* zone() const { return zone_; }
+
+  NodeId NextNodeID() { return next_node_id_++; }
+  NodeId NodeCount() const { return next_node_id_; }
+
+ private:
+  Zone* zone_;
+  NodeId next_node_id_;
+};
+
+template <class V>
+class GenericGraph : public GenericGraphBase {
+ public:
+  explicit GenericGraph(Zone* zone)
+      : GenericGraphBase(zone), start_(NULL), end_(NULL) {}
+
+  V* start() { return start_; }
+  V* end() { return end_; }
+
+  void SetStart(V* start) { start_ = start; }
+  void SetEnd(V* end) { end_ = end; }
+
+ private:
+  V* start_;
+  V* end_;
+
+  DISALLOW_COPY_AND_ASSIGN(GenericGraph);
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_GENERIC_GRAPH_H_
diff --git a/src/compiler/generic-node-inl.h b/src/compiler/generic-node-inl.h
new file mode 100644
index 0000000..c2dc24e
--- /dev/null
+++ b/src/compiler/generic-node-inl.h
@@ -0,0 +1,256 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_NODE_INL_H_
+#define V8_COMPILER_GENERIC_NODE_INL_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <class B, class S>
+GenericNode<B, S>::GenericNode(GenericGraphBase* graph, int input_count)
+    : BaseClass(graph->zone()),
+      input_count_(input_count),
+      has_appendable_inputs_(false),
+      use_count_(0),
+      first_use_(NULL),
+      last_use_(NULL) {
+  inputs_.static_ = reinterpret_cast<Input*>(this + 1), AssignUniqueID(graph);
+}
+
+template <class B, class S>
+inline void GenericNode<B, S>::AssignUniqueID(GenericGraphBase* graph) {
+  id_ = graph->NextNodeID();
+}
+
+template <class B, class S>
+inline typename GenericNode<B, S>::Inputs::iterator
+GenericNode<B, S>::Inputs::begin() {
+  return typename GenericNode<B, S>::Inputs::iterator(this->node_, 0);
+}
+
+template <class B, class S>
+inline typename GenericNode<B, S>::Inputs::iterator
+GenericNode<B, S>::Inputs::end() {
+  return typename GenericNode<B, S>::Inputs::iterator(
+      this->node_, this->node_->InputCount());
+}
+
+template <class B, class S>
+inline typename GenericNode<B, S>::Uses::iterator
+GenericNode<B, S>::Uses::begin() {
+  return typename GenericNode<B, S>::Uses::iterator(this->node_);
+}
+
+template <class B, class S>
+inline typename GenericNode<B, S>::Uses::iterator
+GenericNode<B, S>::Uses::end() {
+  return typename GenericNode<B, S>::Uses::iterator();
+}
+
+template <class B, class S>
+void GenericNode<B, S>::ReplaceUses(GenericNode* replace_to) {
+  for (Use* use = first_use_; use != NULL; use = use->next) {
+    use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
+  }
+  if (replace_to->last_use_ == NULL) {
+    DCHECK_EQ(NULL, replace_to->first_use_);
+    replace_to->first_use_ = first_use_;
+    replace_to->last_use_ = last_use_;
+  } else if (first_use_ != NULL) {
+    DCHECK_NE(NULL, replace_to->first_use_);
+    replace_to->last_use_->next = first_use_;
+    first_use_->prev = replace_to->last_use_;
+    replace_to->last_use_ = last_use_;
+  }
+  replace_to->use_count_ += use_count_;
+  use_count_ = 0;
+  first_use_ = NULL;
+  last_use_ = NULL;
+}
+
+template <class B, class S>
+template <class UnaryPredicate>
+void GenericNode<B, S>::ReplaceUsesIf(UnaryPredicate pred,
+                                      GenericNode* replace_to) {
+  for (Use* use = first_use_; use != NULL;) {
+    Use* next = use->next;
+    if (pred(static_cast<S*>(use->from))) {
+      RemoveUse(use);
+      replace_to->AppendUse(use);
+      use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
+    }
+    use = next;
+  }
+}
+
+template <class B, class S>
+void GenericNode<B, S>::RemoveAllInputs() {
+  for (typename Inputs::iterator iter(inputs().begin()); iter != inputs().end();
+       ++iter) {
+    iter.GetInput()->Update(NULL);
+  }
+}
+
+template <class B, class S>
+void GenericNode<B, S>::TrimInputCount(int new_input_count) {
+  if (new_input_count == input_count_) return;  // Nothing to do.
+
+  DCHECK(new_input_count < input_count_);
+
+  // Update inline inputs.
+  for (int i = new_input_count; i < input_count_; i++) {
+    typename GenericNode<B, S>::Input* input = GetInputRecordPtr(i);
+    input->Update(NULL);
+  }
+  input_count_ = new_input_count;
+}
+
+template <class B, class S>
+void GenericNode<B, S>::ReplaceInput(int index, GenericNode<B, S>* new_to) {
+  Input* input = GetInputRecordPtr(index);
+  input->Update(new_to);
+}
+
+template <class B, class S>
+void GenericNode<B, S>::Input::Update(GenericNode<B, S>* new_to) {
+  GenericNode* old_to = this->to;
+  if (new_to == old_to) return;  // Nothing to do.
+  // Snip out the use from where it used to be
+  if (old_to != NULL) {
+    old_to->RemoveUse(use);
+  }
+  to = new_to;
+  // And put it into the new node's use list.
+  if (new_to != NULL) {
+    new_to->AppendUse(use);
+  } else {
+    use->next = NULL;
+    use->prev = NULL;
+  }
+}
+
+template <class B, class S>
+void GenericNode<B, S>::EnsureAppendableInputs(Zone* zone) {
+  if (!has_appendable_inputs_) {
+    void* deque_buffer = zone->New(sizeof(InputDeque));
+    InputDeque* deque = new (deque_buffer) InputDeque(zone);
+    for (int i = 0; i < input_count_; ++i) {
+      deque->push_back(inputs_.static_[i]);
+    }
+    inputs_.appendable_ = deque;
+    has_appendable_inputs_ = true;
+  }
+}
+
+template <class B, class S>
+void GenericNode<B, S>::AppendInput(Zone* zone, GenericNode<B, S>* to_append) {
+  EnsureAppendableInputs(zone);
+  Use* new_use = new (zone) Use;
+  Input new_input;
+  new_input.to = to_append;
+  new_input.use = new_use;
+  inputs_.appendable_->push_back(new_input);
+  new_use->input_index = input_count_;
+  new_use->from = this;
+  to_append->AppendUse(new_use);
+  input_count_++;
+}
+
+template <class B, class S>
+void GenericNode<B, S>::InsertInput(Zone* zone, int index,
+                                    GenericNode<B, S>* to_insert) {
+  DCHECK(index >= 0 && index < InputCount());
+  // TODO(turbofan): Optimize this implementation!
+  AppendInput(zone, InputAt(InputCount() - 1));
+  for (int i = InputCount() - 1; i > index; --i) {
+    ReplaceInput(i, InputAt(i - 1));
+  }
+  ReplaceInput(index, to_insert);
+}
+
+template <class B, class S>
+void GenericNode<B, S>::RemoveInput(int index) {
+  DCHECK(index >= 0 && index < InputCount());
+  // TODO(turbofan): Optimize this implementation!
+  for (; index < InputCount() - 1; ++index) {
+    ReplaceInput(index, InputAt(index + 1));
+  }
+  TrimInputCount(InputCount() - 1);
+}
+
+template <class B, class S>
+void GenericNode<B, S>::AppendUse(Use* use) {
+  use->next = NULL;
+  use->prev = last_use_;
+  if (last_use_ == NULL) {
+    first_use_ = use;
+  } else {
+    last_use_->next = use;
+  }
+  last_use_ = use;
+  ++use_count_;
+}
+
+template <class B, class S>
+void GenericNode<B, S>::RemoveUse(Use* use) {
+  if (last_use_ == use) {
+    last_use_ = use->prev;
+  }
+  if (use->prev != NULL) {
+    use->prev->next = use->next;
+  } else {
+    first_use_ = use->next;
+  }
+  if (use->next != NULL) {
+    use->next->prev = use->prev;
+  }
+  --use_count_;
+}
+
+template <class B, class S>
+inline bool GenericNode<B, S>::OwnedBy(GenericNode* owner) const {
+  return first_use_ != NULL && first_use_->from == owner &&
+         first_use_->next == NULL;
+}
+
+template <class B, class S>
+S* GenericNode<B, S>::New(GenericGraphBase* graph, int input_count,
+                          S** inputs) {
+  size_t node_size = sizeof(GenericNode);
+  size_t inputs_size = input_count * sizeof(Input);
+  size_t uses_size = input_count * sizeof(Use);
+  int size = static_cast<int>(node_size + inputs_size + uses_size);
+  Zone* zone = graph->zone();
+  void* buffer = zone->New(size);
+  S* result = new (buffer) S(graph, input_count);
+  Input* input =
+      reinterpret_cast<Input*>(reinterpret_cast<char*>(buffer) + node_size);
+  Use* use =
+      reinterpret_cast<Use*>(reinterpret_cast<char*>(input) + inputs_size);
+
+  for (int current = 0; current < input_count; ++current) {
+    GenericNode* to = *inputs++;
+    input->to = to;
+    input->use = use;
+    use->input_index = current;
+    use->from = result;
+    to->AppendUse(use);
+    ++use;
+    ++input;
+  }
+  return result;
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_GENERIC_NODE_INL_H_
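
For reference, the single zone allocation performed by New() above lays out a node with two inputs roughly as follows; the constructor points inputs_.static_ at this + 1, and each Use record is linked into the use list of the corresponding input node:

  [ GenericNode fields ][ Input 0 ][ Input 1 ][ Use 0 ][ Use 1 ]
    ^ buffer              ^ this + 1 (inputs_.static_)

AppendInput() later copies the static Input records into a ZoneDeque and flips has_appendable_inputs_, so appending inputs never needs to grow this block in place.
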
diff --git a/src/compiler/generic-node.h b/src/compiler/generic-node.h
new file mode 100644
index 0000000..3dc324d
--- /dev/null
+++ b/src/compiler/generic-node.h
@@ -0,0 +1,272 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_NODE_H_
+#define V8_COMPILER_GENERIC_NODE_H_
+
+#include "src/v8.h"
+
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class GenericGraphBase;
+
+typedef int NodeId;
+
+// A GenericNode<> is the basic primitive of graphs. GenericNodes are chained
+// together by input/use chains, but by default otherwise contain only an
+// identifying number, which specific applications of graphs and nodes can use
+// to index auxiliary out-of-line data, especially transient data.
+// Specializations of the templatized GenericNode<> class must provide a base
+// class B that contains all of the members to be made available in each
+// specialized Node instance. GenericNode uses a mixin template pattern to
+// ensure that common accessors and methods expect the derived class S type
+// rather than the GenericNode<B, S> type.
+template <class B, class S>
+class GenericNode : public B {
+ public:
+  typedef B BaseClass;
+  typedef S DerivedClass;
+
+  inline NodeId id() const { return id_; }
+
+  int InputCount() const { return input_count_; }
+  S* InputAt(int index) const {
+    return static_cast<S*>(GetInputRecordPtr(index)->to);
+  }
+  inline void ReplaceInput(int index, GenericNode* new_input);
+  inline void AppendInput(Zone* zone, GenericNode* new_input);
+  inline void InsertInput(Zone* zone, int index, GenericNode* new_input);
+  inline void RemoveInput(int index);
+
+  int UseCount() { return use_count_; }
+  S* UseAt(int index) {
+    DCHECK(index < use_count_);
+    Use* current = first_use_;
+    while (index-- != 0) {
+      current = current->next;
+    }
+    return static_cast<S*>(current->from);
+  }
+  inline void ReplaceUses(GenericNode* replace_to);
+  template <class UnaryPredicate>
+  inline void ReplaceUsesIf(UnaryPredicate pred, GenericNode* replace_to);
+  inline void RemoveAllInputs();
+
+  inline void TrimInputCount(int input_count);
+
+  class Inputs {
+   public:
+    class iterator;
+    iterator begin();
+    iterator end();
+
+    explicit Inputs(GenericNode* node) : node_(node) {}
+
+   private:
+    GenericNode* node_;
+  };
+
+  Inputs inputs() { return Inputs(this); }
+
+  class Uses {
+   public:
+    class iterator;
+    iterator begin();
+    iterator end();
+    bool empty() { return begin() == end(); }
+
+    explicit Uses(GenericNode* node) : node_(node) {}
+
+   private:
+    GenericNode* node_;
+  };
+
+  Uses uses() { return Uses(this); }
+
+  class Edge;
+
+  bool OwnedBy(GenericNode* owner) const;
+
+  static S* New(GenericGraphBase* graph, int input_count, S** inputs);
+
+ protected:
+  friend class GenericGraphBase;
+
+  class Use : public ZoneObject {
+   public:
+    GenericNode* from;
+    Use* next;
+    Use* prev;
+    int input_index;
+  };
+
+  class Input {
+   public:
+    GenericNode* to;
+    Use* use;
+
+    void Update(GenericNode* new_to);
+  };
+
+  void EnsureAppendableInputs(Zone* zone);
+
+  Input* GetInputRecordPtr(int index) const {
+    if (has_appendable_inputs_) {
+      return &((*inputs_.appendable_)[index]);
+    } else {
+      return inputs_.static_ + index;
+    }
+  }
+
+  inline void AppendUse(Use* use);
+  inline void RemoveUse(Use* use);
+
+  void* operator new(size_t, void* location) { return location; }
+
+  GenericNode(GenericGraphBase* graph, int input_count);
+
+ private:
+  void AssignUniqueID(GenericGraphBase* graph);
+
+  typedef ZoneDeque<Input> InputDeque;
+
+  NodeId id_;
+  int input_count_ : 31;
+  bool has_appendable_inputs_ : 1;
+  union {
+    // When a node is initially allocated, it uses a static buffer to hold its
+    // inputs under the assumption that the number of inputs will not increase.
+    // When the first input is appended, the static buffer is converted into a
+    // deque to allow for space-efficient growing.
+    Input* static_;
+    InputDeque* appendable_;
+  } inputs_;
+  int use_count_;
+  Use* first_use_;
+  Use* last_use_;
+
+  DISALLOW_COPY_AND_ASSIGN(GenericNode);
+};
+
+// An encapsulation for information associated with a single use of a node as
+// an input from another node, allowing access to both the defining node and
+// the node having the input.
+template <class B, class S>
+class GenericNode<B, S>::Edge {
+ public:
+  S* from() const { return static_cast<S*>(input_->use->from); }
+  S* to() const { return static_cast<S*>(input_->to); }
+  int index() const {
+    int index = input_->use->input_index;
+    DCHECK(index < input_->use->from->input_count_);
+    return index;
+  }
+
+ private:
+  friend class GenericNode<B, S>::Uses::iterator;
+  friend class GenericNode<B, S>::Inputs::iterator;
+
+  explicit Edge(typename GenericNode<B, S>::Input* input) : input_(input) {}
+
+  typename GenericNode<B, S>::Input* input_;
+};
+
+// A forward iterator to visit the nodes that a node depends upon, in the
+// order of its inputs.
+template <class B, class S>
+class GenericNode<B, S>::Inputs::iterator {
+ public:
+  iterator(const typename GenericNode<B, S>::Inputs::iterator& other)  // NOLINT
+      : node_(other.node_),
+        index_(other.index_) {}
+
+  S* operator*() { return static_cast<S*>(GetInput()->to); }
+  typename GenericNode<B, S>::Edge edge() {
+    return typename GenericNode::Edge(GetInput());
+  }
+  bool operator==(const iterator& other) const {
+    return other.index_ == index_ && other.node_ == node_;
+  }
+  bool operator!=(const iterator& other) const { return !(other == *this); }
+  iterator& operator++() {
+    DCHECK(node_ != NULL);
+    DCHECK(index_ < node_->input_count_);
+    ++index_;
+    return *this;
+  }
+  iterator& UpdateToAndIncrement(GenericNode<B, S>* new_to) {
+    typename GenericNode<B, S>::Input* input = GetInput();
+    input->Update(new_to);
+    index_++;
+    return *this;
+  }
+  int index() { return index_; }
+
+ private:
+  friend class GenericNode;
+
+  explicit iterator(GenericNode* node, int index)
+      : node_(node), index_(index) {}
+
+  Input* GetInput() const { return node_->GetInputRecordPtr(index_); }
+
+  GenericNode* node_;
+  int index_;
+};
+
+// A forward iterator to visit the uses of a node. The uses are returned in
+// the order in which they were added as inputs.
+template <class B, class S>
+class GenericNode<B, S>::Uses::iterator {
+ public:
+  iterator(const typename GenericNode<B, S>::Uses::iterator& other)  // NOLINT
+      : current_(other.current_),
+        index_(other.index_) {}
+
+  S* operator*() { return static_cast<S*>(current_->from); }
+  typename GenericNode<B, S>::Edge edge() {
+    return typename GenericNode::Edge(CurrentInput());
+  }
+
+  bool operator==(const iterator& other) { return other.current_ == current_; }
+  bool operator!=(const iterator& other) { return other.current_ != current_; }
+  iterator& operator++() {
+    DCHECK(current_ != NULL);
+    index_++;
+    current_ = current_->next;
+    return *this;
+  }
+  iterator& UpdateToAndIncrement(GenericNode<B, S>* new_to) {
+    DCHECK(current_ != NULL);
+    index_++;
+    typename GenericNode<B, S>::Input* input = CurrentInput();
+    current_ = current_->next;
+    input->Update(new_to);
+    return *this;
+  }
+  int index() const { return index_; }
+
+ private:
+  friend class GenericNode<B, S>::Uses;
+
+  iterator() : current_(NULL), index_(0) {}
+  explicit iterator(GenericNode<B, S>* node)
+      : current_(node->first_use_), index_(0) {}
+
+  Input* CurrentInput() const {
+    return current_->from->GetInputRecordPtr(current_->input_index);
+  }
+
+  typename GenericNode<B, S>::Use* current_;
+  int index_;
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_GENERIC_NODE_H_
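
To make the mixin pattern above concrete, here is a minimal sketch of a
specialization, assuming a hypothetical payload class. The names MyNodeData
and MyNode are illustrative only and are not part of this change.

    // The payload base class B carries whatever per-node data the client
    // graph needs; here just a marker flag used by some hypothetical pass.
    class MyNodeData {
     public:
      MyNodeData() : marked_(false) {}
      bool is_marked() const { return marked_; }
      void set_marked(bool marked) { marked_ = marked; }

     protected:
      bool marked_;
    };

    // Passing the derived class as S makes InputAt(), UseAt() and the
    // iterators hand back MyNode* rather than the generic base type.
    class MyNode : public GenericNode<MyNodeData, MyNode> {
     public:
      MyNode(GenericGraphBase* graph, int input_count)
          : GenericNode<MyNodeData, MyNode>(graph, input_count) {}
    };
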
diff --git a/src/compiler/graph-builder.cc b/src/compiler/graph-builder.cc
new file mode 100644
index 0000000..8992881
--- /dev/null
+++ b/src/compiler/graph-builder.cc
@@ -0,0 +1,249 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-builder.h"
+
+#include "src/compiler.h"
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/operator-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+StructuredGraphBuilder::StructuredGraphBuilder(Graph* graph,
+                                               CommonOperatorBuilder* common)
+    : GraphBuilder(graph),
+      common_(common),
+      environment_(NULL),
+      current_context_(NULL),
+      exit_control_(NULL) {}
+
+
+Node* StructuredGraphBuilder::MakeNode(const Operator* op,
+                                       int value_input_count,
+                                       Node** value_inputs) {
+  DCHECK(op->InputCount() == value_input_count);
+
+  bool has_context = OperatorProperties::HasContextInput(op);
+  bool has_framestate = OperatorProperties::HasFrameStateInput(op);
+  bool has_control = OperatorProperties::GetControlInputCount(op) == 1;
+  bool has_effect = OperatorProperties::GetEffectInputCount(op) == 1;
+
+  DCHECK(OperatorProperties::GetControlInputCount(op) < 2);
+  DCHECK(OperatorProperties::GetEffectInputCount(op) < 2);
+
+  Node* result = NULL;
+  if (!has_context && !has_framestate && !has_control && !has_effect) {
+    result = graph()->NewNode(op, value_input_count, value_inputs);
+  } else {
+    int input_count_with_deps = value_input_count;
+    if (has_context) ++input_count_with_deps;
+    if (has_framestate) ++input_count_with_deps;
+    if (has_control) ++input_count_with_deps;
+    if (has_effect) ++input_count_with_deps;
+    Node** buffer = zone()->NewArray<Node*>(input_count_with_deps);
+    memcpy(buffer, value_inputs, kPointerSize * value_input_count);
+    Node** current_input = buffer + value_input_count;
+    if (has_context) {
+      *current_input++ = current_context();
+    }
+    if (has_framestate) {
+      // The frame state will be inserted later. Here we misuse
+      // the dead_control node as a sentinel to be later overwritten
+      // with the real frame state.
+      *current_input++ = dead_control();
+    }
+    if (has_effect) {
+      *current_input++ = environment_->GetEffectDependency();
+    }
+    if (has_control) {
+      *current_input++ = environment_->GetControlDependency();
+    }
+    result = graph()->NewNode(op, input_count_with_deps, buffer);
+    if (has_effect) {
+      environment_->UpdateEffectDependency(result);
+    }
+    if (OperatorProperties::HasControlOutput(result->op()) &&
+        !environment()->IsMarkedAsUnreachable()) {
+      environment_->UpdateControlDependency(result);
+    }
+  }
+
+  return result;
+}
+
+
+void StructuredGraphBuilder::UpdateControlDependencyToLeaveFunction(
+    Node* exit) {
+  if (environment()->IsMarkedAsUnreachable()) return;
+  if (exit_control() != NULL) {
+    exit = MergeControl(exit_control(), exit);
+  }
+  environment()->MarkAsUnreachable();
+  set_exit_control(exit);
+}
+
+
+StructuredGraphBuilder::Environment* StructuredGraphBuilder::CopyEnvironment(
+    Environment* env) {
+  return new (zone()) Environment(*env);
+}
+
+
+StructuredGraphBuilder::Environment::Environment(
+    StructuredGraphBuilder* builder, Node* control_dependency)
+    : builder_(builder),
+      control_dependency_(control_dependency),
+      effect_dependency_(control_dependency),
+      values_(zone()) {}
+
+
+StructuredGraphBuilder::Environment::Environment(const Environment& copy)
+    : builder_(copy.builder()),
+      control_dependency_(copy.control_dependency_),
+      effect_dependency_(copy.effect_dependency_),
+      values_(copy.values_) {}
+
+
+void StructuredGraphBuilder::Environment::Merge(Environment* other) {
+  DCHECK(values_.size() == other->values_.size());
+
+  // Nothing to do if the other environment is dead.
+  if (other->IsMarkedAsUnreachable()) return;
+
+  // Resurrect a dead environment by copying the contents of the other one and
+  // placing a singleton merge as the new control dependency.
+  if (this->IsMarkedAsUnreachable()) {
+    Node* other_control = other->control_dependency_;
+    control_dependency_ = graph()->NewNode(common()->Merge(1), other_control);
+    effect_dependency_ = other->effect_dependency_;
+    values_ = other->values_;
+    return;
+  }
+
+  // Create a merge of the control dependencies of both environments and update
+  // the current environment's control dependency accordingly.
+  Node* control = builder_->MergeControl(this->GetControlDependency(),
+                                         other->GetControlDependency());
+  UpdateControlDependency(control);
+
+  // Create a merge of the effect dependencies of both environments and update
+  // the current environment's effect dependency accordingly.
+  Node* effect = builder_->MergeEffect(this->GetEffectDependency(),
+                                       other->GetEffectDependency(), control);
+  UpdateEffectDependency(effect);
+
+  // Introduce Phi nodes for values that have differing inputs at merge
+  // points, extending an existing Phi node where possible.
+  for (int i = 0; i < static_cast<int>(values_.size()); ++i) {
+    values_[i] = builder_->MergeValue(values_[i], other->values_[i], control);
+  }
+}
+
+
+void StructuredGraphBuilder::Environment::PrepareForLoop() {
+  Node* control = GetControlDependency();
+  for (int i = 0; i < static_cast<int>(values()->size()); ++i) {
+    Node* phi = builder_->NewPhi(1, values()->at(i), control);
+    values()->at(i) = phi;
+  }
+  Node* effect = builder_->NewEffectPhi(1, GetEffectDependency(), control);
+  UpdateEffectDependency(effect);
+}
+
+
+Node* StructuredGraphBuilder::NewPhi(int count, Node* input, Node* control) {
+  const Operator* phi_op = common()->Phi(kMachAnyTagged, count);
+  Node** buffer = zone()->NewArray<Node*>(count + 1);
+  MemsetPointer(buffer, input, count);
+  buffer[count] = control;
+  return graph()->NewNode(phi_op, count + 1, buffer);
+}
+
+
+// TODO(mstarzinger): Revisit this once we have proper effect states.
+Node* StructuredGraphBuilder::NewEffectPhi(int count, Node* input,
+                                           Node* control) {
+  const Operator* phi_op = common()->EffectPhi(count);
+  Node** buffer = zone()->NewArray<Node*>(count + 1);
+  MemsetPointer(buffer, input, count);
+  buffer[count] = control;
+  return graph()->NewNode(phi_op, count + 1, buffer);
+}
+
+
+Node* StructuredGraphBuilder::MergeControl(Node* control, Node* other) {
+  int inputs = OperatorProperties::GetControlInputCount(control->op()) + 1;
+  if (control->opcode() == IrOpcode::kLoop) {
+    // Control node for loop exists, add input.
+    const Operator* op = common()->Loop(inputs);
+    control->AppendInput(zone(), other);
+    control->set_op(op);
+  } else if (control->opcode() == IrOpcode::kMerge) {
+    // Control node for merge exists, add input.
+    const Operator* op = common()->Merge(inputs);
+    control->AppendInput(zone(), other);
+    control->set_op(op);
+  } else {
+    // Control node is a singleton, introduce a merge.
+    const Operator* op = common()->Merge(inputs);
+    control = graph()->NewNode(op, control, other);
+  }
+  return control;
+}
+
+
+Node* StructuredGraphBuilder::MergeEffect(Node* value, Node* other,
+                                          Node* control) {
+  int inputs = OperatorProperties::GetControlInputCount(control->op());
+  if (value->opcode() == IrOpcode::kEffectPhi &&
+      NodeProperties::GetControlInput(value) == control) {
+    // Phi already exists, add input.
+    value->set_op(common()->EffectPhi(inputs));
+    value->InsertInput(zone(), inputs - 1, other);
+  } else if (value != other) {
+    // Phi does not exist yet, introduce one.
+    value = NewEffectPhi(inputs, value, control);
+    value->ReplaceInput(inputs - 1, other);
+  }
+  return value;
+}
+
+
+Node* StructuredGraphBuilder::MergeValue(Node* value, Node* other,
+                                         Node* control) {
+  int inputs = OperatorProperties::GetControlInputCount(control->op());
+  if (value->opcode() == IrOpcode::kPhi &&
+      NodeProperties::GetControlInput(value) == control) {
+    // Phi already exists, add input.
+    value->set_op(common()->Phi(kMachAnyTagged, inputs));
+    value->InsertInput(zone(), inputs - 1, other);
+  } else if (value != other) {
+    // Phi does not exist yet, introduce one.
+    value = NewPhi(inputs, value, control);
+    value->ReplaceInput(inputs - 1, other);
+  }
+  return value;
+}
+
+
+Node* StructuredGraphBuilder::dead_control() {
+  if (!dead_control_.is_set()) {
+    Node* dead_node = graph()->NewNode(common_->Dead());
+    dead_control_.set(dead_node);
+    return dead_node;
+  }
+  return dead_control_.get();
+}
+}
+}
+}  // namespace v8::internal::compiler
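
As a rough illustration of how the environment machinery above is driven, the
following hedged sketch shows a hypothetical StructuredGraphBuilder subclass
lowering an if/else. It is not part of this change and the real control-flow
builders cover more cases, but the shape is the same: build each arm against
its own environment, then let Merge() introduce the Merge, EffectPhi and Phi
nodes for whatever diverged.

    class SketchGraphBuilder : public StructuredGraphBuilder {
     public:
      SketchGraphBuilder(Graph* graph, CommonOperatorBuilder* common)
          : StructuredGraphBuilder(graph, common) {}

      void BuildIfElse(Node* condition) {
        // MakeNode() appends the current control dependency to the branch and
        // then makes the branch the environment's new control dependency.
        NewBranch(condition);
        Environment* else_env = environment()->CopyForConditional();

        // Then-arm: built against the current environment. NewIfTrue() also
        // goes through MakeNode(), so the control dependency advances again.
        NewIfTrue();
        // ... emit then-body nodes via NewNode(...) ...
        Environment* then_env = environment();

        // Else-arm: built against the copied environment.
        set_environment(else_env);
        NewIfFalse();
        // ... emit else-body nodes via NewNode(...) ...

        // Join point: differing effect and value dependencies become
        // EffectPhi/Phi nodes anchored on a Merge of the two controls.
        environment()->Merge(then_env);
      }
    };
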
diff --git a/src/compiler/graph-builder.h b/src/compiler/graph-builder.h
new file mode 100644
index 0000000..c966c29
--- /dev/null
+++ b/src/compiler/graph-builder.h
@@ -0,0 +1,230 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_BUILDER_H_
+#define V8_COMPILER_GRAPH_BUILDER_H_
+
+#include "src/v8.h"
+
+#include "src/allocation.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/unique.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Node;
+
+// A common base class for anything that creates nodes in a graph.
+class GraphBuilder {
+ public:
+  explicit GraphBuilder(Graph* graph) : graph_(graph) {}
+  virtual ~GraphBuilder() {}
+
+  Node* NewNode(const Operator* op) {
+    return MakeNode(op, 0, static_cast<Node**>(NULL));
+  }
+
+  Node* NewNode(const Operator* op, Node* n1) { return MakeNode(op, 1, &n1); }
+
+  Node* NewNode(const Operator* op, Node* n1, Node* n2) {
+    Node* buffer[] = {n1, n2};
+    return MakeNode(op, arraysize(buffer), buffer);
+  }
+
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) {
+    Node* buffer[] = {n1, n2, n3};
+    return MakeNode(op, arraysize(buffer), buffer);
+  }
+
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
+    Node* buffer[] = {n1, n2, n3, n4};
+    return MakeNode(op, arraysize(buffer), buffer);
+  }
+
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5) {
+    Node* buffer[] = {n1, n2, n3, n4, n5};
+    return MakeNode(op, arraysize(buffer), buffer);
+  }
+
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5, Node* n6) {
+    Node* nodes[] = {n1, n2, n3, n4, n5, n6};
+    return MakeNode(op, arraysize(nodes), nodes);
+  }
+
+  Node* NewNode(const Operator* op, int value_input_count,
+                Node** value_inputs) {
+    return MakeNode(op, value_input_count, value_inputs);
+  }
+
+  Graph* graph() const { return graph_; }
+
+ protected:
+  // Base implementation used by all factory methods.
+  virtual Node* MakeNode(const Operator* op, int value_input_count,
+                         Node** value_inputs) = 0;
+
+ private:
+  Graph* graph_;
+};
+
+
+// The StructuredGraphBuilder produces a high-level IR graph. It is used as the
+// base class for concrete implementations (e.g. the AstGraphBuilder or the
+// StubGraphBuilder).
+class StructuredGraphBuilder : public GraphBuilder {
+ public:
+  StructuredGraphBuilder(Graph* graph, CommonOperatorBuilder* common);
+  virtual ~StructuredGraphBuilder() {}
+
+  // Creates a new Phi node having {count} input values.
+  Node* NewPhi(int count, Node* input, Node* control);
+  Node* NewEffectPhi(int count, Node* input, Node* control);
+
+  // Helpers for merging control, effect or value dependencies.
+  Node* MergeControl(Node* control, Node* other);
+  Node* MergeEffect(Node* value, Node* other, Node* control);
+  Node* MergeValue(Node* value, Node* other, Node* control);
+
+  // Helpers to create new control nodes.
+  Node* NewIfTrue() { return NewNode(common()->IfTrue()); }
+  Node* NewIfFalse() { return NewNode(common()->IfFalse()); }
+  Node* NewMerge() { return NewNode(common()->Merge(1)); }
+  Node* NewLoop() { return NewNode(common()->Loop(1)); }
+  Node* NewBranch(Node* condition) {
+    return NewNode(common()->Branch(), condition);
+  }
+
+ protected:
+  class Environment;
+  friend class Environment;
+  friend class ControlBuilder;
+
+  // The following method creates a new node having the specified operator and
+  // ensures effect and control dependencies are wired up. The dependencies
+  // tracked by the environment might be mutated.
+  virtual Node* MakeNode(const Operator* op, int value_input_count,
+                         Node** value_inputs) FINAL;
+
+  Environment* environment() const { return environment_; }
+  void set_environment(Environment* env) { environment_ = env; }
+
+  Node* current_context() const { return current_context_; }
+  void set_current_context(Node* context) { current_context_ = context; }
+
+  Node* exit_control() const { return exit_control_; }
+  void set_exit_control(Node* node) { exit_control_ = node; }
+
+  Node* dead_control();
+
+  // TODO(mstarzinger): Use phase-local zone instead!
+  Zone* zone() const { return graph()->zone(); }
+  Isolate* isolate() const { return zone()->isolate(); }
+  CommonOperatorBuilder* common() const { return common_; }
+
+  // Helper to wrap a Handle<T> into a Unique<T>.
+  template <class T>
+  Unique<T> MakeUnique(Handle<T> object) {
+    return Unique<T>::CreateUninitialized(object);
+  }
+
+  // Support for control flow builders. The concrete type of the environment
+  // depends on the graph builder, but environments themselves are not virtual.
+  virtual Environment* CopyEnvironment(Environment* env);
+
+  // Helper to indicate a node exits the function body.
+  void UpdateControlDependencyToLeaveFunction(Node* exit);
+
+ private:
+  CommonOperatorBuilder* common_;
+  Environment* environment_;
+
+  // Node representing the control dependency for dead code.
+  SetOncePointer<Node> dead_control_;
+
+  // Node representing the current context within the function body.
+  Node* current_context_;
+
+  // Merge of all control nodes that exit the function body.
+  Node* exit_control_;
+
+  DISALLOW_COPY_AND_ASSIGN(StructuredGraphBuilder);
+};
+
+
+// The abstract execution environment contains static knowledge about
+// execution state at arbitrary control-flow points. It allows for
+// simulation of the control-flow at compile time.
+class StructuredGraphBuilder::Environment : public ZoneObject {
+ public:
+  Environment(StructuredGraphBuilder* builder, Node* control_dependency);
+  Environment(const Environment& copy);
+
+  // Control dependency tracked by this environment.
+  Node* GetControlDependency() { return control_dependency_; }
+  void UpdateControlDependency(Node* dependency) {
+    control_dependency_ = dependency;
+  }
+
+  // Effect dependency tracked by this environment.
+  Node* GetEffectDependency() { return effect_dependency_; }
+  void UpdateEffectDependency(Node* dependency) {
+    effect_dependency_ = dependency;
+  }
+
+  // Mark this environment as being unreachable.
+  void MarkAsUnreachable() {
+    UpdateControlDependency(builder()->dead_control());
+  }
+  bool IsMarkedAsUnreachable() {
+    return GetControlDependency()->opcode() == IrOpcode::kDead;
+  }
+
+  // Merge another environment into this one.
+  void Merge(Environment* other);
+
+  // Copies this environment at a control-flow split point.
+  Environment* CopyForConditional() { return builder()->CopyEnvironment(this); }
+
+  // Copies this environment to a potentially unreachable control-flow point.
+  Environment* CopyAsUnreachable() {
+    Environment* env = builder()->CopyEnvironment(this);
+    env->MarkAsUnreachable();
+    return env;
+  }
+
+  // Copies this environment at a loop header control-flow point.
+  Environment* CopyForLoop() {
+    PrepareForLoop();
+    return builder()->CopyEnvironment(this);
+  }
+
+  Node* GetContext() { return builder_->current_context(); }
+
+ protected:
+  // TODO(mstarzinger): Use phase-local zone instead!
+  Zone* zone() const { return graph()->zone(); }
+  Graph* graph() const { return builder_->graph(); }
+  StructuredGraphBuilder* builder() const { return builder_; }
+  CommonOperatorBuilder* common() { return builder_->common(); }
+  NodeVector* values() { return &values_; }
+
+  // Prepare environment to be used as loop header.
+  void PrepareForLoop();
+
+ private:
+  StructuredGraphBuilder* builder_;
+  Node* control_dependency_;
+  Node* effect_dependency_;
+  NodeVector values_;
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_GRAPH_BUILDER_H_
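
At the other end of the spectrum, a hypothetical GraphBuilder subclass that
does no dependency tracking at all is enough to exercise the NewNode()
overloads above; it simply forwards to the graph. The name PlainGraphBuilder
is illustrative only.

    class PlainGraphBuilder : public GraphBuilder {
     public:
      explicit PlainGraphBuilder(Graph* graph) : GraphBuilder(graph) {}

     protected:
      // Every NewNode() overload packs its arguments into a stack buffer and
      // ends up here with the value inputs only.
      virtual Node* MakeNode(const Operator* op, int value_input_count,
                             Node** value_inputs) OVERRIDE {
        return graph()->NewNode(op, value_input_count, value_inputs);
      }
    };

With such a builder, builder.NewNode(op, a, b) behaves exactly like
graph->NewNode(op, a, b).
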
diff --git a/src/compiler/graph-inl.h b/src/compiler/graph-inl.h
new file mode 100644
index 0000000..571ffb3
--- /dev/null
+++ b/src/compiler/graph-inl.h
@@ -0,0 +1,37 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_INL_H_
+#define V8_COMPILER_GRAPH_INL_H_
+
+#include "src/compiler/generic-algorithm-inl.h"
+#include "src/compiler/graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <class Visitor>
+void Graph::VisitNodeUsesFrom(Node* node, Visitor* visitor) {
+  GenericGraphVisit::Visit<Visitor, NodeUseIterationTraits<Node> >(
+      this, zone(), node, visitor);
+}
+
+
+template <class Visitor>
+void Graph::VisitNodeUsesFromStart(Visitor* visitor) {
+  VisitNodeUsesFrom(start(), visitor);
+}
+
+
+template <class Visitor>
+void Graph::VisitNodeInputsFromEnd(Visitor* visitor) {
+  GenericGraphVisit::Visit<Visitor, NodeInputIterationTraits<Node> >(
+      this, zone(), end(), visitor);
+}
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_GRAPH_INL_H_
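
A hedged example of the visitor shape these templates expect (the
GraphReducerVisitor in graph-reducer.cc follows the same pattern): a
NullNodeVisitor subclass whose Post() hook fires once per reachable node. The
NodeCountVisitor name is illustrative only; NullNodeVisitor is assumed to be
visible via node.h.

    struct NodeCountVisitor : public NullNodeVisitor {
      NodeCountVisitor() : count(0) {}

      // Called after all inputs of {node} have been visited.
      GenericGraphVisit::Control Post(Node* node) {
        ++count;
        return GenericGraphVisit::CONTINUE;
      }

      int count;
    };

    // Usage sketch:
    //   NodeCountVisitor counter;
    //   graph->VisitNodeInputsFromEnd(&counter);
    //   // counter.count is the number of nodes reachable from end().
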
diff --git a/src/compiler/graph-reducer-unittest.cc b/src/compiler/graph-reducer-unittest.cc
new file mode 100644
index 0000000..6567203
--- /dev/null
+++ b/src/compiler/graph-reducer-unittest.cc
@@ -0,0 +1,114 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/operator.h"
+#include "src/test/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::_;
+using testing::DefaultValue;
+using testing::Return;
+using testing::Sequence;
+using testing::StrictMock;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+SimpleOperator OP0(0, Operator::kNoWrite, 0, 1, "op0");
+SimpleOperator OP1(1, Operator::kNoProperties, 1, 1, "op1");
+
+
+struct MockReducer : public Reducer {
+  MOCK_METHOD1(Reduce, Reduction(Node*));
+};
+
+}  // namespace
+
+
+class GraphReducerTest : public TestWithZone {
+ public:
+  GraphReducerTest() : graph_(zone()) {}
+
+  static void SetUpTestCase() {
+    TestWithZone::SetUpTestCase();
+    DefaultValue<Reduction>::Set(Reducer::NoChange());
+  }
+
+  static void TearDownTestCase() {
+    DefaultValue<Reduction>::Clear();
+    TestWithZone::TearDownTestCase();
+  }
+
+ protected:
+  void ReduceNode(Node* node, Reducer* r) {
+    GraphReducer reducer(graph());
+    reducer.AddReducer(r);
+    reducer.ReduceNode(node);
+  }
+
+  void ReduceNode(Node* node, Reducer* r1, Reducer* r2) {
+    GraphReducer reducer(graph());
+    reducer.AddReducer(r1);
+    reducer.AddReducer(r2);
+    reducer.ReduceNode(node);
+  }
+
+  void ReduceNode(Node* node, Reducer* r1, Reducer* r2, Reducer* r3) {
+    GraphReducer reducer(graph());
+    reducer.AddReducer(r1);
+    reducer.AddReducer(r2);
+    reducer.AddReducer(r3);
+    reducer.ReduceNode(node);
+  }
+
+  Graph* graph() { return &graph_; }
+
+ private:
+  Graph graph_;
+};
+
+
+TEST_F(GraphReducerTest, NodeIsDeadAfterReplace) {
+  StrictMock<MockReducer> r;
+  Node* node0 = graph()->NewNode(&OP0);
+  Node* node1 = graph()->NewNode(&OP1, node0);
+  Node* node2 = graph()->NewNode(&OP1, node0);
+  EXPECT_CALL(r, Reduce(node1)).WillOnce(Return(Reducer::Replace(node2)));
+  ReduceNode(node1, &r);
+  EXPECT_FALSE(node0->IsDead());
+  EXPECT_TRUE(node1->IsDead());
+  EXPECT_FALSE(node2->IsDead());
+}
+
+
+TEST_F(GraphReducerTest, ReduceOnceForEveryReducer) {
+  StrictMock<MockReducer> r1, r2;
+  Node* node0 = graph()->NewNode(&OP0);
+  EXPECT_CALL(r1, Reduce(node0));
+  EXPECT_CALL(r2, Reduce(node0));
+  ReduceNode(node0, &r1, &r2);
+}
+
+
+TEST_F(GraphReducerTest, ReduceAgainAfterChanged) {
+  Sequence s1, s2;
+  StrictMock<MockReducer> r1, r2, r3;
+  Node* node0 = graph()->NewNode(&OP0);
+  EXPECT_CALL(r1, Reduce(node0));
+  EXPECT_CALL(r2, Reduce(node0));
+  EXPECT_CALL(r3, Reduce(node0)).InSequence(s1, s2).WillOnce(
+      Return(Reducer::Changed(node0)));
+  EXPECT_CALL(r1, Reduce(node0)).InSequence(s1);
+  EXPECT_CALL(r2, Reduce(node0)).InSequence(s2);
+  ReduceNode(node0, &r1, &r2, &r3);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/graph-reducer.cc b/src/compiler/graph-reducer.cc
new file mode 100644
index 0000000..36a54e0
--- /dev/null
+++ b/src/compiler/graph-reducer.cc
@@ -0,0 +1,98 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-reducer.h"
+
+#include <functional>
+
+#include "src/compiler/graph-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+GraphReducer::GraphReducer(Graph* graph)
+    : graph_(graph), reducers_(graph->zone()) {}
+
+
+static bool NodeIdIsLessThan(const Node* node, NodeId id) {
+  return node->id() < id;
+}
+
+
+void GraphReducer::ReduceNode(Node* node) {
+  ZoneVector<Reducer*>::iterator skip = reducers_.end();
+  static const unsigned kMaxAttempts = 16;
+  bool reduce = true;
+  for (unsigned attempts = 0; attempts <= kMaxAttempts; ++attempts) {
+    if (!reduce) return;
+    reduce = false;  // Assume we don't need to rerun any reducers.
+    int before = graph_->NodeCount();
+    for (ZoneVector<Reducer*>::iterator i = reducers_.begin();
+         i != reducers_.end(); ++i) {
+      if (i == skip) continue;  // Skip this reducer.
+      Reduction reduction = (*i)->Reduce(node);
+      Node* replacement = reduction.replacement();
+      if (replacement == NULL) {
+        // No change from this reducer.
+      } else if (replacement == node) {
+        // {replacement == node} represents an in-place reduction.
+        // Rerun all the reducers except the current one for this node,
+        // as now there may be more opportunities for reduction.
+        reduce = true;
+        skip = i;
+        break;
+      } else {
+        if (node == graph_->start()) graph_->SetStart(replacement);
+        if (node == graph_->end()) graph_->SetEnd(replacement);
+        // If {node} was replaced by an old node, unlink {node} and assume that
+        // {replacement} was already reduced and finish.
+        if (replacement->id() < before) {
+          node->ReplaceUses(replacement);
+          node->Kill();
+          return;
+        }
+        // Otherwise, {node} was replaced by a new node. Replace all old uses of
+        // {node} with {replacement}. New nodes created by this reduction can
+        // use {node}.
+        node->ReplaceUsesIf(
+            std::bind2nd(std::ptr_fun(&NodeIdIsLessThan), before), replacement);
+        // Unlink {node} if it's no longer used.
+        if (node->uses().empty()) {
+          node->Kill();
+        }
+        // Rerun all the reductions on the {replacement}.
+        skip = reducers_.end();
+        node = replacement;
+        reduce = true;
+        break;
+      }
+    }
+  }
+}
+
+
+// A helper class to reuse the node traversal algorithm.
+struct GraphReducerVisitor FINAL : public NullNodeVisitor {
+  explicit GraphReducerVisitor(GraphReducer* reducer) : reducer_(reducer) {}
+  GenericGraphVisit::Control Post(Node* node) {
+    reducer_->ReduceNode(node);
+    return GenericGraphVisit::CONTINUE;
+  }
+  GraphReducer* reducer_;
+};
+
+
+void GraphReducer::ReduceGraph() {
+  GraphReducerVisitor visitor(this);
+  // Perform a post-order reduction of all nodes starting from the end.
+  graph()->VisitNodeInputsFromEnd(&visitor);
+}
+
+
+// TODO(titzer): partial graph reductions.
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/graph-reducer.h b/src/compiler/graph-reducer.h
new file mode 100644
index 0000000..e0e4f7a
--- /dev/null
+++ b/src/compiler/graph-reducer.h
@@ -0,0 +1,80 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_REDUCER_H_
+#define V8_COMPILER_GRAPH_REDUCER_H_
+
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class Graph;
+class Node;
+
+
+// Represents the result of trying to reduce a node in the graph.
+class Reduction FINAL {
+ public:
+  explicit Reduction(Node* replacement = NULL) : replacement_(replacement) {}
+
+  Node* replacement() const { return replacement_; }
+  bool Changed() const { return replacement() != NULL; }
+
+ private:
+  Node* replacement_;
+};
+
+
+// A reducer can reduce or simplify a given node based on its operator and
+// inputs. This class functions as an extension point for the graph reducer,
+// so that language-specific reductions (e.g. reduction based on types or
+// constant folding of low-level operators) can be integrated into the graph
+// reduction phase.
+class Reducer {
+ public:
+  Reducer() {}
+  virtual ~Reducer() {}
+
+  // Try to reduce a node if possible.
+  virtual Reduction Reduce(Node* node) = 0;
+
+  // Helper functions for subclasses to produce reductions for a node.
+  static Reduction NoChange() { return Reduction(); }
+  static Reduction Replace(Node* node) { return Reduction(node); }
+  static Reduction Changed(Node* node) { return Reduction(node); }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Reducer);
+};
+
+
+// Performs an iterative reduction of a node graph.
+class GraphReducer FINAL {
+ public:
+  explicit GraphReducer(Graph* graph);
+
+  Graph* graph() const { return graph_; }
+
+  void AddReducer(Reducer* reducer) { reducers_.push_back(reducer); }
+
+  // Reduce a single node.
+  void ReduceNode(Node* node);
+  // Reduce the whole graph.
+  void ReduceGraph();
+
+ private:
+  Graph* graph_;
+  ZoneVector<Reducer*> reducers_;
+
+  DISALLOW_COPY_AND_ASSIGN(GraphReducer);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_GRAPH_REDUCER_H_
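
A hypothetical reducer, sketched only to show the intended shape of this
extension point: it collapses single-input Phi nodes into their value input
and reports everything else as unchanged. The class name is illustrative and
node.h is assumed to be included for Node and IrOpcode.

    class SingletonPhiReducer FINAL : public Reducer {
     public:
      virtual Reduction Reduce(Node* node) OVERRIDE {
        // A Phi with one value input (plus its control input) is just that
        // value; Replace() lets GraphReducer rewire all uses and kill the Phi.
        if (node->opcode() == IrOpcode::kPhi && node->InputCount() == 2) {
          return Replace(node->InputAt(0));
        }
        return NoChange();
      }
    };

    // Usage sketch:
    //   GraphReducer reducer(graph);
    //   SingletonPhiReducer phi_reducer;
    //   reducer.AddReducer(&phi_reducer);
    //   reducer.ReduceGraph();  // post-order fixpoint over all added reducers
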
diff --git a/src/compiler/graph-replay.cc b/src/compiler/graph-replay.cc
new file mode 100644
index 0000000..494d431
--- /dev/null
+++ b/src/compiler/graph-replay.cc
@@ -0,0 +1,81 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-replay.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/operator-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#ifdef DEBUG
+
+void GraphReplayPrinter::PrintReplay(Graph* graph) {
+  GraphReplayPrinter replay;
+  PrintF("  Node* nil = graph.NewNode(common_builder.Dead());\n");
+  graph->VisitNodeInputsFromEnd(&replay);
+}
+
+
+GenericGraphVisit::Control GraphReplayPrinter::Pre(Node* node) {
+  PrintReplayOpCreator(node->op());
+  PrintF("  Node* n%d = graph.NewNode(op", node->id());
+  for (int i = 0; i < node->InputCount(); ++i) {
+    PrintF(", nil");
+  }
+  PrintF("); USE(n%d);\n", node->id());
+  return GenericGraphVisit::CONTINUE;
+}
+
+
+void GraphReplayPrinter::PostEdge(Node* from, int index, Node* to) {
+  PrintF("  n%d->ReplaceInput(%d, n%d);\n", from->id(), index, to->id());
+}
+
+
+void GraphReplayPrinter::PrintReplayOpCreator(const Operator* op) {
+  IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
+  const char* builder =
+      IrOpcode::IsCommonOpcode(opcode) ? "common_builder" : "js_builder";
+  const char* mnemonic = IrOpcode::IsCommonOpcode(opcode)
+                             ? IrOpcode::Mnemonic(opcode)
+                             : IrOpcode::Mnemonic(opcode) + 2;
+  PrintF("  op = %s.%s(", builder, mnemonic);
+  switch (opcode) {
+    case IrOpcode::kParameter:
+    case IrOpcode::kNumberConstant:
+      PrintF("0");
+      break;
+    case IrOpcode::kLoad:
+      PrintF("unique_name");
+      break;
+    case IrOpcode::kHeapConstant:
+      PrintF("unique_constant");
+      break;
+    case IrOpcode::kPhi:
+      PrintF("%d", op->InputCount());
+      break;
+    case IrOpcode::kEffectPhi:
+      PrintF("%d", OperatorProperties::GetEffectInputCount(op));
+      break;
+    case IrOpcode::kLoop:
+    case IrOpcode::kMerge:
+      PrintF("%d", OperatorProperties::GetControlInputCount(op));
+      break;
+    default:
+      break;
+  }
+  PrintF(");\n");
+}
+
+#endif  // DEBUG
+}
+}
+}  // namespace v8::internal::compiler
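
For a rough idea of what PrintReplay() emits (illustrative only; actual node
ids, operators and argument counts depend on the graph being dumped), the
output begins with a "nil" placeholder node and then declares one node per
line, patching real inputs in afterwards. For a two-input Merge whose inputs
n4 and n5 were declared earlier in the same dump it would look roughly like:

    Node* nil = graph.NewNode(common_builder.Dead());
    op = common_builder.Merge(2);
    Node* n6 = graph.NewNode(op, nil, nil); USE(n6);
    n6->ReplaceInput(0, n4);
    n6->ReplaceInput(1, n5);

Pasting such a dump into a unit test (in DEBUG builds, since PrintReplay() is
a no-op otherwise) rebuilds the same graph shape, which is the intended use
described in graph-replay.h.
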
diff --git a/src/compiler/graph-replay.h b/src/compiler/graph-replay.h
new file mode 100644
index 0000000..53d5247
--- /dev/null
+++ b/src/compiler/graph-replay.h
@@ -0,0 +1,43 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_REPLAY_H_
+#define V8_COMPILER_GRAPH_REPLAY_H_
+
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class Graph;
+
+// Helper class to print a full replay of a graph. This replay can be used to
+// materialize the same graph within a C++ unit test and hence test subsequent
+// optimization passes on a graph without going through the construction steps.
+class GraphReplayPrinter FINAL : public NullNodeVisitor {
+ public:
+#ifdef DEBUG
+  static void PrintReplay(Graph* graph);
+#else
+  static void PrintReplay(Graph* graph) {}
+#endif
+
+  GenericGraphVisit::Control Pre(Node* node);
+  void PostEdge(Node* from, int index, Node* to);
+
+ private:
+  GraphReplayPrinter() {}
+
+  static void PrintReplayOpCreator(const Operator* op);
+
+  DISALLOW_COPY_AND_ASSIGN(GraphReplayPrinter);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_GRAPH_REPLAY_H_
diff --git a/src/compiler/graph-unittest.cc b/src/compiler/graph-unittest.cc
new file mode 100644
index 0000000..75e70cb
--- /dev/null
+++ b/src/compiler/graph-unittest.cc
@@ -0,0 +1,779 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-unittest.h"
+
+#include <ostream>  // NOLINT(readability/streams)
+
+#include "src/compiler/node-properties-inl.h"
+
+using testing::_;
+using testing::MakeMatcher;
+using testing::MatcherInterface;
+using testing::MatchResultListener;
+using testing::StringMatchResultListener;
+
+namespace v8 {
+namespace internal {
+
+// TODO(bmeurer): Find a new home for these functions.
+template <typename T>
+inline std::ostream& operator<<(std::ostream& os, const Unique<T>& value) {
+  return os << *value.handle();
+}
+inline std::ostream& operator<<(std::ostream& os,
+                                const ExternalReference& value) {
+  OStringStream ost;
+  compiler::StaticParameterTraits<ExternalReference>::PrintTo(ost, value);
+  return os << ost.c_str();
+}
+
+namespace compiler {
+
+GraphTest::GraphTest(int num_parameters) : common_(zone()), graph_(zone()) {
+  graph()->SetStart(graph()->NewNode(common()->Start(num_parameters)));
+}
+
+
+GraphTest::~GraphTest() {}
+
+
+Node* GraphTest::Parameter(int32_t index) {
+  return graph()->NewNode(common()->Parameter(index), graph()->start());
+}
+
+
+Node* GraphTest::Float32Constant(volatile float value) {
+  return graph()->NewNode(common()->Float32Constant(value));
+}
+
+
+Node* GraphTest::Float64Constant(volatile double value) {
+  return graph()->NewNode(common()->Float64Constant(value));
+}
+
+
+Node* GraphTest::Int32Constant(int32_t value) {
+  return graph()->NewNode(common()->Int32Constant(value));
+}
+
+
+Node* GraphTest::Int64Constant(int64_t value) {
+  return graph()->NewNode(common()->Int64Constant(value));
+}
+
+
+Node* GraphTest::NumberConstant(volatile double value) {
+  return graph()->NewNode(common()->NumberConstant(value));
+}
+
+
+Node* GraphTest::HeapConstant(const Unique<HeapObject>& value) {
+  return graph()->NewNode(common()->HeapConstant(value));
+}
+
+
+Node* GraphTest::FalseConstant() {
+  return HeapConstant(
+      Unique<HeapObject>::CreateImmovable(factory()->false_value()));
+}
+
+
+Node* GraphTest::TrueConstant() {
+  return HeapConstant(
+      Unique<HeapObject>::CreateImmovable(factory()->true_value()));
+}
+
+
+Matcher<Node*> GraphTest::IsFalseConstant() {
+  return IsHeapConstant(
+      Unique<HeapObject>::CreateImmovable(factory()->false_value()));
+}
+
+
+Matcher<Node*> GraphTest::IsTrueConstant() {
+  return IsHeapConstant(
+      Unique<HeapObject>::CreateImmovable(factory()->true_value()));
+}
+
+namespace {
+
+template <typename T>
+bool PrintMatchAndExplain(const T& value, const char* value_name,
+                          const Matcher<T>& value_matcher,
+                          MatchResultListener* listener) {
+  StringMatchResultListener value_listener;
+  if (!value_matcher.MatchAndExplain(value, &value_listener)) {
+    *listener << "whose " << value_name << " " << value << " doesn't match";
+    if (value_listener.str() != "") {
+      *listener << ", " << value_listener.str();
+    }
+    return false;
+  }
+  return true;
+}
+
+
+class NodeMatcher : public MatcherInterface<Node*> {
+ public:
+  explicit NodeMatcher(IrOpcode::Value opcode) : opcode_(opcode) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    *os << "is a " << IrOpcode::Mnemonic(opcode_) << " node";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    if (node == NULL) {
+      *listener << "which is NULL";
+      return false;
+    }
+    if (node->opcode() != opcode_) {
+      *listener << "whose opcode is " << IrOpcode::Mnemonic(node->opcode())
+                << " but should have been " << IrOpcode::Mnemonic(opcode_);
+      return false;
+    }
+    return true;
+  }
+
+ private:
+  const IrOpcode::Value opcode_;
+};
+
+
+class IsBranchMatcher FINAL : public NodeMatcher {
+ public:
+  IsBranchMatcher(const Matcher<Node*>& value_matcher,
+                  const Matcher<Node*>& control_matcher)
+      : NodeMatcher(IrOpcode::kBranch),
+        value_matcher_(value_matcher),
+        control_matcher_(control_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose value (";
+    value_matcher_.DescribeTo(os);
+    *os << ") and control (";
+    control_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+                                 "value", value_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+                                 "control", control_matcher_, listener));
+  }
+
+ private:
+  const Matcher<Node*> value_matcher_;
+  const Matcher<Node*> control_matcher_;
+};
+
+
+class IsMergeMatcher FINAL : public NodeMatcher {
+ public:
+  IsMergeMatcher(const Matcher<Node*>& control0_matcher,
+                 const Matcher<Node*>& control1_matcher)
+      : NodeMatcher(IrOpcode::kMerge),
+        control0_matcher_(control0_matcher),
+        control1_matcher_(control1_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose control0 (";
+    control0_matcher_.DescribeTo(os);
+    *os << ") and control1 (";
+    control1_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetControlInput(node, 0),
+                                 "control0", control0_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetControlInput(node, 1),
+                                 "control1", control1_matcher_, listener));
+  }
+
+ private:
+  const Matcher<Node*> control0_matcher_;
+  const Matcher<Node*> control1_matcher_;
+};
+
+
+class IsControl1Matcher FINAL : public NodeMatcher {
+ public:
+  IsControl1Matcher(IrOpcode::Value opcode,
+                    const Matcher<Node*>& control_matcher)
+      : NodeMatcher(opcode), control_matcher_(control_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose control (";
+    control_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+                                 "control", control_matcher_, listener));
+  }
+
+ private:
+  const Matcher<Node*> control_matcher_;
+};
+
+
+class IsFinishMatcher FINAL : public NodeMatcher {
+ public:
+  IsFinishMatcher(const Matcher<Node*>& value_matcher,
+                  const Matcher<Node*>& effect_matcher)
+      : NodeMatcher(IrOpcode::kFinish),
+        value_matcher_(value_matcher),
+        effect_matcher_(effect_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose value (";
+    value_matcher_.DescribeTo(os);
+    *os << ") and effect (";
+    effect_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+                                 "value", value_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+                                 effect_matcher_, listener));
+  }
+
+ private:
+  const Matcher<Node*> value_matcher_;
+  const Matcher<Node*> effect_matcher_;
+};
+
+
+template <typename T>
+class IsConstantMatcher FINAL : public NodeMatcher {
+ public:
+  IsConstantMatcher(IrOpcode::Value opcode, const Matcher<T>& value_matcher)
+      : NodeMatcher(opcode), value_matcher_(value_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose value (";
+    value_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(OpParameter<T>(node), "value", value_matcher_,
+                                 listener));
+  }
+
+ private:
+  const Matcher<T> value_matcher_;
+};
+
+
+class IsPhiMatcher FINAL : public NodeMatcher {
+ public:
+  IsPhiMatcher(const Matcher<MachineType>& type_matcher,
+               const Matcher<Node*>& value0_matcher,
+               const Matcher<Node*>& value1_matcher,
+               const Matcher<Node*>& control_matcher)
+      : NodeMatcher(IrOpcode::kPhi),
+        type_matcher_(type_matcher),
+        value0_matcher_(value0_matcher),
+        value1_matcher_(value1_matcher),
+        control_matcher_(control_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose type (";
+    type_matcher_.DescribeTo(os);
+    *os << "), value0 (";
+    value0_matcher_.DescribeTo(os);
+    *os << "), value1 (";
+    value1_matcher_.DescribeTo(os);
+    *os << ") and control (";
+    control_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(OpParameter<MachineType>(node), "type",
+                                 type_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+                                 "value0", value0_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+                                 "value1", value1_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+                                 "control", control_matcher_, listener));
+  }
+
+ private:
+  const Matcher<MachineType> type_matcher_;
+  const Matcher<Node*> value0_matcher_;
+  const Matcher<Node*> value1_matcher_;
+  const Matcher<Node*> control_matcher_;
+};
+
+
+class IsProjectionMatcher FINAL : public NodeMatcher {
+ public:
+  IsProjectionMatcher(const Matcher<size_t>& index_matcher,
+                      const Matcher<Node*>& base_matcher)
+      : NodeMatcher(IrOpcode::kProjection),
+        index_matcher_(index_matcher),
+        base_matcher_(base_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose index (";
+    index_matcher_.DescribeTo(os);
+    *os << ") and base (";
+    base_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(OpParameter<size_t>(node), "index",
+                                 index_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
+                                 base_matcher_, listener));
+  }
+
+ private:
+  const Matcher<size_t> index_matcher_;
+  const Matcher<Node*> base_matcher_;
+};
+
+
+class IsCallMatcher FINAL : public NodeMatcher {
+ public:
+  IsCallMatcher(const Matcher<CallDescriptor*>& descriptor_matcher,
+                const Matcher<Node*>& value0_matcher,
+                const Matcher<Node*>& value1_matcher,
+                const Matcher<Node*>& value2_matcher,
+                const Matcher<Node*>& value3_matcher,
+                const Matcher<Node*>& effect_matcher,
+                const Matcher<Node*>& control_matcher)
+      : NodeMatcher(IrOpcode::kCall),
+        descriptor_matcher_(descriptor_matcher),
+        value0_matcher_(value0_matcher),
+        value1_matcher_(value1_matcher),
+        value2_matcher_(value2_matcher),
+        value3_matcher_(value3_matcher),
+        effect_matcher_(effect_matcher),
+        control_matcher_(control_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose value0 (";
+    value0_matcher_.DescribeTo(os);
+    *os << ") and value1 (";
+    value1_matcher_.DescribeTo(os);
+    *os << ") and value2 (";
+    value2_matcher_.DescribeTo(os);
+    *os << ") and value3 (";
+    value3_matcher_.DescribeTo(os);
+    *os << ") and effect (";
+    effect_matcher_.DescribeTo(os);
+    *os << ") and control (";
+    control_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(OpParameter<CallDescriptor*>(node),
+                                 "descriptor", descriptor_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+                                 "value0", value0_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+                                 "value1", value1_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
+                                 "value2", value2_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 3),
+                                 "value3", value3_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+                                 effect_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+                                 "control", control_matcher_, listener));
+  }
+
+ private:
+  const Matcher<CallDescriptor*> descriptor_matcher_;
+  const Matcher<Node*> value0_matcher_;
+  const Matcher<Node*> value1_matcher_;
+  const Matcher<Node*> value2_matcher_;
+  const Matcher<Node*> value3_matcher_;
+  const Matcher<Node*> effect_matcher_;
+  const Matcher<Node*> control_matcher_;
+};
+
+
+class IsLoadMatcher FINAL : public NodeMatcher {
+ public:
+  IsLoadMatcher(const Matcher<LoadRepresentation>& rep_matcher,
+                const Matcher<Node*>& base_matcher,
+                const Matcher<Node*>& index_matcher,
+                const Matcher<Node*>& effect_matcher)
+      : NodeMatcher(IrOpcode::kLoad),
+        rep_matcher_(rep_matcher),
+        base_matcher_(base_matcher),
+        index_matcher_(index_matcher),
+        effect_matcher_(effect_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose rep (";
+    rep_matcher_.DescribeTo(os);
+    *os << "), base (";
+    base_matcher_.DescribeTo(os);
+    *os << "), index (";
+    index_matcher_.DescribeTo(os);
+    *os << ") and effect (";
+    effect_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(OpParameter<LoadRepresentation>(node), "rep",
+                                 rep_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
+                                 base_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+                                 "index", index_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+                                 effect_matcher_, listener));
+  }
+
+ private:
+  const Matcher<LoadRepresentation> rep_matcher_;
+  const Matcher<Node*> base_matcher_;
+  const Matcher<Node*> index_matcher_;
+  const Matcher<Node*> effect_matcher_;
+};
+
+
+class IsStoreMatcher FINAL : public NodeMatcher {
+ public:
+  IsStoreMatcher(const Matcher<MachineType>& type_matcher,
+                 const Matcher<WriteBarrierKind> write_barrier_matcher,
+                 const Matcher<Node*>& base_matcher,
+                 const Matcher<Node*>& index_matcher,
+                 const Matcher<Node*>& value_matcher,
+                 const Matcher<Node*>& effect_matcher,
+                 const Matcher<Node*>& control_matcher)
+      : NodeMatcher(IrOpcode::kStore),
+        type_matcher_(type_matcher),
+        write_barrier_matcher_(write_barrier_matcher),
+        base_matcher_(base_matcher),
+        index_matcher_(index_matcher),
+        value_matcher_(value_matcher),
+        effect_matcher_(effect_matcher),
+        control_matcher_(control_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose type (";
+    type_matcher_.DescribeTo(os);
+    *os << "), write barrier (";
+    write_barrier_matcher_.DescribeTo(os);
+    *os << "), base (";
+    base_matcher_.DescribeTo(os);
+    *os << "), index (";
+    index_matcher_.DescribeTo(os);
+    *os << "), value (";
+    value_matcher_.DescribeTo(os);
+    *os << "), effect (";
+    effect_matcher_.DescribeTo(os);
+    *os << ") and control (";
+    control_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(
+                OpParameter<StoreRepresentation>(node).machine_type(), "type",
+                type_matcher_, listener) &&
+            PrintMatchAndExplain(
+                OpParameter<StoreRepresentation>(node).write_barrier_kind(),
+                "write barrier", write_barrier_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
+                                 base_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+                                 "index", index_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
+                                 "value", value_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+                                 effect_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+                                 "control", control_matcher_, listener));
+  }
+
+ private:
+  const Matcher<MachineType> type_matcher_;
+  const Matcher<WriteBarrierKind> write_barrier_matcher_;
+  const Matcher<Node*> base_matcher_;
+  const Matcher<Node*> index_matcher_;
+  const Matcher<Node*> value_matcher_;
+  const Matcher<Node*> effect_matcher_;
+  const Matcher<Node*> control_matcher_;
+};
+
+
+class IsBinopMatcher FINAL : public NodeMatcher {
+ public:
+  IsBinopMatcher(IrOpcode::Value opcode, const Matcher<Node*>& lhs_matcher,
+                 const Matcher<Node*>& rhs_matcher)
+      : NodeMatcher(opcode),
+        lhs_matcher_(lhs_matcher),
+        rhs_matcher_(rhs_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose lhs (";
+    lhs_matcher_.DescribeTo(os);
+    *os << ") and rhs (";
+    rhs_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "lhs",
+                                 lhs_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), "rhs",
+                                 rhs_matcher_, listener));
+  }
+
+ private:
+  const Matcher<Node*> lhs_matcher_;
+  const Matcher<Node*> rhs_matcher_;
+};
+
+
+class IsUnopMatcher FINAL : public NodeMatcher {
+ public:
+  IsUnopMatcher(IrOpcode::Value opcode, const Matcher<Node*>& input_matcher)
+      : NodeMatcher(opcode), input_matcher_(input_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose input (";
+    input_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+                                 "input", input_matcher_, listener));
+  }
+
+ private:
+  const Matcher<Node*> input_matcher_;
+};
+}  // namespace
+
+
+Matcher<Node*> IsBranch(const Matcher<Node*>& value_matcher,
+                        const Matcher<Node*>& control_matcher) {
+  return MakeMatcher(new IsBranchMatcher(value_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsMerge(const Matcher<Node*>& control0_matcher,
+                       const Matcher<Node*>& control1_matcher) {
+  return MakeMatcher(new IsMergeMatcher(control0_matcher, control1_matcher));
+}
+
+
+Matcher<Node*> IsIfTrue(const Matcher<Node*>& control_matcher) {
+  return MakeMatcher(new IsControl1Matcher(IrOpcode::kIfTrue, control_matcher));
+}
+
+
+Matcher<Node*> IsIfFalse(const Matcher<Node*>& control_matcher) {
+  return MakeMatcher(
+      new IsControl1Matcher(IrOpcode::kIfFalse, control_matcher));
+}
+
+
+Matcher<Node*> IsControlEffect(const Matcher<Node*>& control_matcher) {
+  return MakeMatcher(
+      new IsControl1Matcher(IrOpcode::kControlEffect, control_matcher));
+}
+
+
+Matcher<Node*> IsValueEffect(const Matcher<Node*>& value_matcher) {
+  return MakeMatcher(new IsUnopMatcher(IrOpcode::kValueEffect, value_matcher));
+}
+
+
+Matcher<Node*> IsFinish(const Matcher<Node*>& value_matcher,
+                        const Matcher<Node*>& effect_matcher) {
+  return MakeMatcher(new IsFinishMatcher(value_matcher, effect_matcher));
+}
+
+
+Matcher<Node*> IsExternalConstant(
+    const Matcher<ExternalReference>& value_matcher) {
+  return MakeMatcher(new IsConstantMatcher<ExternalReference>(
+      IrOpcode::kExternalConstant, value_matcher));
+}
+
+
+Matcher<Node*> IsHeapConstant(
+    const Matcher<Unique<HeapObject> >& value_matcher) {
+  return MakeMatcher(new IsConstantMatcher<Unique<HeapObject> >(
+      IrOpcode::kHeapConstant, value_matcher));
+}
+
+
+Matcher<Node*> IsInt32Constant(const Matcher<int32_t>& value_matcher) {
+  return MakeMatcher(
+      new IsConstantMatcher<int32_t>(IrOpcode::kInt32Constant, value_matcher));
+}
+
+
+Matcher<Node*> IsInt64Constant(const Matcher<int64_t>& value_matcher) {
+  return MakeMatcher(
+      new IsConstantMatcher<int64_t>(IrOpcode::kInt64Constant, value_matcher));
+}
+
+
+Matcher<Node*> IsFloat32Constant(const Matcher<float>& value_matcher) {
+  return MakeMatcher(
+      new IsConstantMatcher<float>(IrOpcode::kFloat32Constant, value_matcher));
+}
+
+
+Matcher<Node*> IsFloat64Constant(const Matcher<double>& value_matcher) {
+  return MakeMatcher(
+      new IsConstantMatcher<double>(IrOpcode::kFloat64Constant, value_matcher));
+}
+
+
+Matcher<Node*> IsNumberConstant(const Matcher<double>& value_matcher) {
+  return MakeMatcher(
+      new IsConstantMatcher<double>(IrOpcode::kNumberConstant, value_matcher));
+}
+
+
+Matcher<Node*> IsPhi(const Matcher<MachineType>& type_matcher,
+                     const Matcher<Node*>& value0_matcher,
+                     const Matcher<Node*>& value1_matcher,
+                     const Matcher<Node*>& merge_matcher) {
+  return MakeMatcher(new IsPhiMatcher(type_matcher, value0_matcher,
+                                      value1_matcher, merge_matcher));
+}
+
+
+Matcher<Node*> IsProjection(const Matcher<size_t>& index_matcher,
+                            const Matcher<Node*>& base_matcher) {
+  return MakeMatcher(new IsProjectionMatcher(index_matcher, base_matcher));
+}
+
+
+Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
+                      const Matcher<Node*>& value0_matcher,
+                      const Matcher<Node*>& value1_matcher,
+                      const Matcher<Node*>& value2_matcher,
+                      const Matcher<Node*>& value3_matcher,
+                      const Matcher<Node*>& effect_matcher,
+                      const Matcher<Node*>& control_matcher) {
+  return MakeMatcher(new IsCallMatcher(
+      descriptor_matcher, value0_matcher, value1_matcher, value2_matcher,
+      value3_matcher, effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
+                      const Matcher<Node*>& base_matcher,
+                      const Matcher<Node*>& index_matcher,
+                      const Matcher<Node*>& effect_matcher) {
+  return MakeMatcher(new IsLoadMatcher(rep_matcher, base_matcher, index_matcher,
+                                       effect_matcher));
+}
+
+
+Matcher<Node*> IsStore(const Matcher<MachineType>& type_matcher,
+                       const Matcher<WriteBarrierKind>& write_barrier_matcher,
+                       const Matcher<Node*>& base_matcher,
+                       const Matcher<Node*>& index_matcher,
+                       const Matcher<Node*>& value_matcher,
+                       const Matcher<Node*>& effect_matcher,
+                       const Matcher<Node*>& control_matcher) {
+  return MakeMatcher(new IsStoreMatcher(
+      type_matcher, write_barrier_matcher, base_matcher, index_matcher,
+      value_matcher, effect_matcher, control_matcher));
+}
+
+
+#define IS_BINOP_MATCHER(Name)                                            \
+  Matcher<Node*> Is##Name(const Matcher<Node*>& lhs_matcher,              \
+                          const Matcher<Node*>& rhs_matcher) {            \
+    return MakeMatcher(                                                   \
+        new IsBinopMatcher(IrOpcode::k##Name, lhs_matcher, rhs_matcher)); \
+  }
+IS_BINOP_MATCHER(NumberLessThan)
+IS_BINOP_MATCHER(Word32And)
+IS_BINOP_MATCHER(Word32Sar)
+IS_BINOP_MATCHER(Word32Shl)
+IS_BINOP_MATCHER(Word32Ror)
+IS_BINOP_MATCHER(Word32Equal)
+IS_BINOP_MATCHER(Word64And)
+IS_BINOP_MATCHER(Word64Sar)
+IS_BINOP_MATCHER(Word64Shl)
+IS_BINOP_MATCHER(Word64Equal)
+IS_BINOP_MATCHER(Int32AddWithOverflow)
+IS_BINOP_MATCHER(Int32Mul)
+IS_BINOP_MATCHER(Uint32LessThanOrEqual)
+#undef IS_BINOP_MATCHER
+
+
+#define IS_UNOP_MATCHER(Name)                                                \
+  Matcher<Node*> Is##Name(const Matcher<Node*>& input_matcher) {             \
+    return MakeMatcher(new IsUnopMatcher(IrOpcode::k##Name, input_matcher)); \
+  }
+IS_UNOP_MATCHER(ChangeFloat64ToInt32)
+IS_UNOP_MATCHER(ChangeFloat64ToUint32)
+IS_UNOP_MATCHER(ChangeInt32ToFloat64)
+IS_UNOP_MATCHER(ChangeInt32ToInt64)
+IS_UNOP_MATCHER(ChangeUint32ToFloat64)
+IS_UNOP_MATCHER(ChangeUint32ToUint64)
+IS_UNOP_MATCHER(TruncateFloat64ToInt32)
+IS_UNOP_MATCHER(TruncateInt64ToInt32)
+IS_UNOP_MATCHER(Float64Sqrt)
+#undef IS_UNOP_MATCHER
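+
+// Illustrative sketch (not part of the original file): the factory functions
+// above build ordinary gmock matchers, so tests can assert on graph shape by
+// nesting them inside EXPECT_THAT. "replacement" and "input" below are
+// hypothetical placeholders for whatever nodes a test wants to inspect.
+//
+//   EXPECT_THAT(replacement,
+//               IsWord32Sar(IsWord32Shl(input, IsInt32Constant(24)),
+//                           IsInt32Constant(24)));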
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/graph-unittest.h b/src/compiler/graph-unittest.h
new file mode 100644
index 0000000..1dc9c3d
--- /dev/null
+++ b/src/compiler/graph-unittest.h
@@ -0,0 +1,140 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_UNITTEST_H_
+#define V8_COMPILER_GRAPH_UNITTEST_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/test/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class HeapObject;
+template <class T>
+class Unique;
+
+namespace compiler {
+
+using ::testing::Matcher;
+
+
+class GraphTest : public TestWithContext, public TestWithZone {
+ public:
+  explicit GraphTest(int parameters = 1);
+  virtual ~GraphTest();
+
+ protected:
+  Node* Parameter(int32_t index);
+  Node* Float32Constant(volatile float value);
+  Node* Float64Constant(volatile double value);
+  Node* Int32Constant(int32_t value);
+  Node* Int64Constant(int64_t value);
+  Node* NumberConstant(volatile double value);
+  Node* HeapConstant(const Unique<HeapObject>& value);
+  Node* FalseConstant();
+  Node* TrueConstant();
+
+  Matcher<Node*> IsFalseConstant();
+  Matcher<Node*> IsTrueConstant();
+
+  CommonOperatorBuilder* common() { return &common_; }
+  Graph* graph() { return &graph_; }
+
+ private:
+  CommonOperatorBuilder common_;
+  Graph graph_;
+};
+
+
+Matcher<Node*> IsBranch(const Matcher<Node*>& value_matcher,
+                        const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsMerge(const Matcher<Node*>& control0_matcher,
+                       const Matcher<Node*>& control1_matcher);
+Matcher<Node*> IsIfTrue(const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsIfFalse(const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsControlEffect(const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsValueEffect(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsFinish(const Matcher<Node*>& value_matcher,
+                        const Matcher<Node*>& effect_matcher);
+Matcher<Node*> IsExternalConstant(
+    const Matcher<ExternalReference>& value_matcher);
+Matcher<Node*> IsHeapConstant(
+    const Matcher<Unique<HeapObject> >& value_matcher);
+Matcher<Node*> IsFloat32Constant(const Matcher<float>& value_matcher);
+Matcher<Node*> IsFloat64Constant(const Matcher<double>& value_matcher);
+Matcher<Node*> IsInt32Constant(const Matcher<int32_t>& value_matcher);
+Matcher<Node*> IsInt64Constant(const Matcher<int64_t>& value_matcher);
+Matcher<Node*> IsNumberConstant(const Matcher<double>& value_matcher);
+Matcher<Node*> IsPhi(const Matcher<MachineType>& type_matcher,
+                     const Matcher<Node*>& value0_matcher,
+                     const Matcher<Node*>& value1_matcher,
+                     const Matcher<Node*>& merge_matcher);
+Matcher<Node*> IsProjection(const Matcher<size_t>& index_matcher,
+                            const Matcher<Node*>& base_matcher);
+Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
+                      const Matcher<Node*>& value0_matcher,
+                      const Matcher<Node*>& value1_matcher,
+                      const Matcher<Node*>& value2_matcher,
+                      const Matcher<Node*>& value3_matcher,
+                      const Matcher<Node*>& effect_matcher,
+                      const Matcher<Node*>& control_matcher);
+
+Matcher<Node*> IsNumberLessThan(const Matcher<Node*>& lhs_matcher,
+                                const Matcher<Node*>& rhs_matcher);
+
+Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
+                      const Matcher<Node*>& base_matcher,
+                      const Matcher<Node*>& index_matcher,
+                      const Matcher<Node*>& effect_matcher);
+Matcher<Node*> IsStore(const Matcher<MachineType>& type_matcher,
+                       const Matcher<WriteBarrierKind>& write_barrier_matcher,
+                       const Matcher<Node*>& base_matcher,
+                       const Matcher<Node*>& index_matcher,
+                       const Matcher<Node*>& value_matcher,
+                       const Matcher<Node*>& effect_matcher,
+                       const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsWord32And(const Matcher<Node*>& lhs_matcher,
+                           const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32Sar(const Matcher<Node*>& lhs_matcher,
+                           const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32Shl(const Matcher<Node*>& lhs_matcher,
+                           const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32Ror(const Matcher<Node*>& lhs_matcher,
+                           const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32Equal(const Matcher<Node*>& lhs_matcher,
+                             const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord64And(const Matcher<Node*>& lhs_matcher,
+                           const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord64Shl(const Matcher<Node*>& lhs_matcher,
+                           const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord64Sar(const Matcher<Node*>& lhs_matcher,
+                           const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord64Equal(const Matcher<Node*>& lhs_matcher,
+                             const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt32AddWithOverflow(const Matcher<Node*>& lhs_matcher,
+                                      const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt32Mul(const Matcher<Node*>& lhs_matcher,
+                          const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsUint32LessThanOrEqual(const Matcher<Node*>& lhs_matcher,
+                                       const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsChangeFloat64ToInt32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeFloat64ToUint32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeInt32ToFloat64(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeInt32ToInt64(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeUint32ToFloat64(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeUint32ToUint64(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsTruncateFloat64ToInt32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsTruncateInt64ToInt32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64Sqrt(const Matcher<Node*>& input_matcher);
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_GRAPH_UNITTEST_H_
diff --git a/src/compiler/graph-visualizer.cc b/src/compiler/graph-visualizer.cc
new file mode 100644
index 0000000..10d6698
--- /dev/null
+++ b/src/compiler/graph-visualizer.cc
@@ -0,0 +1,282 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-visualizer.h"
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/ostreams.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define DEAD_COLOR "#999999"
+
+class GraphVisualizer : public NullNodeVisitor {
+ public:
+  GraphVisualizer(OStream& os, Zone* zone, const Graph* graph);  // NOLINT
+
+  void Print();
+
+  GenericGraphVisit::Control Pre(Node* node);
+  GenericGraphVisit::Control PreEdge(Node* from, int index, Node* to);
+
+ private:
+  void AnnotateNode(Node* node);
+  void PrintEdge(Node::Edge edge);
+
+  Zone* zone_;
+  NodeSet all_nodes_;
+  NodeSet white_nodes_;
+  bool use_to_def_;
+  OStream& os_;
+  const Graph* const graph_;
+
+  DISALLOW_COPY_AND_ASSIGN(GraphVisualizer);
+};
+
+
+static Node* GetControlCluster(Node* node) {
+  if (OperatorProperties::IsBasicBlockBegin(node->op())) {
+    return node;
+  } else if (OperatorProperties::GetControlInputCount(node->op()) == 1) {
+    Node* control = NodeProperties::GetControlInput(node, 0);
+    return OperatorProperties::IsBasicBlockBegin(control->op()) ? control
+                                                                : NULL;
+  } else {
+    return NULL;
+  }
+}
+
+
+GenericGraphVisit::Control GraphVisualizer::Pre(Node* node) {
+  if (all_nodes_.count(node) == 0) {
+    Node* control_cluster = GetControlCluster(node);
+    if (control_cluster != NULL) {
+      os_ << "  subgraph cluster_BasicBlock" << control_cluster->id() << " {\n";
+    }
+    os_ << "  ID" << node->id() << " [\n";
+    AnnotateNode(node);
+    os_ << "  ]\n";
+    if (control_cluster != NULL) os_ << "  }\n";
+    all_nodes_.insert(node);
+    if (use_to_def_) white_nodes_.insert(node);
+  }
+  return GenericGraphVisit::CONTINUE;
+}
+
+
+GenericGraphVisit::Control GraphVisualizer::PreEdge(Node* from, int index,
+                                                    Node* to) {
+  if (use_to_def_) return GenericGraphVisit::CONTINUE;
+  // When going from def to use, only consider white -> other edges, which are
+  // the dead nodes that use live nodes. We're probably not interested in
+  // dead nodes that only use other dead nodes.
+  if (white_nodes_.count(from) > 0) return GenericGraphVisit::CONTINUE;
+  return GenericGraphVisit::SKIP;
+}
+
+
+class Escaped {
+ public:
+  explicit Escaped(const OStringStream& os) : str_(os.c_str()) {}
+
+  friend OStream& operator<<(OStream& os, const Escaped& e) {
+    for (const char* s = e.str_; *s != '\0'; ++s) {
+      if (needs_escape(*s)) os << "\\";
+      os << *s;
+    }
+    return os;
+  }
+
+ private:
+  static bool needs_escape(char ch) {
+    switch (ch) {
+      case '>':
+      case '<':
+      case '|':
+      case '}':
+      case '{':
+        return true;
+      default:
+        return false;
+    }
+  }
+
+  const char* const str_;
+};
+
+
+static bool IsLikelyBackEdge(Node* from, int index, Node* to) {
+  if (from->opcode() == IrOpcode::kPhi ||
+      from->opcode() == IrOpcode::kEffectPhi) {
+    Node* control = NodeProperties::GetControlInput(from, 0);
+    return control->opcode() != IrOpcode::kMerge && control != to && index != 0;
+  } else if (from->opcode() == IrOpcode::kLoop) {
+    return index != 0;
+  } else {
+    return false;
+  }
+}
+
+
+void GraphVisualizer::AnnotateNode(Node* node) {
+  if (!use_to_def_) {
+    os_ << "    style=\"filled\"\n"
+        << "    fillcolor=\"" DEAD_COLOR "\"\n";
+  }
+
+  os_ << "    shape=\"record\"\n";
+  switch (node->opcode()) {
+    case IrOpcode::kEnd:
+    case IrOpcode::kDead:
+    case IrOpcode::kStart:
+      os_ << "    style=\"diagonals\"\n";
+      break;
+    case IrOpcode::kMerge:
+    case IrOpcode::kIfTrue:
+    case IrOpcode::kIfFalse:
+    case IrOpcode::kLoop:
+      os_ << "    style=\"rounded\"\n";
+      break;
+    default:
+      break;
+  }
+
+  OStringStream label;
+  label << *node->op();
+  os_ << "    label=\"{{#" << node->id() << ":" << Escaped(label);
+
+  InputIter i = node->inputs().begin();
+  for (int j = OperatorProperties::GetValueInputCount(node->op()); j > 0;
+       ++i, j--) {
+    os_ << "|<I" << i.index() << ">#" << (*i)->id();
+  }
+  for (int j = OperatorProperties::GetContextInputCount(node->op()); j > 0;
+       ++i, j--) {
+    os_ << "|<I" << i.index() << ">X #" << (*i)->id();
+  }
+  for (int j = OperatorProperties::GetFrameStateInputCount(node->op()); j > 0;
+       ++i, j--) {
+    os_ << "|<I" << i.index() << ">F #" << (*i)->id();
+  }
+  for (int j = OperatorProperties::GetEffectInputCount(node->op()); j > 0;
+       ++i, j--) {
+    os_ << "|<I" << i.index() << ">E #" << (*i)->id();
+  }
+
+  if (!use_to_def_ || OperatorProperties::IsBasicBlockBegin(node->op()) ||
+      GetControlCluster(node) == NULL) {
+    for (int j = OperatorProperties::GetControlInputCount(node->op()); j > 0;
+         ++i, j--) {
+      os_ << "|<I" << i.index() << ">C #" << (*i)->id();
+    }
+  }
+  os_ << "}";
+
+  if (FLAG_trace_turbo_types && !NodeProperties::IsControl(node)) {
+    Bounds bounds = NodeProperties::GetBounds(node);
+    OStringStream upper;
+    bounds.upper->PrintTo(upper);
+    OStringStream lower;
+    bounds.lower->PrintTo(lower);
+    os_ << "|" << Escaped(upper) << "|" << Escaped(lower);
+  }
+  os_ << "}\"\n";
+}
+
+
+void GraphVisualizer::PrintEdge(Node::Edge edge) {
+  Node* from = edge.from();
+  int index = edge.index();
+  Node* to = edge.to();
+  bool unconstrained = IsLikelyBackEdge(from, index, to);
+  os_ << "  ID" << from->id();
+  if (all_nodes_.count(to) == 0) {
+    os_ << ":I" << index << ":n -> DEAD_INPUT";
+  } else if (OperatorProperties::IsBasicBlockBegin(from->op()) ||
+             GetControlCluster(from) == NULL ||
+             (OperatorProperties::GetControlInputCount(from->op()) > 0 &&
+              NodeProperties::GetControlInput(from) != to)) {
+    os_ << ":I" << index << ":n -> ID" << to->id() << ":s"
+        << "[" << (unconstrained ? "constraint=false, " : "")
+        << (NodeProperties::IsControlEdge(edge) ? "style=bold, " : "")
+        << (NodeProperties::IsEffectEdge(edge) ? "style=dotted, " : "")
+        << (NodeProperties::IsContextEdge(edge) ? "style=dashed, " : "") << "]";
+  } else {
+    os_ << " -> ID" << to->id() << ":s [color=transparent, "
+        << (unconstrained ? "constraint=false, " : "")
+        << (NodeProperties::IsControlEdge(edge) ? "style=dashed, " : "") << "]";
+  }
+  os_ << "\n";
+}
+
+
+void GraphVisualizer::Print() {
+  os_ << "digraph D {\n"
+      << "  node [fontsize=8,height=0.25]\n"
+      << "  rankdir=\"BT\"\n"
+      << "  ranksep=\"1.2 equally\"\n"
+      << "  overlap=\"false\"\n"
+      << "  splines=\"true\"\n"
+      << "  concentrate=\"true\"\n"
+      << "  \n";
+
+  // Make sure all nodes have been output before writing out the edges.
+  use_to_def_ = true;
+  // TODO(svenpanne) Remove the need for the const_casts.
+  const_cast<Graph*>(graph_)->VisitNodeInputsFromEnd(this);
+  white_nodes_.insert(const_cast<Graph*>(graph_)->start());
+
+  // Visit all uses of white nodes.
+  use_to_def_ = false;
+  GenericGraphVisit::Visit<GraphVisualizer, NodeUseIterationTraits<Node> >(
+      const_cast<Graph*>(graph_), zone_, white_nodes_.begin(),
+      white_nodes_.end(), this);
+
+  os_ << "  DEAD_INPUT [\n"
+      << "    style=\"filled\" \n"
+      << "    fillcolor=\"" DEAD_COLOR "\"\n"
+      << "  ]\n"
+      << "\n";
+
+  // With all the nodes written, add the edges.
+  for (NodeSetIter i = all_nodes_.begin(); i != all_nodes_.end(); ++i) {
+    Node::Inputs inputs = (*i)->inputs();
+    for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+         ++iter) {
+      PrintEdge(iter.edge());
+    }
+  }
+  os_ << "}\n";
+}
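+
+// For orientation (an illustrative sketch, not the literal output of any
+// particular graph): Print() above emits GraphViz DOT text roughly of the
+// form
+//
+//   digraph D {
+//     node [fontsize=8,height=0.25]
+//     rankdir="BT"
+//     ID6 [ shape="record" label="{{#6:Int32Constant[1]}}" ]
+//     ID7 [ shape="record" label="{{#7:ValueEffect|<I0>#6}}" ]
+//     DEAD_INPUT [ style="filled" fillcolor="#999999" ]
+//     ID7:I0:n -> ID6:s []
+//   }
+//
+// i.e. one record node per IR node with a port per input, a DEAD_INPUT sink
+// for inputs that were never visited, and one edge per input, styled by
+// whether it is a value, effect, context or control edge.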
+
+
+GraphVisualizer::GraphVisualizer(OStream& os, Zone* zone,
+                                 const Graph* graph)  // NOLINT
+    : zone_(zone),
+      all_nodes_(NodeSet::key_compare(), NodeSet::allocator_type(zone)),
+      white_nodes_(NodeSet::key_compare(), NodeSet::allocator_type(zone)),
+      use_to_def_(true),
+      os_(os),
+      graph_(graph) {}
+
+
+OStream& operator<<(OStream& os, const AsDOT& ad) {
+  Zone tmp_zone(ad.graph.zone()->isolate());
+  GraphVisualizer(os, &tmp_zone, &ad.graph).Print();
+  return os;
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/graph-visualizer.h b/src/compiler/graph-visualizer.h
new file mode 100644
index 0000000..12532ba
--- /dev/null
+++ b/src/compiler/graph-visualizer.h
@@ -0,0 +1,29 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_VISUALIZER_H_
+#define V8_COMPILER_GRAPH_VISUALIZER_H_
+
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+class OStream;
+
+namespace compiler {
+
+class Graph;
+
+struct AsDOT {
+  explicit AsDOT(const Graph& g) : graph(g) {}
+  const Graph& graph;
+};
+
+OStream& operator<<(OStream& os, const AsDOT& ad);
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_GRAPH_VISUALIZER_H_
diff --git a/src/compiler/graph.cc b/src/compiler/graph.cc
new file mode 100644
index 0000000..7b5f228
--- /dev/null
+++ b/src/compiler/graph.cc
@@ -0,0 +1,37 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/operator-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Graph::Graph(Zone* zone) : GenericGraph<Node>(zone), decorators_(zone) {}
+
+
+Node* Graph::NewNode(const Operator* op, int input_count, Node** inputs) {
+  DCHECK_LE(op->InputCount(), input_count);
+  Node* result = Node::New(this, input_count, inputs);
+  result->Initialize(op);
+  for (ZoneVector<GraphDecorator*>::iterator i = decorators_.begin();
+       i != decorators_.end(); ++i) {
+    (*i)->Decorate(result);
+  }
+  return result;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/graph.h b/src/compiler/graph.h
new file mode 100644
index 0000000..07eb02f
--- /dev/null
+++ b/src/compiler/graph.h
@@ -0,0 +1,93 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_H_
+#define V8_COMPILER_GRAPH_H_
+
+#include <map>
+#include <set>
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-aux-data.h"
+#include "src/compiler/source-position.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class GraphDecorator;
+
+
+class Graph : public GenericGraph<Node> {
+ public:
+  explicit Graph(Zone* zone);
+
+  // Base implementation used by all factory methods.
+  Node* NewNode(const Operator* op, int input_count, Node** inputs);
+
+  // Factories for nodes with static input counts.
+  Node* NewNode(const Operator* op) {
+    return NewNode(op, 0, static_cast<Node**>(NULL));
+  }
+  Node* NewNode(const Operator* op, Node* n1) { return NewNode(op, 1, &n1); }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2) {
+    Node* nodes[] = {n1, n2};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) {
+    Node* nodes[] = {n1, n2, n3};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
+    Node* nodes[] = {n1, n2, n3, n4};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5) {
+    Node* nodes[] = {n1, n2, n3, n4, n5};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5, Node* n6) {
+    Node* nodes[] = {n1, n2, n3, n4, n5, n6};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
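+
+  // Usage sketch (illustrative, not part of the original header): operators
+  // come from builders such as CommonOperatorBuilder, and the fixed-arity
+  // overloads above simply forward to the array-based NewNode. The locals
+  // below are hypothetical.
+  //
+  //   CommonOperatorBuilder common(zone);
+  //   Node* a = graph.NewNode(common.Int32Constant(1));
+  //   Node* b = graph.NewNode(common.Int32Constant(2));
+  //   Node* phi = graph.NewNode(common.Phi(kMachInt32, 2), a, b, merge);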
+
+  template <class Visitor>
+  void VisitNodeUsesFrom(Node* node, Visitor* visitor);
+
+  template <class Visitor>
+  void VisitNodeUsesFromStart(Visitor* visitor);
+
+  template <class Visitor>
+  void VisitNodeInputsFromEnd(Visitor* visitor);
+
+  void AddDecorator(GraphDecorator* decorator) {
+    decorators_.push_back(decorator);
+  }
+
+  void RemoveDecorator(GraphDecorator* decorator) {
+    ZoneVector<GraphDecorator*>::iterator it =
+        std::find(decorators_.begin(), decorators_.end(), decorator);
+    DCHECK(it != decorators_.end());
+    decorators_.erase(it, it + 1);
+  }
+
+ private:
+  ZoneVector<GraphDecorator*> decorators_;
+};
+
+
+class GraphDecorator : public ZoneObject {
+ public:
+  virtual ~GraphDecorator() {}
+  virtual void Decorate(Node* node) = 0;
+};
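+
+// Illustrative sketch (not part of the original header): a decorator
+// registered via Graph::AddDecorator receives a callback for every node
+// created through Graph::NewNode. The subclass below is a hypothetical
+// example that simply remembers the id of the most recently created node.
+//
+//   class LastNodeIdDecorator FINAL : public GraphDecorator {
+//    public:
+//     virtual void Decorate(Node* node) OVERRIDE { last_id_ = node->id(); }
+//     NodeId last_id_;
+//   };
+//
+//   graph.AddDecorator(new (zone) LastNodeIdDecorator());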
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_GRAPH_H_
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
new file mode 100644
index 0000000..deab7cd
--- /dev/null
+++ b/src/compiler/ia32/code-generator-ia32.cc
@@ -0,0 +1,959 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/ia32/assembler-ia32.h"
+#include "src/ia32/macro-assembler-ia32.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+// Adds IA-32 specific methods for decoding operands.
+class IA32OperandConverter : public InstructionOperandConverter {
+ public:
+  IA32OperandConverter(CodeGenerator* gen, Instruction* instr)
+      : InstructionOperandConverter(gen, instr) {}
+
+  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
+
+  Immediate InputImmediate(int index) {
+    return ToImmediate(instr_->InputAt(index));
+  }
+
+  Operand OutputOperand() { return ToOperand(instr_->Output()); }
+
+  Operand TempOperand(int index) { return ToOperand(instr_->TempAt(index)); }
+
+  Operand ToOperand(InstructionOperand* op, int extra = 0) {
+    if (op->IsRegister()) {
+      DCHECK(extra == 0);
+      return Operand(ToRegister(op));
+    } else if (op->IsDoubleRegister()) {
+      DCHECK(extra == 0);
+      return Operand(ToDoubleRegister(op));
+    }
+    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+    // The linkage computes where all spill slots are located.
+    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
+    return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
+  }
+
+  Operand HighOperand(InstructionOperand* op) {
+    DCHECK(op->IsDoubleStackSlot());
+    return ToOperand(op, kPointerSize);
+  }
+
+  Immediate ToImmediate(InstructionOperand* operand) {
+    Constant constant = ToConstant(operand);
+    switch (constant.type()) {
+      case Constant::kInt32:
+        return Immediate(constant.ToInt32());
+      case Constant::kFloat64:
+        return Immediate(
+            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+      case Constant::kExternalReference:
+        return Immediate(constant.ToExternalReference());
+      case Constant::kHeapObject:
+        return Immediate(constant.ToHeapObject());
+      case Constant::kInt64:
+        break;
+    }
+    UNREACHABLE();
+    return Immediate(-1);
+  }
+
+  Operand MemoryOperand(int* first_input) {
+    const int offset = *first_input;
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_MR1I:
+        *first_input += 2;
+        return Operand(InputRegister(offset + 0), InputRegister(offset + 1),
+                       times_1,
+                       0);  // TODO(dcarney): K != 0
+      case kMode_MRI:
+        *first_input += 2;
+        return Operand::ForRegisterPlusImmediate(InputRegister(offset + 0),
+                                                 InputImmediate(offset + 1));
+      case kMode_MI:
+        *first_input += 1;
+        return Operand(InputImmediate(offset + 0));
+      default:
+        UNREACHABLE();
+        return Operand(no_reg);
+    }
+  }
+
+  Operand MemoryOperand() {
+    int first_input = 0;
+    return MemoryOperand(&first_input);
+  }
+};
+
+
+static bool HasImmediateInput(Instruction* instr, int index) {
+  return instr->InputAt(index)->IsImmediate();
+}
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+  IA32OperandConverter i(this, instr);
+
+  switch (ArchOpcodeField::decode(instr->opcode())) {
+    case kArchCallCodeObject: {
+      EnsureSpaceForLazyDeopt();
+      if (HasImmediateInput(instr, 0)) {
+        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+        __ call(code, RelocInfo::CODE_TARGET);
+      } else {
+        Register reg = i.InputRegister(0);
+        __ call(Operand(reg, Code::kHeaderSize - kHeapObjectTag));
+      }
+      AddSafepointAndDeopt(instr);
+      break;
+    }
+    case kArchCallJSFunction: {
+      EnsureSpaceForLazyDeopt();
+      Register func = i.InputRegister(0);
+      if (FLAG_debug_code) {
+        // Check the function's context matches the context argument.
+        __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
+        __ Assert(equal, kWrongFunctionContext);
+      }
+      __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
+      AddSafepointAndDeopt(instr);
+      break;
+    }
+    case kArchJmp:
+      __ jmp(code()->GetLabel(i.InputBlock(0)));
+      break;
+    case kArchNop:
+      // don't emit code for nops.
+      break;
+    case kArchRet:
+      AssembleReturn();
+      break;
+    case kArchTruncateDoubleToI:
+      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+      break;
+    case kIA32Add:
+      if (HasImmediateInput(instr, 1)) {
+        __ add(i.InputOperand(0), i.InputImmediate(1));
+      } else {
+        __ add(i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
+    case kIA32And:
+      if (HasImmediateInput(instr, 1)) {
+        __ and_(i.InputOperand(0), i.InputImmediate(1));
+      } else {
+        __ and_(i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
+    case kIA32Cmp:
+      if (HasImmediateInput(instr, 1)) {
+        __ cmp(i.InputOperand(0), i.InputImmediate(1));
+      } else {
+        __ cmp(i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
+    case kIA32Test:
+      if (HasImmediateInput(instr, 1)) {
+        __ test(i.InputOperand(0), i.InputImmediate(1));
+      } else {
+        __ test(i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
+    case kIA32Imul:
+      if (HasImmediateInput(instr, 1)) {
+        __ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1));
+      } else {
+        __ imul(i.OutputRegister(), i.InputOperand(1));
+      }
+      break;
+    case kIA32Idiv:
+      __ cdq();
+      __ idiv(i.InputOperand(1));
+      break;
+    case kIA32Udiv:
+      __ xor_(edx, edx);
+      __ div(i.InputOperand(1));
+      break;
+    case kIA32Not:
+      __ not_(i.OutputOperand());
+      break;
+    case kIA32Neg:
+      __ neg(i.OutputOperand());
+      break;
+    case kIA32Or:
+      if (HasImmediateInput(instr, 1)) {
+        __ or_(i.InputOperand(0), i.InputImmediate(1));
+      } else {
+        __ or_(i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
+    case kIA32Xor:
+      if (HasImmediateInput(instr, 1)) {
+        __ xor_(i.InputOperand(0), i.InputImmediate(1));
+      } else {
+        __ xor_(i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
+    case kIA32Sub:
+      if (HasImmediateInput(instr, 1)) {
+        __ sub(i.InputOperand(0), i.InputImmediate(1));
+      } else {
+        __ sub(i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
+    case kIA32Shl:
+      if (HasImmediateInput(instr, 1)) {
+        __ shl(i.OutputRegister(), i.InputInt5(1));
+      } else {
+        __ shl_cl(i.OutputRegister());
+      }
+      break;
+    case kIA32Shr:
+      if (HasImmediateInput(instr, 1)) {
+        __ shr(i.OutputRegister(), i.InputInt5(1));
+      } else {
+        __ shr_cl(i.OutputRegister());
+      }
+      break;
+    case kIA32Sar:
+      if (HasImmediateInput(instr, 1)) {
+        __ sar(i.OutputRegister(), i.InputInt5(1));
+      } else {
+        __ sar_cl(i.OutputRegister());
+      }
+      break;
+    case kIA32Ror:
+      if (HasImmediateInput(instr, 1)) {
+        __ ror(i.OutputRegister(), i.InputInt5(1));
+      } else {
+        __ ror_cl(i.OutputRegister());
+      }
+      break;
+    case kSSEFloat64Cmp:
+      __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
+      break;
+    case kSSEFloat64Add:
+      __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Sub:
+      __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Mul:
+      __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Div:
+      __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Mod: {
+      // TODO(dcarney): alignment is wrong.
+      __ sub(esp, Immediate(kDoubleSize));
+      // Move values to st(0) and st(1).
+      __ movsd(Operand(esp, 0), i.InputDoubleRegister(1));
+      __ fld_d(Operand(esp, 0));
+      __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
+      __ fld_d(Operand(esp, 0));
+      // Loop while fprem isn't done.
+      Label mod_loop;
+      __ bind(&mod_loop);
+      // This instruction traps on all kinds of inputs, but we are assuming
+      // the floating point control word is set to ignore them all.
+      __ fprem();
+      // The following 2 instructions implicitly use eax.
+      __ fnstsw_ax();
+      __ sahf();
+      __ j(parity_even, &mod_loop);
+      // Move output to stack and clean up.
+      __ fstp(1);
+      __ fstp_d(Operand(esp, 0));
+      __ movsd(i.OutputDoubleRegister(), Operand(esp, 0));
+      __ add(esp, Immediate(kDoubleSize));
+      break;
+    }
+    case kSSEFloat64Sqrt:
+      __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
+      break;
+    case kSSEFloat64ToInt32:
+      __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
+      break;
+    case kSSEFloat64ToUint32: {
+      XMMRegister scratch = xmm0;
+      __ Move(scratch, -2147483648.0);
+      __ addsd(scratch, i.InputOperand(0));
+      __ cvttsd2si(i.OutputRegister(), scratch);
+      __ add(i.OutputRegister(), Immediate(0x80000000));
+      break;
+    }
+    case kSSEInt32ToFloat64:
+      __ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
+      break;
+    case kSSEUint32ToFloat64:
+      // TODO(turbofan): IA32 SSE LoadUint32() should take an operand.
+      __ LoadUint32(i.OutputDoubleRegister(), i.InputRegister(0));
+      break;
+    case kIA32Movsxbl:
+      __ movsx_b(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kIA32Movzxbl:
+      __ movzx_b(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kIA32Movb: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      if (HasImmediateInput(instr, index)) {
+        __ mov_b(operand, i.InputInt8(index));
+      } else {
+        __ mov_b(operand, i.InputRegister(index));
+      }
+      break;
+    }
+    case kIA32Movsxwl:
+      __ movsx_w(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kIA32Movzxwl:
+      __ movzx_w(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kIA32Movw: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      if (HasImmediateInput(instr, index)) {
+        __ mov_w(operand, i.InputInt16(index));
+      } else {
+        __ mov_w(operand, i.InputRegister(index));
+      }
+      break;
+    }
+    case kIA32Movl:
+      if (instr->HasOutput()) {
+        __ mov(i.OutputRegister(), i.MemoryOperand());
+      } else {
+        int index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        if (HasImmediateInput(instr, index)) {
+          __ mov(operand, i.InputImmediate(index));
+        } else {
+          __ mov(operand, i.InputRegister(index));
+        }
+      }
+      break;
+    case kIA32Movsd:
+      if (instr->HasOutput()) {
+        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
+      } else {
+        int index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        __ movsd(operand, i.InputDoubleRegister(index));
+      }
+      break;
+    case kIA32Movss:
+      if (instr->HasOutput()) {
+        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
+        __ cvtss2sd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+      } else {
+        int index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        __ cvtsd2ss(xmm0, i.InputDoubleRegister(index));
+        __ movss(operand, xmm0);
+      }
+      break;
+    case kIA32Push:
+      if (HasImmediateInput(instr, 0)) {
+        __ push(i.InputImmediate(0));
+      } else {
+        __ push(i.InputOperand(0));
+      }
+      break;
+    case kIA32StoreWriteBarrier: {
+      Register object = i.InputRegister(0);
+      Register index = i.InputRegister(1);
+      Register value = i.InputRegister(2);
+      __ mov(Operand(object, index, times_1, 0), value);
+      __ lea(index, Operand(object, index, times_1, 0));
+      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
+                                ? kSaveFPRegs
+                                : kDontSaveFPRegs;
+      __ RecordWrite(object, index, value, mode);
+      break;
+    }
+  }
+}
+
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+                                       FlagsCondition condition) {
+  IA32OperandConverter i(this, instr);
+  Label done;
+
+  // Emit a branch. The true and false targets are always the last two inputs
+  // to the instruction.
+  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
+  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+  bool fallthru = IsNextInAssemblyOrder(fblock);
+  Label* tlabel = code()->GetLabel(tblock);
+  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
+  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
+  switch (condition) {
+    case kUnorderedEqual:
+      __ j(parity_even, flabel, flabel_distance);
+    // Fall through.
+    case kEqual:
+      __ j(equal, tlabel);
+      break;
+    case kUnorderedNotEqual:
+      __ j(parity_even, tlabel);
+    // Fall through.
+    case kNotEqual:
+      __ j(not_equal, tlabel);
+      break;
+    case kSignedLessThan:
+      __ j(less, tlabel);
+      break;
+    case kSignedGreaterThanOrEqual:
+      __ j(greater_equal, tlabel);
+      break;
+    case kSignedLessThanOrEqual:
+      __ j(less_equal, tlabel);
+      break;
+    case kSignedGreaterThan:
+      __ j(greater, tlabel);
+      break;
+    case kUnorderedLessThan:
+      __ j(parity_even, flabel, flabel_distance);
+    // Fall through.
+    case kUnsignedLessThan:
+      __ j(below, tlabel);
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ j(parity_even, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      __ j(above_equal, tlabel);
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ j(parity_even, flabel, flabel_distance);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      __ j(below_equal, tlabel);
+      break;
+    case kUnorderedGreaterThan:
+      __ j(parity_even, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      __ j(above, tlabel);
+      break;
+    case kOverflow:
+      __ j(overflow, tlabel);
+      break;
+    case kNotOverflow:
+      __ j(no_overflow, tlabel);
+      break;
+  }
+  if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
+  __ bind(&done);
+}
+
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+                                        FlagsCondition condition) {
+  IA32OperandConverter i(this, instr);
+  Label done;
+
+  // Materialize a full 32-bit 1 or 0 value. The result register is always the
+  // last output of the instruction.
+  Label check;
+  DCHECK_NE(0, instr->OutputCount());
+  Register reg = i.OutputRegister(instr->OutputCount() - 1);
+  Condition cc = no_condition;
+  switch (condition) {
+    case kUnorderedEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ mov(reg, Immediate(0));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kEqual:
+      cc = equal;
+      break;
+    case kUnorderedNotEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ mov(reg, Immediate(1));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kNotEqual:
+      cc = not_equal;
+      break;
+    case kSignedLessThan:
+      cc = less;
+      break;
+    case kSignedGreaterThanOrEqual:
+      cc = greater_equal;
+      break;
+    case kSignedLessThanOrEqual:
+      cc = less_equal;
+      break;
+    case kSignedGreaterThan:
+      cc = greater;
+      break;
+    case kUnorderedLessThan:
+      __ j(parity_odd, &check, Label::kNear);
+      __ mov(reg, Immediate(0));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedLessThan:
+      cc = below;
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ mov(reg, Immediate(1));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      cc = above_equal;
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ mov(reg, Immediate(0));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      cc = below_equal;
+      break;
+    case kUnorderedGreaterThan:
+      __ j(parity_odd, &check, Label::kNear);
+      __ mov(reg, Immediate(1));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      cc = above;
+      break;
+    case kOverflow:
+      cc = overflow;
+      break;
+    case kNotOverflow:
+      cc = no_overflow;
+      break;
+  }
+  __ bind(&check);
+  if (reg.is_byte_register()) {
+    // setcc for byte registers (al, bl, cl, dl).
+    __ setcc(cc, reg);
+    __ movzx_b(reg, reg);
+  } else {
+    // Emit a branch to set a register to either 1 or 0.
+    Label set;
+    __ j(cc, &set, Label::kNear);
+    __ mov(reg, Immediate(0));
+    __ jmp(&done, Label::kNear);
+    __ bind(&set);
+    __ mov(reg, Immediate(1));
+  }
+  __ bind(&done);
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+      isolate(), deoptimization_id, Deoptimizer::LAZY);
+  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
+// The calling convention for JSFunctions on IA32 passes arguments on the
+// stack and the JSFunction and context in EDI and ESI, respectively, thus
+// the steps of the call look as follows:
+
+// --{ before the call instruction }--------------------------------------------
+//                                                         |  caller frame |
+//                                                         ^ esp           ^ ebp
+
+// --{ push arguments and set up ESI, EDI }-------------------------------------
+//                                       | args + receiver |  caller frame |
+//                                       ^ esp                             ^ ebp
+//                 [edi = JSFunction, esi = context]
+
+// --{ call [edi + kCodeEntryOffset] }------------------------------------------
+//                                 | RET | args + receiver |  caller frame |
+//                                 ^ esp                                   ^ ebp
+
+// =={ prologue of called function }============================================
+// --{ push ebp }---------------------------------------------------------------
+//                            | FP | RET | args + receiver |  caller frame |
+//                            ^ esp                                        ^ ebp
+
+// --{ mov ebp, esp }-----------------------------------------------------------
+//                            | FP | RET | args + receiver |  caller frame |
+//                            ^ ebp,esp
+
+// --{ push esi }---------------------------------------------------------------
+//                      | CTX | FP | RET | args + receiver |  caller frame |
+//                      ^esp  ^ ebp
+
+// --{ push edi }---------------------------------------------------------------
+//                | FNC | CTX | FP | RET | args + receiver |  caller frame |
+//                ^esp        ^ ebp
+
+// --{ subi esp, #N }-----------------------------------------------------------
+// | callee frame | FNC | CTX | FP | RET | args + receiver |  caller frame |
+// ^esp                       ^ ebp
+
+// =={ body of called function }================================================
+
+// =={ epilogue of called function }============================================
+// --{ mov esp, ebp }-----------------------------------------------------------
+//                            | FP | RET | args + receiver |  caller frame |
+//                            ^ esp,ebp
+
+// --{ pop ebp }-----------------------------------------------------------
+// |                               | RET | args + receiver |  caller frame |
+//                                 ^ esp                                   ^ ebp
+
+// --{ ret #A+1 }-----------------------------------------------------------
+// |                                                       |  caller frame |
+//                                                         ^ esp           ^ ebp
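+
+// A concrete consequence of the JSFunction layout above (an illustrative
+// note, not part of the original file): with EBP pointing at the saved frame
+// pointer, two slots (saved EBP and the return address) separate the frame
+// pointer from the incoming arguments, so the receiver of a JSFunction call
+// lives at
+//
+//   Operand(ebp, (num_parameters + 2) * kPointerSize)
+//
+// which is exactly the offset AssemblePrologue below computes for the
+// sloppy-mode receiver patching.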
+
+
+// Runtime function calls are accomplished by doing a stub call to the
+// CEntryStub (a real code object). On IA32 it passes arguments on the
+// stack, the number of arguments in EAX, the address of the runtime function
+// in EBX, and the context in ESI.
+
+// --{ before the call instruction }--------------------------------------------
+//                                                         |  caller frame |
+//                                                         ^ esp           ^ ebp
+
+// --{ push arguments and set up EAX, EBX, and ESI }----------------------------
+//                                       | args + receiver |  caller frame |
+//                                       ^ esp                             ^ ebp
+//              [eax = #args, ebx = runtime function, esi = context]
+
+// --{ call #CEntryStub }-------------------------------------------------------
+//                                 | RET | args + receiver |  caller frame |
+//                                 ^ esp                                   ^ ebp
+
+// =={ body of runtime function }===============================================
+
+// --{ runtime returns }--------------------------------------------------------
+//                                                         |  caller frame |
+//                                                         ^ esp           ^ ebp
+
+// Other custom linkages (e.g. for calling directly into and out of C++) may
+// need to save callee-saved registers on the stack, which is done in the
+// function prologue of generated code.
+
+// --{ before the call instruction }--------------------------------------------
+//                                                         |  caller frame |
+//                                                         ^ esp           ^ ebp
+
+// --{ set up arguments in registers and on the stack }-------------------------
+//                                                  | args |  caller frame |
+//                                                  ^ esp                  ^ ebp
+//                  [r0 = arg0, r1 = arg1, ...]
+
+// --{ call code }--------------------------------------------------------------
+//                                            | RET | args |  caller frame |
+//                                            ^ esp                        ^ ebp
+
+// =={ prologue of called function }============================================
+// --{ push ebp }---------------------------------------------------------------
+//                                       | FP | RET | args |  caller frame |
+//                                       ^ esp                             ^ ebp
+
+// --{ mov ebp, esp }-----------------------------------------------------------
+//                                       | FP | RET | args |  caller frame |
+//                                       ^ ebp,esp
+
+// --{ save registers }---------------------------------------------------------
+//                                | regs | FP | RET | args |  caller frame |
+//                                ^ esp  ^ ebp
+
+// --{ subi esp, #N }-----------------------------------------------------------
+//                 | callee frame | regs | FP | RET | args |  caller frame |
+//                 ^esp                  ^ ebp
+
+// =={ body of called function }================================================
+
+// =={ epilogue of called function }============================================
+// --{ restore registers }------------------------------------------------------
+//                                | regs | FP | RET | args |  caller frame |
+//                                ^ esp  ^ ebp
+
+// --{ mov esp, ebp }-----------------------------------------------------------
+//                                       | FP | RET | args |  caller frame |
+//                                       ^ esp,ebp
+
+// --{ pop ebp }----------------------------------------------------------------
+//                                            | RET | args |  caller frame |
+//                                            ^ esp                        ^ ebp
+
+
+void CodeGenerator::AssemblePrologue() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  Frame* frame = code_->frame();
+  int stack_slots = frame->GetSpillSlotCount();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    // Assemble a prologue similar to the cdecl calling convention.
+    __ push(ebp);
+    __ mov(ebp, esp);
+    const RegList saves = descriptor->CalleeSavedRegisters();
+    if (saves != 0) {  // Save callee-saved registers.
+      int register_save_area_size = 0;
+      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+        if (!((1 << i) & saves)) continue;
+        __ push(Register::from_code(i));
+        register_save_area_size += kPointerSize;
+      }
+      frame->SetRegisterSaveAreaSize(register_save_area_size);
+    }
+  } else if (descriptor->IsJSFunctionCall()) {
+    CompilationInfo* info = linkage()->info();
+    __ Prologue(info->IsCodePreAgingActive());
+    frame->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+
+    // Sloppy mode functions and builtins need to replace the receiver with the
+    // global proxy when called as functions (without an explicit receiver
+    // object).
+    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+    if (info->strict_mode() == SLOPPY && !info->is_native()) {
+      Label ok;
+      // +2 for return address and saved frame pointer.
+      int receiver_slot = info->scope()->num_parameters() + 2;
+      __ mov(ecx, Operand(ebp, receiver_slot * kPointerSize));
+      __ cmp(ecx, isolate()->factory()->undefined_value());
+      __ j(not_equal, &ok, Label::kNear);
+      __ mov(ecx, GlobalObjectOperand());
+      __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
+      __ mov(Operand(ebp, receiver_slot * kPointerSize), ecx);
+      __ bind(&ok);
+    }
+
+  } else {
+    __ StubPrologue();
+    frame->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+  }
+  if (stack_slots > 0) {
+    __ sub(esp, Immediate(stack_slots * kPointerSize));
+  }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    const RegList saves = descriptor->CalleeSavedRegisters();
+    if (frame()->GetRegisterSaveAreaSize() > 0) {
+      // Remove this frame's spill slots first.
+      int stack_slots = frame()->GetSpillSlotCount();
+      if (stack_slots > 0) {
+        __ add(esp, Immediate(stack_slots * kPointerSize));
+      }
+      // Restore registers.
+      if (saves != 0) {
+        for (int i = 0; i < Register::kNumRegisters; i++) {
+          if (!((1 << i) & saves)) continue;
+          __ pop(Register::from_code(i));
+        }
+      }
+      __ pop(ebp);  // Pop caller's frame pointer.
+      __ ret(0);
+    } else {
+      // No saved registers.
+      __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
+      __ pop(ebp);       // Pop caller's frame pointer.
+      __ ret(0);
+    }
+  } else {
+    __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
+    __ pop(ebp);       // Pop caller's frame pointer.
+    int pop_count = descriptor->IsJSFunctionCall()
+                        ? static_cast<int>(descriptor->JSParameterCount())
+                        : 0;
+    __ ret(pop_count * kPointerSize);
+  }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  IA32OperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Register src = g.ToRegister(source);
+    Operand dst = g.ToOperand(destination);
+    __ mov(dst, src);
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Operand src = g.ToOperand(source);
+    if (destination->IsRegister()) {
+      Register dst = g.ToRegister(destination);
+      __ mov(dst, src);
+    } else {
+      Operand dst = g.ToOperand(destination);
+      __ push(src);
+      __ pop(dst);
+    }
+  } else if (source->IsConstant()) {
+    Constant src_constant = g.ToConstant(source);
+    if (src_constant.type() == Constant::kHeapObject) {
+      Handle<HeapObject> src = src_constant.ToHeapObject();
+      if (destination->IsRegister()) {
+        Register dst = g.ToRegister(destination);
+        __ LoadHeapObject(dst, src);
+      } else {
+        DCHECK(destination->IsStackSlot());
+        Operand dst = g.ToOperand(destination);
+        AllowDeferredHandleDereference embedding_raw_address;
+        if (isolate()->heap()->InNewSpace(*src)) {
+          __ PushHeapObject(src);
+          __ pop(dst);
+        } else {
+          __ mov(dst, src);
+        }
+      }
+    } else if (destination->IsRegister()) {
+      Register dst = g.ToRegister(destination);
+      __ mov(dst, g.ToImmediate(source));
+    } else if (destination->IsStackSlot()) {
+      Operand dst = g.ToOperand(destination);
+      __ mov(dst, g.ToImmediate(source));
+    } else {
+      double v = g.ToDouble(source);
+      uint64_t int_val = bit_cast<uint64_t, double>(v);
+      int32_t lower = static_cast<int32_t>(int_val);
+      int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
+      if (destination->IsDoubleRegister()) {
+        XMMRegister dst = g.ToDoubleRegister(destination);
+        __ Move(dst, v);
+      } else {
+        DCHECK(destination->IsDoubleStackSlot());
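+        // Write the double constant as two 32-bit halves: the low word into
+        // the slot itself and the high word into the adjacent higher-address
+        // half returned by HighOperand().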
+        Operand dst0 = g.ToOperand(destination);
+        Operand dst1 = g.HighOperand(destination);
+        __ mov(dst0, Immediate(lower));
+        __ mov(dst1, Immediate(upper));
+      }
+    }
+  } else if (source->IsDoubleRegister()) {
+    XMMRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      XMMRegister dst = g.ToDoubleRegister(destination);
+      __ movaps(dst, src);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      Operand dst = g.ToOperand(destination);
+      __ movsd(dst, src);
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+    Operand src = g.ToOperand(source);
+    if (destination->IsDoubleRegister()) {
+      XMMRegister dst = g.ToDoubleRegister(destination);
+      __ movsd(dst, src);
+    } else {
+      // We rely on having xmm0 available as a fixed scratch register.
+      Operand dst = g.ToOperand(destination);
+      __ movsd(xmm0, src);
+      __ movsd(dst, xmm0);
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  IA32OperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister() && destination->IsRegister()) {
+    // Register-register.
+    Register src = g.ToRegister(source);
+    Register dst = g.ToRegister(destination);
+    __ xchg(dst, src);
+  } else if (source->IsRegister() && destination->IsStackSlot()) {
+    // Register-memory.
+    __ xchg(g.ToRegister(source), g.ToOperand(destination));
+  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
+    // Memory-memory.
+    Operand src = g.ToOperand(source);
+    Operand dst = g.ToOperand(destination);
+    __ push(dst);
+    __ push(src);
+    __ pop(dst);
+    __ pop(src);
+  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+    // XMM register-register swap. We rely on having xmm0
+    // available as a fixed scratch register.
+    XMMRegister src = g.ToDoubleRegister(source);
+    XMMRegister dst = g.ToDoubleRegister(destination);
+    __ movaps(xmm0, src);
+    __ movaps(src, dst);
+    __ movaps(dst, xmm0);
+  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
+    // XMM register-memory swap.  We rely on having xmm0
+    // available as a fixed scratch register.
+    XMMRegister reg = g.ToDoubleRegister(source);
+    Operand other = g.ToOperand(destination);
+    __ movsd(xmm0, other);
+    __ movsd(other, reg);
+    __ movaps(reg, xmm0);
+  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
+    // Double-width memory-to-memory.
+    Operand src0 = g.ToOperand(source);
+    Operand src1 = g.HighOperand(source);
+    Operand dst0 = g.ToOperand(destination);
+    Operand dst1 = g.HighOperand(destination);
+    __ movsd(xmm0, dst0);  // Save destination in xmm0.
+    __ push(src0);         // Then use stack to copy source to destination.
+    __ pop(dst0);
+    __ push(src1);
+    __ pop(dst1);
+    __ movsd(src0, xmm0);
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
+
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+  int space_needed = Deoptimizer::patch_size();
+  if (!linkage()->info()->IsStub()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      __ Nop(padding_size);
+    }
+  }
+  MarkLazyDeoptSite();
+}
+
+#undef __
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/ia32/instruction-codes-ia32.h b/src/compiler/ia32/instruction-codes-ia32.h
new file mode 100644
index 0000000..0f46088
--- /dev/null
+++ b/src/compiler/ia32/instruction-codes-ia32.h
@@ -0,0 +1,84 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_
+#define V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// IA32-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+  V(IA32Add)                       \
+  V(IA32And)                       \
+  V(IA32Cmp)                       \
+  V(IA32Test)                      \
+  V(IA32Or)                        \
+  V(IA32Xor)                       \
+  V(IA32Sub)                       \
+  V(IA32Imul)                      \
+  V(IA32Idiv)                      \
+  V(IA32Udiv)                      \
+  V(IA32Not)                       \
+  V(IA32Neg)                       \
+  V(IA32Shl)                       \
+  V(IA32Shr)                       \
+  V(IA32Sar)                       \
+  V(IA32Ror)                       \
+  V(SSEFloat64Cmp)                 \
+  V(SSEFloat64Add)                 \
+  V(SSEFloat64Sub)                 \
+  V(SSEFloat64Mul)                 \
+  V(SSEFloat64Div)                 \
+  V(SSEFloat64Mod)                 \
+  V(SSEFloat64Sqrt)                \
+  V(SSEFloat64ToInt32)             \
+  V(SSEFloat64ToUint32)            \
+  V(SSEInt32ToFloat64)             \
+  V(SSEUint32ToFloat64)            \
+  V(IA32Movsxbl)                   \
+  V(IA32Movzxbl)                   \
+  V(IA32Movb)                      \
+  V(IA32Movsxwl)                   \
+  V(IA32Movzxwl)                   \
+  V(IA32Movw)                      \
+  V(IA32Movl)                      \
+  V(IA32Movss)                     \
+  V(IA32Movsd)                     \
+  V(IA32Push)                      \
+  V(IA32StoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MR = [register]
+// MI = [immediate]
+// MRN = [register + register * N in {1, 2, 4, 8}]
+// MRI = [register + immediate]
+// MRNI = [register + register * N in {1, 2, 4, 8} + immediate]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+  V(MI)   /* [K] */                    \
+  V(MR)   /* [%r0] */                  \
+  V(MRI)  /* [%r0 + K] */              \
+  V(MR1I) /* [%r0 + %r1 * 1 + K] */    \
+  V(MR2I) /* [%r0 + %r1 * 2 + K] */    \
+  V(MR4I) /* [%r0 + %r1 * 4 + K] */    \
+  V(MR8I) /* [%r0 + %r1 * 8 + K] */
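+
+// For example, kMode_MRI describes an operand such as [ebx + 0x10] (a base
+// register plus a constant displacement), while kMode_MR4I describes
+// [ebx + ecx * 4 + 0x10] as used for scaled element accesses.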
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_
diff --git a/src/compiler/ia32/instruction-selector-ia32-unittest.cc b/src/compiler/ia32/instruction-selector-ia32-unittest.cc
new file mode 100644
index 0000000..60708c1
--- /dev/null
+++ b/src/compiler/ia32/instruction-selector-ia32-unittest.cc
@@ -0,0 +1,211 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+// Immediates (random subset).
+static const int32_t kImmediates[] = {
+    kMinInt, -42, -1, 0,  1,  2,    3,      4,          5,
+    6,       7,   8,  16, 42, 0xff, 0xffff, 0x0f0f0f0f, kMaxInt};
+
+}  // namespace
+
+
+TEST_F(InstructionSelectorTest, Int32AddWithParameter) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32Add(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddWithImmediate) {
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    {
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(m.Int32Add(m.Parameter(0), m.Int32Constant(imm)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
+      ASSERT_EQ(2U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    }
+    {
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
+      ASSERT_EQ(2U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    }
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Int32SubWithParameter) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32Sub(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kIA32Sub, s[0]->arch_opcode());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32SubWithImmediate) {
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Int32Sub(m.Parameter(0), m.Int32Constant(imm)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kIA32Sub, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Loads and stores
+
+namespace {
+
+struct MemoryAccess {
+  MachineType type;
+  ArchOpcode load_opcode;
+  ArchOpcode store_opcode;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
+  OStringStream ost;
+  ost << memacc.type;
+  return os << ost.c_str();
+}
+
+
+static const MemoryAccess kMemoryAccesses[] = {
+    {kMachInt8, kIA32Movsxbl, kIA32Movb},
+    {kMachUint8, kIA32Movzxbl, kIA32Movb},
+    {kMachInt16, kIA32Movsxwl, kIA32Movw},
+    {kMachUint16, kIA32Movzxwl, kIA32Movw},
+    {kMachInt32, kIA32Movl, kIA32Movl},
+    {kMachUint32, kIA32Movl, kIA32Movl},
+    {kMachFloat32, kIA32Movss, kIA32Movss},
+    {kMachFloat64, kIA32Movsd, kIA32Movsd}};
+
+}  // namespace
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccess>
+    InstructionSelectorMemoryAccessTest;
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+  m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateBase) {
+  const MemoryAccess memacc = GetParam();
+  TRACED_FOREACH(int32_t, base, kImmediates) {
+    StreamBuilder m(this, memacc.type, kMachPtr);
+    m.Return(m.Load(memacc.type, m.Int32Constant(base), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(base, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
+  const MemoryAccess memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, kImmediates) {
+    StreamBuilder m(this, memacc.type, kMachPtr);
+    m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
+  m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(0U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateBase) {
+  const MemoryAccess memacc = GetParam();
+  TRACED_FOREACH(int32_t, base, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, memacc.type);
+    m.Store(memacc.type, m.Int32Constant(base), m.Parameter(0), m.Parameter(1));
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(base, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(0U, s[0]->OutputCount());
+  }
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
+  const MemoryAccess memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
+    m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
+            m.Parameter(1));
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(0U, s[0]->OutputCount());
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorMemoryAccessTest,
+                        ::testing::ValuesIn(kMemoryAccesses));
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
new file mode 100644
index 0000000..24ebc38
--- /dev/null
+++ b/src/compiler/ia32/instruction-selector-ia32.cc
@@ -0,0 +1,563 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Adds IA32-specific methods for generating operands.
+class IA32OperandGenerator FINAL : public OperandGenerator {
+ public:
+  explicit IA32OperandGenerator(InstructionSelector* selector)
+      : OperandGenerator(selector) {}
+
+  InstructionOperand* UseByteRegister(Node* node) {
+    // TODO(dcarney): relax constraint.
+    return UseFixed(node, edx);
+  }
+
+  bool CanBeImmediate(Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kInt32Constant:
+      case IrOpcode::kNumberConstant:
+      case IrOpcode::kExternalConstant:
+        return true;
+      case IrOpcode::kHeapConstant: {
+        // Constants in new space cannot be used as immediates in V8 because
+        // the GC does not scan code objects when collecting the new generation.
+        Unique<HeapObject> value = OpParameter<Unique<HeapObject> >(node);
+        return !isolate()->heap()->InNewSpace(*value.handle());
+      }
+      default:
+        return false;
+    }
+  }
+};
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
+  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+  IA32OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+
+  ArchOpcode opcode;
+  // TODO(titzer): signed/unsigned small loads
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kIA32Movss;
+      break;
+    case kRepFloat64:
+      opcode = kIA32Movsd;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = typ == kTypeInt32 ? kIA32Movsxbl : kIA32Movzxbl;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeInt32 ? kIA32Movsxwl : kIA32Movzxwl;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord32:
+      opcode = kIA32Movl;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  if (g.CanBeImmediate(base)) {
+    if (Int32Matcher(index).Is(0)) {  // load [#base + #0]
+      Emit(opcode | AddressingModeField::encode(kMode_MI),
+           g.DefineAsRegister(node), g.UseImmediate(base));
+    } else {  // load [#base + %index]
+      Emit(opcode | AddressingModeField::encode(kMode_MRI),
+           g.DefineAsRegister(node), g.UseRegister(index),
+           g.UseImmediate(base));
+    }
+  } else if (g.CanBeImmediate(index)) {  // load [%base + #index]
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+  } else {  // load [%base + %index + K]
+    Emit(opcode | AddressingModeField::encode(kMode_MR1I),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+  }
+  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+  IA32OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+  MachineType rep = RepresentationOf(store_rep.machine_type());
+  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+    DCHECK_EQ(kRepTagged, rep);
+    // TODO(dcarney): refactor RecordWrite function to take temp registers
+    //                and pass them here instead of using fixed regs
+    // TODO(dcarney): handle immediate indices.
+    InstructionOperand* temps[] = {g.TempRegister(ecx), g.TempRegister(edx)};
+    Emit(kIA32StoreWriteBarrier, NULL, g.UseFixed(base, ebx),
+         g.UseFixed(index, ecx), g.UseFixed(value, edx), arraysize(temps),
+         temps);
+    return;
+  }
+  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+  InstructionOperand* val;
+  if (g.CanBeImmediate(value)) {
+    val = g.UseImmediate(value);
+  } else if (rep == kRepWord8 || rep == kRepBit) {
+    val = g.UseByteRegister(value);
+  } else {
+    val = g.UseRegister(value);
+  }
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kIA32Movss;
+      break;
+    case kRepFloat64:
+      opcode = kIA32Movsd;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = kIA32Movb;
+      break;
+    case kRepWord16:
+      opcode = kIA32Movw;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord32:
+      opcode = kIA32Movl;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  if (g.CanBeImmediate(base)) {
+    if (Int32Matcher(index).Is(0)) {  // store [#base], %|#value
+      Emit(opcode | AddressingModeField::encode(kMode_MI), NULL,
+           g.UseImmediate(base), val);
+    } else {  // store [#base + %index], %|#value
+      Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+           g.UseRegister(index), g.UseImmediate(base), val);
+    }
+  } else if (g.CanBeImmediate(index)) {  // store [%base + #index], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+         g.UseRegister(base), g.UseImmediate(index), val);
+  } else {  // store [%base + %index], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MR1I), NULL,
+         g.UseRegister(base), g.UseRegister(index), val);
+  }
+  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode, FlagsContinuation* cont) {
+  IA32OperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  InstructionOperand* inputs[4];
+  size_t input_count = 0;
+  InstructionOperand* outputs[2];
+  size_t output_count = 0;
+
+  // TODO(turbofan): match complex addressing modes.
+  // TODO(turbofan): if commutative, pick the non-live-in operand as the left
+  // operand, since it might be the last use and its register can be reused.
+  if (g.CanBeImmediate(m.right().node())) {
+    inputs[input_count++] = g.Use(m.left().node());
+    inputs[input_count++] = g.UseImmediate(m.right().node());
+  } else {
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.Use(m.right().node());
+  }
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  }
+
+  outputs[output_count++] = g.DefineSameAsFirst(node);
+  if (cont->IsSet()) {
+    // TODO(turbofan): Use byte register here.
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0, input_count);
+  DCHECK_NE(0, output_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+
+  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+                                      outputs, input_count, inputs);
+  if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode) {
+  FlagsContinuation cont;
+  VisitBinop(selector, node, opcode, &cont);
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+  VisitBinop(this, node, kIA32And);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+  VisitBinop(this, node, kIA32Or);
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+  IA32OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.right().Is(-1)) {
+    Emit(kIA32Not, g.DefineSameAsFirst(node), g.Use(m.left().node()));
+  } else {
+    VisitBinop(this, node, kIA32Xor);
+  }
+}
+
+
+// Shared routine for multiple shift operations.
+static inline void VisitShift(InstructionSelector* selector, Node* node,
+                              ArchOpcode opcode) {
+  IA32OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // TODO(turbofan): assembler only supports some addressing modes for shifts.
+  if (g.CanBeImmediate(right)) {
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.UseImmediate(right));
+  } else {
+    Int32BinopMatcher m(node);
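+    // A (count & 0x1F) mask on the shift amount is redundant here: IA32 shift
+    // instructions only use the low five bits of CL, so shift by the unmasked
+    // value directly.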
+    if (m.right().IsWord32And()) {
+      Int32BinopMatcher mright(right);
+      if (mright.right().Is(0x1F)) {
+        right = mright.left().node();
+      }
+    }
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.UseFixed(right, ecx));
+  }
+}
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+  VisitShift(this, node, kIA32Shl);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+  VisitShift(this, node, kIA32Shr);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+  VisitShift(this, node, kIA32Sar);
+}
+
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+  VisitShift(this, node, kIA32Ror);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+  VisitBinop(this, node, kIA32Add);
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+  IA32OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().Is(0)) {
+    Emit(kIA32Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
+  } else {
+    VisitBinop(this, node, kIA32Sub);
+  }
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+  IA32OperandGenerator g(this);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  if (g.CanBeImmediate(right)) {
+    Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(left),
+         g.UseImmediate(right));
+  } else if (g.CanBeImmediate(left)) {
+    Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(right),
+         g.UseImmediate(left));
+  } else {
+    // TODO(turbofan): select better left operand.
+    Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(left),
+         g.Use(right));
+  }
+}
+
+
+static inline void VisitDiv(InstructionSelector* selector, Node* node,
+                            ArchOpcode opcode) {
+  IA32OperandGenerator g(selector);
+  InstructionOperand* temps[] = {g.TempRegister(edx)};
+  size_t temp_count = arraysize(temps);
+  selector->Emit(opcode, g.DefineAsFixed(node, eax),
+                 g.UseFixed(node->InputAt(0), eax),
+                 g.UseUnique(node->InputAt(1)), temp_count, temps);
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+  VisitDiv(this, node, kIA32Idiv);
+}
+
+
+void InstructionSelector::VisitInt32UDiv(Node* node) {
+  VisitDiv(this, node, kIA32Udiv);
+}
+
+
+static inline void VisitMod(InstructionSelector* selector, Node* node,
+                            ArchOpcode opcode) {
+  IA32OperandGenerator g(selector);
+  InstructionOperand* temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
+  size_t temp_count = arraysize(temps);
+  selector->Emit(opcode, g.DefineAsFixed(node, edx),
+                 g.UseFixed(node->InputAt(0), eax),
+                 g.UseUnique(node->InputAt(1)), temp_count, temps);
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+  VisitMod(this, node, kIA32Idiv);
+}
+
+
+void InstructionSelector::VisitInt32UMod(Node* node) {
+  VisitMod(this, node, kIA32Udiv);
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+  IA32OperandGenerator g(this);
+  // TODO(turbofan): IA32 SSE LoadUint32() should take an operand.
+  Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+  IA32OperandGenerator g(this);
+  InstructionOperand* temps[] = {g.TempRegister(eax)};
+  Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
+       temps);
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  VisitBinop(this, node, kIA32Add, cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  VisitBinop(this, node, kIA32Sub, cont);
+}
+
+
+// Shared routine for multiple compare operations.
+static inline void VisitCompare(InstructionSelector* selector,
+                                InstructionCode opcode,
+                                InstructionOperand* left,
+                                InstructionOperand* right,
+                                FlagsContinuation* cont) {
+  IA32OperandGenerator g(selector);
+  if (cont->IsBranch()) {
+    selector->Emit(cont->Encode(opcode), NULL, left, right,
+                   g.Label(cont->true_block()),
+                   g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    DCHECK(cont->IsSet());
+    // TODO(titzer): Needs byte register.
+    selector->Emit(cont->Encode(opcode), g.DefineAsRegister(cont->result()),
+                   left, right);
+  }
+}
+
+
+// Shared routine for multiple word compare operations.
+static inline void VisitWordCompare(InstructionSelector* selector, Node* node,
+                                    InstructionCode opcode,
+                                    FlagsContinuation* cont, bool commutative) {
+  IA32OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // Match immediates on left or right side of comparison.
+  if (g.CanBeImmediate(right)) {
+    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
+  } else if (g.CanBeImmediate(left)) {
+    if (!commutative) cont->Commute();
+    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
+  } else {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
+  }
+}
+
+
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+  switch (node->opcode()) {
+    case IrOpcode::kInt32Sub:
+      return VisitWordCompare(this, node, kIA32Cmp, cont, false);
+    case IrOpcode::kWord32And:
+      return VisitWordCompare(this, node, kIA32Test, cont, true);
+    default:
+      break;
+  }
+
+  IA32OperandGenerator g(this);
+  VisitCompare(this, kIA32Test, g.Use(node), g.TempImmediate(-1), cont);
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  VisitWordCompare(this, node, kIA32Cmp, cont, false);
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+                                              FlagsContinuation* cont) {
+  IA32OperandGenerator g(this);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  VisitCompare(this, kSSEFloat64Cmp, g.UseRegister(left), g.Use(right), cont);
+}
+
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+                                    BasicBlock* deoptimization) {
+  IA32OperandGenerator g(this);
+  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+
+  FrameStateDescriptor* frame_state_descriptor = NULL;
+
+  if (descriptor->NeedsFrameState()) {
+    frame_state_descriptor =
+        GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
+  }
+
+  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+  // Compute InstructionOperands for inputs and outputs.
+  InitializeCallBuffer(call, &buffer, true, true);
+
+  // Push any stack arguments.
+  for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
+       input != buffer.pushed_nodes.rend(); input++) {
+    // TODO(titzer): handle pushing double parameters.
+    Emit(kIA32Push, NULL,
+         g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
+  }
+
+  // Select the appropriate opcode based on the call type.
+  InstructionCode opcode;
+  switch (descriptor->kind()) {
+    case CallDescriptor::kCallCodeObject: {
+      opcode = kArchCallCodeObject;
+      break;
+    }
+    case CallDescriptor::kCallJSFunction:
+      opcode = kArchCallJSFunction;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  opcode |= MiscField::encode(descriptor->flags());
+
+  // Emit the call instruction.
+  Instruction* call_instr =
+      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+           buffer.instruction_args.size(), &buffer.instruction_args.front());
+
+  call_instr->MarkAsCall();
+  if (deoptimization != NULL) {
+    DCHECK(continuation != NULL);
+    call_instr->MarkAsControl();
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/ia32/linkage-ia32.cc b/src/compiler/ia32/linkage-ia32.cc
new file mode 100644
index 0000000..f2c5fab
--- /dev/null
+++ b/src/compiler/ia32/linkage-ia32.cc
@@ -0,0 +1,61 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct IA32LinkageHelperTraits {
+  static Register ReturnValueReg() { return eax; }
+  static Register ReturnValue2Reg() { return edx; }
+  static Register JSCallFunctionReg() { return edi; }
+  static Register ContextReg() { return esi; }
+  static Register RuntimeCallFunctionReg() { return ebx; }
+  static Register RuntimeCallArgCountReg() { return eax; }
+  static RegList CCalleeSaveRegisters() {
+    return esi.bit() | edi.bit() | ebx.bit();
+  }
+  static Register CRegisterParameter(int i) { return no_reg; }
+  static int CRegisterParametersLength() { return 0; }
+};
+
+typedef LinkageHelper<IA32LinkageHelperTraits> LH;
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+  return LH::GetJSCallDescriptor(zone, parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+    Runtime::FunctionId function, int parameter_count,
+    Operator::Properties properties, Zone* zone) {
+  return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
+                                      properties);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    CallDescriptor::Flags flags, Zone* zone) {
+  return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
+                                   flags);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+                                                  MachineSignature* sig) {
+  return LH::GetSimplifiedCDescriptor(zone, sig);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/instruction-codes.h b/src/compiler/instruction-codes.h
new file mode 100644
index 0000000..2d921bd
--- /dev/null
+++ b/src/compiler/instruction-codes.h
@@ -0,0 +1,119 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_CODES_H_
+#define V8_COMPILER_INSTRUCTION_CODES_H_
+
+#if V8_TARGET_ARCH_ARM
+#include "src/compiler/arm/instruction-codes-arm.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/compiler/arm64/instruction-codes-arm64.h"
+#elif V8_TARGET_ARCH_IA32
+#include "src/compiler/ia32/instruction-codes-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/compiler/x64/instruction-codes-x64.h"
+#else
+#define TARGET_ARCH_OPCODE_LIST(V)
+#define TARGET_ADDRESSING_MODE_LIST(V)
+#endif
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class OStream;
+
+namespace compiler {
+
+// Target-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define ARCH_OPCODE_LIST(V) \
+  V(ArchCallCodeObject)     \
+  V(ArchCallJSFunction)     \
+  V(ArchJmp)                \
+  V(ArchNop)                \
+  V(ArchRet)                \
+  V(ArchTruncateDoubleToI)  \
+  TARGET_ARCH_OPCODE_LIST(V)
+
+enum ArchOpcode {
+#define DECLARE_ARCH_OPCODE(Name) k##Name,
+  ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE)
+#undef DECLARE_ARCH_OPCODE
+#define COUNT_ARCH_OPCODE(Name) +1
+  kLastArchOpcode = -1 ARCH_OPCODE_LIST(COUNT_ARCH_OPCODE)
+#undef COUNT_ARCH_OPCODE
+};
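+
+// Note: COUNT_ARCH_OPCODE expands the list into "-1 +1 +1 ... +1", so
+// kLastArchOpcode evaluates to the zero-based index of the last opcode.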
+
+OStream& operator<<(OStream& os, const ArchOpcode& ao);
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+#define ADDRESSING_MODE_LIST(V) \
+  V(None)                       \
+  TARGET_ADDRESSING_MODE_LIST(V)
+
+enum AddressingMode {
+#define DECLARE_ADDRESSING_MODE(Name) kMode_##Name,
+  ADDRESSING_MODE_LIST(DECLARE_ADDRESSING_MODE)
+#undef DECLARE_ADDRESSING_MODE
+#define COUNT_ADDRESSING_MODE(Name) +1
+  kLastAddressingMode = -1 ADDRESSING_MODE_LIST(COUNT_ADDRESSING_MODE)
+#undef COUNT_ADDRESSING_MODE
+};
+
+OStream& operator<<(OStream& os, const AddressingMode& am);
+
+// The mode of the flags continuation (see below).
+enum FlagsMode { kFlags_none = 0, kFlags_branch = 1, kFlags_set = 2 };
+
+OStream& operator<<(OStream& os, const FlagsMode& fm);
+
+// The condition of the flags continuation (see below).
+enum FlagsCondition {
+  kEqual,
+  kNotEqual,
+  kSignedLessThan,
+  kSignedGreaterThanOrEqual,
+  kSignedLessThanOrEqual,
+  kSignedGreaterThan,
+  kUnsignedLessThan,
+  kUnsignedGreaterThanOrEqual,
+  kUnsignedLessThanOrEqual,
+  kUnsignedGreaterThan,
+  kUnorderedEqual,
+  kUnorderedNotEqual,
+  kUnorderedLessThan,
+  kUnorderedGreaterThanOrEqual,
+  kUnorderedLessThanOrEqual,
+  kUnorderedGreaterThan,
+  kOverflow,
+  kNotOverflow
+};
+
+OStream& operator<<(OStream& os, const FlagsCondition& fc);
+
+// The InstructionCode is an opaque, target-specific integer that encodes
+// what code to emit for an instruction in the code generator. It is not
+// interesting to the register allocator, as the inputs and flags on the
+// instructions specify everything of interest.
+typedef int32_t InstructionCode;
+
+// Helpers for encoding / decoding InstructionCode into the fields needed
+// for code generation. We encode the instruction, addressing mode, and flags
+// continuation into a single InstructionCode which is stored as part of
+// the instruction.
+typedef BitField<ArchOpcode, 0, 7> ArchOpcodeField;
+typedef BitField<AddressingMode, 7, 4> AddressingModeField;
+typedef BitField<FlagsMode, 11, 2> FlagsModeField;
+typedef BitField<FlagsCondition, 13, 5> FlagsConditionField;
+typedef BitField<int, 13, 19> MiscField;
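+
+// Illustrative example (not part of the original header): the IA32 selector
+// builds codes such as
+//   InstructionCode code =
+//       kIA32Movl | AddressingModeField::encode(kMode_MRI);
+// and the code generator later recovers the pieces with
+// ArchOpcodeField::decode(code) and AddressingModeField::decode(code).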
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_INSTRUCTION_CODES_H_
diff --git a/src/compiler/instruction-selector-impl.h b/src/compiler/instruction-selector-impl.h
new file mode 100644
index 0000000..d00109e
--- /dev/null
+++ b/src/compiler/instruction-selector-impl.h
@@ -0,0 +1,360 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
+#define V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
+
+#include "src/compiler/instruction.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/linkage.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A helper class for the instruction selector that simplifies construction of
+// Operands. This class implements a base for architecture-specific helpers.
+class OperandGenerator {
+ public:
+  explicit OperandGenerator(InstructionSelector* selector)
+      : selector_(selector) {}
+
+  InstructionOperand* DefineAsRegister(Node* node) {
+    return Define(node, new (zone())
+                  UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
+  }
+
+  InstructionOperand* DefineSameAsFirst(Node* result) {
+    return Define(result, new (zone())
+                  UnallocatedOperand(UnallocatedOperand::SAME_AS_FIRST_INPUT));
+  }
+
+  InstructionOperand* DefineAsFixed(Node* node, Register reg) {
+    return Define(node, new (zone())
+                  UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+                                     Register::ToAllocationIndex(reg)));
+  }
+
+  InstructionOperand* DefineAsFixed(Node* node, DoubleRegister reg) {
+    return Define(node, new (zone())
+                  UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
+                                     DoubleRegister::ToAllocationIndex(reg)));
+  }
+
+  InstructionOperand* DefineAsConstant(Node* node) {
+    selector()->MarkAsDefined(node);
+    sequence()->AddConstant(node->id(), ToConstant(node));
+    return ConstantOperand::Create(node->id(), zone());
+  }
+
+  InstructionOperand* DefineAsLocation(Node* node, LinkageLocation location,
+                                       MachineType type) {
+    return Define(node, ToUnallocatedOperand(location, type));
+  }
+
+  InstructionOperand* Use(Node* node) {
+    return Use(node,
+               new (zone()) UnallocatedOperand(
+                   UnallocatedOperand::ANY, UnallocatedOperand::USED_AT_START));
+  }
+
+  InstructionOperand* UseRegister(Node* node) {
+    return Use(node, new (zone())
+               UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+                                  UnallocatedOperand::USED_AT_START));
+  }
+
+  // Use a register or an operand for the node. If a register is chosen, it
+  // won't alias any temporary or output registers.
+  InstructionOperand* UseUnique(Node* node) {
+    return Use(node, new (zone()) UnallocatedOperand(UnallocatedOperand::ANY));
+  }
+
+  // Use a unique register for the node that does not alias any temporary or
+  // output registers.
+  InstructionOperand* UseUniqueRegister(Node* node) {
+    return Use(node, new (zone())
+               UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
+  }
+
+  InstructionOperand* UseFixed(Node* node, Register reg) {
+    return Use(node, new (zone())
+               UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+                                  Register::ToAllocationIndex(reg)));
+  }
+
+  InstructionOperand* UseFixed(Node* node, DoubleRegister reg) {
+    return Use(node, new (zone())
+               UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
+                                  DoubleRegister::ToAllocationIndex(reg)));
+  }
+
+  InstructionOperand* UseImmediate(Node* node) {
+    int index = sequence()->AddImmediate(ToConstant(node));
+    return ImmediateOperand::Create(index, zone());
+  }
+
+  InstructionOperand* UseLocation(Node* node, LinkageLocation location,
+                                  MachineType type) {
+    return Use(node, ToUnallocatedOperand(location, type));
+  }
+
+  InstructionOperand* TempRegister() {
+    UnallocatedOperand* op =
+        new (zone()) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+                                        UnallocatedOperand::USED_AT_START);
+    op->set_virtual_register(sequence()->NextVirtualRegister());
+    return op;
+  }
+
+  InstructionOperand* TempDoubleRegister() {
+    UnallocatedOperand* op =
+        new (zone()) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+                                        UnallocatedOperand::USED_AT_START);
+    op->set_virtual_register(sequence()->NextVirtualRegister());
+    sequence()->MarkAsDouble(op->virtual_register());
+    return op;
+  }
+
+  InstructionOperand* TempRegister(Register reg) {
+    return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+                                           Register::ToAllocationIndex(reg));
+  }
+
+  InstructionOperand* TempImmediate(int32_t imm) {
+    int index = sequence()->AddImmediate(Constant(imm));
+    return ImmediateOperand::Create(index, zone());
+  }
+
+  InstructionOperand* Label(BasicBlock* block) {
+    // TODO(bmeurer): We misuse ImmediateOperand here.
+    return TempImmediate(block->id());
+  }
+
+ protected:
+  Graph* graph() const { return selector()->graph(); }
+  InstructionSelector* selector() const { return selector_; }
+  InstructionSequence* sequence() const { return selector()->sequence(); }
+  Isolate* isolate() const { return zone()->isolate(); }
+  Zone* zone() const { return selector()->instruction_zone(); }
+
+ private:
+  static Constant ToConstant(const Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kInt32Constant:
+        return Constant(OpParameter<int32_t>(node));
+      case IrOpcode::kInt64Constant:
+        return Constant(OpParameter<int64_t>(node));
+      case IrOpcode::kNumberConstant:
+      case IrOpcode::kFloat64Constant:
+        return Constant(OpParameter<double>(node));
+      case IrOpcode::kExternalConstant:
+        return Constant(OpParameter<ExternalReference>(node));
+      case IrOpcode::kHeapConstant:
+        return Constant(OpParameter<Unique<HeapObject> >(node).handle());
+      default:
+        break;
+    }
+    UNREACHABLE();
+    return Constant(static_cast<int32_t>(0));
+  }
+
+  UnallocatedOperand* Define(Node* node, UnallocatedOperand* operand) {
+    DCHECK_NOT_NULL(node);
+    DCHECK_NOT_NULL(operand);
+    operand->set_virtual_register(node->id());
+    selector()->MarkAsDefined(node);
+    return operand;
+  }
+
+  UnallocatedOperand* Use(Node* node, UnallocatedOperand* operand) {
+    DCHECK_NOT_NULL(node);
+    DCHECK_NOT_NULL(operand);
+    operand->set_virtual_register(node->id());
+    selector()->MarkAsUsed(node);
+    return operand;
+  }
+
+  UnallocatedOperand* ToUnallocatedOperand(LinkageLocation location,
+                                           MachineType type) {
+    if (location.location_ == LinkageLocation::ANY_REGISTER) {
+      return new (zone())
+          UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER);
+    }
+    if (location.location_ < 0) {
+      return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_SLOT,
+                                             location.location_);
+    }
+    if (RepresentationOf(type) == kRepFloat64) {
+      return new (zone()) UnallocatedOperand(
+          UnallocatedOperand::FIXED_DOUBLE_REGISTER, location.location_);
+    }
+    return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+                                           location.location_);
+  }
+
+  InstructionSelector* selector_;
+};
+
+
+// The flags continuation is a way to combine a branch or a materialization
+// of a boolean value with an instruction that sets the flags register.
+// The whole instruction is treated as a unit by the register allocator, and
+// thus no spills or moves can be introduced between the flags-setting
+// instruction and the branch or set it should be combined with.
+class FlagsContinuation FINAL {
+ public:
+  FlagsContinuation() : mode_(kFlags_none) {}
+
+  // Creates a new flags continuation from the given condition and true/false
+  // blocks.
+  FlagsContinuation(FlagsCondition condition, BasicBlock* true_block,
+                    BasicBlock* false_block)
+      : mode_(kFlags_branch),
+        condition_(condition),
+        true_block_(true_block),
+        false_block_(false_block) {
+    DCHECK_NOT_NULL(true_block);
+    DCHECK_NOT_NULL(false_block);
+  }
+
+  // Creates a new flags continuation from the given condition and result node.
+  FlagsContinuation(FlagsCondition condition, Node* result)
+      : mode_(kFlags_set), condition_(condition), result_(result) {
+    DCHECK_NOT_NULL(result);
+  }
+
+  bool IsNone() const { return mode_ == kFlags_none; }
+  bool IsBranch() const { return mode_ == kFlags_branch; }
+  bool IsSet() const { return mode_ == kFlags_set; }
+  FlagsCondition condition() const {
+    DCHECK(!IsNone());
+    return condition_;
+  }
+  Node* result() const {
+    DCHECK(IsSet());
+    return result_;
+  }
+  BasicBlock* true_block() const {
+    DCHECK(IsBranch());
+    return true_block_;
+  }
+  BasicBlock* false_block() const {
+    DCHECK(IsBranch());
+    return false_block_;
+  }
+
+  void Negate() {
+    DCHECK(!IsNone());
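+    // Conditions are laid out in negation pairs (kEqual/kNotEqual,
+    // kSignedLessThan/kSignedGreaterThanOrEqual, ...), so flipping the low
+    // bit of the enum value negates the condition.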
+    condition_ = static_cast<FlagsCondition>(condition_ ^ 1);
+  }
+
+  void Commute() {
+    DCHECK(!IsNone());
+    switch (condition_) {
+      case kEqual:
+      case kNotEqual:
+      case kOverflow:
+      case kNotOverflow:
+        return;
+      case kSignedLessThan:
+        condition_ = kSignedGreaterThan;
+        return;
+      case kSignedGreaterThanOrEqual:
+        condition_ = kSignedLessThanOrEqual;
+        return;
+      case kSignedLessThanOrEqual:
+        condition_ = kSignedGreaterThanOrEqual;
+        return;
+      case kSignedGreaterThan:
+        condition_ = kSignedLessThan;
+        return;
+      case kUnsignedLessThan:
+        condition_ = kUnsignedGreaterThan;
+        return;
+      case kUnsignedGreaterThanOrEqual:
+        condition_ = kUnsignedLessThanOrEqual;
+        return;
+      case kUnsignedLessThanOrEqual:
+        condition_ = kUnsignedGreaterThanOrEqual;
+        return;
+      case kUnsignedGreaterThan:
+        condition_ = kUnsignedLessThan;
+        return;
+      case kUnorderedEqual:
+      case kUnorderedNotEqual:
+        return;
+      case kUnorderedLessThan:
+        condition_ = kUnorderedGreaterThan;
+        return;
+      case kUnorderedGreaterThanOrEqual:
+        condition_ = kUnorderedLessThanOrEqual;
+        return;
+      case kUnorderedLessThanOrEqual:
+        condition_ = kUnorderedGreaterThanOrEqual;
+        return;
+      case kUnorderedGreaterThan:
+        condition_ = kUnorderedLessThan;
+        return;
+    }
+    UNREACHABLE();
+  }
+
+  void OverwriteAndNegateIfEqual(FlagsCondition condition) {
+    bool negate = condition_ == kEqual;
+    condition_ = condition;
+    if (negate) Negate();
+  }
+
+  void SwapBlocks() { std::swap(true_block_, false_block_); }
+
+  // Encodes this flags continuation into the given opcode.
+  InstructionCode Encode(InstructionCode opcode) {
+    opcode |= FlagsModeField::encode(mode_);
+    if (mode_ != kFlags_none) {
+      opcode |= FlagsConditionField::encode(condition_);
+    }
+    return opcode;
+  }
+
+ private:
+  FlagsMode mode_;
+  FlagsCondition condition_;
+  Node* result_;             // Only valid if mode_ == kFlags_set.
+  BasicBlock* true_block_;   // Only valid if mode_ == kFlags_branch.
+  BasicBlock* false_block_;  // Only valid if mode_ == kFlags_branch.
+};
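+
+// Illustrative sketch (not from the original source): a compare that feeds a
+// branch is typically selected as
+//   FlagsContinuation cont(kEqual, true_block, false_block);
+//   VisitWordCompare(selector, node, kIA32Cmp, &cont, false);
+// so the flags-setting compare and the conditional jump form one instruction
+// that the register allocator treats as a unit.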
+
+
+// An internal helper class for generating the operands to calls.
+// TODO(bmeurer): Get rid of the CallBuffer business and make
+// InstructionSelector::VisitCall platform independent instead.
+struct CallBuffer {
+  CallBuffer(Zone* zone, CallDescriptor* descriptor,
+             FrameStateDescriptor* frame_state);
+
+  CallDescriptor* descriptor;
+  FrameStateDescriptor* frame_state_descriptor;
+  NodeVector output_nodes;
+  InstructionOperandVector outputs;
+  InstructionOperandVector instruction_args;
+  NodeVector pushed_nodes;
+
+  size_t input_count() const { return descriptor->InputCount(); }
+
+  size_t frame_state_count() const { return descriptor->FrameStateCount(); }
+
+  size_t frame_state_value_count() const {
+    return (frame_state_descriptor == NULL)
+               ? 0
+               : (frame_state_descriptor->GetTotalSize() +
+                  1);  // Include deopt id.
+  }
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
diff --git a/src/compiler/instruction-selector-unittest.cc b/src/compiler/instruction-selector-unittest.cc
new file mode 100644
index 0000000..aa70735
--- /dev/null
+++ b/src/compiler/instruction-selector-unittest.cc
@@ -0,0 +1,496 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-unittest.h"
+
+#include "src/compiler/compiler-test-utils.h"
+#include "src/flags.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+typedef RawMachineAssembler::Label MLabel;
+
+}  // namespace
+
+
+InstructionSelectorTest::InstructionSelectorTest() : rng_(FLAG_random_seed) {}
+
+
+InstructionSelectorTest::~InstructionSelectorTest() {}
+
+
+InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
+    InstructionSelector::Features features,
+    InstructionSelectorTest::StreamBuilderMode mode) {
+  Schedule* schedule = Export();
+  if (FLAG_trace_turbo) {
+    OFStream out(stdout);
+    out << "=== Schedule before instruction selection ===" << endl << *schedule;
+  }
+  EXPECT_NE(0, graph()->NodeCount());
+  CompilationInfo info(test_->isolate(), test_->zone());
+  Linkage linkage(&info, call_descriptor());
+  InstructionSequence sequence(&linkage, graph(), schedule);
+  SourcePositionTable source_position_table(graph());
+  InstructionSelector selector(&sequence, &source_position_table, features);
+  selector.SelectInstructions();
+  if (FLAG_trace_turbo) {
+    OFStream out(stdout);
+    out << "=== Code sequence after instruction selection ===" << endl
+        << sequence;
+  }
+  Stream s;
+  std::set<int> virtual_registers;
+  for (InstructionSequence::const_iterator i = sequence.begin();
+       i != sequence.end(); ++i) {
+    Instruction* instr = *i;
+    if (instr->opcode() < 0) continue;
+    if (mode == kTargetInstructions) {
+      switch (instr->arch_opcode()) {
+#define CASE(Name) \
+  case k##Name:    \
+    break;
+        TARGET_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+        default:
+          continue;
+      }
+    }
+    if (mode == kAllExceptNopInstructions && instr->arch_opcode() == kArchNop) {
+      continue;
+    }
+    for (size_t i = 0; i < instr->OutputCount(); ++i) {
+      InstructionOperand* output = instr->OutputAt(i);
+      EXPECT_NE(InstructionOperand::IMMEDIATE, output->kind());
+      if (output->IsConstant()) {
+        s.constants_.insert(std::make_pair(
+            output->index(), sequence.GetConstant(output->index())));
+        virtual_registers.insert(output->index());
+      } else if (output->IsUnallocated()) {
+        virtual_registers.insert(
+            UnallocatedOperand::cast(output)->virtual_register());
+      }
+    }
+    for (size_t i = 0; i < instr->InputCount(); ++i) {
+      InstructionOperand* input = instr->InputAt(i);
+      EXPECT_NE(InstructionOperand::CONSTANT, input->kind());
+      if (input->IsImmediate()) {
+        s.immediates_.insert(std::make_pair(
+            input->index(), sequence.GetImmediate(input->index())));
+      } else if (input->IsUnallocated()) {
+        virtual_registers.insert(
+            UnallocatedOperand::cast(input)->virtual_register());
+      }
+    }
+    s.instructions_.push_back(instr);
+  }
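+  // Classify the collected virtual registers as double or reference, as
+  // recorded by the instruction sequence.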
+  for (std::set<int>::const_iterator i = virtual_registers.begin();
+       i != virtual_registers.end(); ++i) {
+    int virtual_register = *i;
+    if (sequence.IsDouble(virtual_register)) {
+      EXPECT_FALSE(sequence.IsReference(virtual_register));
+      s.doubles_.insert(virtual_register);
+    }
+    if (sequence.IsReference(virtual_register)) {
+      EXPECT_FALSE(sequence.IsDouble(virtual_register));
+      s.references_.insert(virtual_register);
+    }
+  }
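+  // Copy out the frame state descriptors registered during selection so
+  // tests can look up deoptimization data by id.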
+  for (int i = 0; i < sequence.GetFrameStateDescriptorCount(); i++) {
+    s.deoptimization_entries_.push_back(sequence.GetFrameStateDescriptor(
+        InstructionSequence::StateId::FromInt(i)));
+  }
+  return s;
+}
+
+
+// -----------------------------------------------------------------------------
+// Return.
+
+
+TARGET_TEST_F(InstructionSelectorTest, ReturnParameter) {
+  StreamBuilder m(this, kMachInt32, kMachInt32);
+  m.Return(m.Parameter(0));
+  Stream s = m.Build(kAllInstructions);
+  ASSERT_EQ(2U, s.size());
+  EXPECT_EQ(kArchNop, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kArchRet, s[1]->arch_opcode());
+  EXPECT_EQ(1U, s[1]->InputCount());
+}
+
+
+TARGET_TEST_F(InstructionSelectorTest, ReturnZero) {
+  StreamBuilder m(this, kMachInt32);
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build(kAllInstructions);
+  ASSERT_EQ(2U, s.size());
+  EXPECT_EQ(kArchNop, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(InstructionOperand::CONSTANT, s[0]->OutputAt(0)->kind());
+  EXPECT_EQ(0, s.ToInt32(s[0]->OutputAt(0)));
+  EXPECT_EQ(kArchRet, s[1]->arch_opcode());
+  EXPECT_EQ(1U, s[1]->InputCount());
+}
+
+
+// -----------------------------------------------------------------------------
+// Conversions.
+
+
+TARGET_TEST_F(InstructionSelectorTest, TruncateFloat64ToInt32WithParameter) {
+  StreamBuilder m(this, kMachInt32, kMachFloat64);
+  m.Return(m.TruncateFloat64ToInt32(m.Parameter(0)));
+  Stream s = m.Build(kAllInstructions);
+  ASSERT_EQ(3U, s.size());
+  EXPECT_EQ(kArchNop, s[0]->arch_opcode());
+  EXPECT_EQ(kArchTruncateDoubleToI, s[1]->arch_opcode());
+  EXPECT_EQ(1U, s[1]->InputCount());
+  EXPECT_EQ(1U, s[1]->OutputCount());
+  EXPECT_EQ(kArchRet, s[2]->arch_opcode());
+}
+
+
+// -----------------------------------------------------------------------------
+// Parameters.
+
+
+TARGET_TEST_F(InstructionSelectorTest, DoubleParameter) {
+  StreamBuilder m(this, kMachFloat64, kMachFloat64);
+  Node* param = m.Parameter(0);
+  m.Return(param);
+  Stream s = m.Build(kAllInstructions);
+  EXPECT_TRUE(s.IsDouble(param->id()));
+}
+
+
+TARGET_TEST_F(InstructionSelectorTest, ReferenceParameter) {
+  StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged);
+  Node* param = m.Parameter(0);
+  m.Return(param);
+  Stream s = m.Build(kAllInstructions);
+  EXPECT_TRUE(s.IsReference(param->id()));
+}
+
+
+// -----------------------------------------------------------------------------
+// Finish.
+
+
+TARGET_TEST_F(InstructionSelectorTest, Finish) {
+  StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged);
+  Node* param = m.Parameter(0);
+  Node* finish = m.NewNode(m.common()->Finish(1), param, m.graph()->start());
+  m.Return(finish);
+  Stream s = m.Build(kAllInstructions);
+  ASSERT_EQ(3U, s.size());
+  EXPECT_EQ(kArchNop, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  ASSERT_TRUE(s[0]->Output()->IsUnallocated());
+  EXPECT_EQ(param->id(), s.ToVreg(s[0]->Output()));
+  EXPECT_EQ(kArchNop, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->InputCount());
+  ASSERT_TRUE(s[1]->InputAt(0)->IsUnallocated());
+  EXPECT_EQ(param->id(), s.ToVreg(s[1]->InputAt(0)));
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  ASSERT_TRUE(s[1]->Output()->IsUnallocated());
+  EXPECT_TRUE(UnallocatedOperand::cast(s[1]->Output())->HasSameAsInputPolicy());
+  EXPECT_EQ(finish->id(), s.ToVreg(s[1]->Output()));
+  EXPECT_TRUE(s.IsReference(finish->id()));
+}
+
+
+// -----------------------------------------------------------------------------
+// Phi.
+
+
+typedef InstructionSelectorTestWithParam<MachineType>
+    InstructionSelectorPhiTest;
+
+
+TARGET_TEST_P(InstructionSelectorPhiTest, Doubleness) {
+  const MachineType type = GetParam();
+  StreamBuilder m(this, type, type, type);
+  Node* param0 = m.Parameter(0);
+  Node* param1 = m.Parameter(1);
+  MLabel a, b, c;
+  m.Branch(m.Int32Constant(0), &a, &b);
+  m.Bind(&a);
+  m.Goto(&c);
+  m.Bind(&b);
+  m.Goto(&c);
+  m.Bind(&c);
+  Node* phi = m.Phi(type, param0, param1);
+  m.Return(phi);
+  Stream s = m.Build(kAllInstructions);
+  EXPECT_EQ(s.IsDouble(phi->id()), s.IsDouble(param0->id()));
+  EXPECT_EQ(s.IsDouble(phi->id()), s.IsDouble(param1->id()));
+}
+
+
+TARGET_TEST_P(InstructionSelectorPhiTest, Referenceness) {
+  const MachineType type = GetParam();
+  StreamBuilder m(this, type, type, type);
+  Node* param0 = m.Parameter(0);
+  Node* param1 = m.Parameter(1);
+  MLabel a, b, c;
+  m.Branch(m.Int32Constant(1), &a, &b);
+  m.Bind(&a);
+  m.Goto(&c);
+  m.Bind(&b);
+  m.Goto(&c);
+  m.Bind(&c);
+  Node* phi = m.Phi(type, param0, param1);
+  m.Return(phi);
+  Stream s = m.Build(kAllInstructions);
+  EXPECT_EQ(s.IsReference(phi->id()), s.IsReference(param0->id()));
+  EXPECT_EQ(s.IsReference(phi->id()), s.IsReference(param1->id()));
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorPhiTest,
+                        ::testing::Values(kMachFloat64, kMachInt8, kMachUint8,
+                                          kMachInt16, kMachUint16, kMachInt32,
+                                          kMachUint32, kMachInt64, kMachUint64,
+                                          kMachPtr, kMachAnyTagged));
+
+
+// -----------------------------------------------------------------------------
+// ValueEffect.
+
+
+TARGET_TEST_F(InstructionSelectorTest, ValueEffect) {
+  StreamBuilder m1(this, kMachInt32, kMachPtr);
+  Node* p1 = m1.Parameter(0);
+  m1.Return(m1.Load(kMachInt32, p1, m1.Int32Constant(0)));
+  Stream s1 = m1.Build(kAllInstructions);
+  StreamBuilder m2(this, kMachInt32, kMachPtr);
+  Node* p2 = m2.Parameter(0);
+  m2.Return(m2.NewNode(m2.machine()->Load(kMachInt32), p2, m2.Int32Constant(0),
+                       m2.NewNode(m2.common()->ValueEffect(1), p2)));
+  Stream s2 = m2.Build(kAllInstructions);
+  EXPECT_LE(3U, s1.size());
+  ASSERT_EQ(s1.size(), s2.size());
+  TRACED_FORRANGE(size_t, i, 0, s1.size() - 1) {
+    const Instruction* i1 = s1[i];
+    const Instruction* i2 = s2[i];
+    EXPECT_EQ(i1->arch_opcode(), i2->arch_opcode());
+    EXPECT_EQ(i1->InputCount(), i2->InputCount());
+    EXPECT_EQ(i1->OutputCount(), i2->OutputCount());
+  }
+}
+
+
+
+// -----------------------------------------------------------------------------
+// Calls with deoptimization.
+
+
+TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
+  StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
+                  kMachAnyTagged);
+
+  BailoutId bailout_id(42);
+
+  Node* function_node = m.Parameter(0);
+  Node* receiver = m.Parameter(1);
+  Node* context = m.Parameter(2);
+
+  Node* parameters = m.NewNode(m.common()->StateValues(1), m.Int32Constant(1));
+  Node* locals = m.NewNode(m.common()->StateValues(0));
+  Node* stack = m.NewNode(m.common()->StateValues(0));
+  Node* context_dummy = m.Int32Constant(0);
+
+  Node* state_node = m.NewNode(
+      m.common()->FrameState(JS_FRAME, bailout_id, kPushOutput), parameters,
+      locals, stack, context_dummy, m.UndefinedConstant());
+  Node* call = m.CallJS0(function_node, receiver, context, state_node);
+  m.Return(call);
+
+  Stream s = m.Build(kAllExceptNopInstructions);
+
+  // Skip until kArchCallJSFunction.
+  size_t index = 0;
+  for (; index < s.size() && s[index]->arch_opcode() != kArchCallJSFunction;
+       index++) {
+  }
+  // Now we should have two instructions: call and return.
+  ASSERT_EQ(index + 2, s.size());
+
+  EXPECT_EQ(kArchCallJSFunction, s[index++]->arch_opcode());
+  EXPECT_EQ(kArchRet, s[index++]->arch_opcode());
+
+  // TODO(jarin) Check deoptimization table.
+}
+
+
+TARGET_TEST_F(InstructionSelectorTest, CallFunctionStubWithDeopt) {
+  StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
+                  kMachAnyTagged);
+
+  BailoutId bailout_id_before(42);
+
+  // Some arguments for the call node.
+  Node* function_node = m.Parameter(0);
+  Node* receiver = m.Parameter(1);
+  Node* context = m.Int32Constant(1);  // Context is ignored.
+
+  // Build frame state for the state before the call.
+  Node* parameters = m.NewNode(m.common()->StateValues(1), m.Int32Constant(43));
+  Node* locals = m.NewNode(m.common()->StateValues(1), m.Int32Constant(44));
+  Node* stack = m.NewNode(m.common()->StateValues(1), m.Int32Constant(45));
+
+  Node* context_sentinel = m.Int32Constant(0);
+  Node* frame_state_before = m.NewNode(
+      m.common()->FrameState(JS_FRAME, bailout_id_before, kPushOutput),
+      parameters, locals, stack, context_sentinel, m.UndefinedConstant());
+
+  // Build the call.
+  Node* call = m.CallFunctionStub0(function_node, receiver, context,
+                                   frame_state_before, CALL_AS_METHOD);
+
+  m.Return(call);
+
+  Stream s = m.Build(kAllExceptNopInstructions);
+
+  // Skip until kArchCallCodeObject.
+  size_t index = 0;
+  for (; index < s.size() && s[index]->arch_opcode() != kArchCallCodeObject;
+       index++) {
+  }
+  // Now we should have two instructions: call, return.
+  ASSERT_EQ(index + 2, s.size());
+
+  // Check the call instruction.
+  const Instruction* call_instr = s[index++];
+  EXPECT_EQ(kArchCallCodeObject, call_instr->arch_opcode());
+  size_t num_operands =
+      1 +  // Code object.
+      1 +  // Frame state deopt id.
+      4 +  // One input for each value in the frame state (incl. context).
+      1 +  // Function.
+      1;   // Context.
+  ASSERT_EQ(num_operands, call_instr->InputCount());
+
+  // Code object.
+  EXPECT_TRUE(call_instr->InputAt(0)->IsImmediate());
+
+  // Deoptimization id.
+  int32_t deopt_id_before = s.ToInt32(call_instr->InputAt(1));
+  FrameStateDescriptor* desc_before =
+      s.GetFrameStateDescriptor(deopt_id_before);
+  EXPECT_EQ(bailout_id_before, desc_before->bailout_id());
+  EXPECT_EQ(kPushOutput, desc_before->state_combine());
+  EXPECT_EQ(1u, desc_before->parameters_count());
+  EXPECT_EQ(1u, desc_before->locals_count());
+  EXPECT_EQ(1u, desc_before->stack_count());
+  EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(2)));
+  EXPECT_EQ(0, s.ToInt32(call_instr->InputAt(3)));
+  EXPECT_EQ(44, s.ToInt32(call_instr->InputAt(4)));
+  EXPECT_EQ(45, s.ToInt32(call_instr->InputAt(5)));
+
+  // Function.
+  EXPECT_EQ(function_node->id(), s.ToVreg(call_instr->InputAt(6)));
+  // Context.
+  EXPECT_EQ(context->id(), s.ToVreg(call_instr->InputAt(7)));
+
+  EXPECT_EQ(kArchRet, s[index++]->arch_opcode());
+
+  EXPECT_EQ(index, s.size());
+}
+
+
+TARGET_TEST_F(InstructionSelectorTest,
+              CallFunctionStubDeoptRecursiveFrameState) {
+  StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
+                  kMachAnyTagged);
+
+  BailoutId bailout_id_before(42);
+  BailoutId bailout_id_parent(62);
+
+  // Some arguments for the call node.
+  Node* function_node = m.Parameter(0);
+  Node* receiver = m.Parameter(1);
+  Node* context = m.Int32Constant(66);
+
+  // Build frame state for the state before the call.
+  Node* parameters = m.NewNode(m.common()->StateValues(1), m.Int32Constant(63));
+  Node* locals = m.NewNode(m.common()->StateValues(1), m.Int32Constant(64));
+  Node* stack = m.NewNode(m.common()->StateValues(1), m.Int32Constant(65));
+  Node* frame_state_parent = m.NewNode(
+      m.common()->FrameState(JS_FRAME, bailout_id_parent, kIgnoreOutput),
+      parameters, locals, stack, context, m.UndefinedConstant());
+
+  Node* context2 = m.Int32Constant(46);
+  Node* parameters2 =
+      m.NewNode(m.common()->StateValues(1), m.Int32Constant(43));
+  Node* locals2 = m.NewNode(m.common()->StateValues(1), m.Int32Constant(44));
+  Node* stack2 = m.NewNode(m.common()->StateValues(1), m.Int32Constant(45));
+  Node* frame_state_before = m.NewNode(
+      m.common()->FrameState(JS_FRAME, bailout_id_before, kPushOutput),
+      parameters2, locals2, stack2, context2, frame_state_parent);
+
+  // Build the call.
+  Node* call = m.CallFunctionStub0(function_node, receiver, context2,
+                                   frame_state_before, CALL_AS_METHOD);
+
+  m.Return(call);
+
+  Stream s = m.Build(kAllExceptNopInstructions);
+
+  // Skip until kArchCallCodeObject.
+  size_t index = 0;
+  for (; index < s.size() && s[index]->arch_opcode() != kArchCallCodeObject;
+       index++) {
+  }
+  // Now we should have two instructions: call and return.
+  EXPECT_EQ(index + 2, s.size());
+
+  // Check the call instruction.
+  const Instruction* call_instr = s[index++];
+  EXPECT_EQ(kArchCallCodeObject, call_instr->arch_opcode());
+  size_t num_operands =
+      1 +  // Code object.
+      1 +  // Frame state deopt id.
+      4 +  // One input for each value in the parent frame state + context.
+      4 +  // One input for each value in the nested frame state + context.
+      1 +  // Function.
+      1;   // Context.
+  EXPECT_EQ(num_operands, call_instr->InputCount());
+  // Code object.
+  EXPECT_TRUE(call_instr->InputAt(0)->IsImmediate());
+
+  // Deoptimization id.
+  int32_t deopt_id_before = s.ToInt32(call_instr->InputAt(1));
+  FrameStateDescriptor* desc_before =
+      s.GetFrameStateDescriptor(deopt_id_before);
+  EXPECT_EQ(bailout_id_before, desc_before->bailout_id());
+  EXPECT_EQ(1u, desc_before->parameters_count());
+  EXPECT_EQ(1u, desc_before->locals_count());
+  EXPECT_EQ(1u, desc_before->stack_count());
+  // Values from the parent frame state come first.
+  EXPECT_EQ(63, s.ToInt32(call_instr->InputAt(2)));
+  // Parent context.
+  EXPECT_EQ(66, s.ToInt32(call_instr->InputAt(3)));
+  EXPECT_EQ(64, s.ToInt32(call_instr->InputAt(4)));
+  EXPECT_EQ(65, s.ToInt32(call_instr->InputAt(5)));
+  // Values from the nested frame state (before the call) follow.
+  EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(6)));
+  EXPECT_EQ(46, s.ToInt32(call_instr->InputAt(7)));
+  EXPECT_EQ(44, s.ToInt32(call_instr->InputAt(8)));
+  EXPECT_EQ(45, s.ToInt32(call_instr->InputAt(9)));
+
+  // Function.
+  EXPECT_EQ(function_node->id(), s.ToVreg(call_instr->InputAt(10)));
+  // Context.
+  EXPECT_EQ(context2->id(), s.ToVreg(call_instr->InputAt(11)));
+  // Continuation.
+
+  EXPECT_EQ(kArchRet, s[index++]->arch_opcode());
+  EXPECT_EQ(index, s.size());
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/instruction-selector-unittest.h b/src/compiler/instruction-selector-unittest.h
new file mode 100644
index 0000000..4e12dab
--- /dev/null
+++ b/src/compiler/instruction-selector-unittest.h
@@ -0,0 +1,209 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_UNITTEST_H_
+#define V8_COMPILER_INSTRUCTION_SELECTOR_UNITTEST_H_
+
+#include <deque>
+#include <set>
+
+#include "src/base/utils/random-number-generator.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/test/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class InstructionSelectorTest : public TestWithContext, public TestWithZone {
+ public:
+  InstructionSelectorTest();
+  virtual ~InstructionSelectorTest();
+
+  base::RandomNumberGenerator* rng() { return &rng_; }
+
+  class Stream;
+
+  enum StreamBuilderMode {
+    kAllInstructions,
+    kTargetInstructions,
+    kAllExceptNopInstructions
+  };
+
+  class StreamBuilder FINAL : public RawMachineAssembler {
+   public:
+    StreamBuilder(InstructionSelectorTest* test, MachineType return_type)
+        : RawMachineAssembler(new (test->zone()) Graph(test->zone()),
+                              MakeMachineSignature(test->zone(), return_type)),
+          test_(test) {}
+    StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
+                  MachineType parameter0_type)
+        : RawMachineAssembler(
+              new (test->zone()) Graph(test->zone()),
+              MakeMachineSignature(test->zone(), return_type, parameter0_type)),
+          test_(test) {}
+    StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
+                  MachineType parameter0_type, MachineType parameter1_type)
+        : RawMachineAssembler(
+              new (test->zone()) Graph(test->zone()),
+              MakeMachineSignature(test->zone(), return_type, parameter0_type,
+                                   parameter1_type)),
+          test_(test) {}
+    StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
+                  MachineType parameter0_type, MachineType parameter1_type,
+                  MachineType parameter2_type)
+        : RawMachineAssembler(
+              new (test->zone()) Graph(test->zone()),
+              MakeMachineSignature(test->zone(), return_type, parameter0_type,
+                                   parameter1_type, parameter2_type)),
+          test_(test) {}
+
+    Stream Build(CpuFeature feature) {
+      return Build(InstructionSelector::Features(feature));
+    }
+    Stream Build(CpuFeature feature1, CpuFeature feature2) {
+      return Build(InstructionSelector::Features(feature1, feature2));
+    }
+    Stream Build(StreamBuilderMode mode = kTargetInstructions) {
+      return Build(InstructionSelector::Features(), mode);
+    }
+    Stream Build(InstructionSelector::Features features,
+                 StreamBuilderMode mode = kTargetInstructions);
+
+   private:
+    MachineSignature* MakeMachineSignature(Zone* zone,
+                                           MachineType return_type) {
+      MachineSignature::Builder builder(zone, 1, 0);
+      builder.AddReturn(return_type);
+      return builder.Build();
+    }
+
+    MachineSignature* MakeMachineSignature(Zone* zone, MachineType return_type,
+                                           MachineType parameter0_type) {
+      MachineSignature::Builder builder(zone, 1, 1);
+      builder.AddReturn(return_type);
+      builder.AddParam(parameter0_type);
+      return builder.Build();
+    }
+
+    MachineSignature* MakeMachineSignature(Zone* zone, MachineType return_type,
+                                           MachineType parameter0_type,
+                                           MachineType parameter1_type) {
+      MachineSignature::Builder builder(zone, 1, 2);
+      builder.AddReturn(return_type);
+      builder.AddParam(parameter0_type);
+      builder.AddParam(parameter1_type);
+      return builder.Build();
+    }
+
+    MachineSignature* MakeMachineSignature(Zone* zone, MachineType return_type,
+                                           MachineType parameter0_type,
+                                           MachineType parameter1_type,
+                                           MachineType parameter2_type) {
+      MachineSignature::Builder builder(zone, 1, 3);
+      builder.AddReturn(return_type);
+      builder.AddParam(parameter0_type);
+      builder.AddParam(parameter1_type);
+      builder.AddParam(parameter2_type);
+      return builder.Build();
+    }
+
+   private:
+    InstructionSelectorTest* test_;
+  };
+
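+  // A stream of selected instructions, together with the constants,
+  // immediates, and double/reference classification of virtual registers
+  // gathered from the instruction sequence.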
+  class Stream FINAL {
+   public:
+    size_t size() const { return instructions_.size(); }
+    const Instruction* operator[](size_t index) const {
+      EXPECT_LT(index, size());
+      return instructions_[index];
+    }
+
+    bool IsDouble(const InstructionOperand* operand) const {
+      return IsDouble(ToVreg(operand));
+    }
+    bool IsDouble(int virtual_register) const {
+      return doubles_.find(virtual_register) != doubles_.end();
+    }
+
+    bool IsInteger(const InstructionOperand* operand) const {
+      return IsInteger(ToVreg(operand));
+    }
+    bool IsInteger(int virtual_register) const {
+      return !IsDouble(virtual_register) && !IsReference(virtual_register);
+    }
+
+    bool IsReference(const InstructionOperand* operand) const {
+      return IsReference(ToVreg(operand));
+    }
+    bool IsReference(int virtual_register) const {
+      return references_.find(virtual_register) != references_.end();
+    }
+
+    int32_t ToInt32(const InstructionOperand* operand) const {
+      return ToConstant(operand).ToInt32();
+    }
+
+    int64_t ToInt64(const InstructionOperand* operand) const {
+      return ToConstant(operand).ToInt64();
+    }
+
+    int ToVreg(const InstructionOperand* operand) const {
+      if (operand->IsConstant()) return operand->index();
+      EXPECT_EQ(InstructionOperand::UNALLOCATED, operand->kind());
+      return UnallocatedOperand::cast(operand)->virtual_register();
+    }
+
+    FrameStateDescriptor* GetFrameStateDescriptor(int deoptimization_id) {
+      EXPECT_LT(deoptimization_id, GetFrameStateDescriptorCount());
+      return deoptimization_entries_[deoptimization_id];
+    }
+
+    int GetFrameStateDescriptorCount() {
+      return static_cast<int>(deoptimization_entries_.size());
+    }
+
+   private:
+    Constant ToConstant(const InstructionOperand* operand) const {
+      ConstantMap::const_iterator i;
+      if (operand->IsConstant()) {
+        i = constants_.find(operand->index());
+        EXPECT_FALSE(constants_.end() == i);
+      } else {
+        EXPECT_EQ(InstructionOperand::IMMEDIATE, operand->kind());
+        i = immediates_.find(operand->index());
+        EXPECT_FALSE(immediates_.end() == i);
+      }
+      EXPECT_EQ(operand->index(), i->first);
+      return i->second;
+    }
+
+    friend class StreamBuilder;
+
+    typedef std::map<int, Constant> ConstantMap;
+
+    ConstantMap constants_;
+    ConstantMap immediates_;
+    std::deque<Instruction*> instructions_;
+    std::set<int> doubles_;
+    std::set<int> references_;
+    std::deque<FrameStateDescriptor*> deoptimization_entries_;
+  };
+
+  base::RandomNumberGenerator rng_;
+};
+
+
+template <typename T>
+class InstructionSelectorTestWithParam
+    : public InstructionSelectorTest,
+      public ::testing::WithParamInterface<T> {};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_INSTRUCTION_SELECTOR_UNITTEST_H_
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
new file mode 100644
index 0000000..3c32b64
--- /dev/null
+++ b/src/compiler/instruction-selector.cc
@@ -0,0 +1,1101 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector.h"
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/pipeline.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+InstructionSelector::InstructionSelector(InstructionSequence* sequence,
+                                         SourcePositionTable* source_positions,
+                                         Features features)
+    : zone_(sequence->isolate()),
+      sequence_(sequence),
+      source_positions_(source_positions),
+      features_(features),
+      current_block_(NULL),
+      instructions_(zone()),
+      defined_(graph()->NodeCount(), false, zone()),
+      used_(graph()->NodeCount(), false, zone()) {}
+
+
+void InstructionSelector::SelectInstructions() {
+  // Mark the inputs of all phis in loop headers as used.
+  BasicBlockVector* blocks = schedule()->rpo_order();
+  for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
+    BasicBlock* block = *i;
+    if (!block->IsLoopHeader()) continue;
+    DCHECK_NE(0, block->PredecessorCount());
+    DCHECK_NE(1, block->PredecessorCount());
+    for (BasicBlock::const_iterator j = block->begin(); j != block->end();
+         ++j) {
+      Node* phi = *j;
+      if (phi->opcode() != IrOpcode::kPhi) continue;
+
+      // Mark all inputs as used.
+      Node::Inputs inputs = phi->inputs();
+      for (InputIter k = inputs.begin(); k != inputs.end(); ++k) {
+        MarkAsUsed(*k);
+      }
+    }
+  }
+
+  // Visit each basic block in post order.
+  for (BasicBlockVectorRIter i = blocks->rbegin(); i != blocks->rend(); ++i) {
+    VisitBlock(*i);
+  }
+
+  // Schedule the selected instructions.
+  for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
+    BasicBlock* block = *i;
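+    // Instructions were emitted in reverse order per block, so walk each
+    // block's range backwards to add them in program order.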
+    size_t end = block->code_end_;
+    size_t start = block->code_start_;
+    sequence()->StartBlock(block);
+    while (start-- > end) {
+      sequence()->AddInstruction(instructions_[start], block);
+    }
+    sequence()->EndBlock(block);
+  }
+}
+
+
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+                                       InstructionOperand* output,
+                                       size_t temp_count,
+                                       InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  return Emit(opcode, output_count, &output, 0, NULL, temp_count, temps);
+}
+
+
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+                                       InstructionOperand* output,
+                                       InstructionOperand* a, size_t temp_count,
+                                       InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  return Emit(opcode, output_count, &output, 1, &a, temp_count, temps);
+}
+
+
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+                                       InstructionOperand* output,
+                                       InstructionOperand* a,
+                                       InstructionOperand* b, size_t temp_count,
+                                       InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  InstructionOperand* inputs[] = {a, b};
+  size_t input_count = arraysize(inputs);
+  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+              temps);
+}
+
+
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+                                       InstructionOperand* output,
+                                       InstructionOperand* a,
+                                       InstructionOperand* b,
+                                       InstructionOperand* c, size_t temp_count,
+                                       InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  InstructionOperand* inputs[] = {a, b, c};
+  size_t input_count = arraysize(inputs);
+  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+              temps);
+}
+
+
+Instruction* InstructionSelector::Emit(
+    InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
+    InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
+    size_t temp_count, InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  InstructionOperand* inputs[] = {a, b, c, d};
+  size_t input_count = arraysize(inputs);
+  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+              temps);
+}
+
+
+Instruction* InstructionSelector::Emit(
+    InstructionCode opcode, size_t output_count, InstructionOperand** outputs,
+    size_t input_count, InstructionOperand** inputs, size_t temp_count,
+    InstructionOperand** temps) {
+  Instruction* instr =
+      Instruction::New(instruction_zone(), opcode, output_count, outputs,
+                       input_count, inputs, temp_count, temps);
+  return Emit(instr);
+}
+
+
+Instruction* InstructionSelector::Emit(Instruction* instr) {
+  instructions_.push_back(instr);
+  return instr;
+}
+
+
+bool InstructionSelector::IsNextInAssemblyOrder(const BasicBlock* block) const {
+  return block->rpo_number_ == (current_block_->rpo_number_ + 1) &&
+         block->deferred_ == current_block_->deferred_;
+}
+
+
+bool InstructionSelector::CanCover(Node* user, Node* node) const {
+  return node->OwnedBy(user) &&
+         schedule()->block(node) == schedule()->block(user);
+}
+
+
+bool InstructionSelector::IsDefined(Node* node) const {
+  DCHECK_NOT_NULL(node);
+  NodeId id = node->id();
+  DCHECK(id >= 0);
+  DCHECK(id < static_cast<NodeId>(defined_.size()));
+  return defined_[id];
+}
+
+
+void InstructionSelector::MarkAsDefined(Node* node) {
+  DCHECK_NOT_NULL(node);
+  NodeId id = node->id();
+  DCHECK(id >= 0);
+  DCHECK(id < static_cast<NodeId>(defined_.size()));
+  defined_[id] = true;
+}
+
+
+bool InstructionSelector::IsUsed(Node* node) const {
+  if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
+  NodeId id = node->id();
+  DCHECK(id >= 0);
+  DCHECK(id < static_cast<NodeId>(used_.size()));
+  return used_[id];
+}
+
+
+void InstructionSelector::MarkAsUsed(Node* node) {
+  DCHECK_NOT_NULL(node);
+  NodeId id = node->id();
+  DCHECK(id >= 0);
+  DCHECK(id < static_cast<NodeId>(used_.size()));
+  used_[id] = true;
+}
+
+
+bool InstructionSelector::IsDouble(const Node* node) const {
+  DCHECK_NOT_NULL(node);
+  return sequence()->IsDouble(node->id());
+}
+
+
+void InstructionSelector::MarkAsDouble(Node* node) {
+  DCHECK_NOT_NULL(node);
+  DCHECK(!IsReference(node));
+  sequence()->MarkAsDouble(node->id());
+}
+
+
+bool InstructionSelector::IsReference(const Node* node) const {
+  DCHECK_NOT_NULL(node);
+  return sequence()->IsReference(node->id());
+}
+
+
+void InstructionSelector::MarkAsReference(Node* node) {
+  DCHECK_NOT_NULL(node);
+  DCHECK(!IsDouble(node));
+  sequence()->MarkAsReference(node->id());
+}
+
+
+void InstructionSelector::MarkAsRepresentation(MachineType rep, Node* node) {
+  DCHECK_NOT_NULL(node);
+  switch (RepresentationOf(rep)) {
+    case kRepFloat32:
+    case kRepFloat64:
+      MarkAsDouble(node);
+      break;
+    case kRepTagged:
+      MarkAsReference(node);
+      break;
+    default:
+      break;
+  }
+}
+
+
+// TODO(bmeurer): Get rid of the CallBuffer business and make
+// InstructionSelector::VisitCall platform independent instead.
+CallBuffer::CallBuffer(Zone* zone, CallDescriptor* d,
+                       FrameStateDescriptor* frame_desc)
+    : descriptor(d),
+      frame_state_descriptor(frame_desc),
+      output_nodes(zone),
+      outputs(zone),
+      instruction_args(zone),
+      pushed_nodes(zone) {
+  output_nodes.reserve(d->ReturnCount());
+  outputs.reserve(d->ReturnCount());
+  pushed_nodes.reserve(input_count());
+  instruction_args.reserve(input_count() + frame_state_value_count());
+}
+
+
+// TODO(bmeurer): Get rid of the CallBuffer business and make
+// InstructionSelector::VisitCall platform independent instead.
+void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
+                                               bool call_code_immediate,
+                                               bool call_address_immediate) {
+  OperandGenerator g(this);
+  DCHECK_EQ(call->op()->OutputCount(), buffer->descriptor->ReturnCount());
+  DCHECK_EQ(OperatorProperties::GetValueInputCount(call->op()),
+            buffer->input_count() + buffer->frame_state_count());
+
+  if (buffer->descriptor->ReturnCount() > 0) {
+    // Collect the projections that represent multiple outputs from this call.
+    if (buffer->descriptor->ReturnCount() == 1) {
+      buffer->output_nodes.push_back(call);
+    } else {
+      buffer->output_nodes.resize(buffer->descriptor->ReturnCount(), NULL);
+      call->CollectProjections(&buffer->output_nodes);
+    }
+
+    // Filter out the outputs that aren't live because no projection uses them.
+    for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
+      if (buffer->output_nodes[i] != NULL) {
+        Node* output = buffer->output_nodes[i];
+        MachineType type =
+            buffer->descriptor->GetReturnType(static_cast<int>(i));
+        LinkageLocation location =
+            buffer->descriptor->GetReturnLocation(static_cast<int>(i));
+        MarkAsRepresentation(type, output);
+        buffer->outputs.push_back(g.DefineAsLocation(output, location, type));
+      }
+    }
+  }
+
+  // The first argument is always the callee code.
+  Node* callee = call->InputAt(0);
+  switch (buffer->descriptor->kind()) {
+    case CallDescriptor::kCallCodeObject:
+      buffer->instruction_args.push_back(
+          (call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
+              ? g.UseImmediate(callee)
+              : g.UseRegister(callee));
+      break;
+    case CallDescriptor::kCallAddress:
+      buffer->instruction_args.push_back(
+          (call_address_immediate &&
+           (callee->opcode() == IrOpcode::kInt32Constant ||
+            callee->opcode() == IrOpcode::kInt64Constant))
+              ? g.UseImmediate(callee)
+              : g.UseRegister(callee));
+      break;
+    case CallDescriptor::kCallJSFunction:
+      buffer->instruction_args.push_back(
+          g.UseLocation(callee, buffer->descriptor->GetInputLocation(0),
+                        buffer->descriptor->GetInputType(0)));
+      break;
+  }
+  DCHECK_EQ(1, buffer->instruction_args.size());
+
+  // If the call needs a frame state, we insert the state information as
+  // follows (n is the number of value inputs to the frame state):
+  // arg 1               : deoptimization id.
+  // arg 2 - arg (n + 1) : value inputs to the frame state.
+  if (buffer->frame_state_descriptor != NULL) {
+    InstructionSequence::StateId state_id =
+        sequence()->AddFrameStateDescriptor(buffer->frame_state_descriptor);
+    buffer->instruction_args.push_back(g.TempImmediate(state_id.ToInt()));
+
+    Node* frame_state =
+        call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
+    AddFrameStateInputs(frame_state, &buffer->instruction_args,
+                        buffer->frame_state_descriptor);
+  }
+  DCHECK(1 + buffer->frame_state_value_count() ==
+         buffer->instruction_args.size());
+
+  size_t input_count = static_cast<size_t>(buffer->input_count());
+
+  // Split the arguments into pushed_nodes and instruction_args. Pushed
+  // arguments require an explicit push instruction before the call and do
+  // not appear as arguments to the call. Everything else ends up
+  // as an InstructionOperand argument to the call.
+  InputIter iter(call->inputs().begin());
+  int pushed_count = 0;
+  for (size_t index = 0; index < input_count; ++iter, ++index) {
+    DCHECK(iter != call->inputs().end());
+    DCHECK(index == static_cast<size_t>(iter.index()));
+    DCHECK((*iter)->op()->opcode() != IrOpcode::kFrameState);
+    if (index == 0) continue;  // The first argument (callee) is already done.
+    InstructionOperand* op =
+        g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index),
+                      buffer->descriptor->GetInputType(index));
+    if (UnallocatedOperand::cast(op)->HasFixedSlotPolicy()) {
+      int stack_index = -UnallocatedOperand::cast(op)->fixed_slot_index() - 1;
+      if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
+        buffer->pushed_nodes.resize(stack_index + 1, NULL);
+      }
+      DCHECK_EQ(NULL, buffer->pushed_nodes[stack_index]);
+      buffer->pushed_nodes[stack_index] = *iter;
+      pushed_count++;
+    } else {
+      buffer->instruction_args.push_back(op);
+    }
+  }
+  CHECK_EQ(pushed_count, static_cast<int>(buffer->pushed_nodes.size()));
+  DCHECK(static_cast<size_t>(input_count) ==
+         (buffer->instruction_args.size() + buffer->pushed_nodes.size() -
+          buffer->frame_state_value_count()));
+}
+
+
+void InstructionSelector::VisitBlock(BasicBlock* block) {
+  DCHECK_EQ(NULL, current_block_);
+  current_block_ = block;
+  int current_block_end = static_cast<int>(instructions_.size());
+
+  // Generate code for the block control "top down", but schedule the code
+  // "bottom up".
+  VisitControl(block);
+  std::reverse(instructions_.begin() + current_block_end, instructions_.end());
+
+  // Visit code in reverse control flow order, because architecture-specific
+  // matching may cover more than one node at a time.
+  for (BasicBlock::reverse_iterator i = block->rbegin(); i != block->rend();
+       ++i) {
+    Node* node = *i;
+    // Skip nodes that are unused or already defined.
+    if (!IsUsed(node) || IsDefined(node)) continue;
+    // Generate code for this node "top down", but schedule the code "bottom
+    // up".
+    size_t current_node_end = instructions_.size();
+    VisitNode(node);
+    std::reverse(instructions_.begin() + current_node_end, instructions_.end());
+  }
+
+  // We're done with the block.
+  // TODO(bmeurer): We should not mutate the schedule.
+  block->code_end_ = current_block_end;
+  block->code_start_ = static_cast<int>(instructions_.size());
+
+  current_block_ = NULL;
+}
+
+
+static inline void CheckNoPhis(const BasicBlock* block) {
+#ifdef DEBUG
+  // Branch targets should not have phis.
+  for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
+    const Node* node = *i;
+    CHECK_NE(IrOpcode::kPhi, node->opcode());
+  }
+#endif
+}
+
+
+void InstructionSelector::VisitControl(BasicBlock* block) {
+  Node* input = block->control_input_;
+  switch (block->control_) {
+    case BasicBlockData::kGoto:
+      return VisitGoto(block->SuccessorAt(0));
+    case BasicBlockData::kBranch: {
+      DCHECK_EQ(IrOpcode::kBranch, input->opcode());
+      BasicBlock* tbranch = block->SuccessorAt(0);
+      BasicBlock* fbranch = block->SuccessorAt(1);
+      // SSA deconstruction requires targets of branches not to have phis.
+      // Edge split form guarantees this property, but is more strict.
+      CheckNoPhis(tbranch);
+      CheckNoPhis(fbranch);
+      if (tbranch == fbranch) return VisitGoto(tbranch);
+      return VisitBranch(input, tbranch, fbranch);
+    }
+    case BasicBlockData::kReturn: {
+      // If the result itself is a return, return its input.
+      Node* value = (input != NULL && input->opcode() == IrOpcode::kReturn)
+                        ? input->InputAt(0)
+                        : input;
+      return VisitReturn(value);
+    }
+    case BasicBlockData::kThrow:
+      return VisitThrow(input);
+    case BasicBlockData::kNone: {
+      // TODO(titzer): exit block doesn't have control.
+      DCHECK(input == NULL);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void InstructionSelector::VisitNode(Node* node) {
+  DCHECK_NOT_NULL(schedule()->block(node));  // should only use scheduled nodes.
+  SourcePosition source_position = source_positions_->GetSourcePosition(node);
+  if (!source_position.IsUnknown()) {
+    DCHECK(!source_position.IsInvalid());
+    if (FLAG_turbo_source_positions || node->opcode() == IrOpcode::kCall) {
+      Emit(SourcePositionInstruction::New(instruction_zone(), source_position));
+    }
+  }
+  switch (node->opcode()) {
+    case IrOpcode::kStart:
+    case IrOpcode::kLoop:
+    case IrOpcode::kEnd:
+    case IrOpcode::kBranch:
+    case IrOpcode::kIfTrue:
+    case IrOpcode::kIfFalse:
+    case IrOpcode::kEffectPhi:
+    case IrOpcode::kMerge:
+      // No code needed for these graph artifacts.
+      return;
+    case IrOpcode::kFinish:
+      return MarkAsReference(node), VisitFinish(node);
+    case IrOpcode::kParameter: {
+      MachineType type = linkage()->GetParameterType(OpParameter<int>(node));
+      MarkAsRepresentation(type, node);
+      return VisitParameter(node);
+    }
+    case IrOpcode::kPhi: {
+      MachineType type = OpParameter<MachineType>(node);
+      MarkAsRepresentation(type, node);
+      return VisitPhi(node);
+    }
+    case IrOpcode::kProjection:
+      return VisitProjection(node);
+    case IrOpcode::kInt32Constant:
+    case IrOpcode::kInt64Constant:
+    case IrOpcode::kExternalConstant:
+      return VisitConstant(node);
+    case IrOpcode::kFloat64Constant:
+      return MarkAsDouble(node), VisitConstant(node);
+    case IrOpcode::kHeapConstant:
+    case IrOpcode::kNumberConstant:
+      // TODO(turbofan): only mark non-smis as references.
+      return MarkAsReference(node), VisitConstant(node);
+    case IrOpcode::kCall:
+      return VisitCall(node, NULL, NULL);
+    case IrOpcode::kFrameState:
+    case IrOpcode::kStateValues:
+      return;
+    case IrOpcode::kLoad: {
+      LoadRepresentation rep = OpParameter<LoadRepresentation>(node);
+      MarkAsRepresentation(rep, node);
+      return VisitLoad(node);
+    }
+    case IrOpcode::kStore:
+      return VisitStore(node);
+    case IrOpcode::kWord32And:
+      return VisitWord32And(node);
+    case IrOpcode::kWord32Or:
+      return VisitWord32Or(node);
+    case IrOpcode::kWord32Xor:
+      return VisitWord32Xor(node);
+    case IrOpcode::kWord32Shl:
+      return VisitWord32Shl(node);
+    case IrOpcode::kWord32Shr:
+      return VisitWord32Shr(node);
+    case IrOpcode::kWord32Sar:
+      return VisitWord32Sar(node);
+    case IrOpcode::kWord32Ror:
+      return VisitWord32Ror(node);
+    case IrOpcode::kWord32Equal:
+      return VisitWord32Equal(node);
+    case IrOpcode::kWord64And:
+      return VisitWord64And(node);
+    case IrOpcode::kWord64Or:
+      return VisitWord64Or(node);
+    case IrOpcode::kWord64Xor:
+      return VisitWord64Xor(node);
+    case IrOpcode::kWord64Shl:
+      return VisitWord64Shl(node);
+    case IrOpcode::kWord64Shr:
+      return VisitWord64Shr(node);
+    case IrOpcode::kWord64Sar:
+      return VisitWord64Sar(node);
+    case IrOpcode::kWord64Ror:
+      return VisitWord64Ror(node);
+    case IrOpcode::kWord64Equal:
+      return VisitWord64Equal(node);
+    case IrOpcode::kInt32Add:
+      return VisitInt32Add(node);
+    case IrOpcode::kInt32AddWithOverflow:
+      return VisitInt32AddWithOverflow(node);
+    case IrOpcode::kInt32Sub:
+      return VisitInt32Sub(node);
+    case IrOpcode::kInt32SubWithOverflow:
+      return VisitInt32SubWithOverflow(node);
+    case IrOpcode::kInt32Mul:
+      return VisitInt32Mul(node);
+    case IrOpcode::kInt32Div:
+      return VisitInt32Div(node);
+    case IrOpcode::kInt32UDiv:
+      return VisitInt32UDiv(node);
+    case IrOpcode::kInt32Mod:
+      return VisitInt32Mod(node);
+    case IrOpcode::kInt32UMod:
+      return VisitInt32UMod(node);
+    case IrOpcode::kInt32LessThan:
+      return VisitInt32LessThan(node);
+    case IrOpcode::kInt32LessThanOrEqual:
+      return VisitInt32LessThanOrEqual(node);
+    case IrOpcode::kUint32LessThan:
+      return VisitUint32LessThan(node);
+    case IrOpcode::kUint32LessThanOrEqual:
+      return VisitUint32LessThanOrEqual(node);
+    case IrOpcode::kInt64Add:
+      return VisitInt64Add(node);
+    case IrOpcode::kInt64Sub:
+      return VisitInt64Sub(node);
+    case IrOpcode::kInt64Mul:
+      return VisitInt64Mul(node);
+    case IrOpcode::kInt64Div:
+      return VisitInt64Div(node);
+    case IrOpcode::kInt64UDiv:
+      return VisitInt64UDiv(node);
+    case IrOpcode::kInt64Mod:
+      return VisitInt64Mod(node);
+    case IrOpcode::kInt64UMod:
+      return VisitInt64UMod(node);
+    case IrOpcode::kInt64LessThan:
+      return VisitInt64LessThan(node);
+    case IrOpcode::kInt64LessThanOrEqual:
+      return VisitInt64LessThanOrEqual(node);
+    case IrOpcode::kChangeInt32ToFloat64:
+      return MarkAsDouble(node), VisitChangeInt32ToFloat64(node);
+    case IrOpcode::kChangeUint32ToFloat64:
+      return MarkAsDouble(node), VisitChangeUint32ToFloat64(node);
+    case IrOpcode::kChangeFloat64ToInt32:
+      return VisitChangeFloat64ToInt32(node);
+    case IrOpcode::kChangeFloat64ToUint32:
+      return VisitChangeFloat64ToUint32(node);
+    case IrOpcode::kChangeInt32ToInt64:
+      return VisitChangeInt32ToInt64(node);
+    case IrOpcode::kChangeUint32ToUint64:
+      return VisitChangeUint32ToUint64(node);
+    case IrOpcode::kTruncateFloat64ToInt32:
+      return VisitTruncateFloat64ToInt32(node);
+    case IrOpcode::kTruncateInt64ToInt32:
+      return VisitTruncateInt64ToInt32(node);
+    case IrOpcode::kFloat64Add:
+      return MarkAsDouble(node), VisitFloat64Add(node);
+    case IrOpcode::kFloat64Sub:
+      return MarkAsDouble(node), VisitFloat64Sub(node);
+    case IrOpcode::kFloat64Mul:
+      return MarkAsDouble(node), VisitFloat64Mul(node);
+    case IrOpcode::kFloat64Div:
+      return MarkAsDouble(node), VisitFloat64Div(node);
+    case IrOpcode::kFloat64Mod:
+      return MarkAsDouble(node), VisitFloat64Mod(node);
+    case IrOpcode::kFloat64Sqrt:
+      return MarkAsDouble(node), VisitFloat64Sqrt(node);
+    case IrOpcode::kFloat64Equal:
+      return VisitFloat64Equal(node);
+    case IrOpcode::kFloat64LessThan:
+      return VisitFloat64LessThan(node);
+    case IrOpcode::kFloat64LessThanOrEqual:
+      return VisitFloat64LessThanOrEqual(node);
+    default:
+      V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
+               node->opcode(), node->op()->mnemonic(), node->id());
+  }
+}
+
+
+#if V8_TURBOFAN_BACKEND
+
+void InstructionSelector::VisitWord32Equal(Node* node) {
+  FlagsContinuation cont(kEqual, node);
+  Int32BinopMatcher m(node);
+  if (m.right().Is(0)) {
+    return VisitWord32Test(m.left().node(), &cont);
+  }
+  VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+  FlagsContinuation cont(kSignedLessThan, node);
+  VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThan, node);
+  VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitWord64Equal(Node* node) {
+  FlagsContinuation cont(kEqual, node);
+  Int64BinopMatcher m(node);
+  if (m.right().Is(0)) {
+    return VisitWord64Test(m.left().node(), &cont);
+  }
+  VisitWord64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+  if (Node* ovf = node->FindProjection(1)) {
+    FlagsContinuation cont(kOverflow, ovf);
+    return VisitInt32AddWithOverflow(node, &cont);
+  }
+  FlagsContinuation cont;
+  VisitInt32AddWithOverflow(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+  if (Node* ovf = node->FindProjection(1)) {
+    FlagsContinuation cont(kOverflow, ovf);
+    return VisitInt32SubWithOverflow(node, &cont);
+  }
+  FlagsContinuation cont;
+  VisitInt32SubWithOverflow(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+  FlagsContinuation cont(kSignedLessThan, node);
+  VisitWord64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  VisitWord64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+  OperandGenerator g(this);
+  Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+  FlagsContinuation cont(kUnorderedEqual, node);
+  VisitFloat64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+  FlagsContinuation cont(kUnorderedLessThan, node);
+  VisitFloat64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+  VisitFloat64Compare(node, &cont);
+}
+
+#endif  // V8_TURBOFAN_BACKEND
+
+// 32-bit targets do not implement the following instructions.
+#if V8_TARGET_ARCH_32_BIT && V8_TURBOFAN_BACKEND
+
+void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Or(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Xor(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Shl(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Ror(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64UDiv(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64UMod(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+  UNIMPLEMENTED();
+}
+
+#endif  // V8_TARGET_ARCH_32_BIT && V8_TURBOFAN_BACKEND
+
+
+// 32-bit targets and unsupported architectures need dummy implementations of
+// selected 64-bit ops.
+#if V8_TARGET_ARCH_32_BIT || !V8_TURBOFAN_BACKEND
+
+void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitWord64Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  UNIMPLEMENTED();
+}
+
+#endif  // V8_TARGET_ARCH_32_BIT || !V8_TURBOFAN_BACKEND
+
+
+void InstructionSelector::VisitFinish(Node* node) {
+  OperandGenerator g(this);
+  Node* value = node->InputAt(0);
+  Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+}
+
+
+void InstructionSelector::VisitParameter(Node* node) {
+  OperandGenerator g(this);
+  int index = OpParameter<int>(node);
+  Emit(kArchNop,
+       g.DefineAsLocation(node, linkage()->GetParameterLocation(index),
+                          linkage()->GetParameterType(index)));
+}
+
+
+void InstructionSelector::VisitPhi(Node* node) {
+  // TODO(bmeurer): Emit a PhiInstruction here.
+  for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
+    MarkAsUsed(*i);
+  }
+}
+
+
+void InstructionSelector::VisitProjection(Node* node) {
+  OperandGenerator g(this);
+  Node* value = node->InputAt(0);
+  switch (value->opcode()) {
+    case IrOpcode::kInt32AddWithOverflow:
+    case IrOpcode::kInt32SubWithOverflow:
+      if (OpParameter<size_t>(node) == 0) {
+        Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+      } else {
+        DCHECK(OpParameter<size_t>(node) == 1u);
+        MarkAsUsed(value);
+      }
+      break;
+    default:
+      break;
+  }
+}
+
+
+void InstructionSelector::VisitConstant(Node* node) {
+  // We must emit a NOP here because every live range needs a defining
+  // instruction in the register allocator.
+  OperandGenerator g(this);
+  Emit(kArchNop, g.DefineAsConstant(node));
+}
+
+
+void InstructionSelector::VisitGoto(BasicBlock* target) {
+  if (IsNextInAssemblyOrder(target)) {
+    // Fall through to the next block.
+    Emit(kArchNop, NULL)->MarkAsControl();
+  } else {
+    // Jump to the target block.
+    OperandGenerator g(this);
+    Emit(kArchJmp, NULL, g.Label(target))->MarkAsControl();
+  }
+}
+
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+                                      BasicBlock* fbranch) {
+  OperandGenerator g(this);
+  Node* user = branch;
+  Node* value = branch->InputAt(0);
+
+  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+
+  // If we can fall through to the true block, invert the branch.
+  if (IsNextInAssemblyOrder(tbranch)) {
+    cont.Negate();
+    cont.SwapBlocks();
+  }
+
+  // Try to combine with comparisons against 0 by simply inverting the branch.
+  while (CanCover(user, value)) {
+    if (value->opcode() == IrOpcode::kWord32Equal) {
+      Int32BinopMatcher m(value);
+      if (m.right().Is(0)) {
+        user = value;
+        value = m.left().node();
+        cont.Negate();
+      } else {
+        break;
+      }
+    } else if (value->opcode() == IrOpcode::kWord64Equal) {
+      Int64BinopMatcher m(value);
+      if (m.right().Is(0)) {
+        user = value;
+        value = m.left().node();
+        cont.Negate();
+      } else {
+        break;
+      }
+    } else {
+      break;
+    }
+  }
+
+  // Try to combine the branch with a comparison.
+  if (CanCover(user, value)) {
+    switch (value->opcode()) {
+      case IrOpcode::kWord32Equal:
+        cont.OverwriteAndNegateIfEqual(kEqual);
+        return VisitWord32Compare(value, &cont);
+      case IrOpcode::kInt32LessThan:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWord32Compare(value, &cont);
+      case IrOpcode::kInt32LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWord32Compare(value, &cont);
+      case IrOpcode::kUint32LessThan:
+        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWord32Compare(value, &cont);
+      case IrOpcode::kUint32LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWord32Compare(value, &cont);
+      case IrOpcode::kWord64Equal:
+        cont.OverwriteAndNegateIfEqual(kEqual);
+        return VisitWord64Compare(value, &cont);
+      case IrOpcode::kInt64LessThan:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWord64Compare(value, &cont);
+      case IrOpcode::kInt64LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWord64Compare(value, &cont);
+      case IrOpcode::kFloat64Equal:
+        cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
+        return VisitFloat64Compare(value, &cont);
+      case IrOpcode::kFloat64LessThan:
+        cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
+        return VisitFloat64Compare(value, &cont);
+      case IrOpcode::kFloat64LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+        return VisitFloat64Compare(value, &cont);
+      case IrOpcode::kProjection:
+        // Check if this is the overflow output projection of an
+        // <Operation>WithOverflow node.
+        if (OpParameter<size_t>(value) == 1u) {
+          // We cannot combine the <Operation>WithOverflow with this branch
+          // unless the 0th projection (the use of the actual value of the
+          // <Operation>) is either NULL, which means there's no use of the
+          // actual value, or was already defined, which means it is scheduled
+          // *AFTER* this branch.
+          Node* node = value->InputAt(0);
+          Node* result = node->FindProjection(0);
+          if (result == NULL || IsDefined(result)) {
+            switch (node->opcode()) {
+              case IrOpcode::kInt32AddWithOverflow:
+                cont.OverwriteAndNegateIfEqual(kOverflow);
+                return VisitInt32AddWithOverflow(node, &cont);
+              case IrOpcode::kInt32SubWithOverflow:
+                cont.OverwriteAndNegateIfEqual(kOverflow);
+                return VisitInt32SubWithOverflow(node, &cont);
+              default:
+                break;
+            }
+          }
+        }
+        break;
+      default:
+        break;
+    }
+  }
+
+  // Branch could not be combined with a compare; emit a compare against 0.
+  VisitWord32Test(value, &cont);
+}
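+
+// Illustrative sketch (not part of the original code): given a graph such as
+//   cmp = Int32LessThan(a, b)
+//   eq  = Word32Equal(cmp, Int32Constant(0))
+//   Branch(eq, tbranch, fbranch)
+// the loop at the top of VisitBranch strips the compare-against-zero and
+// negates the continuation, so the switch above can emit a single compare
+// that branches on "signed greater than or equal" instead of materializing
+// the boolean value of the comparison first.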
+
+
+void InstructionSelector::VisitReturn(Node* value) {
+  OperandGenerator g(this);
+  if (value != NULL) {
+    Emit(kArchRet, NULL, g.UseLocation(value, linkage()->GetReturnLocation(),
+                                       linkage()->GetReturnType()));
+  } else {
+    Emit(kArchRet, NULL);
+  }
+}
+
+
+void InstructionSelector::VisitThrow(Node* value) {
+  UNIMPLEMENTED();  // TODO(titzer)
+}
+
+
+FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
+    Node* state) {
+  DCHECK(state->opcode() == IrOpcode::kFrameState);
+  DCHECK_EQ(5, state->InputCount());
+  FrameStateCallInfo state_info = OpParameter<FrameStateCallInfo>(state);
+  int parameters = OpParameter<int>(state->InputAt(0));
+  int locals = OpParameter<int>(state->InputAt(1));
+  int stack = OpParameter<int>(state->InputAt(2));
+
+  FrameStateDescriptor* outer_state = NULL;
+  Node* outer_node = state->InputAt(4);
+  if (outer_node->opcode() == IrOpcode::kFrameState) {
+    outer_state = GetFrameStateDescriptor(outer_node);
+  }
+
+  return new (instruction_zone())
+      FrameStateDescriptor(state_info, parameters, locals, stack, outer_state);
+}
+
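+// Illustrative note (not part of the original code): a kFrameState node has
+// exactly five inputs -- the parameter values, the local values, the stack
+// values, the context and an optional outer FrameState -- which is why the
+// code above reads the counts from inputs 0..2 and recurses on input 4.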
+
+static InstructionOperand* UseOrImmediate(OperandGenerator* g, Node* input) {
+  switch (input->opcode()) {
+    case IrOpcode::kInt32Constant:
+    case IrOpcode::kNumberConstant:
+    case IrOpcode::kFloat64Constant:
+    case IrOpcode::kHeapConstant:
+      return g->UseImmediate(input);
+    default:
+      return g->UseUnique(input);
+  }
+}
+
+
+void InstructionSelector::AddFrameStateInputs(
+    Node* state, InstructionOperandVector* inputs,
+    FrameStateDescriptor* descriptor) {
+  DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
+
+  if (descriptor->outer_state() != NULL) {
+    AddFrameStateInputs(state->InputAt(4), inputs, descriptor->outer_state());
+  }
+
+  Node* parameters = state->InputAt(0);
+  Node* locals = state->InputAt(1);
+  Node* stack = state->InputAt(2);
+  Node* context = state->InputAt(3);
+
+  DCHECK_EQ(IrOpcode::kStateValues, parameters->op()->opcode());
+  DCHECK_EQ(IrOpcode::kStateValues, locals->op()->opcode());
+  DCHECK_EQ(IrOpcode::kStateValues, stack->op()->opcode());
+
+  DCHECK_EQ(descriptor->parameters_count(), parameters->InputCount());
+  DCHECK_EQ(descriptor->locals_count(), locals->InputCount());
+  DCHECK_EQ(descriptor->stack_count(), stack->InputCount());
+
+  OperandGenerator g(this);
+  for (int i = 0; i < static_cast<int>(descriptor->parameters_count()); i++) {
+    inputs->push_back(UseOrImmediate(&g, parameters->InputAt(i)));
+  }
+  if (descriptor->HasContext()) {
+    inputs->push_back(UseOrImmediate(&g, context));
+  }
+  for (int i = 0; i < static_cast<int>(descriptor->locals_count()); i++) {
+    inputs->push_back(UseOrImmediate(&g, locals->InputAt(i)));
+  }
+  for (int i = 0; i < static_cast<int>(descriptor->stack_count()); i++) {
+    inputs->push_back(UseOrImmediate(&g, stack->InputAt(i)));
+  }
+}
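+
+// Illustrative note (not part of the original code): for a single JS frame
+// with parameters {p0, p1}, one local {l0} and one stack value {s0}, the
+// inputs appended above are, in order,
+//   p0, p1, <context>, l0, s0
+// and when the descriptor has an outer state the outer frame's values come
+// first, because of the recursive call at the top of this function.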
+
+
+#if !V8_TURBOFAN_BACKEND
+
+#define DECLARE_UNIMPLEMENTED_SELECTOR(x) \
+  void InstructionSelector::Visit##x(Node* node) { UNIMPLEMENTED(); }
+MACHINE_OP_LIST(DECLARE_UNIMPLEMENTED_SELECTOR)
+#undef DECLARE_UNIMPLEMENTED_SELECTOR
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+                                              FlagsContinuation* cont) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+                                    BasicBlock* deoptimization) {}
+
+#endif  // !V8_TURBOFAN_BACKEND
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/instruction-selector.h b/src/compiler/instruction-selector.h
new file mode 100644
index 0000000..a86e156
--- /dev/null
+++ b/src/compiler/instruction-selector.h
@@ -0,0 +1,213 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_H_
+#define V8_COMPILER_INSTRUCTION_SELECTOR_H_
+
+#include <deque>
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/instruction.h"
+#include "src/compiler/machine-operator.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+struct CallBuffer;  // TODO(bmeurer): Remove this.
+class FlagsContinuation;
+
+class InstructionSelector FINAL {
+ public:
+  // Forward declarations.
+  class Features;
+
+  InstructionSelector(InstructionSequence* sequence,
+                      SourcePositionTable* source_positions,
+                      Features features = SupportedFeatures());
+
+  // Visit code for the entire graph with the included schedule.
+  void SelectInstructions();
+
+  // ===========================================================================
+  // ============= Architecture-independent code emission methods. =============
+  // ===========================================================================
+
+  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+                    size_t temp_count = 0, InstructionOperand* *temps = NULL);
+  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+                    InstructionOperand* a, size_t temp_count = 0,
+                    InstructionOperand* *temps = NULL);
+  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+                    InstructionOperand* a, InstructionOperand* b,
+                    size_t temp_count = 0, InstructionOperand* *temps = NULL);
+  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+                    InstructionOperand* a, InstructionOperand* b,
+                    InstructionOperand* c, size_t temp_count = 0,
+                    InstructionOperand* *temps = NULL);
+  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+                    InstructionOperand* a, InstructionOperand* b,
+                    InstructionOperand* c, InstructionOperand* d,
+                    size_t temp_count = 0, InstructionOperand* *temps = NULL);
+  Instruction* Emit(InstructionCode opcode, size_t output_count,
+                    InstructionOperand** outputs, size_t input_count,
+                    InstructionOperand** inputs, size_t temp_count = 0,
+                    InstructionOperand* *temps = NULL);
+  Instruction* Emit(Instruction* instr);
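+
+  // Usage sketch (illustrative only, not part of the original interface): an
+  // architecture backend typically emits instructions along the lines of
+  //   Emit(kArchNop, NULL);
+  //   Emit(opcode, g.DefineAsRegister(node), g.UseRegister(input));
+  // where "g" is an OperandGenerator and "opcode" comes from the target's
+  // instruction-codes list; unused outputs are simply passed as NULL.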
+
+  // ===========================================================================
+  // ============== Architecture-independent CPU feature methods. ==============
+  // ===========================================================================
+
+  class Features FINAL {
+   public:
+    Features() : bits_(0) {}
+    explicit Features(unsigned bits) : bits_(bits) {}
+    explicit Features(CpuFeature f) : bits_(1u << f) {}
+    Features(CpuFeature f1, CpuFeature f2) : bits_((1u << f1) | (1u << f2)) {}
+
+    bool Contains(CpuFeature f) const { return (bits_ & (1u << f)); }
+
+   private:
+    unsigned bits_;
+  };
+
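+  // Usage sketch (illustrative only): a backend can guard optional code paths
+  // on CPU features before emitting specialized instructions, e.g.
+  //   if (IsSupported(SSE4_1)) { /* emit an SSE4.1 sequence */ }
+  // where SSE4_1 is one of the target's CpuFeature enum values.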
+  bool IsSupported(CpuFeature feature) const {
+    return features_.Contains(feature);
+  }
+
+  // Returns the features supported on the target platform.
+  static Features SupportedFeatures() {
+    return Features(CpuFeatures::SupportedFeatures());
+  }
+
+ private:
+  friend class OperandGenerator;
+
+  // ===========================================================================
+  // ============ Architecture-independent graph covering methods. =============
+  // ===========================================================================
+
+  // Checks if {block} will appear directly after {current_block_} when
+  // assembling code, in which case, a fall-through can be used.
+  bool IsNextInAssemblyOrder(const BasicBlock* block) const;
+
+  // Used in pattern matching during code generation.
+  // Check if {node} can be covered while generating code for the current
+  // instruction. A node can be covered if the {user} of the node has the only
+  // edge pointing to the {node} and the two are in the same basic block.
+  bool CanCover(Node* user, Node* node) const;
+
+  // Checks if {node} was already defined, and therefore code was already
+  // generated for it.
+  bool IsDefined(Node* node) const;
+
+  // Inform the instruction selection that {node} was just defined.
+  void MarkAsDefined(Node* node);
+
+  // Checks if {node} has any uses, and therefore code has to be generated for
+  // it.
+  bool IsUsed(Node* node) const;
+
+  // Inform the instruction selection that {node} has at least one use and we
+  // will need to generate code for it.
+  void MarkAsUsed(Node* node);
+
+  // Checks if {node} is marked as double.
+  bool IsDouble(const Node* node) const;
+
+  // Inform the register allocator of a double result.
+  void MarkAsDouble(Node* node);
+
+  // Checks if {node} is marked as reference.
+  bool IsReference(const Node* node) const;
+
+  // Inform the register allocator of a reference result.
+  void MarkAsReference(Node* node);
+
+  // Inform the register allocation of the representation of the value produced
+  // by {node}.
+  void MarkAsRepresentation(MachineType rep, Node* node);
+
+  // Initialize the call buffer with the InstructionOperands, nodes, etc.,
+  // corresponding to the inputs and outputs of the call.
+  // {call_code_immediate} to generate immediate operands to calls of code.
+  // {call_address_immediate} to generate immediate operands to address calls.
+  void InitializeCallBuffer(Node* call, CallBuffer* buffer,
+                            bool call_code_immediate,
+                            bool call_address_immediate);
+
+  FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
+  void AddFrameStateInputs(Node* state, InstructionOperandVector* inputs,
+                           FrameStateDescriptor* descriptor);
+
+  // ===========================================================================
+  // ============= Architecture-specific graph covering methods. ===============
+  // ===========================================================================
+
+  // Visit nodes in the given block and generate code.
+  void VisitBlock(BasicBlock* block);
+
+  // Visit the node for the control flow at the end of the block, generating
+  // code if necessary.
+  void VisitControl(BasicBlock* block);
+
+  // Visit the node and generate code, if any.
+  void VisitNode(Node* node);
+
+#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
+  MACHINE_OP_LIST(DECLARE_GENERATOR)
+#undef DECLARE_GENERATOR
+
+  void VisitInt32AddWithOverflow(Node* node, FlagsContinuation* cont);
+  void VisitInt32SubWithOverflow(Node* node, FlagsContinuation* cont);
+
+  void VisitWord32Test(Node* node, FlagsContinuation* cont);
+  void VisitWord64Test(Node* node, FlagsContinuation* cont);
+  void VisitWord32Compare(Node* node, FlagsContinuation* cont);
+  void VisitWord64Compare(Node* node, FlagsContinuation* cont);
+  void VisitFloat64Compare(Node* node, FlagsContinuation* cont);
+
+  void VisitFinish(Node* node);
+  void VisitParameter(Node* node);
+  void VisitPhi(Node* node);
+  void VisitProjection(Node* node);
+  void VisitConstant(Node* node);
+  void VisitCall(Node* call, BasicBlock* continuation,
+                 BasicBlock* deoptimization);
+  void VisitGoto(BasicBlock* target);
+  void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
+  void VisitReturn(Node* value);
+  void VisitThrow(Node* value);
+  void VisitDeoptimize(Node* deopt);
+
+  // ===========================================================================
+
+  Graph* graph() const { return sequence()->graph(); }
+  Linkage* linkage() const { return sequence()->linkage(); }
+  Schedule* schedule() const { return sequence()->schedule(); }
+  InstructionSequence* sequence() const { return sequence_; }
+  Zone* instruction_zone() const { return sequence()->zone(); }
+  Zone* zone() { return &zone_; }
+
+  // ===========================================================================
+
+  Zone zone_;
+  InstructionSequence* sequence_;
+  SourcePositionTable* source_positions_;
+  Features features_;
+  BasicBlock* current_block_;
+  ZoneDeque<Instruction*> instructions_;
+  BoolVector defined_;
+  BoolVector used_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_INSTRUCTION_SELECTOR_H_
diff --git a/src/compiler/instruction.cc b/src/compiler/instruction.cc
new file mode 100644
index 0000000..9ab81b6
--- /dev/null
+++ b/src/compiler/instruction.cc
@@ -0,0 +1,484 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction.h"
+
+#include "src/compiler/common-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+OStream& operator<<(OStream& os, const InstructionOperand& op) {
+  switch (op.kind()) {
+    case InstructionOperand::INVALID:
+      return os << "(0)";
+    case InstructionOperand::UNALLOCATED: {
+      const UnallocatedOperand* unalloc = UnallocatedOperand::cast(&op);
+      os << "v" << unalloc->virtual_register();
+      if (unalloc->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
+        return os << "(=" << unalloc->fixed_slot_index() << "S)";
+      }
+      switch (unalloc->extended_policy()) {
+        case UnallocatedOperand::NONE:
+          return os;
+        case UnallocatedOperand::FIXED_REGISTER:
+          return os << "(=" << Register::AllocationIndexToString(
+                                   unalloc->fixed_register_index()) << ")";
+        case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
+          return os << "(=" << DoubleRegister::AllocationIndexToString(
+                                   unalloc->fixed_register_index()) << ")";
+        case UnallocatedOperand::MUST_HAVE_REGISTER:
+          return os << "(R)";
+        case UnallocatedOperand::SAME_AS_FIRST_INPUT:
+          return os << "(1)";
+        case UnallocatedOperand::ANY:
+          return os << "(-)";
+      }
+    }
+    case InstructionOperand::CONSTANT:
+      return os << "[constant:" << op.index() << "]";
+    case InstructionOperand::IMMEDIATE:
+      return os << "[immediate:" << op.index() << "]";
+    case InstructionOperand::STACK_SLOT:
+      return os << "[stack:" << op.index() << "]";
+    case InstructionOperand::DOUBLE_STACK_SLOT:
+      return os << "[double_stack:" << op.index() << "]";
+    case InstructionOperand::REGISTER:
+      return os << "[" << Register::AllocationIndexToString(op.index())
+                << "|R]";
+    case InstructionOperand::DOUBLE_REGISTER:
+      return os << "[" << DoubleRegister::AllocationIndexToString(op.index())
+                << "|R]";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
+SubKindOperand<kOperandKind, kNumCachedOperands>*
+    SubKindOperand<kOperandKind, kNumCachedOperands>::cache = NULL;
+
+
+template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
+void SubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() {
+  if (cache) return;
+  cache = new SubKindOperand[kNumCachedOperands];
+  for (int i = 0; i < kNumCachedOperands; i++) {
+    cache[i].ConvertTo(kOperandKind, i);
+  }
+}
+
+
+template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
+void SubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
+  delete[] cache;
+  cache = NULL;
+}
+
+
+void InstructionOperand::SetUpCaches() {
+#define INSTRUCTION_OPERAND_SETUP(name, type, number) \
+  name##Operand::SetUpCache();
+  INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_SETUP)
+#undef INSTRUCTION_OPERAND_SETUP
+}
+
+
+void InstructionOperand::TearDownCaches() {
+#define INSTRUCTION_OPERAND_TEARDOWN(name, type, number) \
+  name##Operand::TearDownCache();
+  INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_TEARDOWN)
+#undef INSTRUCTION_OPERAND_TEARDOWN
+}
+
+
+OStream& operator<<(OStream& os, const MoveOperands& mo) {
+  os << *mo.destination();
+  if (!mo.source()->Equals(mo.destination())) os << " = " << *mo.source();
+  return os << ";";
+}
+
+
+bool ParallelMove::IsRedundant() const {
+  for (int i = 0; i < move_operands_.length(); ++i) {
+    if (!move_operands_[i].IsRedundant()) return false;
+  }
+  return true;
+}
+
+
+OStream& operator<<(OStream& os, const ParallelMove& pm) {
+  bool first = true;
+  for (ZoneList<MoveOperands>::iterator move = pm.move_operands()->begin();
+       move != pm.move_operands()->end(); ++move) {
+    if (move->IsEliminated()) continue;
+    if (!first) os << " ";
+    first = false;
+    os << *move;
+  }
+  return os;
+}
+
+
+void PointerMap::RecordPointer(InstructionOperand* op, Zone* zone) {
+  // Do not record arguments as pointers.
+  if (op->IsStackSlot() && op->index() < 0) return;
+  DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  pointer_operands_.Add(op, zone);
+}
+
+
+void PointerMap::RemovePointer(InstructionOperand* op) {
+  // Do not record arguments as pointers.
+  if (op->IsStackSlot() && op->index() < 0) return;
+  DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  for (int i = 0; i < pointer_operands_.length(); ++i) {
+    if (pointer_operands_[i]->Equals(op)) {
+      pointer_operands_.Remove(i);
+      --i;
+    }
+  }
+}
+
+
+void PointerMap::RecordUntagged(InstructionOperand* op, Zone* zone) {
+  // Do not record arguments as pointers.
+  if (op->IsStackSlot() && op->index() < 0) return;
+  DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  untagged_operands_.Add(op, zone);
+}
+
+
+OStream& operator<<(OStream& os, const PointerMap& pm) {
+  os << "{";
+  for (ZoneList<InstructionOperand*>::iterator op =
+           pm.pointer_operands_.begin();
+       op != pm.pointer_operands_.end(); ++op) {
+    if (op != pm.pointer_operands_.begin()) os << ";";
+    os << *op;
+  }
+  return os << "}";
+}
+
+
+OStream& operator<<(OStream& os, const ArchOpcode& ao) {
+  switch (ao) {
+#define CASE(Name) \
+  case k##Name:    \
+    return os << #Name;
+    ARCH_OPCODE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+OStream& operator<<(OStream& os, const AddressingMode& am) {
+  switch (am) {
+    case kMode_None:
+      return os;
+#define CASE(Name)   \
+  case kMode_##Name: \
+    return os << #Name;
+      TARGET_ADDRESSING_MODE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+OStream& operator<<(OStream& os, const FlagsMode& fm) {
+  switch (fm) {
+    case kFlags_none:
+      return os;
+    case kFlags_branch:
+      return os << "branch";
+    case kFlags_set:
+      return os << "set";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+OStream& operator<<(OStream& os, const FlagsCondition& fc) {
+  switch (fc) {
+    case kEqual:
+      return os << "equal";
+    case kNotEqual:
+      return os << "not equal";
+    case kSignedLessThan:
+      return os << "signed less than";
+    case kSignedGreaterThanOrEqual:
+      return os << "signed greater than or equal";
+    case kSignedLessThanOrEqual:
+      return os << "signed less than or equal";
+    case kSignedGreaterThan:
+      return os << "signed greater than";
+    case kUnsignedLessThan:
+      return os << "unsigned less than";
+    case kUnsignedGreaterThanOrEqual:
+      return os << "unsigned greater than or equal";
+    case kUnsignedLessThanOrEqual:
+      return os << "unsigned less than or equal";
+    case kUnsignedGreaterThan:
+      return os << "unsigned greater than";
+    case kUnorderedEqual:
+      return os << "unordered equal";
+    case kUnorderedNotEqual:
+      return os << "unordered not equal";
+    case kUnorderedLessThan:
+      return os << "unordered less than";
+    case kUnorderedGreaterThanOrEqual:
+      return os << "unordered greater than or equal";
+    case kUnorderedLessThanOrEqual:
+      return os << "unordered less than or equal";
+    case kUnorderedGreaterThan:
+      return os << "unordered greater than";
+    case kOverflow:
+      return os << "overflow";
+    case kNotOverflow:
+      return os << "not overflow";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+OStream& operator<<(OStream& os, const Instruction& instr) {
+  if (instr.OutputCount() > 1) os << "(";
+  for (size_t i = 0; i < instr.OutputCount(); i++) {
+    if (i > 0) os << ", ";
+    os << *instr.OutputAt(i);
+  }
+
+  if (instr.OutputCount() > 1) os << ") = ";
+  if (instr.OutputCount() == 1) os << " = ";
+
+  if (instr.IsGapMoves()) {
+    const GapInstruction* gap = GapInstruction::cast(&instr);
+    os << (instr.IsBlockStart() ? " block-start" : "gap ");
+    for (int i = GapInstruction::FIRST_INNER_POSITION;
+         i <= GapInstruction::LAST_INNER_POSITION; i++) {
+      os << "(";
+      if (gap->parallel_moves_[i] != NULL) os << *gap->parallel_moves_[i];
+      os << ") ";
+    }
+  } else if (instr.IsSourcePosition()) {
+    const SourcePositionInstruction* pos =
+        SourcePositionInstruction::cast(&instr);
+    os << "position (" << pos->source_position().raw() << ")";
+  } else {
+    os << ArchOpcodeField::decode(instr.opcode());
+    AddressingMode am = AddressingModeField::decode(instr.opcode());
+    if (am != kMode_None) {
+      os << " : " << AddressingModeField::decode(instr.opcode());
+    }
+    FlagsMode fm = FlagsModeField::decode(instr.opcode());
+    if (fm != kFlags_none) {
+      os << " && " << fm << " if "
+         << FlagsConditionField::decode(instr.opcode());
+    }
+  }
+  if (instr.InputCount() > 0) {
+    for (size_t i = 0; i < instr.InputCount(); i++) {
+      os << " " << *instr.InputAt(i);
+    }
+  }
+  return os << "\n";
+}
+
+
+OStream& operator<<(OStream& os, const Constant& constant) {
+  switch (constant.type()) {
+    case Constant::kInt32:
+      return os << constant.ToInt32();
+    case Constant::kInt64:
+      return os << constant.ToInt64() << "l";
+    case Constant::kFloat64:
+      return os << constant.ToFloat64();
+    case Constant::kExternalReference:
+      return os << constant.ToExternalReference().address();
+    case Constant::kHeapObject:
+      return os << Brief(*constant.ToHeapObject());
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+Label* InstructionSequence::GetLabel(BasicBlock* block) {
+  return GetBlockStart(block)->label();
+}
+
+
+BlockStartInstruction* InstructionSequence::GetBlockStart(BasicBlock* block) {
+  return BlockStartInstruction::cast(InstructionAt(block->code_start_));
+}
+
+
+void InstructionSequence::StartBlock(BasicBlock* block) {
+  block->code_start_ = static_cast<int>(instructions_.size());
+  BlockStartInstruction* block_start =
+      BlockStartInstruction::New(zone(), block);
+  AddInstruction(block_start, block);
+}
+
+
+void InstructionSequence::EndBlock(BasicBlock* block) {
+  int end = static_cast<int>(instructions_.size());
+  DCHECK(block->code_start_ >= 0 && block->code_start_ < end);
+  block->code_end_ = end;
+}
+
+
+int InstructionSequence::AddInstruction(Instruction* instr, BasicBlock* block) {
+  // TODO(titzer): the order of these gaps is a holdover from Lithium.
+  GapInstruction* gap = GapInstruction::New(zone());
+  if (instr->IsControl()) instructions_.push_back(gap);
+  int index = static_cast<int>(instructions_.size());
+  instructions_.push_back(instr);
+  if (!instr->IsControl()) instructions_.push_back(gap);
+  if (instr->NeedsPointerMap()) {
+    DCHECK(instr->pointer_map() == NULL);
+    PointerMap* pointer_map = new (zone()) PointerMap(zone());
+    pointer_map->set_instruction_position(index);
+    instr->set_pointer_map(pointer_map);
+    pointer_maps_.push_back(pointer_map);
+  }
+  return index;
+}
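+
+// Illustrative note (not part of the original code): for an ordinary
+// instruction the emitted order is [..., instruction, gap], while for a
+// control instruction it is [..., gap, instruction]; as the TODO above says,
+// this gap placement is a holdover from Lithium.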
+
+
+BasicBlock* InstructionSequence::GetBasicBlock(int instruction_index) {
+  // TODO(turbofan): Optimize this.
+  for (;;) {
+    DCHECK_LE(0, instruction_index);
+    Instruction* instruction = InstructionAt(instruction_index--);
+    if (instruction->IsBlockStart()) {
+      return BlockStartInstruction::cast(instruction)->block();
+    }
+  }
+}
+
+
+bool InstructionSequence::IsReference(int virtual_register) const {
+  return references_.find(virtual_register) != references_.end();
+}
+
+
+bool InstructionSequence::IsDouble(int virtual_register) const {
+  return doubles_.find(virtual_register) != doubles_.end();
+}
+
+
+void InstructionSequence::MarkAsReference(int virtual_register) {
+  references_.insert(virtual_register);
+}
+
+
+void InstructionSequence::MarkAsDouble(int virtual_register) {
+  doubles_.insert(virtual_register);
+}
+
+
+void InstructionSequence::AddGapMove(int index, InstructionOperand* from,
+                                     InstructionOperand* to) {
+  GapAt(index)->GetOrCreateParallelMove(GapInstruction::START, zone())->AddMove(
+      from, to, zone());
+}
+
+
+InstructionSequence::StateId InstructionSequence::AddFrameStateDescriptor(
+    FrameStateDescriptor* descriptor) {
+  int deoptimization_id = static_cast<int>(deoptimization_entries_.size());
+  deoptimization_entries_.push_back(descriptor);
+  return StateId::FromInt(deoptimization_id);
+}
+
+FrameStateDescriptor* InstructionSequence::GetFrameStateDescriptor(
+    InstructionSequence::StateId state_id) {
+  return deoptimization_entries_[state_id.ToInt()];
+}
+
+
+int InstructionSequence::GetFrameStateDescriptorCount() {
+  return static_cast<int>(deoptimization_entries_.size());
+}
+
+
+OStream& operator<<(OStream& os, const InstructionSequence& code) {
+  for (size_t i = 0; i < code.immediates_.size(); ++i) {
+    Constant constant = code.immediates_[i];
+    os << "IMM#" << i << ": " << constant << "\n";
+  }
+  int i = 0;
+  for (ConstantMap::const_iterator it = code.constants_.begin();
+       it != code.constants_.end(); ++i, ++it) {
+    os << "CST#" << i << ": v" << it->first << " = " << it->second << "\n";
+  }
+  for (int i = 0; i < code.BasicBlockCount(); i++) {
+    BasicBlock* block = code.BlockAt(i);
+
+    int bid = block->id();
+    os << "RPO#" << block->rpo_number_ << ": B" << bid;
+    CHECK(block->rpo_number_ == i);
+    if (block->IsLoopHeader()) {
+      os << " loop blocks: [" << block->rpo_number_ << ", " << block->loop_end_
+         << ")";
+    }
+    os << "  instructions: [" << block->code_start_ << ", " << block->code_end_
+       << ")\n  predecessors:";
+
+    BasicBlock::Predecessors predecessors = block->predecessors();
+    for (BasicBlock::Predecessors::iterator iter = predecessors.begin();
+         iter != predecessors.end(); ++iter) {
+      os << " B" << (*iter)->id();
+    }
+    os << "\n";
+
+    for (BasicBlock::const_iterator j = block->begin(); j != block->end();
+         ++j) {
+      Node* phi = *j;
+      if (phi->opcode() != IrOpcode::kPhi) continue;
+      os << "     phi: v" << phi->id() << " =";
+      Node::Inputs inputs = phi->inputs();
+      for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+           ++iter) {
+        os << " v" << (*iter)->id();
+      }
+      os << "\n";
+    }
+
+    ScopedVector<char> buf(32);
+    for (int j = block->first_instruction_index();
+         j <= block->last_instruction_index(); j++) {
+      // TODO(svenpanne) Add some basic formatting to our streams.
+      SNPrintF(buf, "%5d", j);
+      os << "   " << buf.start() << ": " << *code.InstructionAt(j);
+    }
+
+    os << "  " << block->control_;
+
+    if (block->control_input_ != NULL) {
+      os << " v" << block->control_input_->id();
+    }
+
+    BasicBlock::Successors successors = block->successors();
+    for (BasicBlock::Successors::iterator iter = successors.begin();
+         iter != successors.end(); ++iter) {
+      os << " B" << (*iter)->id();
+    }
+    os << "\n";
+  }
+  return os;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h
new file mode 100644
index 0000000..6d00784
--- /dev/null
+++ b/src/compiler/instruction.h
@@ -0,0 +1,940 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_H_
+#define V8_COMPILER_INSTRUCTION_H_
+
+#include <deque>
+#include <map>
+#include <set>
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/frame.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/instruction-codes.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/schedule.h"
+// TODO(titzer): don't include the macro-assembler?
+#include "src/macro-assembler.h"
+#include "src/zone-allocator.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class OStream;
+
+namespace compiler {
+
+// Forward declarations.
+class Linkage;
+
+// A few opcodes are reserved for internal use.
+const InstructionCode kGapInstruction = -1;
+const InstructionCode kBlockStartInstruction = -2;
+const InstructionCode kSourcePositionInstruction = -3;
+
+
+#define INSTRUCTION_OPERAND_LIST(V)              \
+  V(Constant, CONSTANT, 128)                     \
+  V(Immediate, IMMEDIATE, 128)                   \
+  V(StackSlot, STACK_SLOT, 128)                  \
+  V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128)     \
+  V(Register, REGISTER, Register::kNumRegisters) \
+  V(DoubleRegister, DOUBLE_REGISTER, DoubleRegister::kMaxNumRegisters)
+
+class InstructionOperand : public ZoneObject {
+ public:
+  enum Kind {
+    INVALID,
+    UNALLOCATED,
+    CONSTANT,
+    IMMEDIATE,
+    STACK_SLOT,
+    DOUBLE_STACK_SLOT,
+    REGISTER,
+    DOUBLE_REGISTER
+  };
+
+  InstructionOperand() : value_(KindField::encode(INVALID)) {}
+  InstructionOperand(Kind kind, int index) { ConvertTo(kind, index); }
+
+  Kind kind() const { return KindField::decode(value_); }
+  int index() const { return static_cast<int>(value_) >> KindField::kSize; }
+#define INSTRUCTION_OPERAND_PREDICATE(name, type, number) \
+  bool Is##name() const { return kind() == type; }
+  INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_PREDICATE)
+  INSTRUCTION_OPERAND_PREDICATE(Unallocated, UNALLOCATED, 0)
+  INSTRUCTION_OPERAND_PREDICATE(Ignored, INVALID, 0)
+#undef INSTRUCTION_OPERAND_PREDICATE
+  bool Equals(InstructionOperand* other) const {
+    return value_ == other->value_;
+  }
+
+  void ConvertTo(Kind kind, int index) {
+    if (kind == REGISTER || kind == DOUBLE_REGISTER) DCHECK(index >= 0);
+    value_ = KindField::encode(kind);
+    value_ |= index << KindField::kSize;
+    DCHECK(this->index() == index);
+  }
+
+  // Calls SetUpCache()/TearDownCache() for each subclass.
+  static void SetUpCaches();
+  static void TearDownCaches();
+
+ protected:
+  typedef BitField<Kind, 0, 3> KindField;
+
+  unsigned value_;
+};
+
+typedef ZoneVector<InstructionOperand*> InstructionOperandVector;
+
+OStream& operator<<(OStream& os, const InstructionOperand& op);
+
+class UnallocatedOperand : public InstructionOperand {
+ public:
+  enum BasicPolicy { FIXED_SLOT, EXTENDED_POLICY };
+
+  enum ExtendedPolicy {
+    NONE,
+    ANY,
+    FIXED_REGISTER,
+    FIXED_DOUBLE_REGISTER,
+    MUST_HAVE_REGISTER,
+    SAME_AS_FIRST_INPUT
+  };
+
+  // Lifetime of operand inside the instruction.
+  enum Lifetime {
+    // A USED_AT_START operand is guaranteed to be live only at the
+    // instruction start. The register allocator is free to assign the same
+    // register to some other operand used inside the instruction (i.e. a
+    // temporary or an output).
+    USED_AT_START,
+
+    // A USED_AT_END operand is treated as live until the end of the
+    // instruction. This means that the register allocator will not reuse its
+    // register for any other operand inside the instruction.
+    USED_AT_END
+  };
+
+  explicit UnallocatedOperand(ExtendedPolicy policy)
+      : InstructionOperand(UNALLOCATED, 0) {
+    value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+    value_ |= ExtendedPolicyField::encode(policy);
+    value_ |= LifetimeField::encode(USED_AT_END);
+  }
+
+  UnallocatedOperand(BasicPolicy policy, int index)
+      : InstructionOperand(UNALLOCATED, 0) {
+    DCHECK(policy == FIXED_SLOT);
+    value_ |= BasicPolicyField::encode(policy);
+    value_ |= index << FixedSlotIndexField::kShift;
+    DCHECK(this->fixed_slot_index() == index);
+  }
+
+  UnallocatedOperand(ExtendedPolicy policy, int index)
+      : InstructionOperand(UNALLOCATED, 0) {
+    DCHECK(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER);
+    value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+    value_ |= ExtendedPolicyField::encode(policy);
+    value_ |= LifetimeField::encode(USED_AT_END);
+    value_ |= FixedRegisterField::encode(index);
+  }
+
+  UnallocatedOperand(ExtendedPolicy policy, Lifetime lifetime)
+      : InstructionOperand(UNALLOCATED, 0) {
+    value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+    value_ |= ExtendedPolicyField::encode(policy);
+    value_ |= LifetimeField::encode(lifetime);
+  }
+
+  UnallocatedOperand* CopyUnconstrained(Zone* zone) {
+    UnallocatedOperand* result = new (zone) UnallocatedOperand(ANY);
+    result->set_virtual_register(virtual_register());
+    return result;
+  }
+
+  static const UnallocatedOperand* cast(const InstructionOperand* op) {
+    DCHECK(op->IsUnallocated());
+    return static_cast<const UnallocatedOperand*>(op);
+  }
+
+  static UnallocatedOperand* cast(InstructionOperand* op) {
+    DCHECK(op->IsUnallocated());
+    return static_cast<UnallocatedOperand*>(op);
+  }
+
+  // The encoding used for UnallocatedOperand operands depends on the policy
+  // that is stored within the operand. The FIXED_SLOT policy uses a compact
+  // encoding because it accommodates a larger payload.
+  //
+  // For FIXED_SLOT policy:
+  //     +------------------------------------------+
+  //     |       slot_index      |  vreg  | 0 | 001 |
+  //     +------------------------------------------+
+  //
+  // For all other (extended) policies:
+  //     +------------------------------------------+
+  //     |  reg_index  | L | PPP |  vreg  | 1 | 001 |    L ... Lifetime
+  //     +------------------------------------------+    P ... Policy
+  //
+  // The slot index is a signed value which requires us to decode it manually
+  // instead of using the BitField utility class.
+
+  // The superclass has a KindField.
+  STATIC_ASSERT(KindField::kSize == 3);
+
+  // BitFields for all unallocated operands.
+  class BasicPolicyField : public BitField<BasicPolicy, 3, 1> {};
+  class VirtualRegisterField : public BitField<unsigned, 4, 18> {};
+
+  // BitFields specific to BasicPolicy::FIXED_SLOT.
+  class FixedSlotIndexField : public BitField<int, 22, 10> {};
+
+  // BitFields specific to BasicPolicy::EXTENDED_POLICY.
+  class ExtendedPolicyField : public BitField<ExtendedPolicy, 22, 3> {};
+  class LifetimeField : public BitField<Lifetime, 25, 1> {};
+  class FixedRegisterField : public BitField<int, 26, 6> {};
+
+  static const int kMaxVirtualRegisters = VirtualRegisterField::kMax + 1;
+  static const int kFixedSlotIndexWidth = FixedSlotIndexField::kSize;
+  static const int kMaxFixedSlotIndex = (1 << (kFixedSlotIndexWidth - 1)) - 1;
+  static const int kMinFixedSlotIndex = -(1 << (kFixedSlotIndexWidth - 1));
+
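+  // Worked example (illustrative, not part of the original code): a
+  // FIXED_SLOT operand with slot index 3 and virtual register 7 encodes as
+  //   value_ = (3 << 22) | (7 << 4) | (0 << 3) | UNALLOCATED
+  // i.e. the slot index sits above bit 22, the virtual register in bits
+  // 4..21, the basic policy (FIXED_SLOT == 0) at bit 3 and the operand kind
+  // in the low three bits.
+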
+  // Predicates for the operand policy.
+  bool HasAnyPolicy() const {
+    return basic_policy() == EXTENDED_POLICY && extended_policy() == ANY;
+  }
+  bool HasFixedPolicy() const {
+    return basic_policy() == FIXED_SLOT ||
+           extended_policy() == FIXED_REGISTER ||
+           extended_policy() == FIXED_DOUBLE_REGISTER;
+  }
+  bool HasRegisterPolicy() const {
+    return basic_policy() == EXTENDED_POLICY &&
+           extended_policy() == MUST_HAVE_REGISTER;
+  }
+  bool HasSameAsInputPolicy() const {
+    return basic_policy() == EXTENDED_POLICY &&
+           extended_policy() == SAME_AS_FIRST_INPUT;
+  }
+  bool HasFixedSlotPolicy() const { return basic_policy() == FIXED_SLOT; }
+  bool HasFixedRegisterPolicy() const {
+    return basic_policy() == EXTENDED_POLICY &&
+           extended_policy() == FIXED_REGISTER;
+  }
+  bool HasFixedDoubleRegisterPolicy() const {
+    return basic_policy() == EXTENDED_POLICY &&
+           extended_policy() == FIXED_DOUBLE_REGISTER;
+  }
+
+  // [basic_policy]: Distinguish between FIXED_SLOT and all other policies.
+  BasicPolicy basic_policy() const { return BasicPolicyField::decode(value_); }
+
+  // [extended_policy]: Only for non-FIXED_SLOT. The finer-grained policy.
+  ExtendedPolicy extended_policy() const {
+    DCHECK(basic_policy() == EXTENDED_POLICY);
+    return ExtendedPolicyField::decode(value_);
+  }
+
+  // [fixed_slot_index]: Only for FIXED_SLOT.
+  int fixed_slot_index() const {
+    DCHECK(HasFixedSlotPolicy());
+    return static_cast<int>(value_) >> FixedSlotIndexField::kShift;
+  }
+
+  // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_DOUBLE_REGISTER.
+  int fixed_register_index() const {
+    DCHECK(HasFixedRegisterPolicy() || HasFixedDoubleRegisterPolicy());
+    return FixedRegisterField::decode(value_);
+  }
+
+  // [virtual_register]: The virtual register ID for this operand.
+  int virtual_register() const { return VirtualRegisterField::decode(value_); }
+  void set_virtual_register(unsigned id) {
+    value_ = VirtualRegisterField::update(value_, id);
+  }
+
+  // [lifetime]: Only for non-FIXED_SLOT.
+  bool IsUsedAtStart() {
+    DCHECK(basic_policy() == EXTENDED_POLICY);
+    return LifetimeField::decode(value_) == USED_AT_START;
+  }
+};
+
+
+class MoveOperands FINAL {
+ public:
+  MoveOperands(InstructionOperand* source, InstructionOperand* destination)
+      : source_(source), destination_(destination) {}
+
+  InstructionOperand* source() const { return source_; }
+  void set_source(InstructionOperand* operand) { source_ = operand; }
+
+  InstructionOperand* destination() const { return destination_; }
+  void set_destination(InstructionOperand* operand) { destination_ = operand; }
+
+  // The gap resolver marks moves as "in-progress" by clearing the
+  // destination (but not the source).
+  bool IsPending() const { return destination_ == NULL && source_ != NULL; }
+
+  // True if this move has not been eliminated and its source is the given
+  // operand, i.e. it blocks a move into that operand.
+  bool Blocks(InstructionOperand* operand) const {
+    return !IsEliminated() && source()->Equals(operand);
+  }
+
+  // A move is redundant if it's been eliminated, if its source and
+  // destination are the same, or if its destination is unneeded or constant.
+  bool IsRedundant() const {
+    return IsEliminated() || source_->Equals(destination_) || IsIgnored() ||
+           (destination_ != NULL && destination_->IsConstant());
+  }
+
+  bool IsIgnored() const {
+    return destination_ != NULL && destination_->IsIgnored();
+  }
+
+  // We clear both operands to indicate a move that's been eliminated.
+  void Eliminate() { source_ = destination_ = NULL; }
+  bool IsEliminated() const {
+    DCHECK(source_ != NULL || destination_ == NULL);
+    return source_ == NULL;
+  }
+
+ private:
+  InstructionOperand* source_;
+  InstructionOperand* destination_;
+};
+
+OStream& operator<<(OStream& os, const MoveOperands& mo);
+
+template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
+class SubKindOperand FINAL : public InstructionOperand {
+ public:
+  static SubKindOperand* Create(int index, Zone* zone) {
+    DCHECK(index >= 0);
+    if (index < kNumCachedOperands) return &cache[index];
+    return new (zone) SubKindOperand(index);
+  }
+
+  static SubKindOperand* cast(InstructionOperand* op) {
+    DCHECK(op->kind() == kOperandKind);
+    return reinterpret_cast<SubKindOperand*>(op);
+  }
+
+  static void SetUpCache();
+  static void TearDownCache();
+
+ private:
+  static SubKindOperand* cache;
+
+  SubKindOperand() : InstructionOperand() {}
+  explicit SubKindOperand(int index)
+      : InstructionOperand(kOperandKind, index) {}
+};
+
+
+#define INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS(name, type, number) \
+  typedef SubKindOperand<InstructionOperand::type, number> name##Operand;
+INSTRUCTION_OPERAND_LIST(INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS)
+#undef INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS
+
+
+class ParallelMove FINAL : public ZoneObject {
+ public:
+  explicit ParallelMove(Zone* zone) : move_operands_(4, zone) {}
+
+  void AddMove(InstructionOperand* from, InstructionOperand* to, Zone* zone) {
+    move_operands_.Add(MoveOperands(from, to), zone);
+  }
+
+  bool IsRedundant() const;
+
+  ZoneList<MoveOperands>* move_operands() { return &move_operands_; }
+  const ZoneList<MoveOperands>* move_operands() const {
+    return &move_operands_;
+  }
+
+ private:
+  ZoneList<MoveOperands> move_operands_;
+};
+
+OStream& operator<<(OStream& os, const ParallelMove& pm);
+
+class PointerMap FINAL : public ZoneObject {
+ public:
+  explicit PointerMap(Zone* zone)
+      : pointer_operands_(8, zone),
+        untagged_operands_(0, zone),
+        instruction_position_(-1) {}
+
+  const ZoneList<InstructionOperand*>* GetNormalizedOperands() {
+    for (int i = 0; i < untagged_operands_.length(); ++i) {
+      RemovePointer(untagged_operands_[i]);
+    }
+    untagged_operands_.Clear();
+    return &pointer_operands_;
+  }
+  int instruction_position() const { return instruction_position_; }
+
+  void set_instruction_position(int pos) {
+    DCHECK(instruction_position_ == -1);
+    instruction_position_ = pos;
+  }
+
+  void RecordPointer(InstructionOperand* op, Zone* zone);
+  void RemovePointer(InstructionOperand* op);
+  void RecordUntagged(InstructionOperand* op, Zone* zone);
+
+ private:
+  friend OStream& operator<<(OStream& os, const PointerMap& pm);
+
+  ZoneList<InstructionOperand*> pointer_operands_;
+  ZoneList<InstructionOperand*> untagged_operands_;
+  int instruction_position_;
+};
+
+OStream& operator<<(OStream& os, const PointerMap& pm);
+
+// TODO(titzer): s/PointerMap/ReferenceMap/
+class Instruction : public ZoneObject {
+ public:
+  size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
+  InstructionOperand* OutputAt(size_t i) const {
+    DCHECK(i < OutputCount());
+    return operands_[i];
+  }
+
+  bool HasOutput() const { return OutputCount() == 1; }
+  InstructionOperand* Output() const { return OutputAt(0); }
+
+  size_t InputCount() const { return InputCountField::decode(bit_field_); }
+  InstructionOperand* InputAt(size_t i) const {
+    DCHECK(i < InputCount());
+    return operands_[OutputCount() + i];
+  }
+
+  size_t TempCount() const { return TempCountField::decode(bit_field_); }
+  InstructionOperand* TempAt(size_t i) const {
+    DCHECK(i < TempCount());
+    return operands_[OutputCount() + InputCount() + i];
+  }
+
+  InstructionCode opcode() const { return opcode_; }
+  ArchOpcode arch_opcode() const { return ArchOpcodeField::decode(opcode()); }
+  AddressingMode addressing_mode() const {
+    return AddressingModeField::decode(opcode());
+  }
+  FlagsMode flags_mode() const { return FlagsModeField::decode(opcode()); }
+  FlagsCondition flags_condition() const {
+    return FlagsConditionField::decode(opcode());
+  }
+
+  // TODO(titzer): make control and call into flags.
+  static Instruction* New(Zone* zone, InstructionCode opcode) {
+    return New(zone, opcode, 0, NULL, 0, NULL, 0, NULL);
+  }
+
+  static Instruction* New(Zone* zone, InstructionCode opcode,
+                          size_t output_count, InstructionOperand** outputs,
+                          size_t input_count, InstructionOperand** inputs,
+                          size_t temp_count, InstructionOperand** temps) {
+    DCHECK(opcode >= 0);
+    DCHECK(output_count == 0 || outputs != NULL);
+    DCHECK(input_count == 0 || inputs != NULL);
+    DCHECK(temp_count == 0 || temps != NULL);
+    InstructionOperand* none = NULL;
+    USE(none);
+    int size = static_cast<int>(RoundUp(sizeof(Instruction), kPointerSize) +
+                                (output_count + input_count + temp_count - 1) *
+                                    sizeof(none));
+    return new (zone->New(size)) Instruction(
+        opcode, output_count, outputs, input_count, inputs, temp_count, temps);
+  }
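+
+  // Sizing note (illustrative, not part of the original code): operands_ is
+  // declared with a single element, so New() allocates
+  //   RoundUp(sizeof(Instruction), kPointerSize)
+  //     + (output_count + input_count + temp_count - 1) * kPointerSize
+  // bytes and stores the operand pointers directly behind the fixed fields;
+  // this is why instructions are placement-new'd into zone memory instead of
+  // being constructed on the stack.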
+
+  // TODO(titzer): another holdover from lithium days; register allocator
+  // should not need to know about control instructions.
+  Instruction* MarkAsControl() {
+    bit_field_ = IsControlField::update(bit_field_, true);
+    return this;
+  }
+  Instruction* MarkAsCall() {
+    bit_field_ = IsCallField::update(bit_field_, true);
+    return this;
+  }
+  bool IsControl() const { return IsControlField::decode(bit_field_); }
+  bool IsCall() const { return IsCallField::decode(bit_field_); }
+  bool NeedsPointerMap() const { return IsCall(); }
+  bool HasPointerMap() const { return pointer_map_ != NULL; }
+
+  bool IsGapMoves() const {
+    return opcode() == kGapInstruction || opcode() == kBlockStartInstruction;
+  }
+  bool IsBlockStart() const { return opcode() == kBlockStartInstruction; }
+  bool IsSourcePosition() const {
+    return opcode() == kSourcePositionInstruction;
+  }
+
+  bool ClobbersRegisters() const { return IsCall(); }
+  bool ClobbersTemps() const { return IsCall(); }
+  bool ClobbersDoubleRegisters() const { return IsCall(); }
+  PointerMap* pointer_map() const { return pointer_map_; }
+
+  void set_pointer_map(PointerMap* map) {
+    DCHECK(NeedsPointerMap());
+    DCHECK_EQ(NULL, pointer_map_);
+    pointer_map_ = map;
+  }
+
+  // Placement new operator so that we can smash instructions into
+  // zone-allocated memory.
+  void* operator new(size_t, void* location) { return location; }
+
+  void operator delete(void* pointer, void* location) { UNREACHABLE(); }
+
+ protected:
+  explicit Instruction(InstructionCode opcode)
+      : opcode_(opcode),
+        bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
+                   TempCountField::encode(0) | IsCallField::encode(false) |
+                   IsControlField::encode(false)),
+        pointer_map_(NULL) {}
+
+  Instruction(InstructionCode opcode, size_t output_count,
+              InstructionOperand** outputs, size_t input_count,
+              InstructionOperand** inputs, size_t temp_count,
+              InstructionOperand** temps)
+      : opcode_(opcode),
+        bit_field_(OutputCountField::encode(output_count) |
+                   InputCountField::encode(input_count) |
+                   TempCountField::encode(temp_count) |
+                   IsCallField::encode(false) | IsControlField::encode(false)),
+        pointer_map_(NULL) {
+    for (size_t i = 0; i < output_count; ++i) {
+      operands_[i] = outputs[i];
+    }
+    for (size_t i = 0; i < input_count; ++i) {
+      operands_[output_count + i] = inputs[i];
+    }
+    for (size_t i = 0; i < temp_count; ++i) {
+      operands_[output_count + input_count + i] = temps[i];
+    }
+  }
+
+ protected:
+  typedef BitField<size_t, 0, 8> OutputCountField;
+  typedef BitField<size_t, 8, 16> InputCountField;
+  typedef BitField<size_t, 24, 6> TempCountField;
+  typedef BitField<bool, 30, 1> IsCallField;
+  typedef BitField<bool, 31, 1> IsControlField;
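+
+  // Layout note (illustrative, not part of the original code): bit_field_
+  // packs up to 255 outputs (bits 0..7), 65535 inputs (bits 8..23) and 63
+  // temps (bits 24..29), with the is-call and is-control flags in the top
+  // two bits.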
+
+  InstructionCode opcode_;
+  uint32_t bit_field_;
+  PointerMap* pointer_map_;
+  InstructionOperand* operands_[1];
+};
+
+OStream& operator<<(OStream& os, const Instruction& instr);
+
+// Represents moves inserted before an instruction due to register allocation.
+// TODO(titzer): squash GapInstruction back into Instruction, since essentially
+// every instruction can possibly have moves inserted before it.
+class GapInstruction : public Instruction {
+ public:
+  enum InnerPosition {
+    BEFORE,
+    START,
+    END,
+    AFTER,
+    FIRST_INNER_POSITION = BEFORE,
+    LAST_INNER_POSITION = AFTER
+  };
+
+  ParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+    if (parallel_moves_[pos] == NULL) {
+      parallel_moves_[pos] = new (zone) ParallelMove(zone);
+    }
+    return parallel_moves_[pos];
+  }
+
+  ParallelMove* GetParallelMove(InnerPosition pos) {
+    return parallel_moves_[pos];
+  }
+
+  static GapInstruction* New(Zone* zone) {
+    void* buffer = zone->New(sizeof(GapInstruction));
+    return new (buffer) GapInstruction(kGapInstruction);
+  }
+
+  static GapInstruction* cast(Instruction* instr) {
+    DCHECK(instr->IsGapMoves());
+    return static_cast<GapInstruction*>(instr);
+  }
+
+  static const GapInstruction* cast(const Instruction* instr) {
+    DCHECK(instr->IsGapMoves());
+    return static_cast<const GapInstruction*>(instr);
+  }
+
+ protected:
+  explicit GapInstruction(InstructionCode opcode) : Instruction(opcode) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
+  }
+
+ private:
+  friend OStream& operator<<(OStream& os, const Instruction& instr);
+  ParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+};
+
+
+// This special kind of gap move instruction represents the beginning of a
+// block of code.
+// TODO(titzer): move code_start and code_end from BasicBlock to here.
+class BlockStartInstruction FINAL : public GapInstruction {
+ public:
+  BasicBlock* block() const { return block_; }
+  Label* label() { return &label_; }
+
+  static BlockStartInstruction* New(Zone* zone, BasicBlock* block) {
+    void* buffer = zone->New(sizeof(BlockStartInstruction));
+    return new (buffer) BlockStartInstruction(block);
+  }
+
+  static BlockStartInstruction* cast(Instruction* instr) {
+    DCHECK(instr->IsBlockStart());
+    return static_cast<BlockStartInstruction*>(instr);
+  }
+
+ private:
+  explicit BlockStartInstruction(BasicBlock* block)
+      : GapInstruction(kBlockStartInstruction), block_(block) {}
+
+  BasicBlock* block_;
+  Label label_;
+};
+
+
+class SourcePositionInstruction FINAL : public Instruction {
+ public:
+  static SourcePositionInstruction* New(Zone* zone, SourcePosition position) {
+    void* buffer = zone->New(sizeof(SourcePositionInstruction));
+    return new (buffer) SourcePositionInstruction(position);
+  }
+
+  SourcePosition source_position() const { return source_position_; }
+
+  static SourcePositionInstruction* cast(Instruction* instr) {
+    DCHECK(instr->IsSourcePosition());
+    return static_cast<SourcePositionInstruction*>(instr);
+  }
+
+  static const SourcePositionInstruction* cast(const Instruction* instr) {
+    DCHECK(instr->IsSourcePosition());
+    return static_cast<const SourcePositionInstruction*>(instr);
+  }
+
+ private:
+  explicit SourcePositionInstruction(SourcePosition source_position)
+      : Instruction(kSourcePositionInstruction),
+        source_position_(source_position) {
+    DCHECK(!source_position_.IsInvalid());
+    DCHECK(!source_position_.IsUnknown());
+  }
+
+  SourcePosition source_position_;
+};
+
+
+class Constant FINAL {
+ public:
+  enum Type { kInt32, kInt64, kFloat64, kExternalReference, kHeapObject };
+
+  explicit Constant(int32_t v) : type_(kInt32), value_(v) {}
+  explicit Constant(int64_t v) : type_(kInt64), value_(v) {}
+  explicit Constant(double v) : type_(kFloat64), value_(bit_cast<int64_t>(v)) {}
+  explicit Constant(ExternalReference ref)
+      : type_(kExternalReference), value_(bit_cast<intptr_t>(ref)) {}
+  explicit Constant(Handle<HeapObject> obj)
+      : type_(kHeapObject), value_(bit_cast<intptr_t>(obj)) {}
+
+  Type type() const { return type_; }
+
+  int32_t ToInt32() const {
+    DCHECK_EQ(kInt32, type());
+    return static_cast<int32_t>(value_);
+  }
+
+  int64_t ToInt64() const {
+    if (type() == kInt32) return ToInt32();
+    DCHECK_EQ(kInt64, type());
+    return value_;
+  }
+
+  double ToFloat64() const {
+    if (type() == kInt32) return ToInt32();
+    DCHECK_EQ(kFloat64, type());
+    return bit_cast<double>(value_);
+  }
+
+  ExternalReference ToExternalReference() const {
+    DCHECK_EQ(kExternalReference, type());
+    return bit_cast<ExternalReference>(static_cast<intptr_t>(value_));
+  }
+
+  Handle<HeapObject> ToHeapObject() const {
+    DCHECK_EQ(kHeapObject, type());
+    return bit_cast<Handle<HeapObject> >(static_cast<intptr_t>(value_));
+  }
+
+ private:
+  Type type_;
+  int64_t value_;
+};
+
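+// Usage sketch (illustrative, not part of the original code):
+//   Constant c(int32_t(42));
+//   c.ToInt32();    // 42
+//   c.ToInt64();    // 42; int32 constants widen implicitly
+//   c.ToFloat64();  // 42.0; int32 constants also convert to double
+// whereas asking an int64, external-reference or heap-object constant for a
+// narrower type fails a DCHECK.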
+
+class FrameStateDescriptor : public ZoneObject {
+ public:
+  FrameStateDescriptor(const FrameStateCallInfo& state_info,
+                       size_t parameters_count, size_t locals_count,
+                       size_t stack_count,
+                       FrameStateDescriptor* outer_state = NULL)
+      : type_(state_info.type()),
+        bailout_id_(state_info.bailout_id()),
+        frame_state_combine_(state_info.state_combine()),
+        parameters_count_(parameters_count),
+        locals_count_(locals_count),
+        stack_count_(stack_count),
+        outer_state_(outer_state),
+        jsfunction_(state_info.jsfunction()) {}
+
+  FrameStateType type() const { return type_; }
+  BailoutId bailout_id() const { return bailout_id_; }
+  OutputFrameStateCombine state_combine() const { return frame_state_combine_; }
+  size_t parameters_count() const { return parameters_count_; }
+  size_t locals_count() const { return locals_count_; }
+  size_t stack_count() const { return stack_count_; }
+  FrameStateDescriptor* outer_state() const { return outer_state_; }
+  MaybeHandle<JSFunction> jsfunction() const { return jsfunction_; }
+
+  size_t size() const {
+    return parameters_count_ + locals_count_ + stack_count_ +
+           (HasContext() ? 1 : 0);
+  }
+
+  size_t GetTotalSize() const {
+    size_t total_size = 0;
+    for (const FrameStateDescriptor* iter = this; iter != NULL;
+         iter = iter->outer_state_) {
+      total_size += iter->size();
+    }
+    return total_size;
+  }
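+
+  // Worked example (illustrative, not part of the original code): a JS frame
+  // with 2 parameters, 1 local and 1 stack value has size() 2 + 1 + 1 + 1
+  // (context) = 5; if it is inlined into an outer JS frame of size 4,
+  // GetTotalSize() returns 9.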
+
+  size_t GetHeight(OutputFrameStateCombine override) const {
+    size_t height = size() - parameters_count();
+    switch (override) {
+      case kPushOutput:
+        ++height;
+        break;
+      case kIgnoreOutput:
+        break;
+    }
+    return height;
+  }
+
+  size_t GetFrameCount() const {
+    size_t count = 0;
+    for (const FrameStateDescriptor* iter = this; iter != NULL;
+         iter = iter->outer_state_) {
+      ++count;
+    }
+    return count;
+  }
+
+  size_t GetJSFrameCount() const {
+    size_t count = 0;
+    for (const FrameStateDescriptor* iter = this; iter != NULL;
+         iter = iter->outer_state_) {
+      if (iter->type_ == JS_FRAME) {
+        ++count;
+      }
+    }
+    return count;
+  }
+
+  bool HasContext() const { return type_ == JS_FRAME; }
+
+ private:
+  FrameStateType type_;
+  BailoutId bailout_id_;
+  OutputFrameStateCombine frame_state_combine_;
+  size_t parameters_count_;
+  size_t locals_count_;
+  size_t stack_count_;
+  FrameStateDescriptor* outer_state_;
+  MaybeHandle<JSFunction> jsfunction_;
+};
+
+OStream& operator<<(OStream& os, const Constant& constant);
+
+typedef ZoneDeque<Constant> ConstantDeque;
+typedef std::map<int, Constant, std::less<int>,
+                 zone_allocator<std::pair<int, Constant> > > ConstantMap;
+
+typedef ZoneDeque<Instruction*> InstructionDeque;
+typedef ZoneDeque<PointerMap*> PointerMapDeque;
+typedef ZoneVector<FrameStateDescriptor*> DeoptimizationVector;
+
+// Represents architecture-specific generated code before, during, and after
+// register allocation.
+// TODO(titzer): s/IsDouble/IsFloat64/
+class InstructionSequence FINAL {
+ public:
+  InstructionSequence(Linkage* linkage, Graph* graph, Schedule* schedule)
+      : graph_(graph),
+        linkage_(linkage),
+        schedule_(schedule),
+        constants_(ConstantMap::key_compare(),
+                   ConstantMap::allocator_type(zone())),
+        immediates_(zone()),
+        instructions_(zone()),
+        next_virtual_register_(graph->NodeCount()),
+        pointer_maps_(zone()),
+        doubles_(std::less<int>(), VirtualRegisterSet::allocator_type(zone())),
+        references_(std::less<int>(),
+                    VirtualRegisterSet::allocator_type(zone())),
+        deoptimization_entries_(zone()) {}
+
+  int NextVirtualRegister() { return next_virtual_register_++; }
+  int VirtualRegisterCount() const { return next_virtual_register_; }
+
+  int ValueCount() const { return graph_->NodeCount(); }
+
+  int BasicBlockCount() const {
+    return static_cast<int>(schedule_->rpo_order()->size());
+  }
+
+  BasicBlock* BlockAt(int rpo_number) const {
+    return (*schedule_->rpo_order())[rpo_number];
+  }
+
+  BasicBlock* GetContainingLoop(BasicBlock* block) {
+    return block->loop_header_;
+  }
+
+  int GetLoopEnd(BasicBlock* block) const { return block->loop_end_; }
+
+  BasicBlock* GetBasicBlock(int instruction_index);
+
+  int GetVirtualRegister(Node* node) const { return node->id(); }
+
+  bool IsReference(int virtual_register) const;
+  bool IsDouble(int virtual_register) const;
+
+  void MarkAsReference(int virtual_register);
+  void MarkAsDouble(int virtual_register);
+
+  void AddGapMove(int index, InstructionOperand* from, InstructionOperand* to);
+
+  Label* GetLabel(BasicBlock* block);
+  BlockStartInstruction* GetBlockStart(BasicBlock* block);
+
+  typedef InstructionDeque::const_iterator const_iterator;
+  const_iterator begin() const { return instructions_.begin(); }
+  const_iterator end() const { return instructions_.end(); }
+
+  GapInstruction* GapAt(int index) const {
+    return GapInstruction::cast(InstructionAt(index));
+  }
+  bool IsGapAt(int index) const { return InstructionAt(index)->IsGapMoves(); }
+  Instruction* InstructionAt(int index) const {
+    DCHECK(index >= 0);
+    DCHECK(index < static_cast<int>(instructions_.size()));
+    return instructions_[index];
+  }
+
+  Frame* frame() { return &frame_; }
+  Graph* graph() const { return graph_; }
+  Isolate* isolate() const { return zone()->isolate(); }
+  Linkage* linkage() const { return linkage_; }
+  Schedule* schedule() const { return schedule_; }
+  const PointerMapDeque* pointer_maps() const { return &pointer_maps_; }
+  Zone* zone() const { return graph_->zone(); }
+
+  // Used by the code generator while adding instructions.
+  int AddInstruction(Instruction* instr, BasicBlock* block);
+  void StartBlock(BasicBlock* block);
+  void EndBlock(BasicBlock* block);
+
+  void AddConstant(int virtual_register, Constant constant) {
+    DCHECK(constants_.find(virtual_register) == constants_.end());
+    constants_.insert(std::make_pair(virtual_register, constant));
+  }
+  Constant GetConstant(int virtual_register) const {
+    ConstantMap::const_iterator it = constants_.find(virtual_register);
+    DCHECK(it != constants_.end());
+    DCHECK_EQ(virtual_register, it->first);
+    return it->second;
+  }
+
+  typedef ConstantDeque Immediates;
+  const Immediates& immediates() const { return immediates_; }
+
+  int AddImmediate(Constant constant) {
+    int index = static_cast<int>(immediates_.size());
+    immediates_.push_back(constant);
+    return index;
+  }
+  Constant GetImmediate(int index) const {
+    DCHECK(index >= 0);
+    DCHECK(index < static_cast<int>(immediates_.size()));
+    return immediates_[index];
+  }
+
+  class StateId {
+   public:
+    static StateId FromInt(int id) { return StateId(id); }
+    int ToInt() const { return id_; }
+
+   private:
+    explicit StateId(int id) : id_(id) {}
+    int id_;
+  };
+
+  StateId AddFrameStateDescriptor(FrameStateDescriptor* descriptor);
+  FrameStateDescriptor* GetFrameStateDescriptor(StateId deoptimization_id);
+  int GetFrameStateDescriptorCount();
+
+ private:
+  friend OStream& operator<<(OStream& os, const InstructionSequence& code);
+
+  typedef std::set<int, std::less<int>, ZoneIntAllocator> VirtualRegisterSet;
+
+  Graph* graph_;
+  Linkage* linkage_;
+  Schedule* schedule_;
+  ConstantMap constants_;
+  ConstantDeque immediates_;
+  InstructionDeque instructions_;
+  int next_virtual_register_;
+  PointerMapDeque pointer_maps_;
+  VirtualRegisterSet doubles_;
+  VirtualRegisterSet references_;
+  Frame frame_;
+  DeoptimizationVector deoptimization_entries_;
+};
+
+OStream& operator<<(OStream& os, const InstructionSequence& code);
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_INSTRUCTION_H_
diff --git a/src/compiler/ir-operations.txt b/src/compiler/ir-operations.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/compiler/ir-operations.txt
diff --git a/src/compiler/js-builtin-reducer-unittest.cc b/src/compiler/js-builtin-reducer-unittest.cc
new file mode 100644
index 0000000..51561d0
--- /dev/null
+++ b/src/compiler/js-builtin-reducer-unittest.cc
@@ -0,0 +1,177 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-unittest.h"
+#include "src/compiler/js-builtin-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/typer.h"
+#include "testing/gmock-support.h"
+
+using testing::Capture;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSBuiltinReducerTest : public GraphTest {
+ public:
+  JSBuiltinReducerTest() : javascript_(zone()) {}
+
+ protected:
+  Reduction Reduce(Node* node) {
+    Typer typer(zone());
+    MachineOperatorBuilder machine;
+    JSGraph jsgraph(graph(), common(), javascript(), &typer, &machine);
+    JSBuiltinReducer reducer(&jsgraph);
+    return reducer.Reduce(node);
+  }
+
+  Node* Parameter(Type* t, int32_t index = 0) {
+    Node* n = graph()->NewNode(common()->Parameter(index), graph()->start());
+    NodeProperties::SetBounds(n, Bounds(Type::None(), t));
+    return n;
+  }
+
+  Node* UndefinedConstant() {
+    return HeapConstant(
+        Unique<HeapObject>::CreateImmovable(factory()->undefined_value()));
+  }
+
+  JSOperatorBuilder* javascript() { return &javascript_; }
+
+ private:
+  JSOperatorBuilder javascript_;
+};
+
+
+namespace {
+
+// TODO(mstarzinger): Find a common place and unify with test-js-typed-lowering.
+Type* const kNumberTypes[] = {
+    Type::UnsignedSmall(),   Type::OtherSignedSmall(), Type::OtherUnsigned31(),
+    Type::OtherUnsigned32(), Type::OtherSigned32(),    Type::SignedSmall(),
+    Type::Signed32(),        Type::Unsigned32(),       Type::Integral32(),
+    Type::MinusZero(),       Type::NaN(),              Type::OtherNumber(),
+    Type::OrderedNumber(),   Type::Number()};
+
+}  // namespace
+
+
+// -----------------------------------------------------------------------------
+// Math.sqrt
+
+
+TEST_F(JSBuiltinReducerTest, MathSqrt) {
+  Handle<JSFunction> f(isolate()->context()->math_sqrt_fun());
+
+  TRACED_FOREACH(Type*, t0, kNumberTypes) {
+    Node* p0 = Parameter(t0, 0);
+    Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+    Node* call = graph()->NewNode(javascript()->Call(3, NO_CALL_FUNCTION_FLAGS),
+                                  fun, UndefinedConstant(), p0);
+    Reduction r = Reduce(call);
+
+    ASSERT_TRUE(r.Changed());
+    EXPECT_THAT(r.replacement(), IsFloat64Sqrt(p0));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Math.max
+
+
+TEST_F(JSBuiltinReducerTest, MathMax0) {
+  Handle<JSFunction> f(isolate()->context()->math_max_fun());
+
+  Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+  Node* call = graph()->NewNode(javascript()->Call(2, NO_CALL_FUNCTION_FLAGS),
+                                fun, UndefinedConstant());
+  Reduction r = Reduce(call);
+
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(), IsNumberConstant(-V8_INFINITY));
+}
+
+
+TEST_F(JSBuiltinReducerTest, MathMax1) {
+  Handle<JSFunction> f(isolate()->context()->math_max_fun());
+
+  TRACED_FOREACH(Type*, t0, kNumberTypes) {
+    Node* p0 = Parameter(t0, 0);
+    Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+    Node* call = graph()->NewNode(javascript()->Call(3, NO_CALL_FUNCTION_FLAGS),
+                                  fun, UndefinedConstant(), p0);
+    Reduction r = Reduce(call);
+
+    ASSERT_TRUE(r.Changed());
+    EXPECT_THAT(r.replacement(), p0);
+  }
+}
+
+
+TEST_F(JSBuiltinReducerTest, MathMax2) {
+  Handle<JSFunction> f(isolate()->context()->math_max_fun());
+
+  TRACED_FOREACH(Type*, t0, kNumberTypes) {
+    TRACED_FOREACH(Type*, t1, kNumberTypes) {
+      Node* p0 = Parameter(t0, 0);
+      Node* p1 = Parameter(t1, 1);
+      Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+      Node* call =
+          graph()->NewNode(javascript()->Call(4, NO_CALL_FUNCTION_FLAGS), fun,
+                           UndefinedConstant(), p0, p1);
+      Reduction r = Reduce(call);
+
+      if (t0->Is(Type::Integral32()) && t1->Is(Type::Integral32())) {
+        Capture<Node*> branch;
+        ASSERT_TRUE(r.Changed());
+        EXPECT_THAT(
+            r.replacement(),
+            IsPhi(kMachNone, p1, p0,
+                  IsMerge(IsIfTrue(CaptureEq(&branch)),
+                          IsIfFalse(AllOf(CaptureEq(&branch),
+                                          IsBranch(IsNumberLessThan(p0, p1),
+                                                   graph()->start()))))));
+      } else {
+        ASSERT_FALSE(r.Changed());
+        EXPECT_EQ(IrOpcode::kJSCallFunction, call->opcode());
+      }
+    }
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Math.imul
+
+
+TEST_F(JSBuiltinReducerTest, MathImul) {
+  Handle<JSFunction> f(isolate()->context()->math_imul_fun());
+
+  TRACED_FOREACH(Type*, t0, kNumberTypes) {
+    TRACED_FOREACH(Type*, t1, kNumberTypes) {
+      Node* p0 = Parameter(t0, 0);
+      Node* p1 = Parameter(t1, 1);
+      Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+      Node* call =
+          graph()->NewNode(javascript()->Call(4, NO_CALL_FUNCTION_FLAGS), fun,
+                           UndefinedConstant(), p0, p1);
+      Reduction r = Reduce(call);
+
+      if (t0->Is(Type::Integral32()) && t1->Is(Type::Integral32())) {
+        ASSERT_TRUE(r.Changed());
+        EXPECT_THAT(r.replacement(), IsInt32Mul(p0, p1));
+      } else {
+        ASSERT_FALSE(r.Changed());
+        EXPECT_EQ(IrOpcode::kJSCallFunction, call->opcode());
+      }
+    }
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/js-builtin-reducer.cc b/src/compiler/js-builtin-reducer.cc
new file mode 100644
index 0000000..c57ac33
--- /dev/null
+++ b/src/compiler/js-builtin-reducer.cc
@@ -0,0 +1,174 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-builtin-reducer.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/types.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+// Helper method that assumes replacement nodes are pure values that don't
+// produce an effect. Replaces {node} with {reduction} and relaxes effects.
+static Reduction ReplaceWithPureReduction(Node* node, Reduction reduction) {
+  if (reduction.Changed()) {
+    NodeProperties::ReplaceWithValue(node, reduction.replacement());
+    return reduction;
+  }
+  return Reducer::NoChange();
+}
+
+
+// Helper class to access JSCallFunction nodes that are potential candidates
+// for reduction when they have a BuiltinFunctionId associated with them.
+class JSCallReduction {
+ public:
+  explicit JSCallReduction(Node* node) : node_(node) {}
+
+  // Determines whether the node is a JSCallFunction operation that targets a
+  // constant callee that is a well-known builtin with a BuiltinFunctionId.
+  bool HasBuiltinFunctionId() {
+    if (node_->opcode() != IrOpcode::kJSCallFunction) return false;
+    HeapObjectMatcher<Object> m(NodeProperties::GetValueInput(node_, 0));
+    if (!m.HasValue() || !m.Value().handle()->IsJSFunction()) return false;
+    Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value().handle());
+    return function->shared()->HasBuiltinFunctionId();
+  }
+
+  // Retrieves the BuiltinFunctionId as described above.
+  BuiltinFunctionId GetBuiltinFunctionId() {
+    DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
+    HeapObjectMatcher<Object> m(NodeProperties::GetValueInput(node_, 0));
+    Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value().handle());
+    return function->shared()->builtin_function_id();
+  }
+
+  // Determines whether the call takes zero inputs.
+  bool InputsMatchZero() { return GetJSCallArity() == 0; }
+
+  // Determines whether the call takes one input of the given type.
+  bool InputsMatchOne(Type* t1) {
+    return GetJSCallArity() == 1 &&
+           NodeProperties::GetBounds(GetJSCallInput(0)).upper->Is(t1);
+  }
+
+  // Determines whether the call takes two inputs of the given types.
+  bool InputsMatchTwo(Type* t1, Type* t2) {
+    return GetJSCallArity() == 2 &&
+           NodeProperties::GetBounds(GetJSCallInput(0)).upper->Is(t1) &&
+           NodeProperties::GetBounds(GetJSCallInput(1)).upper->Is(t2);
+  }
+
+  // Determines whether the call takes inputs all of the given type.
+  bool InputsMatchAll(Type* t) {
+    for (int i = 0; i < GetJSCallArity(); i++) {
+      if (!NodeProperties::GetBounds(GetJSCallInput(i)).upper->Is(t)) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  Node* left() { return GetJSCallInput(0); }
+  Node* right() { return GetJSCallInput(1); }
+
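+  // For illustration: a JSCallFunction node for a call like f(a, b) carries
+  // the value inputs [callee, receiver, a, b], so GetJSCallArity() returns 2,
+  // GetJSCallInput(0) is the node for {a}, and GetJSCallInput(1) is {b}.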
+  int GetJSCallArity() {
+    DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
+    // Skip first (i.e. callee) and second (i.e. receiver) operand.
+    return OperatorProperties::GetValueInputCount(node_->op()) - 2;
+  }
+
+  Node* GetJSCallInput(int index) {
+    DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
+    DCHECK_LT(index, GetJSCallArity());
+    // Skip first (i.e. callee) and second (i.e. receiver) operand.
+    return NodeProperties::GetValueInput(node_, index + 2);
+  }
+
+ private:
+  Node* node_;
+};
+
+
+// ECMA-262, section 15.8.2.17.
+Reduction JSBuiltinReducer::ReduceMathSqrt(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.sqrt(a:number) -> Float64Sqrt(a)
+    Node* value = graph()->NewNode(machine()->Float64Sqrt(), r.left());
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+
+// ECMA-262, section 15.8.2.11.
+Reduction JSBuiltinReducer::ReduceMathMax(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchZero()) {
+    // Math.max() -> -Infinity
+    return Replace(jsgraph()->Constant(-V8_INFINITY));
+  }
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.max(a:number) -> a
+    return Replace(r.left());
+  }
+  if (r.InputsMatchAll(Type::Integral32())) {
+    // Math.max(a:int32, b:int32, ...)
+    Node* value = r.GetJSCallInput(0);
+    for (int i = 1; i < r.GetJSCallArity(); i++) {
+      Node* p = r.GetJSCallInput(i);
+      Node* control = graph()->start();
+      Node* tag = graph()->NewNode(simplified()->NumberLessThan(), value, p);
+
+      Node* branch = graph()->NewNode(common()->Branch(), tag, control);
+      Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+      Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+
+      value = graph()->NewNode(common()->Phi(kMachNone, 2), p, value, merge);
+    }
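+    // Illustration: each iteration above adds a diamond,
+    // Branch(NumberLessThan(value, p)) -> IfTrue/IfFalse -> Merge, plus a
+    // Phi(p, value) on that Merge, so the running maximum is carried through
+    // phis rather than through a runtime call.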
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+
+// ES6 draft 08-24-14, section 20.2.2.19.
+Reduction JSBuiltinReducer::ReduceMathImul(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchTwo(Type::Integral32(), Type::Integral32())) {
+    // Math.imul(a:int32, b:int32) -> Int32Mul(a, b)
+    Node* value = graph()->NewNode(machine()->Int32Mul(), r.left(), r.right());
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+
+Reduction JSBuiltinReducer::Reduce(Node* node) {
+  JSCallReduction r(node);
+
+  // Dispatch according to the BuiltinFunctionId if present.
+  if (!r.HasBuiltinFunctionId()) return NoChange();
+  switch (r.GetBuiltinFunctionId()) {
+    case kMathSqrt:
+      return ReplaceWithPureReduction(node, ReduceMathSqrt(node));
+    case kMathMax:
+      return ReplaceWithPureReduction(node, ReduceMathMax(node));
+    case kMathImul:
+      return ReplaceWithPureReduction(node, ReduceMathImul(node));
+    default:
+      break;
+  }
+  return NoChange();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/js-builtin-reducer.h b/src/compiler/js-builtin-reducer.h
new file mode 100644
index 0000000..13927f6
--- /dev/null
+++ b/src/compiler/js-builtin-reducer.h
@@ -0,0 +1,45 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_BUILTIN_REDUCER_H_
+#define V8_COMPILER_JS_BUILTIN_REDUCER_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSBuiltinReducer FINAL : public Reducer {
+ public:
+  explicit JSBuiltinReducer(JSGraph* jsgraph)
+      : jsgraph_(jsgraph), simplified_(jsgraph->zone()) {}
+  virtual ~JSBuiltinReducer() {}
+
+  virtual Reduction Reduce(Node* node) OVERRIDE;
+
+ private:
+  JSGraph* jsgraph() const { return jsgraph_; }
+  Graph* graph() const { return jsgraph_->graph(); }
+  CommonOperatorBuilder* common() const { return jsgraph_->common(); }
+  MachineOperatorBuilder* machine() const { return jsgraph_->machine(); }
+  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+  Reduction ReduceMathSqrt(Node* node);
+  Reduction ReduceMathMax(Node* node);
+  Reduction ReduceMathImul(Node* node);
+
+  JSGraph* jsgraph_;
+  SimplifiedOperatorBuilder simplified_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_JS_BUILTIN_REDUCER_H_
diff --git a/src/compiler/js-context-specialization.cc b/src/compiler/js-context-specialization.cc
new file mode 100644
index 0000000..cd8932b
--- /dev/null
+++ b/src/compiler/js-context-specialization.cc
@@ -0,0 +1,141 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-context-specialization.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class ContextSpecializationVisitor : public NullNodeVisitor {
+ public:
+  explicit ContextSpecializationVisitor(JSContextSpecializer* spec)
+      : spec_(spec) {}
+
+  GenericGraphVisit::Control Post(Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kJSLoadContext: {
+        Reduction r = spec_->ReduceJSLoadContext(node);
+        if (r.Changed() && r.replacement() != node) {
+          NodeProperties::ReplaceWithValue(node, r.replacement());
+          node->RemoveAllInputs();
+        }
+        break;
+      }
+      case IrOpcode::kJSStoreContext: {
+        Reduction r = spec_->ReduceJSStoreContext(node);
+        if (r.Changed() && r.replacement() != node) {
+          NodeProperties::ReplaceWithValue(node, r.replacement());
+          node->RemoveAllInputs();
+        }
+        break;
+      }
+      default:
+        break;
+    }
+    return GenericGraphVisit::CONTINUE;
+  }
+
+ private:
+  JSContextSpecializer* spec_;
+};
+
+
+void JSContextSpecializer::SpecializeToContext() {
+  NodeProperties::ReplaceWithValue(context_,
+                                   jsgraph_->Constant(info_->context()));
+
+  ContextSpecializationVisitor visitor(this);
+  jsgraph_->graph()->VisitNodeInputsFromEnd(&visitor);
+}
+
+
+Reduction JSContextSpecializer::ReduceJSLoadContext(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
+
+  HeapObjectMatcher<Context> m(NodeProperties::GetValueInput(node, 0));
+  // If the context is not constant, no reduction can occur.
+  if (!m.HasValue()) {
+    return Reducer::NoChange();
+  }
+
+  ContextAccess access = OpParameter<ContextAccess>(node);
+
+  // Find the right parent context.
+  Context* context = *m.Value().handle();
+  for (int i = access.depth(); i > 0; --i) {
+    context = context->previous();
+  }
+
+  // If the access itself is mutable, only fold in the parent lookup.
+  if (!access.immutable()) {
+    // The access does not have to look up a parent, nothing to fold.
+    if (access.depth() == 0) {
+      return Reducer::NoChange();
+    }
+    const Operator* op = jsgraph_->javascript()->LoadContext(
+        0, access.index(), access.immutable());
+    node->set_op(op);
+    Handle<Object> context_handle = Handle<Object>(context, info_->isolate());
+    node->ReplaceInput(0, jsgraph_->Constant(context_handle));
+    return Reducer::Changed(node);
+  }
+  Handle<Object> value =
+      Handle<Object>(context->get(access.index()), info_->isolate());
+
+  // Even though the context slot is immutable, the context might have escaped
+  // before the function to which it belongs has initialized the slot.
+  // We must be conservative and check if the value in the slot is currently the
+  // hole or undefined. If it is neither of these, then it must be initialized.
+  if (value->IsUndefined() || value->IsTheHole()) {
+    return Reducer::NoChange();
+  }
+
+  // Success. The context load can be replaced with the constant.
+  // TODO(titzer): record the specialization for sharing code across multiple
+  // contexts that have the same value in the corresponding context slot.
+  return Reducer::Replace(jsgraph_->Constant(value));
+}
+
+
+Reduction JSContextSpecializer::ReduceJSStoreContext(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
+
+  HeapObjectMatcher<Context> m(NodeProperties::GetValueInput(node, 0));
+  // If the context is not constant, no reduction can occur.
+  if (!m.HasValue()) {
+    return Reducer::NoChange();
+  }
+
+  ContextAccess access = OpParameter<ContextAccess>(node);
+
+  // The access does not have to look up a parent, nothing to fold.
+  if (access.depth() == 0) {
+    return Reducer::NoChange();
+  }
+
+  // Find the right parent context.
+  Context* context = *m.Value().handle();
+  for (int i = access.depth(); i > 0; --i) {
+    context = context->previous();
+  }
+
+  const Operator* op = jsgraph_->javascript()->StoreContext(0, access.index());
+  node->set_op(op);
+  Handle<Object> new_context_handle = Handle<Object>(context, info_->isolate());
+  node->ReplaceInput(0, jsgraph_->Constant(new_context_handle));
+
+  return Reducer::Changed(node);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/js-context-specialization.h b/src/compiler/js-context-specialization.h
new file mode 100644
index 0000000..b8b50ed
--- /dev/null
+++ b/src/compiler/js-context-specialization.h
@@ -0,0 +1,37 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_
+#define V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/contexts.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Specializes a given JSGraph to a given context, potentially constant folding
+// some {LoadContext} nodes or strength reducing some {StoreContext} nodes.
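+// For illustration, a {LoadContext} at depth 2 on a constant context has its
+// two parent hops folded away, and if the slot is also immutable and already
+// initialized the whole load becomes a constant of the slot's value.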
+class JSContextSpecializer {
+ public:
+  JSContextSpecializer(CompilationInfo* info, JSGraph* jsgraph, Node* context)
+      : info_(info), jsgraph_(jsgraph), context_(context) {}
+
+  void SpecializeToContext();
+  Reduction ReduceJSLoadContext(Node* node);
+  Reduction ReduceJSStoreContext(Node* node);
+
+ private:
+  CompilationInfo* info_;
+  JSGraph* jsgraph_;
+  Node* context_;
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_
diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc
new file mode 100644
index 0000000..300604e
--- /dev/null
+++ b/src/compiler/js-generic-lowering.cc
@@ -0,0 +1,403 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-generic-lowering.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/unique.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+JSGenericLowering::JSGenericLowering(CompilationInfo* info, JSGraph* jsgraph)
+    : info_(info),
+      jsgraph_(jsgraph),
+      linkage_(new (jsgraph->zone()) Linkage(info)) {}
+
+
+void JSGenericLowering::PatchOperator(Node* node, const Operator* op) {
+  node->set_op(op);
+}
+
+
+void JSGenericLowering::PatchInsertInput(Node* node, int index, Node* input) {
+  node->InsertInput(zone(), index, input);
+}
+
+
+Node* JSGenericLowering::SmiConstant(int32_t immediate) {
+  return jsgraph()->SmiConstant(immediate);
+}
+
+
+Node* JSGenericLowering::Int32Constant(int immediate) {
+  return jsgraph()->Int32Constant(immediate);
+}
+
+
+Node* JSGenericLowering::CodeConstant(Handle<Code> code) {
+  return jsgraph()->HeapConstant(code);
+}
+
+
+Node* JSGenericLowering::FunctionConstant(Handle<JSFunction> function) {
+  return jsgraph()->HeapConstant(function);
+}
+
+
+Node* JSGenericLowering::ExternalConstant(ExternalReference ref) {
+  return jsgraph()->ExternalConstant(ref);
+}
+
+
+Reduction JSGenericLowering::Reduce(Node* node) {
+  switch (node->opcode()) {
+#define DECLARE_CASE(x) \
+  case IrOpcode::k##x:  \
+    Lower##x(node);     \
+    break;
+    DECLARE_CASE(Branch)
+    JS_OP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+    default:
+      // Nothing to see.
+      return NoChange();
+  }
+  return Changed(node);
+}
+
+
+#define REPLACE_BINARY_OP_IC_CALL(op, token)                             \
+  void JSGenericLowering::Lower##op(Node* node) {                        \
+    ReplaceWithStubCall(node, CodeFactory::BinaryOpIC(isolate(), token), \
+                        CallDescriptor::kPatchableCallSiteWithNop);      \
+  }
+REPLACE_BINARY_OP_IC_CALL(JSBitwiseOr, Token::BIT_OR)
+REPLACE_BINARY_OP_IC_CALL(JSBitwiseXor, Token::BIT_XOR)
+REPLACE_BINARY_OP_IC_CALL(JSBitwiseAnd, Token::BIT_AND)
+REPLACE_BINARY_OP_IC_CALL(JSShiftLeft, Token::SHL)
+REPLACE_BINARY_OP_IC_CALL(JSShiftRight, Token::SAR)
+REPLACE_BINARY_OP_IC_CALL(JSShiftRightLogical, Token::SHR)
+REPLACE_BINARY_OP_IC_CALL(JSAdd, Token::ADD)
+REPLACE_BINARY_OP_IC_CALL(JSSubtract, Token::SUB)
+REPLACE_BINARY_OP_IC_CALL(JSMultiply, Token::MUL)
+REPLACE_BINARY_OP_IC_CALL(JSDivide, Token::DIV)
+REPLACE_BINARY_OP_IC_CALL(JSModulus, Token::MOD)
+#undef REPLACE_BINARY_OP_IC_CALL
+
+
+#define REPLACE_COMPARE_IC_CALL(op, token, pure)  \
+  void JSGenericLowering::Lower##op(Node* node) { \
+    ReplaceWithCompareIC(node, token, pure);      \
+  }
+REPLACE_COMPARE_IC_CALL(JSEqual, Token::EQ, false)
+REPLACE_COMPARE_IC_CALL(JSNotEqual, Token::NE, false)
+REPLACE_COMPARE_IC_CALL(JSStrictEqual, Token::EQ_STRICT, true)
+REPLACE_COMPARE_IC_CALL(JSStrictNotEqual, Token::NE_STRICT, true)
+REPLACE_COMPARE_IC_CALL(JSLessThan, Token::LT, false)
+REPLACE_COMPARE_IC_CALL(JSGreaterThan, Token::GT, false)
+REPLACE_COMPARE_IC_CALL(JSLessThanOrEqual, Token::LTE, false)
+REPLACE_COMPARE_IC_CALL(JSGreaterThanOrEqual, Token::GTE, false)
+#undef REPLACE_COMPARE_IC_CALL
+
+
+#define REPLACE_RUNTIME_CALL(op, fun)             \
+  void JSGenericLowering::Lower##op(Node* node) { \
+    ReplaceWithRuntimeCall(node, fun);            \
+  }
+REPLACE_RUNTIME_CALL(JSTypeOf, Runtime::kTypeof)
+REPLACE_RUNTIME_CALL(JSCreate, Runtime::kAbort)
+REPLACE_RUNTIME_CALL(JSCreateFunctionContext, Runtime::kNewFunctionContext)
+REPLACE_RUNTIME_CALL(JSCreateCatchContext, Runtime::kPushCatchContext)
+REPLACE_RUNTIME_CALL(JSCreateWithContext, Runtime::kPushWithContext)
+REPLACE_RUNTIME_CALL(JSCreateBlockContext, Runtime::kPushBlockContext)
+REPLACE_RUNTIME_CALL(JSCreateModuleContext, Runtime::kPushModuleContext)
+REPLACE_RUNTIME_CALL(JSCreateGlobalContext, Runtime::kAbort)
+#undef REPLACE_RUNTIME_CALL
+
+
+#define REPLACE_UNIMPLEMENTED(op) \
+  void JSGenericLowering::Lower##op(Node* node) { UNIMPLEMENTED(); }
+REPLACE_UNIMPLEMENTED(JSToName)
+REPLACE_UNIMPLEMENTED(JSYield)
+REPLACE_UNIMPLEMENTED(JSDebugger)
+#undef REPLACE_UNIMPLEMENTED
+
+
+static CallDescriptor::Flags FlagsForNode(Node* node) {
+  CallDescriptor::Flags result = CallDescriptor::kNoFlags;
+  if (OperatorProperties::HasFrameStateInput(node->op())) {
+    result |= CallDescriptor::kNeedsFrameState;
+  }
+  return result;
+}
+
+
+void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token,
+                                             bool pure) {
+  Callable callable = CodeFactory::CompareIC(isolate(), token);
+  bool has_frame_state = OperatorProperties::HasFrameStateInput(node->op());
+  CallDescriptor* desc_compare = linkage()->GetStubCallDescriptor(
+      callable.descriptor(), 0,
+      CallDescriptor::kPatchableCallSiteWithNop | FlagsForNode(node));
+  NodeVector inputs(zone());
+  inputs.reserve(node->InputCount() + 1);
+  inputs.push_back(CodeConstant(callable.code()));
+  inputs.push_back(NodeProperties::GetValueInput(node, 0));
+  inputs.push_back(NodeProperties::GetValueInput(node, 1));
+  inputs.push_back(NodeProperties::GetContextInput(node));
+  if (pure) {
+    // A pure (strict) comparison doesn't have an effect, control or frame
+    // state, but the graph still needs control and effect inputs for it.
+    DCHECK(!has_frame_state);
+    inputs.push_back(graph()->start());
+    inputs.push_back(graph()->start());
+  } else {
+    DCHECK(has_frame_state == FLAG_turbo_deoptimization);
+    if (FLAG_turbo_deoptimization) {
+      inputs.push_back(NodeProperties::GetFrameStateInput(node));
+    }
+    inputs.push_back(NodeProperties::GetEffectInput(node));
+    inputs.push_back(NodeProperties::GetControlInput(node));
+  }
+  Node* compare =
+      graph()->NewNode(common()->Call(desc_compare),
+                       static_cast<int>(inputs.size()), &inputs.front());
+
+  node->ReplaceInput(0, compare);
+  node->ReplaceInput(1, SmiConstant(token));
+
+  if (has_frame_state) {
+    // Remove the frame state from inputs.
+    node->RemoveInput(NodeProperties::FirstFrameStateIndex(node));
+  }
+
+  ReplaceWithRuntimeCall(node, Runtime::kBooleanize);
+}
+
+
+void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
+                                            CallDescriptor::Flags flags) {
+  CallDescriptor* desc = linkage()->GetStubCallDescriptor(
+      callable.descriptor(), 0, flags | FlagsForNode(node));
+  Node* stub_code = CodeConstant(callable.code());
+  PatchInsertInput(node, 0, stub_code);
+  PatchOperator(node, common()->Call(desc));
+}
+
+
+void JSGenericLowering::ReplaceWithBuiltinCall(Node* node,
+                                               Builtins::JavaScript id,
+                                               int nargs) {
+  Callable callable =
+      CodeFactory::CallFunction(isolate(), nargs - 1, NO_CALL_FUNCTION_FLAGS);
+  CallDescriptor* desc =
+      linkage()->GetStubCallDescriptor(callable.descriptor(), nargs);
+  // TODO(mstarzinger): Accessing the builtins object this way prevents sharing
+  // of code across native contexts. Fix this by loading from the given context.
+  Handle<JSFunction> function(
+      JSFunction::cast(info()->context()->builtins()->javascript_builtin(id)));
+  Node* stub_code = CodeConstant(callable.code());
+  Node* function_node = FunctionConstant(function);
+  PatchInsertInput(node, 0, stub_code);
+  PatchInsertInput(node, 1, function_node);
+  PatchOperator(node, common()->Call(desc));
+}
+
+
+void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
+                                               Runtime::FunctionId f,
+                                               int nargs_override) {
+  Operator::Properties properties = node->op()->properties();
+  const Runtime::Function* fun = Runtime::FunctionForId(f);
+  int nargs = (nargs_override < 0) ? fun->nargs : nargs_override;
+  CallDescriptor* desc =
+      linkage()->GetRuntimeCallDescriptor(f, nargs, properties);
+  Node* ref = ExternalConstant(ExternalReference(f, isolate()));
+  Node* arity = Int32Constant(nargs);
+  if (!centrystub_constant_.is_set()) {
+    centrystub_constant_.set(CodeConstant(CEntryStub(isolate(), 1).GetCode()));
+  }
+  PatchInsertInput(node, 0, centrystub_constant_.get());
+  PatchInsertInput(node, nargs + 1, ref);
+  PatchInsertInput(node, nargs + 2, arity);
+  PatchOperator(node, common()->Call(desc));
+}
+
+
+void JSGenericLowering::LowerBranch(Node* node) {
+  if (!info()->is_typing_enabled()) {
+    // TODO(mstarzinger): If typing is enabled then simplified lowering will
+    // have inserted the correct ChangeBoolToBit, otherwise we need to perform
+    // poor-man's representation inference here and insert manual change.
+    Node* test = graph()->NewNode(machine()->WordEqual(), node->InputAt(0),
+                                  jsgraph()->TrueConstant());
+    node->ReplaceInput(0, test);
+  }
+}
+
+
+void JSGenericLowering::LowerJSUnaryNot(Node* node) {
+  Callable callable = CodeFactory::ToBoolean(
+      isolate(), ToBooleanStub::RESULT_AS_INVERSE_ODDBALL);
+  ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+}
+
+
+void JSGenericLowering::LowerJSToBoolean(Node* node) {
+  Callable callable =
+      CodeFactory::ToBoolean(isolate(), ToBooleanStub::RESULT_AS_ODDBALL);
+  ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+}
+
+
+void JSGenericLowering::LowerJSToNumber(Node* node) {
+  Callable callable = CodeFactory::ToNumber(isolate());
+  ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags);
+}
+
+
+void JSGenericLowering::LowerJSToString(Node* node) {
+  ReplaceWithBuiltinCall(node, Builtins::TO_STRING, 1);
+}
+
+
+void JSGenericLowering::LowerJSToObject(Node* node) {
+  ReplaceWithBuiltinCall(node, Builtins::TO_OBJECT, 1);
+}
+
+
+void JSGenericLowering::LowerJSLoadProperty(Node* node) {
+  Callable callable = CodeFactory::KeyedLoadIC(isolate());
+  ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+}
+
+
+void JSGenericLowering::LowerJSLoadNamed(Node* node) {
+  LoadNamedParameters p = OpParameter<LoadNamedParameters>(node);
+  Callable callable = CodeFactory::LoadIC(isolate(), p.contextual_mode);
+  PatchInsertInput(node, 1, jsgraph()->HeapConstant(p.name));
+  ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+}
+
+
+void JSGenericLowering::LowerJSStoreProperty(Node* node) {
+  StrictMode strict_mode = OpParameter<StrictMode>(node);
+  Callable callable = CodeFactory::KeyedStoreIC(isolate(), strict_mode);
+  ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+}
+
+
+void JSGenericLowering::LowerJSStoreNamed(Node* node) {
+  StoreNamedParameters params = OpParameter<StoreNamedParameters>(node);
+  Callable callable = CodeFactory::StoreIC(isolate(), params.strict_mode);
+  PatchInsertInput(node, 1, jsgraph()->HeapConstant(params.name));
+  ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+}
+
+
+void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
+  StrictMode strict_mode = OpParameter<StrictMode>(node);
+  PatchInsertInput(node, 2, SmiConstant(strict_mode));
+  ReplaceWithBuiltinCall(node, Builtins::DELETE, 3);
+}
+
+
+void JSGenericLowering::LowerJSHasProperty(Node* node) {
+  ReplaceWithBuiltinCall(node, Builtins::IN, 2);
+}
+
+
+void JSGenericLowering::LowerJSInstanceOf(Node* node) {
+  InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
+      InstanceofStub::kReturnTrueFalseObject |
+      InstanceofStub::kArgsInRegisters);
+  InstanceofStub stub(isolate(), flags);
+  CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
+  CallDescriptor* desc = linkage()->GetStubCallDescriptor(d, 0);
+  Node* stub_code = CodeConstant(stub.GetCode());
+  PatchInsertInput(node, 0, stub_code);
+  PatchOperator(node, common()->Call(desc));
+}
+
+
+void JSGenericLowering::LowerJSLoadContext(Node* node) {
+  ContextAccess access = OpParameter<ContextAccess>(node);
+  // TODO(mstarzinger): Use simplified operators instead of machine operators
+  // here so that load/store optimization can be applied afterwards.
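+  // Roughly, a load at depth d becomes d chained Loads of the PREVIOUS slot
+  // followed by a final Load of the target slot, e.g. for depth 1
+  // (schematically): Load(Load(context, PREVIOUS offset), slot offset).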
+  for (int i = 0; i < access.depth(); ++i) {
+    node->ReplaceInput(
+        0, graph()->NewNode(
+               machine()->Load(kMachAnyTagged),
+               NodeProperties::GetValueInput(node, 0),
+               Int32Constant(Context::SlotOffset(Context::PREVIOUS_INDEX)),
+               NodeProperties::GetEffectInput(node)));
+  }
+  node->ReplaceInput(1, Int32Constant(Context::SlotOffset(access.index())));
+  PatchOperator(node, machine()->Load(kMachAnyTagged));
+}
+
+
+void JSGenericLowering::LowerJSStoreContext(Node* node) {
+  ContextAccess access = OpParameter<ContextAccess>(node);
+  // TODO(mstarzinger): Use simplified operators instead of machine operators
+  // here so that load/store optimization can be applied afterwards.
+  for (int i = 0; i < access.depth(); ++i) {
+    node->ReplaceInput(
+        0, graph()->NewNode(
+               machine()->Load(kMachAnyTagged),
+               NodeProperties::GetValueInput(node, 0),
+               Int32Constant(Context::SlotOffset(Context::PREVIOUS_INDEX)),
+               NodeProperties::GetEffectInput(node)));
+  }
+  node->ReplaceInput(2, NodeProperties::GetValueInput(node, 1));
+  node->ReplaceInput(1, Int32Constant(Context::SlotOffset(access.index())));
+  PatchOperator(node, machine()->Store(StoreRepresentation(kMachAnyTagged,
+                                                           kFullWriteBarrier)));
+}
+
+
+void JSGenericLowering::LowerJSCallConstruct(Node* node) {
+  int arity = OpParameter<int>(node);
+  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+  CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
+  CallDescriptor* desc =
+      linkage()->GetStubCallDescriptor(d, arity, FlagsForNode(node));
+  Node* stub_code = CodeConstant(stub.GetCode());
+  Node* construct = NodeProperties::GetValueInput(node, 0);
+  PatchInsertInput(node, 0, stub_code);
+  PatchInsertInput(node, 1, Int32Constant(arity - 1));
+  PatchInsertInput(node, 2, construct);
+  PatchInsertInput(node, 3, jsgraph()->UndefinedConstant());
+  PatchOperator(node, common()->Call(desc));
+}
+
+
+void JSGenericLowering::LowerJSCallFunction(Node* node) {
+  CallParameters p = OpParameter<CallParameters>(node);
+  CallFunctionStub stub(isolate(), p.arity - 2, p.flags);
+  CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
+  CallDescriptor* desc =
+      linkage()->GetStubCallDescriptor(d, p.arity - 1, FlagsForNode(node));
+  Node* stub_code = CodeConstant(stub.GetCode());
+  PatchInsertInput(node, 0, stub_code);
+  PatchOperator(node, common()->Call(desc));
+}
+
+
+void JSGenericLowering::LowerJSCallRuntime(Node* node) {
+  Runtime::FunctionId function = OpParameter<Runtime::FunctionId>(node);
+  int arity = OperatorProperties::GetValueInputCount(node->op());
+  ReplaceWithRuntimeCall(node, function, arity);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/js-generic-lowering.h b/src/compiler/js-generic-lowering.h
new file mode 100644
index 0000000..400f806
--- /dev/null
+++ b/src/compiler/js-generic-lowering.h
@@ -0,0 +1,77 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_GENERIC_LOWERING_H_
+#define V8_COMPILER_JS_GENERIC_LOWERING_H_
+
+#include "src/v8.h"
+
+#include "src/allocation.h"
+#include "src/code-factory.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/opcodes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class MachineOperatorBuilder;
+class Linkage;
+
+// Lowers JS-level operators to runtime and IC calls in the "generic" case.
+class JSGenericLowering : public Reducer {
+ public:
+  JSGenericLowering(CompilationInfo* info, JSGraph* graph);
+  virtual ~JSGenericLowering() {}
+
+  virtual Reduction Reduce(Node* node);
+
+ protected:
+#define DECLARE_LOWER(x) void Lower##x(Node* node);
+  // Dispatched depending on opcode.
+  ALL_OP_LIST(DECLARE_LOWER)
+#undef DECLARE_LOWER
+
+  // Helpers to create new constant nodes.
+  Node* SmiConstant(int immediate);
+  Node* Int32Constant(int immediate);
+  Node* CodeConstant(Handle<Code> code);
+  Node* FunctionConstant(Handle<JSFunction> function);
+  Node* ExternalConstant(ExternalReference ref);
+
+  // Helpers to patch existing nodes in the graph.
+  void PatchOperator(Node* node, const Operator* new_op);
+  void PatchInsertInput(Node* node, int index, Node* input);
+
+  // Helpers to replace existing nodes with a generic call.
+  void ReplaceWithCompareIC(Node* node, Token::Value token, bool pure);
+  void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags);
+  void ReplaceWithBuiltinCall(Node* node, Builtins::JavaScript id, int args);
+  void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1);
+
+  Zone* zone() const { return graph()->zone(); }
+  Isolate* isolate() const { return zone()->isolate(); }
+  JSGraph* jsgraph() const { return jsgraph_; }
+  Graph* graph() const { return jsgraph()->graph(); }
+  Linkage* linkage() const { return linkage_; }
+  CompilationInfo* info() const { return info_; }
+  CommonOperatorBuilder* common() const { return jsgraph()->common(); }
+  MachineOperatorBuilder* machine() const { return jsgraph()->machine(); }
+
+ private:
+  CompilationInfo* info_;
+  JSGraph* jsgraph_;
+  Linkage* linkage_;
+  SetOncePointer<Node> centrystub_constant_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_JS_GENERIC_LOWERING_H_
diff --git a/src/compiler/js-graph.cc b/src/compiler/js-graph.cc
new file mode 100644
index 0000000..1309531
--- /dev/null
+++ b/src/compiler/js-graph.cc
@@ -0,0 +1,186 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/typer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Node* JSGraph::ImmovableHeapConstant(Handle<Object> object) {
+  Unique<Object> unique = Unique<Object>::CreateImmovable(object);
+  return NewNode(common()->HeapConstant(unique));
+}
+
+
+Node* JSGraph::NewNode(const Operator* op) {
+  Node* node = graph()->NewNode(op);
+  typer_->Init(node);
+  return node;
+}
+
+
+Node* JSGraph::CEntryStubConstant() {
+  if (!c_entry_stub_constant_.is_set()) {
+    c_entry_stub_constant_.set(
+        ImmovableHeapConstant(CEntryStub(isolate(), 1).GetCode()));
+  }
+  return c_entry_stub_constant_.get();
+}
+
+
+Node* JSGraph::UndefinedConstant() {
+  if (!undefined_constant_.is_set()) {
+    undefined_constant_.set(
+        ImmovableHeapConstant(factory()->undefined_value()));
+  }
+  return undefined_constant_.get();
+}
+
+
+Node* JSGraph::TheHoleConstant() {
+  if (!the_hole_constant_.is_set()) {
+    the_hole_constant_.set(ImmovableHeapConstant(factory()->the_hole_value()));
+  }
+  return the_hole_constant_.get();
+}
+
+
+Node* JSGraph::TrueConstant() {
+  if (!true_constant_.is_set()) {
+    true_constant_.set(ImmovableHeapConstant(factory()->true_value()));
+  }
+  return true_constant_.get();
+}
+
+
+Node* JSGraph::FalseConstant() {
+  if (!false_constant_.is_set()) {
+    false_constant_.set(ImmovableHeapConstant(factory()->false_value()));
+  }
+  return false_constant_.get();
+}
+
+
+Node* JSGraph::NullConstant() {
+  if (!null_constant_.is_set()) {
+    null_constant_.set(ImmovableHeapConstant(factory()->null_value()));
+  }
+  return null_constant_.get();
+}
+
+
+Node* JSGraph::ZeroConstant() {
+  if (!zero_constant_.is_set()) zero_constant_.set(NumberConstant(0.0));
+  return zero_constant_.get();
+}
+
+
+Node* JSGraph::OneConstant() {
+  if (!one_constant_.is_set()) one_constant_.set(NumberConstant(1.0));
+  return one_constant_.get();
+}
+
+
+Node* JSGraph::NaNConstant() {
+  if (!nan_constant_.is_set()) {
+    nan_constant_.set(NumberConstant(base::OS::nan_value()));
+  }
+  return nan_constant_.get();
+}
+
+
+Node* JSGraph::HeapConstant(Unique<Object> value) {
+  // TODO(turbofan): canonicalize heap constants using Unique<T>
+  return NewNode(common()->HeapConstant(value));
+}
+
+
+Node* JSGraph::HeapConstant(Handle<Object> value) {
+  // TODO(titzer): We could also match against the addresses of immortal
+  // immovables here, even without access to the heap, thus always
+  // canonicalizing references to them.
+  // return HeapConstant(Unique<Object>::CreateUninitialized(value));
+  // TODO(turbofan): This is a work-around to make Unique::HashCode() work for
+  // value numbering. We need some sane way to compute a unique hash code for
+  // arbitrary handles here.
+  Unique<Object> unique(reinterpret_cast<Address>(*value.location()), value);
+  return HeapConstant(unique);
+}
+
+
+Node* JSGraph::Constant(Handle<Object> value) {
+  // Dereference the handle to determine if a number constant or other
+  // canonicalized node can be used.
+  if (value->IsNumber()) {
+    return Constant(value->Number());
+  } else if (value->IsUndefined()) {
+    return UndefinedConstant();
+  } else if (value->IsTrue()) {
+    return TrueConstant();
+  } else if (value->IsFalse()) {
+    return FalseConstant();
+  } else if (value->IsNull()) {
+    return NullConstant();
+  } else if (value->IsTheHole()) {
+    return TheHoleConstant();
+  } else {
+    return HeapConstant(value);
+  }
+}
+
+
+Node* JSGraph::Constant(double value) {
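+  // Comparing bit patterns (rather than using ==) keeps -0.0 distinct from
+  // 0.0, so -0.0 is not folded into the canonical zero constant.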
+  if (bit_cast<int64_t>(value) == bit_cast<int64_t>(0.0)) return ZeroConstant();
+  if (bit_cast<int64_t>(value) == bit_cast<int64_t>(1.0)) return OneConstant();
+  return NumberConstant(value);
+}
+
+
+Node* JSGraph::Constant(int32_t value) {
+  if (value == 0) return ZeroConstant();
+  if (value == 1) return OneConstant();
+  return NumberConstant(value);
+}
+
+
+Node* JSGraph::Int32Constant(int32_t value) {
+  Node** loc = cache_.FindInt32Constant(value);
+  if (*loc == NULL) {
+    *loc = NewNode(common()->Int32Constant(value));
+  }
+  return *loc;
+}
+
+
+Node* JSGraph::NumberConstant(double value) {
+  Node** loc = cache_.FindNumberConstant(value);
+  if (*loc == NULL) {
+    *loc = NewNode(common()->NumberConstant(value));
+  }
+  return *loc;
+}
+
+
+Node* JSGraph::Float64Constant(double value) {
+  Node** loc = cache_.FindFloat64Constant(value);
+  if (*loc == NULL) {
+    *loc = NewNode(common()->Float64Constant(value));
+  }
+  return *loc;
+}
+
+
+Node* JSGraph::ExternalConstant(ExternalReference reference) {
+  Node** loc = cache_.FindExternalConstant(reference);
+  if (*loc == NULL) {
+    *loc = NewNode(common()->ExternalConstant(reference));
+  }
+  return *loc;
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/js-graph.h b/src/compiler/js-graph.h
new file mode 100644
index 0000000..2b2dfd1
--- /dev/null
+++ b/src/compiler/js-graph.h
@@ -0,0 +1,120 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_GRAPH_H_
+#define V8_COMPILER_JS_GRAPH_H_
+
+#include "src/compiler/common-node-cache.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Typer;
+
+// Implements a facade on a Graph, enhancing the graph with JS-specific
+// notions, including a builder for JS* operators, canonicalized global
+// constants, and various helper methods.
+class JSGraph : public ZoneObject {
+ public:
+  JSGraph(Graph* graph, CommonOperatorBuilder* common,
+          JSOperatorBuilder* javascript, Typer* typer,
+          MachineOperatorBuilder* machine)
+      : graph_(graph),
+        common_(common),
+        javascript_(javascript),
+        typer_(typer),
+        machine_(machine),
+        cache_(zone()) {}
+
+  // Canonicalized global constants.
+  Node* CEntryStubConstant();
+  Node* UndefinedConstant();
+  Node* TheHoleConstant();
+  Node* TrueConstant();
+  Node* FalseConstant();
+  Node* NullConstant();
+  Node* ZeroConstant();
+  Node* OneConstant();
+  Node* NaNConstant();
+
+  // Creates a HeapConstant node, possibly canonicalized, without inspecting the
+  // object.
+  Node* HeapConstant(Unique<Object> value);
+
+  // Creates a HeapConstant node, possibly canonicalized, and may access the
+  // heap to inspect the object.
+  Node* HeapConstant(Handle<Object> value);
+
+  // Creates a Constant node of the appropriate type for the given object.
+  // Accesses the heap to inspect the object and determine whether one of the
+  // canonicalized globals or a number constant should be returned.
+  Node* Constant(Handle<Object> value);
+
+  // Creates a NumberConstant node, usually canonicalized.
+  Node* Constant(double value);
+
+  // Creates a NumberConstant node, usually canonicalized.
+  Node* Constant(int32_t value);
+
+  // Creates an Int32Constant node, usually canonicalized.
+  Node* Int32Constant(int32_t value);
+  Node* Uint32Constant(uint32_t value) {
+    return Int32Constant(bit_cast<int32_t>(value));
+  }
+
+  // Creates a Float64Constant node, usually canonicalized.
+  Node* Float64Constant(double value);
+
+  // Creates an ExternalConstant node, usually canonicalized.
+  Node* ExternalConstant(ExternalReference ref);
+
+  Node* SmiConstant(int32_t immediate) {
+    DCHECK(Smi::IsValid(immediate));
+    return Constant(immediate);
+  }
+
+  JSOperatorBuilder* javascript() { return javascript_; }
+  CommonOperatorBuilder* common() { return common_; }
+  MachineOperatorBuilder* machine() { return machine_; }
+  Graph* graph() { return graph_; }
+  Zone* zone() { return graph()->zone(); }
+  Isolate* isolate() { return zone()->isolate(); }
+
+ private:
+  Graph* graph_;
+  CommonOperatorBuilder* common_;
+  JSOperatorBuilder* javascript_;
+  Typer* typer_;
+  MachineOperatorBuilder* machine_;
+
+  SetOncePointer<Node> c_entry_stub_constant_;
+  SetOncePointer<Node> undefined_constant_;
+  SetOncePointer<Node> the_hole_constant_;
+  SetOncePointer<Node> true_constant_;
+  SetOncePointer<Node> false_constant_;
+  SetOncePointer<Node> null_constant_;
+  SetOncePointer<Node> zero_constant_;
+  SetOncePointer<Node> one_constant_;
+  SetOncePointer<Node> nan_constant_;
+
+  CommonNodeCache cache_;
+
+  Node* ImmovableHeapConstant(Handle<Object> value);
+  Node* NumberConstant(double value);
+  Node* NewNode(const Operator* op);
+
+  Factory* factory() { return isolate()->factory(); }
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_JS_GRAPH_H_
diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc
new file mode 100644
index 0000000..af02145
--- /dev/null
+++ b/src/compiler/js-inlining.cc
@@ -0,0 +1,446 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/ast-graph-builder.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/js-inlining.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/typer.h"
+#include "src/full-codegen.h"
+#include "src/parser.h"
+#include "src/rewriter.h"
+#include "src/scopes.h"
+
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class InlinerVisitor : public NullNodeVisitor {
+ public:
+  explicit InlinerVisitor(JSInliner* inliner) : inliner_(inliner) {}
+
+  GenericGraphVisit::Control Post(Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kJSCallFunction:
+        inliner_->TryInlineCall(node);
+        break;
+      default:
+        break;
+    }
+    return GenericGraphVisit::CONTINUE;
+  }
+
+ private:
+  JSInliner* inliner_;
+};
+
+
+void JSInliner::Inline() {
+  InlinerVisitor visitor(this);
+  jsgraph_->graph()->VisitNodeInputsFromEnd(&visitor);
+}
+
+
+// TODO(sigurds): Find a home for this function and reuse it everywhere (esp. in
+// test cases, where similar code is currently duplicated).
+static void Parse(Handle<JSFunction> function, CompilationInfoWithZone* info) {
+  CHECK(Parser::Parse(info));
+  CHECK(Rewriter::Rewrite(info));
+  CHECK(Scope::Analyze(info));
+  CHECK(Compiler::EnsureDeoptimizationSupport(info));
+}
+
+
+// A facade on a JSFunction's graph to facilitate inlining. It assumes that
+// the function graph has only one return statement, and provides
+// {UnifyReturn} to convert a function graph to that end.
+class Inlinee {
+ public:
+  Inlinee(Node* start, Node* end) : start_(start), end_(end) {}
+
+  // Returns the last regular control node, that is
+  // the last control node before the end node.
+  Node* end_block() { return NodeProperties::GetControlInput(unique_return()); }
+
+  // Return the effect output of the graph,
+  // that is the effect input of the return statement of the inlinee.
+  Node* effect_output() {
+    return NodeProperties::GetEffectInput(unique_return());
+  }
+  // Return the value output of the graph,
+  // that is the value input of the return statement of the inlinee.
+  Node* value_output() {
+    return NodeProperties::GetValueInput(unique_return(), 0);
+  }
+  // Return the unique return statement of the graph.
+  Node* unique_return() {
+    Node* unique_return = NodeProperties::GetControlInput(end_);
+    DCHECK_EQ(IrOpcode::kReturn, unique_return->opcode());
+    return unique_return;
+  }
+
+  // Counts the JSFunction, receiver, arguments, and context, but not effect
+  // or control.
+  size_t total_parameters() { return start_->op()->OutputCount(); }
+
+  // Counts only formal parameters.
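+  // For illustration: a function with two formal parameters has the five
+  // values {JSFunction, receiver, a, b, context} at start, so
+  // total_parameters() is 5 and formal_parameters() is 2.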
+  size_t formal_parameters() {
+    DCHECK_GE(total_parameters(), 3);
+    return total_parameters() - 3;
+  }
+
+  // Inline this graph at {call}, using {jsgraph} and its zone to create
+  // any new nodes.
+  void InlineAtCall(JSGraph* jsgraph, Node* call);
+
+  // Ensure that only a single return reaches the end node.
+  static void UnifyReturn(JSGraph* jsgraph);
+
+ private:
+  Node* start_;
+  Node* end_;
+};
+
+
+void Inlinee::UnifyReturn(JSGraph* jsgraph) {
+  Graph* graph = jsgraph->graph();
+
+  Node* final_merge = NodeProperties::GetControlInput(graph->end(), 0);
+  if (final_merge->opcode() == IrOpcode::kReturn) {
+    // nothing to do
+    return;
+  }
+  DCHECK_EQ(IrOpcode::kMerge, final_merge->opcode());
+
+  int predecessors =
+      OperatorProperties::GetControlInputCount(final_merge->op());
+
+  const Operator* op_phi = jsgraph->common()->Phi(kMachAnyTagged, predecessors);
+  const Operator* op_ephi = jsgraph->common()->EffectPhi(predecessors);
+
+  NodeVector values(jsgraph->zone());
+  NodeVector effects(jsgraph->zone());
+  // Iterate over all control flow predecessors,
+  // which must be return statements.
+  InputIter iter = final_merge->inputs().begin();
+  while (iter != final_merge->inputs().end()) {
+    Node* input = *iter;
+    switch (input->opcode()) {
+      case IrOpcode::kReturn:
+        values.push_back(NodeProperties::GetValueInput(input, 0));
+        effects.push_back(NodeProperties::GetEffectInput(input));
+        iter.UpdateToAndIncrement(NodeProperties::GetControlInput(input));
+        input->RemoveAllInputs();
+        break;
+      default:
+        UNREACHABLE();
+        ++iter;
+        break;
+    }
+  }
+  values.push_back(final_merge);
+  effects.push_back(final_merge);
+  Node* phi =
+      graph->NewNode(op_phi, static_cast<int>(values.size()), &values.front());
+  Node* ephi = graph->NewNode(op_ephi, static_cast<int>(effects.size()),
+                              &effects.front());
+  Node* new_return =
+      graph->NewNode(jsgraph->common()->Return(), phi, ephi, final_merge);
+  graph->end()->ReplaceInput(0, new_return);
+}
+
+
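+// Note on the copying strategy used below: nodes are copied in the post-order
+// of a traversal from the end node, so an input's copy usually exists before
+// its uses are copied. Where it does not (e.g. on cycles such as loop back
+// edges), a placeholder "sentinel" node is created instead and later replaced
+// by the real copy in {ReplaceSentinels}.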
+class CopyVisitor : public NullNodeVisitor {
+ public:
+  CopyVisitor(Graph* source_graph, Graph* target_graph, Zone* temp_zone)
+      : copies_(source_graph->NodeCount(), NULL, temp_zone),
+        sentinels_(source_graph->NodeCount(), NULL, temp_zone),
+        source_graph_(source_graph),
+        target_graph_(target_graph),
+        temp_zone_(temp_zone),
+        sentinel_op_(IrOpcode::kDead, Operator::kNoProperties, 0, 0,
+                     "sentinel") {}
+
+  GenericGraphVisit::Control Post(Node* original) {
+    NodeVector inputs(temp_zone_);
+    for (InputIter it = original->inputs().begin();
+         it != original->inputs().end(); ++it) {
+      inputs.push_back(GetCopy(*it));
+    }
+
+    // Reuse the operator in the copy. This assumes that the operator lives in
+    // a zone that outlives the target graph's zone.
+    Node* copy =
+        target_graph_->NewNode(original->op(), static_cast<int>(inputs.size()),
+                               (inputs.empty() ? NULL : &inputs.front()));
+    copies_[original->id()] = copy;
+    return GenericGraphVisit::CONTINUE;
+  }
+
+  Node* GetCopy(Node* original) {
+    Node* copy = copies_[original->id()];
+    if (copy == NULL) {
+      copy = GetSentinel(original);
+    }
+    DCHECK_NE(NULL, copy);
+    return copy;
+  }
+
+  void CopyGraph() {
+    source_graph_->VisitNodeInputsFromEnd(this);
+    ReplaceSentinels();
+  }
+
+  const NodeVector& copies() { return copies_; }
+
+ private:
+  void ReplaceSentinels() {
+    for (NodeId id = 0; id < source_graph_->NodeCount(); ++id) {
+      Node* sentinel = sentinels_[id];
+      if (sentinel == NULL) continue;
+      Node* copy = copies_[id];
+      DCHECK_NE(NULL, copy);
+      sentinel->ReplaceUses(copy);
+    }
+  }
+
+  Node* GetSentinel(Node* original) {
+    Node* sentinel = sentinels_[original->id()];
+    if (sentinel == NULL) {
+      sentinel = target_graph_->NewNode(&sentinel_op_);
+    }
+    return sentinel;
+  }
+
+  NodeVector copies_;
+  NodeVector sentinels_;
+  Graph* source_graph_;
+  Graph* target_graph_;
+  Zone* temp_zone_;
+  SimpleOperator sentinel_op_;
+};
+
+
+void Inlinee::InlineAtCall(JSGraph* jsgraph, Node* call) {
+  // The scheduler is smart enough to place our code; we just ensure {control}
+  // becomes the control input of the start of the inlinee.
+  Node* control = NodeProperties::GetControlInput(call);
+
+  // The inlinee uses the context from the JSFunction object. Since loading
+  // that context produces an effect, the load also becomes the effect
+  // dependency for the inlinee.
+  SimplifiedOperatorBuilder simplified(jsgraph->zone());
+  Node* context = jsgraph->graph()->NewNode(
+      simplified.LoadField(AccessBuilder::ForJSFunctionContext()),
+      NodeProperties::GetValueInput(call, 0),
+      NodeProperties::GetEffectInput(call));
+
+  // Context is last argument.
+  int inlinee_context_index = static_cast<int>(total_parameters()) - 1;
+  // {inliner_inputs} counts JSFunction, Receiver, arguments, but not
+  // context, effect, control.
+  int inliner_inputs = OperatorProperties::GetValueInputCount(call->op());
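+  // Illustrative example (assuming the usual layout where Parameter(0) is the
+  // receiver): when inlining a call "f(a)", Parameter(0) is rewired to the
+  // call's value input 1 (the receiver), Parameter(1) to value input 2 (the
+  // argument a), and the context Parameter to the context loaded from the
+  // JSFunction above.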
+  // Iterate over all uses of the start node.
+  UseIter iter = start_->uses().begin();
+  while (iter != start_->uses().end()) {
+    Node* use = *iter;
+    switch (use->opcode()) {
+      case IrOpcode::kParameter: {
+        int index = 1 + OpParameter<int>(use->op());
+        if (index < inliner_inputs && index < inlinee_context_index) {
+          // There is an input from the call, and the index is a value
+          // projection but not the context, so rewire the input.
+          NodeProperties::ReplaceWithValue(*iter, call->InputAt(index));
+        } else if (index == inlinee_context_index) {
+          // This is the context projection, rewire it to the context from the
+          // JSFunction object.
+          NodeProperties::ReplaceWithValue(*iter, context);
+        } else if (index < inlinee_context_index) {
+          // Call has fewer arguments than required, fill with undefined.
+          NodeProperties::ReplaceWithValue(*iter, jsgraph->UndefinedConstant());
+        } else {
+          // We got too many arguments; discard the extras for now.
+          // TODO(sigurds): Fix to treat arguments array correctly.
+        }
+        ++iter;
+        break;
+      }
+      default:
+        if (NodeProperties::IsEffectEdge(iter.edge())) {
+          iter.UpdateToAndIncrement(context);
+        } else if (NodeProperties::IsControlEdge(iter.edge())) {
+          iter.UpdateToAndIncrement(control);
+        } else {
+          UNREACHABLE();
+        }
+        break;
+    }
+  }
+
+  // Iterate over all uses of the call node.
+  iter = call->uses().begin();
+  while (iter != call->uses().end()) {
+    if (NodeProperties::IsEffectEdge(iter.edge())) {
+      iter.UpdateToAndIncrement(effect_output());
+    } else if (NodeProperties::IsControlEdge(iter.edge())) {
+      UNREACHABLE();
+    } else {
+      DCHECK(NodeProperties::IsValueEdge(iter.edge()));
+      iter.UpdateToAndIncrement(value_output());
+    }
+  }
+  call->RemoveAllInputs();
+  DCHECK_EQ(0, call->UseCount());
+  // TODO(sigurds) Remove this once we copy.
+  unique_return()->RemoveAllInputs();
+}
+
+
+// TODO(turbofan) Provide such accessors for every node, possibly even
+// generate them.
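+// For example (illustrative): a JSCallFunction node for "o.f(a, b)" has the
+// value inputs [closure, receiver, a, b], so jsfunction() is input 0,
+// receiver() is input 1, formal_arguments() is 2, and formal_argument(0) is a.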
+class JSCallFunctionAccessor {
+ public:
+  explicit JSCallFunctionAccessor(Node* call) : call_(call) {
+    DCHECK_EQ(IrOpcode::kJSCallFunction, call->opcode());
+  }
+
+  Node* jsfunction() { return call_->InputAt(0); }
+
+  Node* receiver() { return call_->InputAt(1); }
+
+  Node* formal_argument(size_t index) {
+    DCHECK(index < formal_arguments());
+    return call_->InputAt(static_cast<int>(2 + index));
+  }
+
+  size_t formal_arguments() {
+    // {value_inputs} includes jsfunction and receiver.
+    size_t value_inputs = OperatorProperties::GetValueInputCount(call_->op());
+    DCHECK_GE(call_->InputCount(), 2);
+    return value_inputs - 2;
+  }
+
+  Node* frame_state() { return NodeProperties::GetFrameStateInput(call_); }
+
+ private:
+  Node* call_;
+};
+
+
+void JSInliner::AddClosureToFrameState(Node* frame_state,
+                                       Handle<JSFunction> jsfunction) {
+  FrameStateCallInfo call_info = OpParameter<FrameStateCallInfo>(frame_state);
+  const Operator* op = jsgraph_->common()->FrameState(
+      FrameStateType::JS_FRAME, call_info.bailout_id(),
+      call_info.state_combine(), jsfunction);
+  frame_state->set_op(op);
+}
+
+
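+// Illustrative example: when a call site passes a single argument "a" to an
+// inlinee declared with two formal parameters, the adaptor frame state built
+// below records the state values [receiver, a] and is chained onto the outer
+// frame state of the call, mirroring what an arguments adaptor frame holds at
+// runtime.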
+Node* JSInliner::CreateArgumentsAdaptorFrameState(JSCallFunctionAccessor* call,
+                                                  Handle<JSFunction> jsfunction,
+                                                  Zone* temp_zone) {
+  const Operator* op =
+      jsgraph_->common()->FrameState(FrameStateType::ARGUMENTS_ADAPTOR,
+                                     BailoutId(-1), kIgnoreOutput, jsfunction);
+  const Operator* op0 = jsgraph_->common()->StateValues(0);
+  Node* node0 = jsgraph_->graph()->NewNode(op0);
+  NodeVector params(temp_zone);
+  params.push_back(call->receiver());
+  for (size_t argument = 0; argument != call->formal_arguments(); ++argument) {
+    params.push_back(call->formal_argument(argument));
+  }
+  const Operator* op_param =
+      jsgraph_->common()->StateValues(static_cast<int>(params.size()));
+  Node* params_node = jsgraph_->graph()->NewNode(
+      op_param, static_cast<int>(params.size()), &params.front());
+  return jsgraph_->graph()->NewNode(op, params_node, node0, node0,
+                                    jsgraph_->UndefinedConstant(),
+                                    call->frame_state());
+}
+
+
+void JSInliner::TryInlineCall(Node* call_node) {
+  JSCallFunctionAccessor call(call_node);
+
+  HeapObjectMatcher<JSFunction> match(call.jsfunction());
+  if (!match.HasValue()) {
+    return;
+  }
+
+  Handle<JSFunction> function = match.Value().handle();
+
+  if (function->shared()->native()) {
+    if (FLAG_trace_turbo_inlining) {
+      SmartArrayPointer<char> name =
+          function->shared()->DebugName()->ToCString();
+      PrintF("Not Inlining %s into %s because inlinee is native\n", name.get(),
+             info_->shared_info()->DebugName()->ToCString().get());
+    }
+    return;
+  }
+
+  CompilationInfoWithZone info(function);
+  Parse(function, &info);
+
+  if (info.scope()->arguments() != NULL) {
+    // For now do not inline functions that use their arguments array.
+    SmartArrayPointer<char> name = function->shared()->DebugName()->ToCString();
+    if (FLAG_trace_turbo_inlining) {
+      PrintF(
+          "Not Inlining %s into %s because inlinee uses arguments "
+          "array\n",
+          name.get(), info_->shared_info()->DebugName()->ToCString().get());
+    }
+    return;
+  }
+
+  if (FLAG_trace_turbo_inlining) {
+    SmartArrayPointer<char> name = function->shared()->DebugName()->ToCString();
+    PrintF("Inlining %s into %s\n", name.get(),
+           info_->shared_info()->DebugName()->ToCString().get());
+  }
+
+  Graph graph(info.zone());
+  Typer typer(info.zone());
+  JSGraph jsgraph(&graph, jsgraph_->common(), jsgraph_->javascript(), &typer,
+                  jsgraph_->machine());
+
+  AstGraphBuilder graph_builder(&info, &jsgraph);
+  graph_builder.CreateGraph();
+  Inlinee::UnifyReturn(&jsgraph);
+
+  CopyVisitor visitor(&graph, jsgraph_->graph(), info.zone());
+  visitor.CopyGraph();
+
+  Inlinee inlinee(visitor.GetCopy(graph.start()), visitor.GetCopy(graph.end()));
+
+  Node* outer_frame_state = call.frame_state();
+  // Insert argument adaptor frame if required.
+  if (call.formal_arguments() != inlinee.formal_parameters()) {
+    outer_frame_state =
+        CreateArgumentsAdaptorFrameState(&call, function, info.zone());
+  }
+
+  for (NodeVectorConstIter it = visitor.copies().begin();
+       it != visitor.copies().end(); ++it) {
+    Node* node = *it;
+    if (node != NULL && node->opcode() == IrOpcode::kFrameState) {
+      AddClosureToFrameState(node, function);
+      NodeProperties::ReplaceFrameStateInput(node, outer_frame_state);
+    }
+  }
+
+  inlinee.InlineAtCall(jsgraph_, call_node);
+}
+}
+}
+}  // namespace v8::internal::compiler
diff --git a/src/compiler/js-inlining.h b/src/compiler/js-inlining.h
new file mode 100644
index 0000000..f135170
--- /dev/null
+++ b/src/compiler/js-inlining.h
@@ -0,0 +1,40 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_INLINING_H_
+#define V8_COMPILER_JS_INLINING_H_
+
+#include "src/compiler/js-graph.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSCallFunctionAccessor;
+
+class JSInliner {
+ public:
+  JSInliner(CompilationInfo* info, JSGraph* jsgraph)
+      : info_(info), jsgraph_(jsgraph) {}
+
+  void Inline();
+  void TryInlineCall(Node* node);
+
+ private:
+  friend class InlinerVisitor;
+  CompilationInfo* info_;
+  JSGraph* jsgraph_;
+
+  Node* CreateArgumentsAdaptorFrameState(JSCallFunctionAccessor* call,
+                                         Handle<JSFunction> jsfunction,
+                                         Zone* temp_zone);
+  void AddClosureToFrameState(Node* frame_state, Handle<JSFunction> jsfunction);
+  static void UnifyReturn(Graph* graph);
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_JS_INLINING_H_
diff --git a/src/compiler/js-operator.h b/src/compiler/js-operator.h
new file mode 100644
index 0000000..b95467f
--- /dev/null
+++ b/src/compiler/js-operator.h
@@ -0,0 +1,233 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_OPERATOR_H_
+#define V8_COMPILER_JS_OPERATOR_H_
+
+#include "src/compiler/linkage.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/unique.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Defines the location of a context slot relative to a specific scope. This is
+// used as a parameter by JSLoadContext and JSStoreContext operators and allows
+// accessing a context-allocated variable without keeping track of the scope.
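+// For example (illustrative): a variable that lives two contexts up the chain,
+// in slot 3, and is never reassigned would be described as
+//
+//   ContextAccess access(/* depth */ 2, /* index */ 3, /* immutable */ true);
+//
+// and is carried as a parameter by the JSLoadContext/JSStoreContext operators.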
+class ContextAccess {
+ public:
+  ContextAccess(int depth, int index, bool immutable)
+      : immutable_(immutable), depth_(depth), index_(index) {
+    DCHECK(0 <= depth && depth <= kMaxUInt16);
+    DCHECK(0 <= index && static_cast<uint32_t>(index) <= kMaxUInt32);
+  }
+  int depth() const { return depth_; }
+  int index() const { return index_; }
+  bool immutable() const { return immutable_; }
+
+ private:
+  // For space reasons, we keep this tightly packed; otherwise we could just
+  // use a simple int/int/bool POD.
+  const bool immutable_;
+  const uint16_t depth_;
+  const uint32_t index_;
+};
+
+// Defines the property being loaded from an object by a named load. This is
+// used as a parameter by JSLoadNamed operators.
+struct LoadNamedParameters {
+  Unique<Name> name;
+  ContextualMode contextual_mode;
+};
+
+// Defines the arity and the call flags for a JavaScript function call. This is
+// used as a parameter by JSCall operators.
+struct CallParameters {
+  int arity;
+  CallFunctionFlags flags;
+};
+
+// Defines the property being stored to an object by a named store. This is
+// used as a parameter by JSStoreNamed operators.
+struct StoreNamedParameters {
+  StrictMode strict_mode;
+  Unique<Name> name;
+};
+
+// Interface for building JavaScript-level operators, e.g. directly from the
+// AST. Most operators have no parameters, thus can be globally shared for all
+// graphs.
+class JSOperatorBuilder {
+ public:
+  explicit JSOperatorBuilder(Zone* zone) : zone_(zone) {}
+
+#define SIMPLE(name, properties, inputs, outputs) \
+  return new (zone_)                              \
+      SimpleOperator(IrOpcode::k##name, properties, inputs, outputs, #name);
+
+#define NOPROPS(name, inputs, outputs) \
+  SIMPLE(name, Operator::kNoProperties, inputs, outputs)
+
+#define OP1(name, ptype, pname, properties, inputs, outputs)                 \
+  return new (zone_) Operator1<ptype>(IrOpcode::k##name, properties, inputs, \
+                                      outputs, #name, pname)
+
+#define BINOP(name) NOPROPS(name, 2, 1)
+#define UNOP(name) NOPROPS(name, 1, 1)
+
+#define PURE_BINOP(name) SIMPLE(name, Operator::kPure, 2, 1)
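+// For example (illustrative), BINOP(JSEqual) expands to
+//
+//   return new (zone_) SimpleOperator(IrOpcode::kJSEqual,
+//                                     Operator::kNoProperties, 2, 1, "JSEqual");
+//
+// i.e. a shared operator with two value inputs and one value output.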
+
+  const Operator* Equal() { BINOP(JSEqual); }
+  const Operator* NotEqual() { BINOP(JSNotEqual); }
+  const Operator* StrictEqual() { PURE_BINOP(JSStrictEqual); }
+  const Operator* StrictNotEqual() { PURE_BINOP(JSStrictNotEqual); }
+  const Operator* LessThan() { BINOP(JSLessThan); }
+  const Operator* GreaterThan() { BINOP(JSGreaterThan); }
+  const Operator* LessThanOrEqual() { BINOP(JSLessThanOrEqual); }
+  const Operator* GreaterThanOrEqual() { BINOP(JSGreaterThanOrEqual); }
+  const Operator* BitwiseOr() { BINOP(JSBitwiseOr); }
+  const Operator* BitwiseXor() { BINOP(JSBitwiseXor); }
+  const Operator* BitwiseAnd() { BINOP(JSBitwiseAnd); }
+  const Operator* ShiftLeft() { BINOP(JSShiftLeft); }
+  const Operator* ShiftRight() { BINOP(JSShiftRight); }
+  const Operator* ShiftRightLogical() { BINOP(JSShiftRightLogical); }
+  const Operator* Add() { BINOP(JSAdd); }
+  const Operator* Subtract() { BINOP(JSSubtract); }
+  const Operator* Multiply() { BINOP(JSMultiply); }
+  const Operator* Divide() { BINOP(JSDivide); }
+  const Operator* Modulus() { BINOP(JSModulus); }
+
+  const Operator* UnaryNot() { UNOP(JSUnaryNot); }
+  const Operator* ToBoolean() { UNOP(JSToBoolean); }
+  const Operator* ToNumber() { UNOP(JSToNumber); }
+  const Operator* ToString() { UNOP(JSToString); }
+  const Operator* ToName() { UNOP(JSToName); }
+  const Operator* ToObject() { UNOP(JSToObject); }
+  const Operator* Yield() { UNOP(JSYield); }
+
+  const Operator* Create() { SIMPLE(JSCreate, Operator::kEliminatable, 0, 1); }
+
+  const Operator* Call(int arguments, CallFunctionFlags flags) {
+    CallParameters parameters = {arguments, flags};
+    OP1(JSCallFunction, CallParameters, parameters, Operator::kNoProperties,
+        arguments, 1);
+  }
+
+  const Operator* CallNew(int arguments) {
+    return new (zone_)
+        Operator1<int>(IrOpcode::kJSCallConstruct, Operator::kNoProperties,
+                       arguments, 1, "JSCallConstruct", arguments);
+  }
+
+  const Operator* LoadProperty() { BINOP(JSLoadProperty); }
+  const Operator* LoadNamed(Unique<Name> name,
+                            ContextualMode contextual_mode = NOT_CONTEXTUAL) {
+    LoadNamedParameters parameters = {name, contextual_mode};
+    OP1(JSLoadNamed, LoadNamedParameters, parameters, Operator::kNoProperties,
+        1, 1);
+  }
+
+  const Operator* StoreProperty(StrictMode strict_mode) {
+    OP1(JSStoreProperty, StrictMode, strict_mode, Operator::kNoProperties, 3,
+        0);
+  }
+
+  const Operator* StoreNamed(StrictMode strict_mode, Unique<Name> name) {
+    StoreNamedParameters parameters = {strict_mode, name};
+    OP1(JSStoreNamed, StoreNamedParameters, parameters, Operator::kNoProperties,
+        2, 0);
+  }
+
+  const Operator* DeleteProperty(StrictMode strict_mode) {
+    OP1(JSDeleteProperty, StrictMode, strict_mode, Operator::kNoProperties, 2,
+        1);
+  }
+
+  const Operator* HasProperty() { NOPROPS(JSHasProperty, 2, 1); }
+
+  const Operator* LoadContext(uint16_t depth, uint32_t index, bool immutable) {
+    ContextAccess access(depth, index, immutable);
+    OP1(JSLoadContext, ContextAccess, access,
+        Operator::kEliminatable | Operator::kNoWrite, 1, 1);
+  }
+  const Operator* StoreContext(uint16_t depth, uint32_t index) {
+    ContextAccess access(depth, index, false);
+    OP1(JSStoreContext, ContextAccess, access, Operator::kNoProperties, 2, 0);
+  }
+
+  const Operator* TypeOf() { SIMPLE(JSTypeOf, Operator::kPure, 1, 1); }
+  const Operator* InstanceOf() { NOPROPS(JSInstanceOf, 2, 1); }
+  const Operator* Debugger() { NOPROPS(JSDebugger, 0, 0); }
+
+  // TODO(titzer): nail down the static parts of each of these context flavors.
+  const Operator* CreateFunctionContext() {
+    NOPROPS(JSCreateFunctionContext, 1, 1);
+  }
+  const Operator* CreateCatchContext(Unique<String> name) {
+    OP1(JSCreateCatchContext, Unique<String>, name, Operator::kNoProperties, 1,
+        1);
+  }
+  const Operator* CreateWithContext() { NOPROPS(JSCreateWithContext, 2, 1); }
+  const Operator* CreateBlockContext() { NOPROPS(JSCreateBlockContext, 2, 1); }
+  const Operator* CreateModuleContext() {
+    NOPROPS(JSCreateModuleContext, 2, 1);
+  }
+  const Operator* CreateGlobalContext() {
+    NOPROPS(JSCreateGlobalContext, 2, 1);
+  }
+
+  const Operator* Runtime(Runtime::FunctionId function, int arguments) {
+    const Runtime::Function* f = Runtime::FunctionForId(function);
+    DCHECK(f->nargs == -1 || f->nargs == arguments);
+    OP1(JSCallRuntime, Runtime::FunctionId, function, Operator::kNoProperties,
+        arguments, f->result_size);
+  }
+
+#undef SIMPLE
+#undef NOPROPS
+#undef OP1
+#undef BINOP
+#undef UNOP
+
+ private:
+  Zone* zone_;
+};
+
+// Specialization for static parameters of type {ContextAccess}.
+template <>
+struct StaticParameterTraits<ContextAccess> {
+  static OStream& PrintTo(OStream& os, ContextAccess val) {  // NOLINT
+    return os << val.depth() << "," << val.index()
+              << (val.immutable() ? ",imm" : "");
+  }
+  static int HashCode(ContextAccess val) {
+    return (val.depth() << 16) | (val.index() & 0xffff);
+  }
+  static bool Equals(ContextAccess a, ContextAccess b) {
+    return a.immutable() == b.immutable() && a.depth() == b.depth() &&
+           a.index() == b.index();
+  }
+};
+
+// Specialization for static parameters of type {Runtime::FunctionId}.
+template <>
+struct StaticParameterTraits<Runtime::FunctionId> {
+  static OStream& PrintTo(OStream& os, Runtime::FunctionId val) {  // NOLINT
+    const Runtime::Function* f = Runtime::FunctionForId(val);
+    return os << (f->name ? f->name : "?Runtime?");
+  }
+  static int HashCode(Runtime::FunctionId val) { return static_cast<int>(val); }
+  static bool Equals(Runtime::FunctionId a, Runtime::FunctionId b) {
+    return a == b;
+  }
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_JS_OPERATOR_H_
diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc
new file mode 100644
index 0000000..be12534
--- /dev/null
+++ b/src/compiler/js-typed-lowering.cc
@@ -0,0 +1,710 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-builtin-reducer.h"
+#include "src/compiler/js-typed-lowering.h"
+#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/types.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// TODO(turbofan): js-typed-lowering improvements possible
+// - immediately put in type bounds for all new nodes
+// - relax effects from generic but not-side-effecting operations
+// - relax effects for ToNumber(mixed)
+
+
+// Relax the effects of {node} by immediately replacing effect uses of {node}
+// with the effect input to {node}.
+// TODO(turbofan): replace the effect input to {node} with {graph->start()}.
+// TODO(titzer): move into a GraphEditor?
+static void RelaxEffects(Node* node) {
+  NodeProperties::ReplaceWithValue(node, node, NULL);
+}
+
+
+JSTypedLowering::~JSTypedLowering() {}
+
+
+Reduction JSTypedLowering::ReplaceEagerly(Node* old, Node* node) {
+  NodeProperties::ReplaceWithValue(old, node, node);
+  return Changed(node);
+}
+
+
+// A helper class to simplify the process of reducing a single binop node with a
+// JSOperator. This class manages the rewriting of context, control, and effect
+// dependencies during lowering of a binop and contains numerous helper
+// functions for matching the types of inputs to an operation.
+class JSBinopReduction {
+ public:
+  JSBinopReduction(JSTypedLowering* lowering, Node* node)
+      : lowering_(lowering),
+        node_(node),
+        left_type_(NodeProperties::GetBounds(node->InputAt(0)).upper),
+        right_type_(NodeProperties::GetBounds(node->InputAt(1)).upper) {}
+
+  void ConvertInputsToNumber() {
+    node_->ReplaceInput(0, ConvertToNumber(left()));
+    node_->ReplaceInput(1, ConvertToNumber(right()));
+  }
+
+  void ConvertInputsToInt32(bool left_signed, bool right_signed) {
+    node_->ReplaceInput(0, ConvertToI32(left_signed, left()));
+    node_->ReplaceInput(1, ConvertToI32(right_signed, right()));
+  }
+
+  void ConvertInputsToString() {
+    node_->ReplaceInput(0, ConvertToString(left()));
+    node_->ReplaceInput(1, ConvertToString(right()));
+  }
+
+  // Convert inputs for bitwise shift operation (ES5 spec 11.7).
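+  // For example (illustrative): for "x << 33" the converted shift count is
+  // Word32And(33, 0x1F) == 1, so the lowered node computes the same result
+  // as "x << 1", matching the spec's "& 31" rule for shift counts.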
+  void ConvertInputsForShift(bool left_signed) {
+    node_->ReplaceInput(0, ConvertToI32(left_signed, left()));
+    Node* rnum = ConvertToI32(false, right());
+    node_->ReplaceInput(1, graph()->NewNode(machine()->Word32And(), rnum,
+                                            jsgraph()->Int32Constant(0x1F)));
+  }
+
+  void SwapInputs() {
+    Node* l = left();
+    Node* r = right();
+    node_->ReplaceInput(0, r);
+    node_->ReplaceInput(1, l);
+    std::swap(left_type_, right_type_);
+  }
+
+  // Remove all effect and control inputs and outputs to this node and change
+  // to the pure operator {op}, possibly inserting a boolean inversion.
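+  // For example (illustrative): lowering JSNotEqual(x:number, y:number) with
+  // {invert} set reuses this node as NumberEqual(x, y) and routes all former
+  // uses through a freshly created BooleanNot on top of it.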
+  Reduction ChangeToPureOperator(const Operator* op, bool invert = false) {
+    DCHECK_EQ(0, OperatorProperties::GetEffectInputCount(op));
+    DCHECK_EQ(false, OperatorProperties::HasContextInput(op));
+    DCHECK_EQ(0, OperatorProperties::GetControlInputCount(op));
+    DCHECK_EQ(2, OperatorProperties::GetValueInputCount(op));
+
+    // Remove the effects from the node, if any, and update its effect usages.
+    if (OperatorProperties::GetEffectInputCount(node_->op()) > 0) {
+      RelaxEffects(node_);
+    }
+    // Remove the inputs corresponding to context, effect, and control.
+    NodeProperties::RemoveNonValueInputs(node_);
+    // Finally, update the operator to the new one.
+    node_->set_op(op);
+
+    if (invert) {
+      // Insert a BooleanNot to invert the value.
+      Node* value = graph()->NewNode(simplified()->BooleanNot(), node_);
+      node_->ReplaceUses(value);
+      // Note: ReplaceUses() smashes all uses, so smash it back here.
+      value->ReplaceInput(0, node_);
+      return lowering_->ReplaceWith(value);
+    }
+    return lowering_->Changed(node_);
+  }
+
+  bool OneInputIs(Type* t) { return left_type_->Is(t) || right_type_->Is(t); }
+
+  bool BothInputsAre(Type* t) {
+    return left_type_->Is(t) && right_type_->Is(t);
+  }
+
+  bool OneInputCannotBe(Type* t) {
+    return !left_type_->Maybe(t) || !right_type_->Maybe(t);
+  }
+
+  bool NeitherInputCanBe(Type* t) {
+    return !left_type_->Maybe(t) && !right_type_->Maybe(t);
+  }
+
+  Node* effect() { return NodeProperties::GetEffectInput(node_); }
+  Node* control() { return NodeProperties::GetControlInput(node_); }
+  Node* context() { return NodeProperties::GetContextInput(node_); }
+  Node* left() { return NodeProperties::GetValueInput(node_, 0); }
+  Node* right() { return NodeProperties::GetValueInput(node_, 1); }
+  Type* left_type() { return left_type_; }
+  Type* right_type() { return right_type_; }
+
+  SimplifiedOperatorBuilder* simplified() { return lowering_->simplified(); }
+  Graph* graph() { return lowering_->graph(); }
+  JSGraph* jsgraph() { return lowering_->jsgraph(); }
+  JSOperatorBuilder* javascript() { return lowering_->javascript(); }
+  MachineOperatorBuilder* machine() { return lowering_->machine(); }
+
+ private:
+  JSTypedLowering* lowering_;  // The containing lowering instance.
+  Node* node_;                 // The original node.
+  Type* left_type_;            // Cache of the left input's type.
+  Type* right_type_;           // Cache of the right input's type.
+
+  Node* ConvertToString(Node* node) {
+    // Avoid introducing too many eager ToString() operations.
+    Reduction reduced = lowering_->ReduceJSToStringInput(node);
+    if (reduced.Changed()) return reduced.replacement();
+    Node* n = graph()->NewNode(javascript()->ToString(), node, context(),
+                               effect(), control());
+    update_effect(n);
+    return n;
+  }
+
+  Node* ConvertToNumber(Node* node) {
+    // Avoid introducing too many eager ToNumber() operations.
+    Reduction reduced = lowering_->ReduceJSToNumberInput(node);
+    if (reduced.Changed()) return reduced.replacement();
+    Node* n = graph()->NewNode(javascript()->ToNumber(), node, context(),
+                               effect(), control());
+    update_effect(n);
+    return n;
+  }
+
+  // Try narrowing a double or number operation to an Int32 operation.
+  bool TryNarrowingToI32(Type* type, Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kFloat64Add:
+      case IrOpcode::kNumberAdd: {
+        JSBinopReduction r(lowering_, node);
+        if (r.BothInputsAre(Type::Integral32())) {
+          node->set_op(lowering_->machine()->Int32Add());
+          // TODO(titzer): narrow bounds instead of overwriting.
+          NodeProperties::SetBounds(node, Bounds(type));
+          return true;
+        }
+      }
+      case IrOpcode::kFloat64Sub:
+      case IrOpcode::kNumberSubtract: {
+        JSBinopReduction r(lowering_, node);
+        if (r.BothInputsAre(Type::Integral32())) {
+          node->set_op(lowering_->machine()->Int32Sub());
+          // TODO(titzer): narrow bounds instead of overwriting.
+          NodeProperties::SetBounds(node, Bounds(type));
+          return true;
+        }
+      }
+      default:
+        return false;
+    }
+  }
+
+  Node* ConvertToI32(bool is_signed, Node* node) {
+    Type* type = is_signed ? Type::Signed32() : Type::Unsigned32();
+    if (node->OwnedBy(node_)) {
+      // If this node {node_} has the only edge to {node}, then try narrowing
+      // its operation to an Int32 add or subtract.
+      if (TryNarrowingToI32(type, node)) return node;
+    } else {
+      // Otherwise, {node} has multiple uses. Leave it as is and let the later
+      // lowering passes, which use a full backwards fixpoint, deal with it.
+    }
+
+    // Avoid introducing too many eager NumberTo(U)Int32() operations.
+    node = ConvertToNumber(node);
+    Type* input_type = NodeProperties::GetBounds(node).upper;
+
+    if (input_type->Is(type)) return node;  // already in the value range.
+
+    const Operator* op = is_signed ? simplified()->NumberToInt32()
+                                   : simplified()->NumberToUint32();
+    Node* n = graph()->NewNode(op, node);
+    return n;
+  }
+
+  void update_effect(Node* effect) {
+    NodeProperties::ReplaceEffectInput(node_, effect);
+  }
+};
+
+
+Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
+  JSBinopReduction r(this, node);
+  if (r.BothInputsAre(Type::Number())) {
+    // JSAdd(x:number, y:number) => NumberAdd(x, y)
+    return r.ChangeToPureOperator(simplified()->NumberAdd());
+  }
+  Type* maybe_string = Type::Union(Type::String(), Type::Receiver(), zone());
+  if (r.NeitherInputCanBe(maybe_string)) {
+    // JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
+    r.ConvertInputsToNumber();
+    return r.ChangeToPureOperator(simplified()->NumberAdd());
+  }
+#if 0
+  // TODO(turbofan): Lowering of StringAdd is disabled for now because:
+  //   a) The inserted ToString operation screws up valueOf vs. toString order.
+  //   b) Deoptimization at ToString doesn't have corresponding bailout id.
+  //   c) Our current StringAddStub is actually non-pure and requires context.
+  if (r.OneInputIs(Type::String())) {
+    // JSAdd(x:string, y:string) => StringAdd(x, y)
+    // JSAdd(x:string, y) => StringAdd(x, ToString(y))
+    // JSAdd(x, y:string) => StringAdd(ToString(x), y)
+    r.ConvertInputsToString();
+    return r.ChangeToPureOperator(simplified()->StringAdd());
+  }
+#endif
+  return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceNumberBinop(Node* node,
+                                             const Operator* numberOp) {
+  JSBinopReduction r(this, node);
+  if (r.OneInputIs(Type::Primitive())) {
+    // If at least one input is a primitive, then insert appropriate conversions
+    // to number and reduce this operator to the given numeric one.
+    // TODO(turbofan): make this heuristic configurable for code size.
+    r.ConvertInputsToNumber();
+    return r.ChangeToPureOperator(numberOp);
+  }
+  // TODO(turbofan): relax/remove the effects of this operator in other cases.
+  return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceI32Binop(Node* node, bool left_signed,
+                                          bool right_signed,
+                                          const Operator* intOp) {
+  JSBinopReduction r(this, node);
+  // TODO(titzer): some Smi bitwise operations don't really require going
+  // all the way to int32, which can save tagging/untagging for some operations
+  // on some platforms.
+  // TODO(turbofan): make this heuristic configurable for code size.
+  r.ConvertInputsToInt32(left_signed, right_signed);
+  return r.ChangeToPureOperator(intOp);
+}
+
+
+Reduction JSTypedLowering::ReduceI32Shift(Node* node, bool left_signed,
+                                          const Operator* shift_op) {
+  JSBinopReduction r(this, node);
+  r.ConvertInputsForShift(left_signed);
+  return r.ChangeToPureOperator(shift_op);
+}
+
+
+Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
+  JSBinopReduction r(this, node);
+  if (r.BothInputsAre(Type::String())) {
+    // If both inputs are definitely strings, perform a string comparison.
+    const Operator* stringOp;
+    switch (node->opcode()) {
+      case IrOpcode::kJSLessThan:
+        stringOp = simplified()->StringLessThan();
+        break;
+      case IrOpcode::kJSGreaterThan:
+        stringOp = simplified()->StringLessThan();
+        r.SwapInputs();  // a > b => b < a
+        break;
+      case IrOpcode::kJSLessThanOrEqual:
+        stringOp = simplified()->StringLessThanOrEqual();
+        break;
+      case IrOpcode::kJSGreaterThanOrEqual:
+        stringOp = simplified()->StringLessThanOrEqual();
+        r.SwapInputs();  // a >= b => b <= a
+        break;
+      default:
+        return NoChange();
+    }
+    return r.ChangeToPureOperator(stringOp);
+  }
+  Type* maybe_string = Type::Union(Type::String(), Type::Receiver(), zone());
+  if (r.OneInputCannotBe(maybe_string)) {
+    // If one input cannot be a string, then emit a number comparison.
+    const Operator* less_than;
+    const Operator* less_than_or_equal;
+    if (r.BothInputsAre(Type::Unsigned32())) {
+      less_than = machine()->Uint32LessThan();
+      less_than_or_equal = machine()->Uint32LessThanOrEqual();
+    } else if (r.BothInputsAre(Type::Signed32())) {
+      less_than = machine()->Int32LessThan();
+      less_than_or_equal = machine()->Int32LessThanOrEqual();
+    } else {
+      // TODO(turbofan): mixed signed/unsigned int32 comparisons.
+      r.ConvertInputsToNumber();
+      less_than = simplified()->NumberLessThan();
+      less_than_or_equal = simplified()->NumberLessThanOrEqual();
+    }
+    const Operator* comparison;
+    switch (node->opcode()) {
+      case IrOpcode::kJSLessThan:
+        comparison = less_than;
+        break;
+      case IrOpcode::kJSGreaterThan:
+        comparison = less_than;
+        r.SwapInputs();  // a > b => b < a
+        break;
+      case IrOpcode::kJSLessThanOrEqual:
+        comparison = less_than_or_equal;
+        break;
+      case IrOpcode::kJSGreaterThanOrEqual:
+        comparison = less_than_or_equal;
+        r.SwapInputs();  // a >= b => b <= a
+        break;
+      default:
+        return NoChange();
+    }
+    return r.ChangeToPureOperator(comparison);
+  }
+  // TODO(turbofan): relax/remove effects of this operator in other cases.
+  return NoChange();  // Keep a generic comparison.
+}
+
+
+Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
+  JSBinopReduction r(this, node);
+
+  if (r.BothInputsAre(Type::Number())) {
+    return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
+  }
+  if (r.BothInputsAre(Type::String())) {
+    return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
+  }
+  if (r.BothInputsAre(Type::Receiver())) {
+    return r.ChangeToPureOperator(
+        simplified()->ReferenceEqual(Type::Receiver()), invert);
+  }
+  // TODO(turbofan): js-typed-lowering of Equal(undefined)
+  // TODO(turbofan): js-typed-lowering of Equal(null)
+  // TODO(turbofan): js-typed-lowering of Equal(boolean)
+  return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
+  JSBinopReduction r(this, node);
+  if (r.left() == r.right()) {
+    // x === x is always true, unless x can be NaN.
+    if (!r.left_type()->Maybe(Type::NaN())) {
+      return ReplaceEagerly(node, invert ? jsgraph()->FalseConstant()
+                                         : jsgraph()->TrueConstant());
+    }
+  }
+  if (!r.left_type()->Maybe(r.right_type())) {
+    // Type intersection is empty; === is always false unless both
+    // inputs could be strings (one internalized and one not).
+    if (r.OneInputCannotBe(Type::String())) {
+      return ReplaceEagerly(node, invert ? jsgraph()->TrueConstant()
+                                         : jsgraph()->FalseConstant());
+    }
+  }
+  if (r.OneInputIs(Type::Undefined())) {
+    return r.ChangeToPureOperator(
+        simplified()->ReferenceEqual(Type::Undefined()), invert);
+  }
+  if (r.OneInputIs(Type::Null())) {
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Null()),
+                                  invert);
+  }
+  if (r.OneInputIs(Type::Boolean())) {
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Boolean()),
+                                  invert);
+  }
+  if (r.OneInputIs(Type::Object())) {
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Object()),
+                                  invert);
+  }
+  if (r.OneInputIs(Type::Receiver())) {
+    return r.ChangeToPureOperator(
+        simplified()->ReferenceEqual(Type::Receiver()), invert);
+  }
+  if (r.BothInputsAre(Type::String())) {
+    return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
+  }
+  if (r.BothInputsAre(Type::Number())) {
+    return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
+  }
+  // TODO(turbofan): js-typed-lowering of StrictEqual(mixed types)
+  return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
+  if (input->opcode() == IrOpcode::kJSToNumber) {
+    // Recursively try to reduce the input first.
+    Reduction result = ReduceJSToNumberInput(input->InputAt(0));
+    if (result.Changed()) {
+      RelaxEffects(input);
+      return result;
+    }
+    return Changed(input);  // JSToNumber(JSToNumber(x)) => JSToNumber(x)
+  }
+  Type* input_type = NodeProperties::GetBounds(input).upper;
+  if (input_type->Is(Type::Number())) {
+    // JSToNumber(x:number) => x
+    return Changed(input);
+  }
+  if (input_type->Is(Type::Undefined())) {
+    // JSToNumber(undefined) => #NaN
+    return ReplaceWith(jsgraph()->NaNConstant());
+  }
+  if (input_type->Is(Type::Null())) {
+    // JSToNumber(null) => #0
+    return ReplaceWith(jsgraph()->ZeroConstant());
+  }
+  if (input_type->Is(Type::Boolean())) {
+    // JSToNumber(x:boolean) => BooleanToNumber(x)
+    return ReplaceWith(
+        graph()->NewNode(simplified()->BooleanToNumber(), input));
+  }
+  // TODO(turbofan): js-typed-lowering of ToNumber(x:string)
+  return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
+  if (input->opcode() == IrOpcode::kJSToString) {
+    // Recursively try to reduce the input first.
+    Reduction result = ReduceJSToStringInput(input->InputAt(0));
+    if (result.Changed()) {
+      RelaxEffects(input);
+      return result;
+    }
+    return Changed(input);  // JSToString(JSToString(x)) => JSToString(x)
+  }
+  Type* input_type = NodeProperties::GetBounds(input).upper;
+  if (input_type->Is(Type::String())) {
+    return Changed(input);  // JSToString(x:string) => x
+  }
+  if (input_type->Is(Type::Undefined())) {
+    return ReplaceWith(jsgraph()->HeapConstant(
+        graph()->zone()->isolate()->factory()->undefined_string()));
+  }
+  if (input_type->Is(Type::Null())) {
+    return ReplaceWith(jsgraph()->HeapConstant(
+        graph()->zone()->isolate()->factory()->null_string()));
+  }
+  // TODO(turbofan): js-typed-lowering of ToString(x:boolean)
+  // TODO(turbofan): js-typed-lowering of ToString(x:number)
+  return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSToBooleanInput(Node* input) {
+  if (input->opcode() == IrOpcode::kJSToBoolean) {
+    // Recursively try to reduce the input first.
+    Reduction result = ReduceJSToBooleanInput(input->InputAt(0));
+    if (result.Changed()) {
+      RelaxEffects(input);
+      return result;
+    }
+    return Changed(input);  // JSToBoolean(JSToBoolean(x)) => JSToBoolean(x)
+  }
+  Type* input_type = NodeProperties::GetBounds(input).upper;
+  if (input_type->Is(Type::Boolean())) {
+    return Changed(input);  // JSToBoolean(x:boolean) => x
+  }
+  if (input_type->Is(Type::Undefined())) {
+    // JSToBoolean(undefined) => #false
+    return ReplaceWith(jsgraph()->FalseConstant());
+  }
+  if (input_type->Is(Type::Null())) {
+    // JSToBoolean(null) => #false
+    return ReplaceWith(jsgraph()->FalseConstant());
+  }
+  if (input_type->Is(Type::DetectableReceiver())) {
+    // JSToBoolean(x:detectable) => #true
+    return ReplaceWith(jsgraph()->TrueConstant());
+  }
+  if (input_type->Is(Type::Undetectable())) {
+    // JSToBoolean(x:undetectable) => #false
+    return ReplaceWith(jsgraph()->FalseConstant());
+  }
+  if (input_type->Is(Type::OrderedNumber())) {
+    // JSToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x, #0))
+    Node* cmp = graph()->NewNode(simplified()->NumberEqual(), input,
+                                 jsgraph()->ZeroConstant());
+    Node* inv = graph()->NewNode(simplified()->BooleanNot(), cmp);
+    return ReplaceWith(inv);
+  }
+  // TODO(turbofan): js-typed-lowering of ToBoolean(string)
+  return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSLoadProperty(Node* node) {
+  Node* key = NodeProperties::GetValueInput(node, 1);
+  Node* base = NodeProperties::GetValueInput(node, 0);
+  Type* key_type = NodeProperties::GetBounds(key).upper;
+  Type* base_type = NodeProperties::GetBounds(base).upper;
+  // TODO(mstarzinger): This lowering is not correct if:
+  //   a) The typed array turns external (i.e. MaterializeArrayBuffer)
+  //   b) The typed array or its buffer is neutered.
+  //   c) The index is out of bounds.
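+  // For example (illustrative): a keyed load from a known (constant) Uint8Array
+  // with an int32 key is lowered below to a LoadElement on the array's backing
+  // store, with the array length passed along as the element access's length
+  // input.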
+  if (base_type->IsConstant() && key_type->Is(Type::Integral32()) &&
+      base_type->AsConstant()->Value()->IsJSTypedArray()) {
+    // JSLoadProperty(typed-array, int32)
+    JSTypedArray* array = JSTypedArray::cast(*base_type->AsConstant()->Value());
+    ElementsKind elements_kind = array->map()->elements_kind();
+    ExternalArrayType type = array->type();
+    uint32_t length;
+    CHECK(array->length()->ToUint32(&length));
+    ElementAccess element_access;
+    Node* elements = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForJSObjectElements()), base,
+        NodeProperties::GetEffectInput(node));
+    if (IsExternalArrayElementsKind(elements_kind)) {
+      elements = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForExternalArrayPointer()),
+          elements, NodeProperties::GetEffectInput(node));
+      element_access = AccessBuilder::ForTypedArrayElement(type, true);
+    } else {
+      DCHECK(IsFixedTypedArrayElementsKind(elements_kind));
+      element_access = AccessBuilder::ForTypedArrayElement(type, false);
+    }
+    Node* value =
+        graph()->NewNode(simplified()->LoadElement(element_access), elements,
+                         key, jsgraph()->Uint32Constant(length),
+                         NodeProperties::GetEffectInput(node));
+    return ReplaceEagerly(node, value);
+  }
+  return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
+  Node* key = NodeProperties::GetValueInput(node, 1);
+  Node* base = NodeProperties::GetValueInput(node, 0);
+  Node* value = NodeProperties::GetValueInput(node, 2);
+  Type* key_type = NodeProperties::GetBounds(key).upper;
+  Type* base_type = NodeProperties::GetBounds(base).upper;
+  // TODO(mstarzinger): This lowering is not correct if:
+  //   a) The typed array turns external (i.e. MaterializeArrayBuffer)
+  //   b) The typed array or its buffer is neutered.
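+  // For example (illustrative): a keyed store into a known (constant)
+  // Uint8Array with an int32 key is lowered below to an explicit
+  // Uint32LessThan bounds check whose true branch performs the StoreElement
+  // and whose false branch skips it; the two branches merge and their effects
+  // are joined by an EffectPhi.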
+  if (key_type->Is(Type::Integral32()) && base_type->IsConstant() &&
+      base_type->AsConstant()->Value()->IsJSTypedArray()) {
+    // JSStoreProperty(typed-array, int32, value)
+    JSTypedArray* array = JSTypedArray::cast(*base_type->AsConstant()->Value());
+    ElementsKind elements_kind = array->map()->elements_kind();
+    ExternalArrayType type = array->type();
+    uint32_t length;
+    CHECK(array->length()->ToUint32(&length));
+    ElementAccess element_access;
+    Node* elements = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForJSObjectElements()), base,
+        NodeProperties::GetEffectInput(node));
+    if (IsExternalArrayElementsKind(elements_kind)) {
+      elements = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForExternalArrayPointer()),
+          elements, NodeProperties::GetEffectInput(node));
+      element_access = AccessBuilder::ForTypedArrayElement(type, true);
+    } else {
+      DCHECK(IsFixedTypedArrayElementsKind(elements_kind));
+      element_access = AccessBuilder::ForTypedArrayElement(type, false);
+    }
+
+    Node* check = graph()->NewNode(machine()->Uint32LessThan(), key,
+                                   jsgraph()->Uint32Constant(length));
+    Node* branch = graph()->NewNode(common()->Branch(), check,
+                                    NodeProperties::GetControlInput(node));
+
+    Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+    Node* store =
+        graph()->NewNode(simplified()->StoreElement(element_access), elements,
+                         key, jsgraph()->Uint32Constant(length), value,
+                         NodeProperties::GetEffectInput(node), if_true);
+
+    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+
+    Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+    Node* phi = graph()->NewNode(common()->EffectPhi(2), store,
+                                 NodeProperties::GetEffectInput(node), merge);
+
+    return ReplaceWith(phi);
+  }
+  return NoChange();
+}
+
+
+static Reduction ReplaceWithReduction(Node* node, Reduction reduction) {
+  if (reduction.Changed()) {
+    NodeProperties::ReplaceWithValue(node, reduction.replacement());
+    return reduction;
+  }
+  return Reducer::NoChange();
+}
+
+
+Reduction JSTypedLowering::Reduce(Node* node) {
+  switch (node->opcode()) {
+    case IrOpcode::kJSEqual:
+      return ReduceJSEqual(node, false);
+    case IrOpcode::kJSNotEqual:
+      return ReduceJSEqual(node, true);
+    case IrOpcode::kJSStrictEqual:
+      return ReduceJSStrictEqual(node, false);
+    case IrOpcode::kJSStrictNotEqual:
+      return ReduceJSStrictEqual(node, true);
+    case IrOpcode::kJSLessThan:         // fall through
+    case IrOpcode::kJSGreaterThan:      // fall through
+    case IrOpcode::kJSLessThanOrEqual:  // fall through
+    case IrOpcode::kJSGreaterThanOrEqual:
+      return ReduceJSComparison(node);
+    case IrOpcode::kJSBitwiseOr:
+      return ReduceI32Binop(node, true, true, machine()->Word32Or());
+    case IrOpcode::kJSBitwiseXor:
+      return ReduceI32Binop(node, true, true, machine()->Word32Xor());
+    case IrOpcode::kJSBitwiseAnd:
+      return ReduceI32Binop(node, true, true, machine()->Word32And());
+    case IrOpcode::kJSShiftLeft:
+      return ReduceI32Shift(node, true, machine()->Word32Shl());
+    case IrOpcode::kJSShiftRight:
+      return ReduceI32Shift(node, true, machine()->Word32Sar());
+    case IrOpcode::kJSShiftRightLogical:
+      return ReduceI32Shift(node, false, machine()->Word32Shr());
+    case IrOpcode::kJSAdd:
+      return ReduceJSAdd(node);
+    case IrOpcode::kJSSubtract:
+      return ReduceNumberBinop(node, simplified()->NumberSubtract());
+    case IrOpcode::kJSMultiply:
+      return ReduceNumberBinop(node, simplified()->NumberMultiply());
+    case IrOpcode::kJSDivide:
+      return ReduceNumberBinop(node, simplified()->NumberDivide());
+    case IrOpcode::kJSModulus:
+      return ReduceNumberBinop(node, simplified()->NumberModulus());
+    case IrOpcode::kJSUnaryNot: {
+      Reduction result = ReduceJSToBooleanInput(node->InputAt(0));
+      Node* value;
+      if (result.Changed()) {
+        // JSUnaryNot(x:boolean) => BooleanNot(x)
+        value =
+            graph()->NewNode(simplified()->BooleanNot(), result.replacement());
+        NodeProperties::ReplaceWithValue(node, value);
+        return Changed(value);
+      } else {
+        // JSUnaryNot(x) => BooleanNot(JSToBoolean(x))
+        value = graph()->NewNode(simplified()->BooleanNot(), node);
+        node->set_op(javascript()->ToBoolean());
+        NodeProperties::ReplaceWithValue(node, value, node);
+        // Note: ReplaceUses() smashes all uses, so smash it back here.
+        value->ReplaceInput(0, node);
+        return Changed(node);
+      }
+    }
+    case IrOpcode::kJSToBoolean:
+      return ReplaceWithReduction(node,
+                                  ReduceJSToBooleanInput(node->InputAt(0)));
+    case IrOpcode::kJSToNumber:
+      return ReplaceWithReduction(node,
+                                  ReduceJSToNumberInput(node->InputAt(0)));
+    case IrOpcode::kJSToString:
+      return ReplaceWithReduction(node,
+                                  ReduceJSToStringInput(node->InputAt(0)));
+    case IrOpcode::kJSLoadProperty:
+      return ReduceJSLoadProperty(node);
+    case IrOpcode::kJSStoreProperty:
+      return ReduceJSStoreProperty(node);
+    case IrOpcode::kJSCallFunction:
+      return JSBuiltinReducer(jsgraph()).Reduce(node);
+    default:
+      break;
+  }
+  return NoChange();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/js-typed-lowering.h b/src/compiler/js-typed-lowering.h
new file mode 100644
index 0000000..deaf1fa
--- /dev/null
+++ b/src/compiler/js-typed-lowering.h
@@ -0,0 +1,64 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_TYPED_LOWERING_H_
+#define V8_COMPILER_JS_TYPED_LOWERING_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Lowers JS-level operators to simplified operators based on types.
+class JSTypedLowering FINAL : public Reducer {
+ public:
+  explicit JSTypedLowering(JSGraph* jsgraph)
+      : jsgraph_(jsgraph), simplified_(jsgraph->zone()) {}
+  virtual ~JSTypedLowering();
+
+  virtual Reduction Reduce(Node* node) OVERRIDE;
+
+  JSGraph* jsgraph() { return jsgraph_; }
+  Graph* graph() { return jsgraph_->graph(); }
+  Zone* zone() { return jsgraph_->zone(); }
+
+ private:
+  friend class JSBinopReduction;
+
+  Reduction ReplaceEagerly(Node* old, Node* node);
+  Reduction ReplaceWith(Node* node) { return Reducer::Replace(node); }
+  Reduction ReduceJSAdd(Node* node);
+  Reduction ReduceJSComparison(Node* node);
+  Reduction ReduceJSLoadProperty(Node* node);
+  Reduction ReduceJSStoreProperty(Node* node);
+  Reduction ReduceJSEqual(Node* node, bool invert);
+  Reduction ReduceJSStrictEqual(Node* node, bool invert);
+  Reduction ReduceJSToNumberInput(Node* input);
+  Reduction ReduceJSToStringInput(Node* input);
+  Reduction ReduceJSToBooleanInput(Node* input);
+  Reduction ReduceNumberBinop(Node* node, const Operator* numberOp);
+  Reduction ReduceI32Binop(Node* node, bool left_signed, bool right_signed,
+                           const Operator* intOp);
+  Reduction ReduceI32Shift(Node* node, bool left_signed,
+                           const Operator* shift_op);
+
+  JSOperatorBuilder* javascript() { return jsgraph_->javascript(); }
+  CommonOperatorBuilder* common() { return jsgraph_->common(); }
+  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+  MachineOperatorBuilder* machine() { return jsgraph_->machine(); }
+
+  JSGraph* jsgraph_;
+  SimplifiedOperatorBuilder simplified_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_JS_TYPED_LOWERING_H_
diff --git a/src/compiler/linkage-impl.h b/src/compiler/linkage-impl.h
new file mode 100644
index 0000000..c32c706
--- /dev/null
+++ b/src/compiler/linkage-impl.h
@@ -0,0 +1,226 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_LINKAGE_IMPL_H_
+#define V8_COMPILER_LINKAGE_IMPL_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// TODO(titzer): replace uses of int with size_t in LinkageHelper.
+template <typename LinkageTraits>
+class LinkageHelper {
+ public:
+  static const RegList kNoCalleeSaved = 0;
+
+  static void AddReturnLocations(LocationSignature::Builder* locations) {
+    DCHECK(locations->return_count_ <= 2);
+    if (locations->return_count_ > 0) {
+      locations->AddReturn(regloc(LinkageTraits::ReturnValueReg()));
+    }
+    if (locations->return_count_ > 1) {
+      locations->AddReturn(regloc(LinkageTraits::ReturnValue2Reg()));
+    }
+  }
+
+  // TODO(turbofan): cache call descriptors for JSFunction calls.
+  static CallDescriptor* GetJSCallDescriptor(Zone* zone,
+                                             int js_parameter_count) {
+    const size_t return_count = 1;
+    const size_t context_count = 1;
+    const size_t parameter_count = js_parameter_count + context_count;
+
+    LocationSignature::Builder locations(zone, return_count, parameter_count);
+    MachineSignature::Builder types(zone, return_count, parameter_count);
+
+    // Add returns.
+    AddReturnLocations(&locations);
+    for (size_t i = 0; i < return_count; i++) {
+      types.AddReturn(kMachAnyTagged);
+    }
+
+    // All parameters to JS calls go on the stack.
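+    // For example (illustrative): with js_parameter_count == 2 the parameters
+    // get the relative stack locations -2 and -1 below, while the context
+    // stays in ContextReg() and the call target in JSCallFunctionReg().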
+    for (int i = 0; i < js_parameter_count; i++) {
+      int spill_slot_index = i - js_parameter_count;
+      locations.AddParam(stackloc(spill_slot_index));
+      types.AddParam(kMachAnyTagged);
+    }
+    // Add context.
+    locations.AddParam(regloc(LinkageTraits::ContextReg()));
+    types.AddParam(kMachAnyTagged);
+
+    // The target for JS function calls is the JSFunction object.
+    MachineType target_type = kMachAnyTagged;
+    LinkageLocation target_loc = regloc(LinkageTraits::JSCallFunctionReg());
+    return new (zone) CallDescriptor(CallDescriptor::kCallJSFunction,  // kind
+                                     target_type,         // target MachineType
+                                     target_loc,          // target location
+                                     types.Build(),       // machine_sig
+                                     locations.Build(),   // location_sig
+                                     js_parameter_count,  // js_parameter_count
+                                     Operator::kNoProperties,  // properties
+                                     kNoCalleeSaved,           // callee-saved
+                                     CallDescriptor::kNeedsFrameState,  // flags
+                                     "js-call");
+  }
+
+
+  // TODO(turbofan): cache call descriptors for runtime calls.
+  static CallDescriptor* GetRuntimeCallDescriptor(
+      Zone* zone, Runtime::FunctionId function_id, int js_parameter_count,
+      Operator::Properties properties) {
+    const size_t function_count = 1;
+    const size_t num_args_count = 1;
+    const size_t context_count = 1;
+    const size_t parameter_count = function_count +
+                                   static_cast<size_t>(js_parameter_count) +
+                                   num_args_count + context_count;
+
+    const Runtime::Function* function = Runtime::FunctionForId(function_id);
+    const size_t return_count = static_cast<size_t>(function->result_size);
+
+    LocationSignature::Builder locations(zone, return_count, parameter_count);
+    MachineSignature::Builder types(zone, return_count, parameter_count);
+
+    // Add returns.
+    AddReturnLocations(&locations);
+    for (size_t i = 0; i < return_count; i++) {
+      types.AddReturn(kMachAnyTagged);
+    }
+
+    // All parameters to the runtime call go on the stack.
+    for (int i = 0; i < js_parameter_count; i++) {
+      locations.AddParam(stackloc(i - js_parameter_count));
+      types.AddParam(kMachAnyTagged);
+    }
+    // Add runtime function itself.
+    locations.AddParam(regloc(LinkageTraits::RuntimeCallFunctionReg()));
+    types.AddParam(kMachAnyTagged);
+
+    // Add runtime call argument count.
+    locations.AddParam(regloc(LinkageTraits::RuntimeCallArgCountReg()));
+    types.AddParam(kMachPtr);
+
+    // Add context.
+    locations.AddParam(regloc(LinkageTraits::ContextReg()));
+    types.AddParam(kMachAnyTagged);
+
+    CallDescriptor::Flags flags = Linkage::NeedsFrameState(function_id)
+                                      ? CallDescriptor::kNeedsFrameState
+                                      : CallDescriptor::kNoFlags;
+
+    // The target for runtime calls is a code object.
+    MachineType target_type = kMachAnyTagged;
+    LinkageLocation target_loc = LinkageLocation::AnyRegister();
+    return new (zone) CallDescriptor(CallDescriptor::kCallCodeObject,  // kind
+                                     target_type,         // target MachineType
+                                     target_loc,          // target location
+                                     types.Build(),       // machine_sig
+                                     locations.Build(),   // location_sig
+                                     js_parameter_count,  // js_parameter_count
+                                     properties,          // properties
+                                     kNoCalleeSaved,      // callee-saved
+                                     flags,               // flags
+                                     function->name);     // debug name
+  }
+
+
+  // TODO(turbofan): cache call descriptors for code stub calls.
+  static CallDescriptor* GetStubCallDescriptor(
+      Zone* zone, CallInterfaceDescriptor descriptor, int stack_parameter_count,
+      CallDescriptor::Flags flags) {
+    const int register_parameter_count =
+        descriptor.GetEnvironmentParameterCount();
+    const int js_parameter_count =
+        register_parameter_count + stack_parameter_count;
+    const int context_count = 1;
+    const size_t return_count = 1;
+    const size_t parameter_count =
+        static_cast<size_t>(js_parameter_count + context_count);
+
+    LocationSignature::Builder locations(zone, return_count, parameter_count);
+    MachineSignature::Builder types(zone, return_count, parameter_count);
+
+    // Add return location.
+    AddReturnLocations(&locations);
+    types.AddReturn(kMachAnyTagged);
+
+    // Add parameters in registers and on the stack.
+    for (int i = 0; i < js_parameter_count; i++) {
+      if (i < register_parameter_count) {
+        // The first parameters go in registers.
+        Register reg = descriptor.GetEnvironmentParameterRegister(i);
+        locations.AddParam(regloc(reg));
+      } else {
+        // The rest of the parameters go on the stack.
+        int stack_slot = i - register_parameter_count - stack_parameter_count;
+        locations.AddParam(stackloc(stack_slot));
+      }
+      types.AddParam(kMachAnyTagged);
+    }
+    // Add context.
+    locations.AddParam(regloc(LinkageTraits::ContextReg()));
+    types.AddParam(kMachAnyTagged);
+
+    // The target for stub calls is a code object.
+    MachineType target_type = kMachAnyTagged;
+    LinkageLocation target_loc = LinkageLocation::AnyRegister();
+    return new (zone) CallDescriptor(CallDescriptor::kCallCodeObject,  // kind
+                                     target_type,         // target MachineType
+                                     target_loc,          // target location
+                                     types.Build(),       // machine_sig
+                                     locations.Build(),   // location_sig
+                                     js_parameter_count,  // js_parameter_count
+                                     Operator::kNoProperties,  // properties
+                                     kNoCalleeSaved,  // callee-saved registers
+                                     flags,           // flags
+                                     descriptor.DebugName(zone->isolate()));
+  }
+
+  static CallDescriptor* GetSimplifiedCDescriptor(Zone* zone,
+                                                  MachineSignature* msig) {
+    LocationSignature::Builder locations(zone, msig->return_count(),
+                                         msig->parameter_count());
+    // Add return location(s).
+    AddReturnLocations(&locations);
+
+    // Add register and/or stack parameter(s).
+    const int parameter_count = static_cast<int>(msig->parameter_count());
+    for (int i = 0; i < parameter_count; i++) {
+      if (i < LinkageTraits::CRegisterParametersLength()) {
+        locations.AddParam(regloc(LinkageTraits::CRegisterParameter(i)));
+      } else {
+        locations.AddParam(stackloc(-1 - i));
+      }
+    }
+
+    // The target for C calls is always an address (i.e. machine pointer).
+    MachineType target_type = kMachPtr;
+    LinkageLocation target_loc = LinkageLocation::AnyRegister();
+    return new (zone) CallDescriptor(CallDescriptor::kCallAddress,  // kind
+                                     target_type,        // target MachineType
+                                     target_loc,         // target location
+                                     msig,               // machine_sig
+                                     locations.Build(),  // location_sig
+                                     0,                  // js_parameter_count
+                                     Operator::kNoProperties,  // properties
+                                     LinkageTraits::CCalleeSaveRegisters(),
+                                     CallDescriptor::kNoFlags, "c-call");
+  }
+
+  static LinkageLocation regloc(Register reg) {
+    return LinkageLocation(Register::ToAllocationIndex(reg));
+  }
+
+  static LinkageLocation stackloc(int i) {
+    DCHECK_LT(i, 0);
+    return LinkageLocation(i);
+  }
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_LINKAGE_IMPL_H_
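Note on the descriptors above: every parameter location is encoded as one small integer, where regloc() wraps a register allocation index (non-negative) and stackloc() wraps a caller-frame slot (negative), with the last JS argument in slot -1. A minimal standalone sketch of that indexing, assuming the loop in GetJSCallDescriptor; Loc, RegLoc, StackLoc and JsParameterLocations are illustrative names, not V8 API:

#include <cassert>
#include <vector>

// Illustrative stand-in for LinkageLocation: >= 0 means a register
// allocation index, < 0 means a stack slot in the caller's frame.
struct Loc { int value; };

Loc RegLoc(int allocation_index) { return Loc{allocation_index}; }
Loc StackLoc(int slot) { assert(slot < 0); return Loc{slot}; }

// Mirrors the parameter loop above: argument i of a call with
// js_parameter_count arguments is spilled to slot i - js_parameter_count.
std::vector<Loc> JsParameterLocations(int js_parameter_count) {
  std::vector<Loc> locations;
  for (int i = 0; i < js_parameter_count; ++i) {
    locations.push_back(StackLoc(i - js_parameter_count));
  }
  return locations;
}

int main() {
  std::vector<Loc> locs = JsParameterLocations(3);
  assert(locs[0].value == -3);  // receiver / first argument, deepest slot
  assert(locs[2].value == -1);  // last argument, nearest the return address
  return 0;
}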
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
new file mode 100644
index 0000000..465a667
--- /dev/null
+++ b/src/compiler/linkage.cc
@@ -0,0 +1,170 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/linkage.h"
+
+#include "src/code-stubs.h"
+#include "src/compiler.h"
+#include "src/compiler/node.h"
+#include "src/compiler/pipeline.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+OStream& operator<<(OStream& os, const CallDescriptor::Kind& k) {
+  switch (k) {
+    case CallDescriptor::kCallCodeObject:
+      os << "Code";
+      break;
+    case CallDescriptor::kCallJSFunction:
+      os << "JS";
+      break;
+    case CallDescriptor::kCallAddress:
+      os << "Addr";
+      break;
+  }
+  return os;
+}
+
+
+OStream& operator<<(OStream& os, const CallDescriptor& d) {
+  // TODO(svenpanne) Output properties etc. and be less cryptic.
+  return os << d.kind() << ":" << d.debug_name() << ":r" << d.ReturnCount()
+            << "j" << d.JSParameterCount() << "i" << d.InputCount() << "f"
+            << d.FrameStateCount();
+}
+
+
+Linkage::Linkage(CompilationInfo* info) : info_(info) {
+  if (info->function() != NULL) {
+    // If we already have the function literal, use the number of parameters
+    // plus the receiver.
+    incoming_ = GetJSCallDescriptor(1 + info->function()->parameter_count());
+  } else if (!info->closure().is_null()) {
+    // If we are compiling a JS function, use a JS call descriptor for the
+    // formal parameter count plus the receiver.
+    SharedFunctionInfo* shared = info->closure()->shared();
+    incoming_ = GetJSCallDescriptor(1 + shared->formal_parameter_count());
+  } else if (info->code_stub() != NULL) {
+    // Use the code stub interface descriptor.
+    CallInterfaceDescriptor descriptor =
+        info->code_stub()->GetCallInterfaceDescriptor();
+    incoming_ = GetStubCallDescriptor(descriptor);
+  } else {
+    incoming_ = NULL;  // TODO(titzer): ?
+  }
+}
+
+
+FrameOffset Linkage::GetFrameOffset(int spill_slot, Frame* frame, int extra) {
+  if (frame->GetSpillSlotCount() > 0 || incoming_->IsJSFunctionCall() ||
+      incoming_->kind() == CallDescriptor::kCallAddress) {
+    int offset;
+    int register_save_area_size = frame->GetRegisterSaveAreaSize();
+    if (spill_slot >= 0) {
+      // Local or spill slot. Skip the frame pointer, function, and
+      // context in the fixed part of the frame.
+      offset =
+          -(spill_slot + 1) * kPointerSize - register_save_area_size + extra;
+    } else {
+      // Incoming parameter. Skip the return address.
+      offset = -(spill_slot + 1) * kPointerSize + kFPOnStackSize +
+               kPCOnStackSize + extra;
+    }
+    return FrameOffset::FromFramePointer(offset);
+  } else {
+    // No frame. Retrieve all parameters relative to stack pointer.
+    DCHECK(spill_slot < 0);  // Must be a parameter.
+    int register_save_area_size = frame->GetRegisterSaveAreaSize();
+    int offset = register_save_area_size - (spill_slot + 1) * kPointerSize +
+                 kPCOnStackSize + extra;
+    return FrameOffset::FromStackPointer(offset);
+  }
+}
+
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count) {
+  return GetJSCallDescriptor(parameter_count, this->info_->zone());
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+    Runtime::FunctionId function, int parameter_count,
+    Operator::Properties properties) {
+  return GetRuntimeCallDescriptor(function, parameter_count, properties,
+                                  this->info_->zone());
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    CallDescriptor::Flags flags) {
+  return GetStubCallDescriptor(descriptor, stack_parameter_count, flags,
+                               this->info_->zone());
+}
+
+
+// static
+bool Linkage::NeedsFrameState(Runtime::FunctionId function) {
+  if (!FLAG_turbo_deoptimization) {
+    return false;
+  }
+  // TODO(jarin) At the moment, we only add frame state for
+  // a few chosen runtime functions.
+  switch (function) {
+    case Runtime::kDebugBreak:
+    case Runtime::kDebugGetLoadedScripts:
+    case Runtime::kDeoptimizeFunction:
+    case Runtime::kInlineCallFunction:
+    case Runtime::kPrepareStep:
+    case Runtime::kSetScriptBreakPoint:
+    case Runtime::kStackGuard:
+    case Runtime::kCheckExecutionState:
+    case Runtime::kDebugEvaluate:
+    case Runtime::kCollectStackTrace:
+      return true;
+    default:
+      return false;
+  }
+}
+
+
+//==============================================================================
+// Provide unimplemented methods on unsupported architectures, to at least link.
+//==============================================================================
+#if !V8_TURBOFAN_BACKEND
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+    Runtime::FunctionId function, int parameter_count,
+    Operator::Properties properties, Zone* zone) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    CallDescriptor::Flags flags, Zone* zone) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+                                                  MachineSignature* sig) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+#endif  // !V8_TURBOFAN_BACKEND
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
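Linkage::GetFrameOffset above addresses locals (non-negative spill slots) below the register save area relative to the frame pointer, and incoming parameters (negative slots) above the saved frame pointer and return address. A standalone sketch of that arithmetic, assuming 8-byte pointer, FP and PC slots as on a 64-bit target; the constants and FrameOffsetFromFP are illustrative stand-ins, not the V8 definitions:

#include <cassert>

// Illustrative 64-bit sizes; V8 defines the real values per target.
const int kPointerSize = 8;
const int kFPOnStackSize = 8;
const int kPCOnStackSize = 8;

// Mirrors the frame-pointer-relative cases in Linkage::GetFrameOffset
// (with extra == 0 for simplicity).
int FrameOffsetFromFP(int spill_slot, int register_save_area_size) {
  if (spill_slot >= 0) {
    // Local or spill slot: below the register save area.
    return -(spill_slot + 1) * kPointerSize - register_save_area_size;
  }
  // Incoming parameter: skip the saved frame pointer and the return address.
  return -(spill_slot + 1) * kPointerSize + kFPOnStackSize + kPCOnStackSize;
}

int main() {
  assert(FrameOffsetFromFP(0, 0) == -8);   // first spill slot, just below FP
  assert(FrameOffsetFromFP(-1, 0) == 16);  // last incoming argument, above FP
  return 0;
}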
diff --git a/src/compiler/linkage.h b/src/compiler/linkage.h
new file mode 100644
index 0000000..c5cef5e
--- /dev/null
+++ b/src/compiler/linkage.h
@@ -0,0 +1,232 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_LINKAGE_H_
+#define V8_COMPILER_LINKAGE_H_
+
+#include "src/base/flags.h"
+#include "src/code-stubs.h"
+#include "src/compiler/frame.h"
+#include "src/compiler/machine-type.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Describes the location for a parameter or a return value to a call.
+class LinkageLocation {
+ public:
+  explicit LinkageLocation(int location) : location_(location) {}
+
+  static const int16_t ANY_REGISTER = 32767;
+
+  static LinkageLocation AnyRegister() { return LinkageLocation(ANY_REGISTER); }
+
+ private:
+  friend class CallDescriptor;
+  friend class OperandGenerator;
+  int16_t location_;  // >= 0 implies register, otherwise stack slot.
+};
+
+typedef Signature<LinkageLocation> LocationSignature;
+
+// Describes a call to various parts of the compiler. Every call has the notion
+// of a "target", which is the first input to the call.
+class CallDescriptor FINAL : public ZoneObject {
+ public:
+  // Describes the kind of this call, which determines the target.
+  enum Kind {
+    kCallCodeObject,  // target is a Code object
+    kCallJSFunction,  // target is a JSFunction object
+    kCallAddress      // target is a machine pointer
+  };
+
+  enum Flag {
+    // TODO(jarin) kLazyDeoptimization and kNeedsFrameState should be unified.
+    kNoFlags = 0u,
+    kNeedsFrameState = 1u << 0,
+    kPatchableCallSite = 1u << 1,
+    kNeedsNopAfterCall = 1u << 2,
+    kPatchableCallSiteWithNop = kPatchableCallSite | kNeedsNopAfterCall
+  };
+  typedef base::Flags<Flag> Flags;
+
+  CallDescriptor(Kind kind, MachineType target_type, LinkageLocation target_loc,
+                 MachineSignature* machine_sig, LocationSignature* location_sig,
+                 size_t js_param_count, Operator::Properties properties,
+                 RegList callee_saved_registers, Flags flags,
+                 const char* debug_name = "")
+      : kind_(kind),
+        target_type_(target_type),
+        target_loc_(target_loc),
+        machine_sig_(machine_sig),
+        location_sig_(location_sig),
+        js_param_count_(js_param_count),
+        properties_(properties),
+        callee_saved_registers_(callee_saved_registers),
+        flags_(flags),
+        debug_name_(debug_name) {
+    DCHECK(machine_sig->return_count() == location_sig->return_count());
+    DCHECK(machine_sig->parameter_count() == location_sig->parameter_count());
+  }
+
+  // Returns the kind of this call.
+  Kind kind() const { return kind_; }
+
+  // Returns {true} if this descriptor is a call to a JSFunction.
+  bool IsJSFunctionCall() const { return kind_ == kCallJSFunction; }
+
+  // The number of return values from this call.
+  size_t ReturnCount() const { return machine_sig_->return_count(); }
+
+  // The number of JavaScript parameters to this call, including the receiver
+  // object.
+  size_t JSParameterCount() const { return js_param_count_; }
+
+  // The total number of inputs to this call, which includes the target,
+  // receiver, context, etc.
+  // TODO(titzer): this should include the framestate input too.
+  size_t InputCount() const { return 1 + machine_sig_->parameter_count(); }
+
+  size_t FrameStateCount() const { return NeedsFrameState() ? 1 : 0; }
+
+  Flags flags() const { return flags_; }
+
+  bool NeedsFrameState() const { return flags() & kNeedsFrameState; }
+
+  LinkageLocation GetReturnLocation(size_t index) const {
+    return location_sig_->GetReturn(index);
+  }
+
+  LinkageLocation GetInputLocation(size_t index) const {
+    if (index == 0) return target_loc_;
+    return location_sig_->GetParam(index - 1);
+  }
+
+  const MachineSignature* GetMachineSignature() const { return machine_sig_; }
+
+  MachineType GetReturnType(size_t index) const {
+    return machine_sig_->GetReturn(index);
+  }
+
+  MachineType GetInputType(size_t index) const {
+    if (index == 0) return target_type_;
+    return machine_sig_->GetParam(index - 1);
+  }
+
+  // Operator properties describe how this call can be optimized, if at all.
+  Operator::Properties properties() const { return properties_; }
+
+  // Get the callee-saved registers, if any, across this call.
+  RegList CalleeSavedRegisters() const { return callee_saved_registers_; }
+
+  const char* debug_name() const { return debug_name_; }
+
+ private:
+  friend class Linkage;
+
+  Kind kind_;
+  MachineType target_type_;
+  LinkageLocation target_loc_;
+  MachineSignature* machine_sig_;
+  LocationSignature* location_sig_;
+  size_t js_param_count_;
+  Operator::Properties properties_;
+  RegList callee_saved_registers_;
+  Flags flags_;
+  const char* debug_name_;
+};
+
+DEFINE_OPERATORS_FOR_FLAGS(CallDescriptor::Flags)
+
+OStream& operator<<(OStream& os, const CallDescriptor& d);
+OStream& operator<<(OStream& os, const CallDescriptor::Kind& k);
+
+// Defines the linkage for a compilation, including the calling conventions
+// for incoming parameters and return value(s) as well as the outgoing calling
+// convention for any kind of call. Linkage is generally architecture-specific.
+//
+// Can be used to translate {arg_index} (i.e. index of the call node input) as
+// well as {param_index} (i.e. as stored in parameter nodes) into an operator
+// representing the architecture-specific location. The following call node
+// layouts are supported (where {n} is the number of value inputs):
+//
+//                  #0          #1     #2     #3     [...]             #n
+// Call[CodeStub]   code,       arg 1, arg 2, arg 3, [...],            context
+// Call[JSFunction] function,   rcvr,  arg 1, arg 2, [...],            context
+// Call[Runtime]    CEntryStub, arg 1, arg 2, arg 3, [...], fun, #arg, context
+class Linkage : public ZoneObject {
+ public:
+  explicit Linkage(CompilationInfo* info);
+  explicit Linkage(CompilationInfo* info, CallDescriptor* incoming)
+      : info_(info), incoming_(incoming) {}
+
+  // The call descriptor for this compilation unit describes the locations
+  // of incoming parameters and the outgoing return value(s).
+  CallDescriptor* GetIncomingDescriptor() { return incoming_; }
+  CallDescriptor* GetJSCallDescriptor(int parameter_count);
+  static CallDescriptor* GetJSCallDescriptor(int parameter_count, Zone* zone);
+  CallDescriptor* GetRuntimeCallDescriptor(Runtime::FunctionId function,
+                                           int parameter_count,
+                                           Operator::Properties properties);
+  static CallDescriptor* GetRuntimeCallDescriptor(
+      Runtime::FunctionId function, int parameter_count,
+      Operator::Properties properties, Zone* zone);
+
+  CallDescriptor* GetStubCallDescriptor(
+      CallInterfaceDescriptor descriptor, int stack_parameter_count = 0,
+      CallDescriptor::Flags flags = CallDescriptor::kNoFlags);
+  static CallDescriptor* GetStubCallDescriptor(
+      CallInterfaceDescriptor descriptor, int stack_parameter_count,
+      CallDescriptor::Flags flags, Zone* zone);
+
+  // Creates a call descriptor for simplified C calls that is appropriate
+  // for the host platform. This simplified calling convention only supports
+  // integers and pointers of one word size each, i.e. no floating point,
+  // structs, pointers to members, etc.
+  static CallDescriptor* GetSimplifiedCDescriptor(Zone* zone,
+                                                  MachineSignature* sig);
+
+  // Get the location of an (incoming) parameter to this function.
+  LinkageLocation GetParameterLocation(int index) {
+    return incoming_->GetInputLocation(index + 1);  // + 1 to skip target.
+  }
+
+  // Get the machine type of an (incoming) parameter to this function.
+  MachineType GetParameterType(int index) {
+    return incoming_->GetInputType(index + 1);  // + 1 to skip target.
+  }
+
+  // Get the location where this function should place its return value.
+  LinkageLocation GetReturnLocation() {
+    return incoming_->GetReturnLocation(0);
+  }
+
+  // Get the machine type of this function's return value.
+  MachineType GetReturnType() { return incoming_->GetReturnType(0); }
+
+  // Get the frame offset for a given spill slot. The location depends on the
+  // calling convention and the specific frame layout, and may thus be
+  // architecture-specific. Negative spill slots indicate arguments on the
+  // caller's frame. The {extra} parameter indicates an additional offset from
+  // the frame offset, e.g. to index into part of a double slot.
+  FrameOffset GetFrameOffset(int spill_slot, Frame* frame, int extra = 0);
+
+  CompilationInfo* info() const { return info_; }
+
+  static bool NeedsFrameState(Runtime::FunctionId function);
+
+ private:
+  CompilationInfo* info_;
+  CallDescriptor* incoming_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_LINKAGE_H_
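Per the layout comment above, input 0 of a call node is the target and inputs 1..n are the parameters, which is why CallDescriptor::GetInputLocation(0) returns the target location and Linkage::GetParameterLocation adds one to skip the target. A minimal sketch of that index translation; DescriptorSketch and Location are illustrative names, not the V8 classes:

#include <cassert>
#include <cstddef>
#include <vector>

// Illustrative stand-ins for LinkageLocation and CallDescriptor.
struct Location { int value; };

struct DescriptorSketch {
  Location target;
  std::vector<Location> params;

  // Mirrors CallDescriptor::GetInputLocation: call input 0 is the target,
  // call input i (i >= 1) is parameter i - 1 of the location signature.
  Location GetInputLocation(size_t index) const {
    if (index == 0) return target;
    return params[index - 1];
  }
};

int main() {
  DescriptorSketch d{Location{100}, {Location{-2}, Location{-1}, Location{7}}};
  assert(d.GetInputLocation(0).value == 100);  // the call target
  // Function parameter 0 is call input 1, matching
  // Linkage::GetParameterLocation(index) == GetInputLocation(index + 1).
  assert(d.GetInputLocation(1).value == -2);
  return 0;
}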
diff --git a/src/compiler/machine-operator-reducer-unittest.cc b/src/compiler/machine-operator-reducer-unittest.cc
new file mode 100644
index 0000000..f3073ab
--- /dev/null
+++ b/src/compiler/machine-operator-reducer-unittest.cc
@@ -0,0 +1,616 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bits.h"
+#include "src/compiler/graph-unittest.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/machine-operator-reducer.h"
+#include "src/compiler/typer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class MachineOperatorReducerTest : public GraphTest {
+ public:
+  explicit MachineOperatorReducerTest(int num_parameters = 2)
+      : GraphTest(num_parameters) {}
+
+ protected:
+  Reduction Reduce(Node* node) {
+    Typer typer(zone());
+    JSOperatorBuilder javascript(zone());
+    JSGraph jsgraph(graph(), common(), &javascript, &typer, &machine_);
+    MachineOperatorReducer reducer(&jsgraph);
+    return reducer.Reduce(node);
+  }
+
+  MachineOperatorBuilder* machine() { return &machine_; }
+
+ private:
+  MachineOperatorBuilder machine_;
+};
+
+
+template <typename T>
+class MachineOperatorReducerTestWithParam
+    : public MachineOperatorReducerTest,
+      public ::testing::WithParamInterface<T> {
+ public:
+  explicit MachineOperatorReducerTestWithParam(int num_parameters = 2)
+      : MachineOperatorReducerTest(num_parameters) {}
+  virtual ~MachineOperatorReducerTestWithParam() {}
+};
+
+
+namespace {
+
+static const float kFloat32Values[] = {
+    -std::numeric_limits<float>::infinity(), -2.70497e+38f, -1.4698e+37f,
+    -1.22813e+35f,                           -1.20555e+35f, -1.34584e+34f,
+    -1.0079e+32f,                            -6.49364e+26f, -3.06077e+25f,
+    -1.46821e+25f,                           -1.17658e+23f, -1.9617e+22f,
+    -2.7357e+20f,                            -1.48708e+13f, -1.89633e+12f,
+    -4.66622e+11f,                           -2.22581e+11f, -1.45381e+10f,
+    -1.3956e+09f,                            -1.32951e+09f, -1.30721e+09f,
+    -1.19756e+09f,                           -9.26822e+08f, -6.35647e+08f,
+    -4.00037e+08f,                           -1.81227e+08f, -5.09256e+07f,
+    -964300.0f,                              -192446.0f,    -28455.0f,
+    -27194.0f,                               -26401.0f,     -20575.0f,
+    -17069.0f,                               -9167.0f,      -960.178f,
+    -113.0f,                                 -62.0f,        -15.0f,
+    -7.0f,                                   -0.0256635f,   -4.60374e-07f,
+    -3.63759e-10f,                           -4.30175e-14f, -5.27385e-15f,
+    -1.48084e-15f,                           -1.05755e-19f, -3.2995e-21f,
+    -1.67354e-23f,                           -1.11885e-23f, -1.78506e-30f,
+    -5.07594e-31f,                           -3.65799e-31f, -1.43718e-34f,
+    -1.27126e-38f,                           -0.0f,         0.0f,
+    1.17549e-38f,                            1.56657e-37f,  4.08512e-29f,
+    3.31357e-28f,                            6.25073e-22f,  4.1723e-13f,
+    1.44343e-09f,                            5.27004e-08f,  9.48298e-08f,
+    5.57888e-07f,                            4.89988e-05f,  0.244326f,
+    12.4895f,                                19.0f,         47.0f,
+    106.0f,                                  538.324f,      564.536f,
+    819.124f,                                7048.0f,       12611.0f,
+    19878.0f,                                20309.0f,      797056.0f,
+    1.77219e+09f,                            1.51116e+11f,  4.18193e+13f,
+    3.59167e+16f,                            3.38211e+19f,  2.67488e+20f,
+    1.78831e+21f,                            9.20914e+21f,  8.35654e+23f,
+    1.4495e+24f,                             5.94015e+25f,  4.43608e+30f,
+    2.44502e+33f,                            2.61152e+33f,  1.38178e+37f,
+    1.71306e+37f,                            3.31899e+38f,  3.40282e+38f,
+    std::numeric_limits<float>::infinity()};
+
+
+static const double kFloat64Values[] = {
+    -V8_INFINITY,  -4.23878e+275, -5.82632e+265, -6.60355e+220, -6.26172e+212,
+    -2.56222e+211, -4.82408e+201, -1.84106e+157, -1.63662e+127, -1.55772e+100,
+    -1.67813e+72,  -2.3382e+55,   -3.179e+30,    -1.441e+09,    -1.0647e+09,
+    -7.99361e+08,  -5.77375e+08,  -2.20984e+08,  -32757,        -13171,
+    -9970,         -3984,         -107,          -105,          -92,
+    -77,           -61,           -0.000208163,  -1.86685e-06,  -1.17296e-10,
+    -9.26358e-11,  -5.08004e-60,  -1.74753e-65,  -1.06561e-71,  -5.67879e-79,
+    -5.78459e-130, -2.90989e-171, -7.15489e-243, -3.76242e-252, -1.05639e-263,
+    -4.40497e-267, -2.19666e-273, -4.9998e-276,  -5.59821e-278, -2.03855e-282,
+    -5.99335e-283, -7.17554e-284, -3.11744e-309, -0.0,          0.0,
+    2.22507e-308,  1.30127e-270,  7.62898e-260,  4.00313e-249,  3.16829e-233,
+    1.85244e-228,  2.03544e-129,  1.35126e-110,  1.01182e-106,  5.26333e-94,
+    1.35292e-90,   2.85394e-83,   1.78323e-77,   5.4967e-57,    1.03207e-25,
+    4.57401e-25,   1.58738e-05,   2,             125,           2310,
+    9636,          14802,         17168,         28945,         29305,
+    4.81336e+07,   1.41207e+08,   4.65962e+08,   1.40499e+09,   2.12648e+09,
+    8.80006e+30,   1.4446e+45,    1.12164e+54,   2.48188e+89,   6.71121e+102,
+    3.074e+112,    4.9699e+152,   5.58383e+166,  4.30654e+172,  7.08824e+185,
+    9.6586e+214,   2.028e+223,    6.63277e+243,  1.56192e+261,  1.23202e+269,
+    5.72883e+289,  8.5798e+290,   1.40256e+294,  1.79769e+308,  V8_INFINITY};
+
+
+static const int32_t kInt32Values[] = {
+    -2147483647 - 1, -1914954528, -1698749618, -1578693386, -1577976073,
+    -1573998034,     -1529085059, -1499540537, -1299205097, -1090814845,
+    -938186388,      -806828902,  -750927650,  -520676892,  -513661538,
+    -453036354,      -433622833,  -282638793,  -28375,      -27788,
+    -22770,          -18806,      -14173,      -11956,      -11200,
+    -10212,          -8160,       -3751,       -2758,       -1522,
+    -121,            -120,        -118,        -117,        -106,
+    -84,             -80,         -74,         -59,         -52,
+    -48,             -39,         -35,         -17,         -11,
+    -10,             -9,          -7,          -5,          0,
+    9,               12,          17,          23,          29,
+    31,              33,          35,          40,          47,
+    55,              56,          62,          64,          67,
+    68,              69,          74,          79,          84,
+    89,              90,          97,          104,         118,
+    124,             126,         127,         7278,        17787,
+    24136,           24202,       25570,       26680,       30242,
+    32399,           420886487,   642166225,   821912648,   822577803,
+    851385718,       1212241078,  1411419304,  1589626102,  1596437184,
+    1876245816,      1954730266,  2008792749,  2045320228,  2147483647};
+
+
+static const int64_t kInt64Values[] = {
+    V8_INT64_C(-9223372036854775807) - 1, V8_INT64_C(-8974392461363618006),
+    V8_INT64_C(-8874367046689588135),     V8_INT64_C(-8269197512118230839),
+    V8_INT64_C(-8146091527100606733),     V8_INT64_C(-7550917981466150848),
+    V8_INT64_C(-7216590251577894337),     V8_INT64_C(-6464086891160048440),
+    V8_INT64_C(-6365616494908257190),     V8_INT64_C(-6305630541365849726),
+    V8_INT64_C(-5982222642272245453),     V8_INT64_C(-5510103099058504169),
+    V8_INT64_C(-5496838675802432701),     V8_INT64_C(-4047626578868642657),
+    V8_INT64_C(-4033755046900164544),     V8_INT64_C(-3554299241457877041),
+    V8_INT64_C(-2482258764588614470),     V8_INT64_C(-1688515425526875335),
+    V8_INT64_C(-924784137176548532),      V8_INT64_C(-725316567157391307),
+    V8_INT64_C(-439022654781092241),      V8_INT64_C(-105545757668917080),
+    V8_INT64_C(-2088319373),              V8_INT64_C(-2073699916),
+    V8_INT64_C(-1844949911),              V8_INT64_C(-1831090548),
+    V8_INT64_C(-1756711933),              V8_INT64_C(-1559409497),
+    V8_INT64_C(-1281179700),              V8_INT64_C(-1211513985),
+    V8_INT64_C(-1182371520),              V8_INT64_C(-785934753),
+    V8_INT64_C(-767480697),               V8_INT64_C(-705745662),
+    V8_INT64_C(-514362436),               V8_INT64_C(-459916580),
+    V8_INT64_C(-312328082),               V8_INT64_C(-302949707),
+    V8_INT64_C(-285499304),               V8_INT64_C(-125701262),
+    V8_INT64_C(-95139843),                V8_INT64_C(-32768),
+    V8_INT64_C(-27542),                   V8_INT64_C(-23600),
+    V8_INT64_C(-18582),                   V8_INT64_C(-17770),
+    V8_INT64_C(-9086),                    V8_INT64_C(-9010),
+    V8_INT64_C(-8244),                    V8_INT64_C(-2890),
+    V8_INT64_C(-103),                     V8_INT64_C(-34),
+    V8_INT64_C(-27),                      V8_INT64_C(-25),
+    V8_INT64_C(-9),                       V8_INT64_C(-7),
+    V8_INT64_C(0),                        V8_INT64_C(2),
+    V8_INT64_C(38),                       V8_INT64_C(58),
+    V8_INT64_C(65),                       V8_INT64_C(93),
+    V8_INT64_C(111),                      V8_INT64_C(1003),
+    V8_INT64_C(1267),                     V8_INT64_C(12797),
+    V8_INT64_C(23122),                    V8_INT64_C(28200),
+    V8_INT64_C(30888),                    V8_INT64_C(42648848),
+    V8_INT64_C(116836693),                V8_INT64_C(263003643),
+    V8_INT64_C(571039860),                V8_INT64_C(1079398689),
+    V8_INT64_C(1145196402),               V8_INT64_C(1184846321),
+    V8_INT64_C(1758281648),               V8_INT64_C(1859991374),
+    V8_INT64_C(1960251588),               V8_INT64_C(2042443199),
+    V8_INT64_C(296220586027987448),       V8_INT64_C(1015494173071134726),
+    V8_INT64_C(1151237951914455318),      V8_INT64_C(1331941174616854174),
+    V8_INT64_C(2022020418667972654),      V8_INT64_C(2450251424374977035),
+    V8_INT64_C(3668393562685561486),      V8_INT64_C(4858229301215502171),
+    V8_INT64_C(4919426235170669383),      V8_INT64_C(5034286595330341762),
+    V8_INT64_C(5055797915536941182),      V8_INT64_C(6072389716149252074),
+    V8_INT64_C(6185309910199801210),      V8_INT64_C(6297328311011094138),
+    V8_INT64_C(6932372858072165827),      V8_INT64_C(8483640924987737210),
+    V8_INT64_C(8663764179455849203),      V8_INT64_C(8877197042645298254),
+    V8_INT64_C(8901543506779157333),      V8_INT64_C(9223372036854775807)};
+
+
+static const uint32_t kUint32Values[] = {
+    0x00000000, 0x00000001, 0xffffffff, 0x1b09788b, 0x04c5fce8, 0xcc0de5bf,
+    0x273a798e, 0x187937a3, 0xece3af83, 0x5495a16b, 0x0b668ecc, 0x11223344,
+    0x0000009e, 0x00000043, 0x0000af73, 0x0000116b, 0x00658ecc, 0x002b3b4c,
+    0x88776655, 0x70000000, 0x07200000, 0x7fffffff, 0x56123761, 0x7fffff00,
+    0x761c4761, 0x80000000, 0x88888888, 0xa0000000, 0xdddddddd, 0xe0000000,
+    0xeeeeeeee, 0xfffffffd, 0xf0000000, 0x007fffff, 0x003fffff, 0x001fffff,
+    0x000fffff, 0x0007ffff, 0x0003ffff, 0x0001ffff, 0x0000ffff, 0x00007fff,
+    0x00003fff, 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff};
+
+}  // namespace
+
+
+// -----------------------------------------------------------------------------
+// Unary operators
+
+
+namespace {
+
+struct UnaryOperator {
+  const Operator* (MachineOperatorBuilder::*constructor)();
+  const char* constructor_name;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const UnaryOperator& unop) {
+  return os << unop.constructor_name;
+}
+
+
+static const UnaryOperator kUnaryOperators[] = {
+    {&MachineOperatorBuilder::ChangeInt32ToFloat64, "ChangeInt32ToFloat64"},
+    {&MachineOperatorBuilder::ChangeUint32ToFloat64, "ChangeUint32ToFloat64"},
+    {&MachineOperatorBuilder::ChangeFloat64ToInt32, "ChangeFloat64ToInt32"},
+    {&MachineOperatorBuilder::ChangeFloat64ToUint32, "ChangeFloat64ToUint32"},
+    {&MachineOperatorBuilder::ChangeInt32ToInt64, "ChangeInt32ToInt64"},
+    {&MachineOperatorBuilder::ChangeUint32ToUint64, "ChangeUint32ToUint64"},
+    {&MachineOperatorBuilder::TruncateFloat64ToInt32, "TruncateFloat64ToInt32"},
+    {&MachineOperatorBuilder::TruncateInt64ToInt32, "TruncateInt64ToInt32"}};
+
+}  // namespace
+
+
+typedef MachineOperatorReducerTestWithParam<UnaryOperator>
+    MachineUnaryOperatorReducerTest;
+
+
+TEST_P(MachineUnaryOperatorReducerTest, Parameter) {
+  const UnaryOperator unop = GetParam();
+  Reduction reduction =
+      Reduce(graph()->NewNode((machine()->*unop.constructor)(), Parameter(0)));
+  EXPECT_FALSE(reduction.Changed());
+}
+
+
+INSTANTIATE_TEST_CASE_P(MachineOperatorReducerTest,
+                        MachineUnaryOperatorReducerTest,
+                        ::testing::ValuesIn(kUnaryOperators));
+
+
+// -----------------------------------------------------------------------------
+// ChangeFloat32ToFloat64
+
+
+TEST_F(MachineOperatorReducerTest, ChangeFloat32ToFloat64WithConstant) {
+  TRACED_FOREACH(float, x, kFloat32Values) {
+    Reduction reduction = Reduce(graph()->NewNode(
+        machine()->ChangeFloat32ToFloat64(), Float32Constant(x)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsFloat64Constant(x));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeFloat64ToInt32
+
+
+TEST_F(MachineOperatorReducerTest,
+       ChangeFloat64ToInt32WithChangeInt32ToFloat64) {
+  Node* value = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      machine()->ChangeFloat64ToInt32(),
+      graph()->NewNode(machine()->ChangeInt32ToFloat64(), value)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_EQ(value, reduction.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, ChangeFloat64ToInt32WithConstant) {
+  TRACED_FOREACH(int32_t, x, kInt32Values) {
+    Reduction reduction = Reduce(graph()->NewNode(
+        machine()->ChangeFloat64ToInt32(), Float64Constant(FastI2D(x))));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsInt32Constant(x));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeFloat64ToUint32
+
+
+TEST_F(MachineOperatorReducerTest,
+       ChangeFloat64ToUint32WithChangeUint32ToFloat64) {
+  Node* value = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      machine()->ChangeFloat64ToUint32(),
+      graph()->NewNode(machine()->ChangeUint32ToFloat64(), value)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_EQ(value, reduction.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, ChangeFloat64ToUint32WithConstant) {
+  TRACED_FOREACH(uint32_t, x, kUint32Values) {
+    Reduction reduction = Reduce(graph()->NewNode(
+        machine()->ChangeFloat64ToUint32(), Float64Constant(FastUI2D(x))));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsInt32Constant(bit_cast<int32_t>(x)));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeInt32ToFloat64
+
+
+TEST_F(MachineOperatorReducerTest, ChangeInt32ToFloat64WithConstant) {
+  TRACED_FOREACH(int32_t, x, kInt32Values) {
+    Reduction reduction = Reduce(
+        graph()->NewNode(machine()->ChangeInt32ToFloat64(), Int32Constant(x)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsFloat64Constant(FastI2D(x)));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeInt32ToInt64
+
+
+TEST_F(MachineOperatorReducerTest, ChangeInt32ToInt64WithConstant) {
+  TRACED_FOREACH(int32_t, x, kInt32Values) {
+    Reduction reduction = Reduce(
+        graph()->NewNode(machine()->ChangeInt32ToInt64(), Int32Constant(x)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsInt64Constant(x));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeUint32ToFloat64
+
+
+TEST_F(MachineOperatorReducerTest, ChangeUint32ToFloat64WithConstant) {
+  TRACED_FOREACH(uint32_t, x, kUint32Values) {
+    Reduction reduction =
+        Reduce(graph()->NewNode(machine()->ChangeUint32ToFloat64(),
+                                Int32Constant(bit_cast<int32_t>(x))));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsFloat64Constant(FastUI2D(x)));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeUint32ToUint64
+
+
+TEST_F(MachineOperatorReducerTest, ChangeUint32ToUint64WithConstant) {
+  TRACED_FOREACH(uint32_t, x, kUint32Values) {
+    Reduction reduction =
+        Reduce(graph()->NewNode(machine()->ChangeUint32ToUint64(),
+                                Int32Constant(bit_cast<int32_t>(x))));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(),
+                IsInt64Constant(bit_cast<int64_t>(static_cast<uint64_t>(x))));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// TruncateFloat64ToFloat32
+
+
+TEST_F(MachineOperatorReducerTest,
+       TruncateFloat64ToFloat32WithChangeFloat32ToFloat64) {
+  Node* value = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      machine()->TruncateFloat64ToFloat32(),
+      graph()->NewNode(machine()->ChangeFloat32ToFloat64(), value)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_EQ(value, reduction.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, TruncateFloat64ToFloat32WithConstant) {
+  TRACED_FOREACH(double, x, kFloat64Values) {
+    Reduction reduction = Reduce(graph()->NewNode(
+        machine()->TruncateFloat64ToFloat32(), Float64Constant(x)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsFloat32Constant(DoubleToFloat32(x)));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// TruncateFloat64ToInt32
+
+
+TEST_F(MachineOperatorReducerTest,
+       TruncateFloat64ToInt32WithChangeInt32ToFloat64) {
+  Node* value = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      machine()->TruncateFloat64ToInt32(),
+      graph()->NewNode(machine()->ChangeInt32ToFloat64(), value)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_EQ(value, reduction.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, TruncateFloat64ToInt32WithConstant) {
+  TRACED_FOREACH(double, x, kFloat64Values) {
+    Reduction reduction = Reduce(graph()->NewNode(
+        machine()->TruncateFloat64ToInt32(), Float64Constant(x)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsInt32Constant(DoubleToInt32(x)));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// TruncateInt64ToInt32
+
+
+TEST_F(MachineOperatorReducerTest, TruncateInt64ToInt32WithChangeInt32ToInt64) {
+  Node* value = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      machine()->TruncateInt64ToInt32(),
+      graph()->NewNode(machine()->ChangeInt32ToInt64(), value)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_EQ(value, reduction.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, TruncateInt64ToInt32WithConstant) {
+  TRACED_FOREACH(int64_t, x, kInt64Values) {
+    Reduction reduction = Reduce(
+        graph()->NewNode(machine()->TruncateInt64ToInt32(), Int64Constant(x)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(),
+                IsInt32Constant(bit_cast<int32_t>(
+                    static_cast<uint32_t>(bit_cast<uint64_t>(x)))));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Word32Ror
+
+
+TEST_F(MachineOperatorReducerTest, ReduceToWord32RorWithParameters) {
+  Node* value = Parameter(0);
+  Node* shift = Parameter(1);
+  Node* shl = graph()->NewNode(machine()->Word32Shl(), value, shift);
+  Node* shr = graph()->NewNode(
+      machine()->Word32Shr(), value,
+      graph()->NewNode(machine()->Int32Sub(), Int32Constant(32), shift));
+
+  // (x << y) | (x >> (32 - y)) => x ror y
+  Node* node1 = graph()->NewNode(machine()->Word32Or(), shl, shr);
+  Reduction reduction1 = Reduce(node1);
+  EXPECT_TRUE(reduction1.Changed());
+  EXPECT_EQ(reduction1.replacement(), node1);
+  EXPECT_THAT(reduction1.replacement(), IsWord32Ror(value, shift));
+
+  // (x >> (32 - y)) | (x << y) => x ror y
+  Node* node2 = graph()->NewNode(machine()->Word32Or(), shr, shl);
+  Reduction reduction2 = Reduce(node2);
+  EXPECT_TRUE(reduction2.Changed());
+  EXPECT_EQ(reduction2.replacement(), node2);
+  EXPECT_THAT(reduction2.replacement(), IsWord32Ror(value, shift));
+}
+
+
+TEST_F(MachineOperatorReducerTest, ReduceToWord32RorWithConstant) {
+  Node* value = Parameter(0);
+  TRACED_FORRANGE(int32_t, k, 0, 31) {
+    Node* shl =
+        graph()->NewNode(machine()->Word32Shl(), value, Int32Constant(k));
+    Node* shr =
+        graph()->NewNode(machine()->Word32Shr(), value, Int32Constant(32 - k));
+
+    // (x << K) | (x >> (32 - K)) => x ror K
+    Node* node1 = graph()->NewNode(machine()->Word32Or(), shl, shr);
+    Reduction reduction1 = Reduce(node1);
+    EXPECT_TRUE(reduction1.Changed());
+    EXPECT_EQ(reduction1.replacement(), node1);
+    EXPECT_THAT(reduction1.replacement(),
+                IsWord32Ror(value, IsInt32Constant(k)));
+
+    // (x >> (32 - K)) | (x << K) => x ror K
+    Node* node2 = graph()->NewNode(machine()->Word32Or(), shr, shl);
+    Reduction reduction2 = Reduce(node2);
+    EXPECT_TRUE(reduction2.Changed());
+    EXPECT_EQ(reduction2.replacement(), node2);
+    EXPECT_THAT(reduction2.replacement(),
+                IsWord32Ror(value, IsInt32Constant(k)));
+  }
+}
+
+
+TEST_F(MachineOperatorReducerTest, Word32RorWithZeroShift) {
+  Node* value = Parameter(0);
+  Node* node =
+      graph()->NewNode(machine()->Word32Ror(), value, Int32Constant(0));
+  Reduction reduction = Reduce(node);
+  EXPECT_TRUE(reduction.Changed());
+  EXPECT_EQ(reduction.replacement(), value);
+}
+
+
+TEST_F(MachineOperatorReducerTest, Word32RorWithConstants) {
+  TRACED_FOREACH(int32_t, x, kUint32Values) {
+    TRACED_FORRANGE(int32_t, y, 0, 31) {
+      Node* node = graph()->NewNode(machine()->Word32Ror(), Int32Constant(x),
+                                    Int32Constant(y));
+      Reduction reduction = Reduce(node);
+      EXPECT_TRUE(reduction.Changed());
+      EXPECT_THAT(reduction.replacement(),
+                  IsInt32Constant(base::bits::RotateRight32(x, y)));
+    }
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Int32AddWithOverflow
+
+
+TEST_F(MachineOperatorReducerTest, Int32AddWithOverflowWithZero) {
+  Node* p0 = Parameter(0);
+  {
+    Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(),
+                                 Int32Constant(0), p0);
+
+    Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+
+    r = Reduce(graph()->NewNode(common()->Projection(0), add));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_EQ(p0, r.replacement());
+  }
+  {
+    Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), p0,
+                                 Int32Constant(0));
+
+    Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+
+    r = Reduce(graph()->NewNode(common()->Projection(0), add));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_EQ(p0, r.replacement());
+  }
+}
+
+
+TEST_F(MachineOperatorReducerTest, Int32AddWithOverflowWithConstant) {
+  TRACED_FOREACH(int32_t, x, kInt32Values) {
+    TRACED_FOREACH(int32_t, y, kInt32Values) {
+      int32_t z;
+      Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(),
+                                   Int32Constant(x), Int32Constant(y));
+
+      Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+      ASSERT_TRUE(r.Changed());
+      EXPECT_THAT(r.replacement(),
+                  IsInt32Constant(base::bits::SignedAddOverflow32(x, y, &z)));
+
+      r = Reduce(graph()->NewNode(common()->Projection(0), add));
+      ASSERT_TRUE(r.Changed());
+      EXPECT_THAT(r.replacement(), IsInt32Constant(z));
+    }
+  }
+}
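The overflow tests rely on Projection(1) of an Int32AddWithOverflow node being the overflow bit and Projection(0) being the wrapped 32-bit sum, which the reducer folds via base::bits::SignedAddOverflow32. A standalone sketch of an equivalent check, as an illustrative reimplementation rather than the V8 helper, assuming two's-complement int32_t conversion:

#include <cassert>
#include <cstdint>

// Wrapping 32-bit signed add; returns true iff the mathematical sum does
// not fit in int32_t (the value Projection(1) is folded to).
bool SignedAddOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
  uint32_t sum = static_cast<uint32_t>(lhs) + static_cast<uint32_t>(rhs);
  *val = static_cast<int32_t>(sum);
  // Overflow occurs iff both operands differ in sign from the result.
  return ((lhs ^ *val) & (rhs ^ *val)) < 0;
}

int main() {
  int32_t z;
  assert(!SignedAddOverflow32(1, 2, &z) && z == 3);
  assert(SignedAddOverflow32(INT32_MAX, 1, &z) && z == INT32_MIN);
  return 0;
}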
+
+
+// -----------------------------------------------------------------------------
+// Int32SubWithOverflow
+
+
+TEST_F(MachineOperatorReducerTest, Int32SubWithOverflowWithZero) {
+  Node* p0 = Parameter(0);
+  Node* add =
+      graph()->NewNode(machine()->Int32SubWithOverflow(), p0, Int32Constant(0));
+
+  Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+
+  r = Reduce(graph()->NewNode(common()->Projection(0), add));
+  ASSERT_TRUE(r.Changed());
+  EXPECT_EQ(p0, r.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, Int32SubWithOverflowWithConstant) {
+  TRACED_FOREACH(int32_t, x, kInt32Values) {
+    TRACED_FOREACH(int32_t, y, kInt32Values) {
+      int32_t z;
+      Node* add = graph()->NewNode(machine()->Int32SubWithOverflow(),
+                                   Int32Constant(x), Int32Constant(y));
+
+      Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+      ASSERT_TRUE(r.Changed());
+      EXPECT_THAT(r.replacement(),
+                  IsInt32Constant(base::bits::SignedSubOverflow32(x, y, &z)));
+
+      r = Reduce(graph()->NewNode(common()->Projection(0), add));
+      ASSERT_TRUE(r.Changed());
+      EXPECT_THAT(r.replacement(), IsInt32Constant(z));
+    }
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
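The Word32Ror tests above fold against base::bits::RotateRight32. A minimal standalone equivalent, illustrative rather than the V8 helper, showing the identity the constant-folding case relies on and why a zero shift is simply the input value:

#include <cassert>
#include <cstdint>

// Illustrative 32-bit rotate-right; masks the shift so that rotating by 0
// (or a multiple of 32) returns the value unchanged without shifting by 32.
uint32_t RotateRight32(uint32_t value, uint32_t shift) {
  shift &= 31;
  if (shift == 0) return value;
  return (value >> shift) | (value << (32 - shift));
}

int main() {
  assert(RotateRight32(0x80000001u, 1) == 0xC0000000u);
  assert(RotateRight32(0x12345678u, 0) == 0x12345678u);  // ror 0 => x
  return 0;
}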
diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc
new file mode 100644
index 0000000..9328547
--- /dev/null
+++ b/src/compiler/machine-operator-reducer.cc
@@ -0,0 +1,504 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/machine-operator-reducer.h"
+
+#include "src/base/bits.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+MachineOperatorReducer::MachineOperatorReducer(JSGraph* jsgraph)
+    : jsgraph_(jsgraph) {}
+
+
+MachineOperatorReducer::~MachineOperatorReducer() {}
+
+
+Node* MachineOperatorReducer::Float32Constant(volatile float value) {
+  return graph()->NewNode(common()->Float32Constant(value));
+}
+
+
+Node* MachineOperatorReducer::Float64Constant(volatile double value) {
+  return jsgraph()->Float64Constant(value);
+}
+
+
+Node* MachineOperatorReducer::Int32Constant(int32_t value) {
+  return jsgraph()->Int32Constant(value);
+}
+
+
+Node* MachineOperatorReducer::Int64Constant(int64_t value) {
+  return graph()->NewNode(common()->Int64Constant(value));
+}
+
+
+// Perform constant folding and strength reduction on machine operators.
+Reduction MachineOperatorReducer::Reduce(Node* node) {
+  switch (node->opcode()) {
+    case IrOpcode::kProjection:
+      return ReduceProjection(OpParameter<size_t>(node), node->InputAt(0));
+    case IrOpcode::kWord32And: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.right().node());  // x & 0  => 0
+      if (m.right().Is(-1)) return Replace(m.left().node());  // x & -1 => x
+      if (m.IsFoldable()) {                                   // K & K  => K
+        return ReplaceInt32(m.left().Value() & m.right().Value());
+      }
+      if (m.LeftEqualsRight()) return Replace(m.left().node());  // x & x => x
+      break;
+    }
+    case IrOpcode::kWord32Or: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.left().node());    // x | 0  => x
+      if (m.right().Is(-1)) return Replace(m.right().node());  // x | -1 => -1
+      if (m.IsFoldable()) {                                    // K | K  => K
+        return ReplaceInt32(m.left().Value() | m.right().Value());
+      }
+      if (m.LeftEqualsRight()) return Replace(m.left().node());  // x | x => x
+      if (m.left().IsWord32Shl() && m.right().IsWord32Shr()) {
+        Int32BinopMatcher mleft(m.left().node());
+        Int32BinopMatcher mright(m.right().node());
+        if (mleft.left().node() == mright.left().node()) {
+          // (x << y) | (x >> (32 - y)) => x ror y
+          if (mright.right().IsInt32Sub()) {
+            Int32BinopMatcher mrightright(mright.right().node());
+            if (mrightright.left().Is(32) &&
+                mrightright.right().node() == mleft.right().node()) {
+              node->set_op(machine()->Word32Ror());
+              node->ReplaceInput(0, mleft.left().node());
+              node->ReplaceInput(1, mleft.right().node());
+              return Changed(node);
+            }
+          }
+          // (x << K) | (x >> (32 - K)) => x ror K
+          if (mleft.right().IsInRange(0, 31) &&
+              mright.right().Is(32 - mleft.right().Value())) {
+            node->set_op(machine()->Word32Ror());
+            node->ReplaceInput(0, mleft.left().node());
+            node->ReplaceInput(1, mleft.right().node());
+            return Changed(node);
+          }
+        }
+      }
+      if (m.left().IsWord32Shr() && m.right().IsWord32Shl()) {
+        // (x >> (32 - y)) | (x << y)  => x ror y
+        Int32BinopMatcher mleft(m.left().node());
+        Int32BinopMatcher mright(m.right().node());
+        if (mleft.left().node() == mright.left().node()) {
+          if (mleft.right().IsInt32Sub()) {
+            Int32BinopMatcher mleftright(mleft.right().node());
+            if (mleftright.left().Is(32) &&
+                mleftright.right().node() == mright.right().node()) {
+              node->set_op(machine()->Word32Ror());
+              node->ReplaceInput(0, mright.left().node());
+              node->ReplaceInput(1, mright.right().node());
+              return Changed(node);
+            }
+          }
+          // (x >> (32 - K)) | (x << K) => x ror K
+          if (mright.right().IsInRange(0, 31) &&
+              mleft.right().Is(32 - mright.right().Value())) {
+            node->set_op(machine()->Word32Ror());
+            node->ReplaceInput(0, mright.left().node());
+            node->ReplaceInput(1, mright.right().node());
+            return Changed(node);
+          }
+        }
+      }
+      break;
+    }
+    case IrOpcode::kWord32Xor: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.left().node());  // x ^ 0 => x
+      if (m.IsFoldable()) {                                  // K ^ K => K
+        return ReplaceInt32(m.left().Value() ^ m.right().Value());
+      }
+      if (m.LeftEqualsRight()) return ReplaceInt32(0);  // x ^ x => 0
+      break;
+    }
+    case IrOpcode::kWord32Shl: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.left().node());  // x << 0 => x
+      if (m.IsFoldable()) {                                  // K << K => K
+        return ReplaceInt32(m.left().Value() << m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kWord32Shr: {
+      Uint32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.left().node());  // x >>> 0 => x
+      if (m.IsFoldable()) {                                  // K >>> K => K
+        return ReplaceInt32(m.left().Value() >> m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kWord32Sar: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.left().node());  // x >> 0 => x
+      if (m.IsFoldable()) {                                  // K >> K => K
+        return ReplaceInt32(m.left().Value() >> m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kWord32Ror: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.left().node());  // x ror 0 => x
+      if (m.IsFoldable()) {                                  // K ror K => K
+        return ReplaceInt32(
+            base::bits::RotateRight32(m.left().Value(), m.right().Value()));
+      }
+      break;
+    }
+    case IrOpcode::kWord32Equal: {
+      Int32BinopMatcher m(node);
+      if (m.IsFoldable()) {  // K == K => K
+        return ReplaceBool(m.left().Value() == m.right().Value());
+      }
+      if (m.left().IsInt32Sub() && m.right().Is(0)) {  // x - y == 0 => x == y
+        Int32BinopMatcher msub(m.left().node());
+        node->ReplaceInput(0, msub.left().node());
+        node->ReplaceInput(1, msub.right().node());
+        return Changed(node);
+      }
+      // TODO(turbofan): fold HeapConstant, ExternalReference, pointer compares
+      if (m.LeftEqualsRight()) return ReplaceBool(true);  // x == x => true
+      break;
+    }
+    case IrOpcode::kInt32Add: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.left().node());  // x + 0 => x
+      if (m.IsFoldable()) {                                  // K + K => K
+        return ReplaceInt32(static_cast<uint32_t>(m.left().Value()) +
+                            static_cast<uint32_t>(m.right().Value()));
+      }
+      break;
+    }
+    case IrOpcode::kInt32Sub: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.left().node());  // x - 0 => x
+      if (m.IsFoldable()) {                                  // K - K => K
+        return ReplaceInt32(static_cast<uint32_t>(m.left().Value()) -
+                            static_cast<uint32_t>(m.right().Value()));
+      }
+      if (m.LeftEqualsRight()) return ReplaceInt32(0);  // x - x => 0
+      break;
+    }
+    case IrOpcode::kInt32Mul: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.right().node());  // x * 0 => 0
+      if (m.right().Is(1)) return Replace(m.left().node());   // x * 1 => x
+      if (m.IsFoldable()) {                                   // K * K => K
+        return ReplaceInt32(m.left().Value() * m.right().Value());
+      }
+      if (m.right().Is(-1)) {  // x * -1 => 0 - x
+        node->set_op(machine()->Int32Sub());
+        node->ReplaceInput(0, Int32Constant(0));
+        node->ReplaceInput(1, m.left().node());
+        return Changed(node);
+      }
+      if (m.right().IsPowerOf2()) {  // x * 2^n => x << n
+        node->set_op(machine()->Word32Shl());
+        node->ReplaceInput(1, Int32Constant(WhichPowerOf2(m.right().Value())));
+        return Changed(node);
+      }
+      break;
+    }
+    case IrOpcode::kInt32Div: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(1)) return Replace(m.left().node());  // x / 1 => x
+      // TODO(turbofan): if (m.left().Is(0))
+      // TODO(turbofan): if (m.right().IsPowerOf2())
+      // TODO(turbofan): if (m.right().Is(0))
+      // TODO(turbofan): if (m.LeftEqualsRight())
+      if (m.IsFoldable() && !m.right().Is(0)) {  // K / K => K
+        if (m.right().Is(-1)) return ReplaceInt32(-m.left().Value());
+        return ReplaceInt32(m.left().Value() / m.right().Value());
+      }
+      if (m.right().Is(-1)) {  // x / -1 => 0 - x
+        node->set_op(machine()->Int32Sub());
+        node->ReplaceInput(0, Int32Constant(0));
+        node->ReplaceInput(1, m.left().node());
+        return Changed(node);
+      }
+      break;
+    }
+    case IrOpcode::kInt32UDiv: {
+      Uint32BinopMatcher m(node);
+      if (m.right().Is(1)) return Replace(m.left().node());  // x / 1 => x
+      // TODO(turbofan): if (m.left().Is(0))
+      // TODO(turbofan): if (m.right().Is(0))
+      // TODO(turbofan): if (m.LeftEqualsRight())
+      if (m.IsFoldable() && !m.right().Is(0)) {  // K / K => K
+        return ReplaceInt32(m.left().Value() / m.right().Value());
+      }
+      if (m.right().IsPowerOf2()) {  // x / 2^n => x >> n
+        node->set_op(machine()->Word32Shr());
+        node->ReplaceInput(1, Int32Constant(WhichPowerOf2(m.right().Value())));
+        return Changed(node);
+      }
+      break;
+    }
+    case IrOpcode::kInt32Mod: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(1)) return ReplaceInt32(0);   // x % 1  => 0
+      if (m.right().Is(-1)) return ReplaceInt32(0);  // x % -1 => 0
+      // TODO(turbofan): if (m.left().Is(0))
+      // TODO(turbofan): if (m.right().IsPowerOf2())
+      // TODO(turbofan): if (m.right().Is(0))
+      // TODO(turbofan): if (m.LeftEqualsRight())
+      if (m.IsFoldable() && !m.right().Is(0)) {  // K % K => K
+        return ReplaceInt32(m.left().Value() % m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kInt32UMod: {
+      Uint32BinopMatcher m(node);
+      if (m.right().Is(1)) return ReplaceInt32(0);  // x % 1 => 0
+      // TODO(turbofan): if (m.left().Is(0))
+      // TODO(turbofan): if (m.right().Is(0))
+      // TODO(turbofan): if (m.LeftEqualsRight())
+      if (m.IsFoldable() && !m.right().Is(0)) {  // K % K => K
+        return ReplaceInt32(m.left().Value() % m.right().Value());
+      }
+      if (m.right().IsPowerOf2()) {  // x % 2^n => x & 2^n-1
+        node->set_op(machine()->Word32And());
+        node->ReplaceInput(1, Int32Constant(m.right().Value() - 1));
+        return Changed(node);
+      }
+      break;
+    }
+    case IrOpcode::kInt32LessThan: {
+      Int32BinopMatcher m(node);
+      if (m.IsFoldable()) {  // K < K => K
+        return ReplaceBool(m.left().Value() < m.right().Value());
+      }
+      if (m.left().IsInt32Sub() && m.right().Is(0)) {  // x - y < 0 => x < y
+        Int32BinopMatcher msub(m.left().node());
+        node->ReplaceInput(0, msub.left().node());
+        node->ReplaceInput(1, msub.right().node());
+        return Changed(node);
+      }
+      if (m.left().Is(0) && m.right().IsInt32Sub()) {  // 0 < x - y => y < x
+        Int32BinopMatcher msub(m.right().node());
+        node->ReplaceInput(0, msub.right().node());
+        node->ReplaceInput(1, msub.left().node());
+        return Changed(node);
+      }
+      if (m.LeftEqualsRight()) return ReplaceBool(false);  // x < x => false
+      break;
+    }
+    case IrOpcode::kInt32LessThanOrEqual: {
+      Int32BinopMatcher m(node);
+      if (m.IsFoldable()) {  // K <= K => K
+        return ReplaceBool(m.left().Value() <= m.right().Value());
+      }
+      if (m.left().IsInt32Sub() && m.right().Is(0)) {  // x - y <= 0 => x <= y
+        Int32BinopMatcher msub(m.left().node());
+        node->ReplaceInput(0, msub.left().node());
+        node->ReplaceInput(1, msub.right().node());
+        return Changed(node);
+      }
+      if (m.left().Is(0) && m.right().IsInt32Sub()) {  // 0 <= x - y => y <= x
+        Int32BinopMatcher msub(m.right().node());
+        node->ReplaceInput(0, msub.right().node());
+        node->ReplaceInput(1, msub.left().node());
+        return Changed(node);
+      }
+      if (m.LeftEqualsRight()) return ReplaceBool(true);  // x <= x => true
+      break;
+    }
+    case IrOpcode::kUint32LessThan: {
+      Uint32BinopMatcher m(node);
+      if (m.left().Is(kMaxUInt32)) return ReplaceBool(false);  // M < x => false
+      if (m.right().Is(0)) return ReplaceBool(false);          // x < 0 => false
+      if (m.IsFoldable()) {                                    // K < K => K
+        return ReplaceBool(m.left().Value() < m.right().Value());
+      }
+      if (m.LeftEqualsRight()) return ReplaceBool(false);  // x < x => false
+      break;
+    }
+    case IrOpcode::kUint32LessThanOrEqual: {
+      Uint32BinopMatcher m(node);
+      if (m.left().Is(0)) return ReplaceBool(true);            // 0 <= x => true
+      if (m.right().Is(kMaxUInt32)) return ReplaceBool(true);  // x <= M => true
+      if (m.IsFoldable()) {                                    // K <= K => K
+        return ReplaceBool(m.left().Value() <= m.right().Value());
+      }
+      if (m.LeftEqualsRight()) return ReplaceBool(true);  // x <= x => true
+      break;
+    }
+    case IrOpcode::kFloat64Add: {
+      Float64BinopMatcher m(node);
+      if (m.IsFoldable()) {  // K + K => K
+        return ReplaceFloat64(m.left().Value() + m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kFloat64Sub: {
+      Float64BinopMatcher m(node);
+      if (m.IsFoldable()) {  // K - K => K
+        return ReplaceFloat64(m.left().Value() - m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kFloat64Mul: {
+      Float64BinopMatcher m(node);
+      if (m.right().Is(1)) return Replace(m.left().node());  // x * 1.0 => x
+      if (m.right().IsNaN()) {                               // x * NaN => NaN
+        return Replace(m.right().node());
+      }
+      if (m.IsFoldable()) {  // K * K => K
+        return ReplaceFloat64(m.left().Value() * m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kFloat64Div: {
+      Float64BinopMatcher m(node);
+      if (m.right().Is(1)) return Replace(m.left().node());  // x / 1.0 => x
+      if (m.right().IsNaN()) {                               // x / NaN => NaN
+        return Replace(m.right().node());
+      }
+      if (m.left().IsNaN()) {  // NaN / x => NaN
+        return Replace(m.left().node());
+      }
+      if (m.IsFoldable()) {  // K / K => K
+        return ReplaceFloat64(m.left().Value() / m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kFloat64Mod: {
+      Float64BinopMatcher m(node);
+      if (m.right().IsNaN()) {  // x % NaN => NaN
+        return Replace(m.right().node());
+      }
+      if (m.left().IsNaN()) {  // NaN % x => NaN
+        return Replace(m.left().node());
+      }
+      if (m.IsFoldable()) {  // K % K => K
+        return ReplaceFloat64(modulo(m.left().Value(), m.right().Value()));
+      }
+      break;
+    }
+    case IrOpcode::kChangeFloat32ToFloat64: {
+      Float32Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(m.Value());
+      break;
+    }
+    case IrOpcode::kChangeFloat64ToInt32: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceInt32(FastD2I(m.Value()));
+      if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
+      break;
+    }
+    case IrOpcode::kChangeFloat64ToUint32: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceInt32(FastD2UI(m.Value()));
+      if (m.IsChangeUint32ToFloat64()) return Replace(m.node()->InputAt(0));
+      break;
+    }
+    case IrOpcode::kChangeInt32ToFloat64: {
+      Int32Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(FastI2D(m.Value()));
+      break;
+    }
+    case IrOpcode::kChangeInt32ToInt64: {
+      Int32Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceInt64(m.Value());
+      break;
+    }
+    case IrOpcode::kChangeUint32ToFloat64: {
+      Uint32Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(FastUI2D(m.Value()));
+      break;
+    }
+    case IrOpcode::kChangeUint32ToUint64: {
+      Uint32Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceInt64(static_cast<uint64_t>(m.Value()));
+      break;
+    }
+    case IrOpcode::kTruncateFloat64ToInt32: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
+      if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
+      break;
+    }
+    case IrOpcode::kTruncateInt64ToInt32: {
+      Int64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceInt32(static_cast<int32_t>(m.Value()));
+      if (m.IsChangeInt32ToInt64()) return Replace(m.node()->InputAt(0));
+      break;
+    }
+    case IrOpcode::kTruncateFloat64ToFloat32: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat32(DoubleToFloat32(m.Value()));
+      if (m.IsChangeFloat32ToFloat64()) return Replace(m.node()->InputAt(0));
+      break;
+    }
+    default:
+      break;
+  }
+  return NoChange();
+}
+
+
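+// Reduces Projection(index, ...) uses of the *WithOverflow operators handled
+// below: index 0 selects the arithmetic result and index 1 the overflow bit.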
+Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) {
+  switch (node->opcode()) {
+    case IrOpcode::kInt32AddWithOverflow: {
+      DCHECK(index == 0 || index == 1);
+      Int32BinopMatcher m(node);
+      if (m.IsFoldable()) {
+        int32_t val;
+        bool ovf = base::bits::SignedAddOverflow32(m.left().Value(),
+                                                   m.right().Value(), &val);
+        return ReplaceInt32((index == 0) ? val : ovf);
+      }
+      if (m.right().Is(0)) {
+        return (index == 0) ? Replace(m.left().node()) : ReplaceInt32(0);
+      }
+      break;
+    }
+    case IrOpcode::kInt32SubWithOverflow: {
+      DCHECK(index == 0 || index == 1);
+      Int32BinopMatcher m(node);
+      if (m.IsFoldable()) {
+        int32_t val;
+        bool ovf = base::bits::SignedSubOverflow32(m.left().Value(),
+                                                   m.right().Value(), &val);
+        return ReplaceInt32((index == 0) ? val : ovf);
+      }
+      if (m.right().Is(0)) {
+        return (index == 0) ? Replace(m.left().node()) : ReplaceInt32(0);
+      }
+      break;
+    }
+    default:
+      break;
+  }
+  return NoChange();
+}
+
+
+CommonOperatorBuilder* MachineOperatorReducer::common() const {
+  return jsgraph()->common();
+}
+
+
+MachineOperatorBuilder* MachineOperatorReducer::machine() const {
+  return jsgraph()->machine();
+}
+
+
+Graph* MachineOperatorReducer::graph() const { return jsgraph()->graph(); }
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/machine-operator-reducer.h b/src/compiler/machine-operator-reducer.h
new file mode 100644
index 0000000..c79ceae
--- /dev/null
+++ b/src/compiler/machine-operator-reducer.h
@@ -0,0 +1,63 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_
+#define V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/machine-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class JSGraph;
+
+
+// Performs constant folding and strength reduction on nodes that have
+// machine operators.
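+// For example, Int32Add(x, 0) reduces to x and Int32Mul(x, 2^n) to
+// Word32Shl(x, n); see machine-operator-reducer.cc for the full set.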
+class MachineOperatorReducer FINAL : public Reducer {
+ public:
+  explicit MachineOperatorReducer(JSGraph* jsgraph);
+  ~MachineOperatorReducer();
+
+  virtual Reduction Reduce(Node* node) OVERRIDE;
+
+ private:
+  Node* Float32Constant(volatile float value);
+  Node* Float64Constant(volatile double value);
+  Node* Int32Constant(int32_t value);
+  Node* Int64Constant(int64_t value);
+
+  Reduction ReplaceBool(bool value) { return ReplaceInt32(value ? 1 : 0); }
+  Reduction ReplaceFloat32(volatile float value) {
+    return Replace(Float32Constant(value));
+  }
+  Reduction ReplaceFloat64(volatile double value) {
+    return Replace(Float64Constant(value));
+  }
+  Reduction ReplaceInt32(int32_t value) {
+    return Replace(Int32Constant(value));
+  }
+  Reduction ReplaceInt64(int64_t value) {
+    return Replace(Int64Constant(value));
+  }
+
+  Reduction ReduceProjection(size_t index, Node* node);
+
+  Graph* graph() const;
+  JSGraph* jsgraph() const { return jsgraph_; }
+  CommonOperatorBuilder* common() const;
+  MachineOperatorBuilder* machine() const;
+
+  JSGraph* jsgraph_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_
diff --git a/src/compiler/machine-operator-unittest.cc b/src/compiler/machine-operator-unittest.cc
new file mode 100644
index 0000000..cb93ce7
--- /dev/null
+++ b/src/compiler/machine-operator-unittest.cc
@@ -0,0 +1,325 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/operator-properties-inl.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#if GTEST_HAS_COMBINE
+
+// TODO(bmeurer): Find a new home for these.
+inline std::ostream& operator<<(std::ostream& os, const MachineType& type) {
+  OStringStream ost;
+  ost << type;
+  return os << ost.c_str();
+}
+inline std::ostream& operator<<(std::ostream& os,
+                                const WriteBarrierKind& write_barrier_kind) {
+  OStringStream ost;
+  ost << write_barrier_kind;
+  return os << ost.c_str();
+}
+
+
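+// Test fixture parameterized over a (word representation, T) tuple: type()
+// yields the word size for the MachineOperatorBuilder under test and
+// GetParam() the T-typed parameter.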
+template <typename T>
+class MachineOperatorTestWithParam
+    : public ::testing::TestWithParam< ::testing::tuple<MachineType, T> > {
+ protected:
+  MachineType type() const { return ::testing::get<0>(B::GetParam()); }
+  const T& GetParam() const { return ::testing::get<1>(B::GetParam()); }
+
+ private:
+  typedef ::testing::TestWithParam< ::testing::tuple<MachineType, T> > B;
+};
+
+
+namespace {
+
+const MachineType kMachineReps[] = {kRepWord32, kRepWord64};
+
+
+const MachineType kMachineTypes[] = {
+    kMachFloat32, kMachFloat64,   kMachInt8,   kMachUint8,  kMachInt16,
+    kMachUint16,  kMachInt32,     kMachUint32, kMachInt64,  kMachUint64,
+    kMachPtr,     kMachAnyTagged, kRepBit,     kRepWord8,   kRepWord16,
+    kRepWord32,   kRepWord64,     kRepFloat32, kRepFloat64, kRepTagged};
+
+}  // namespace
+
+
+// -----------------------------------------------------------------------------
+// Load operator.
+
+
+typedef MachineOperatorTestWithParam<LoadRepresentation>
+    MachineLoadOperatorTest;
+
+
+TEST_P(MachineLoadOperatorTest, InstancesAreGloballyShared) {
+  MachineOperatorBuilder machine1(type());
+  MachineOperatorBuilder machine2(type());
+  EXPECT_EQ(machine1.Load(GetParam()), machine2.Load(GetParam()));
+}
+
+
+TEST_P(MachineLoadOperatorTest, NumberOfInputsAndOutputs) {
+  MachineOperatorBuilder machine(type());
+  const Operator* op = machine.Load(GetParam());
+
+  EXPECT_EQ(2, OperatorProperties::GetValueInputCount(op));
+  EXPECT_EQ(1, OperatorProperties::GetEffectInputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetControlInputCount(op));
+  EXPECT_EQ(3, OperatorProperties::GetTotalInputCount(op));
+
+  EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
+  EXPECT_EQ(1, OperatorProperties::GetEffectOutputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+}
+
+
+TEST_P(MachineLoadOperatorTest, OpcodeIsCorrect) {
+  MachineOperatorBuilder machine(type());
+  EXPECT_EQ(IrOpcode::kLoad, machine.Load(GetParam())->opcode());
+}
+
+
+TEST_P(MachineLoadOperatorTest, ParameterIsCorrect) {
+  MachineOperatorBuilder machine(type());
+  EXPECT_EQ(GetParam(),
+            OpParameter<LoadRepresentation>(machine.Load(GetParam())));
+}
+
+
+INSTANTIATE_TEST_CASE_P(MachineOperatorTest, MachineLoadOperatorTest,
+                        ::testing::Combine(::testing::ValuesIn(kMachineReps),
+                                           ::testing::ValuesIn(kMachineTypes)));
+
+
+// -----------------------------------------------------------------------------
+// Store operator.
+
+
+class MachineStoreOperatorTest
+    : public MachineOperatorTestWithParam<
+          ::testing::tuple<MachineType, WriteBarrierKind> > {
+ protected:
+  StoreRepresentation GetParam() const {
+    return StoreRepresentation(
+        ::testing::get<0>(MachineOperatorTestWithParam<
+            ::testing::tuple<MachineType, WriteBarrierKind> >::GetParam()),
+        ::testing::get<1>(MachineOperatorTestWithParam<
+            ::testing::tuple<MachineType, WriteBarrierKind> >::GetParam()));
+  }
+};
+
+
+TEST_P(MachineStoreOperatorTest, InstancesAreGloballyShared) {
+  MachineOperatorBuilder machine1(type());
+  MachineOperatorBuilder machine2(type());
+  EXPECT_EQ(machine1.Store(GetParam()), machine2.Store(GetParam()));
+}
+
+
+TEST_P(MachineStoreOperatorTest, NumberOfInputsAndOutputs) {
+  MachineOperatorBuilder machine(type());
+  const Operator* op = machine.Store(GetParam());
+
+  EXPECT_EQ(3, OperatorProperties::GetValueInputCount(op));
+  EXPECT_EQ(1, OperatorProperties::GetEffectInputCount(op));
+  EXPECT_EQ(1, OperatorProperties::GetControlInputCount(op));
+  EXPECT_EQ(5, OperatorProperties::GetTotalInputCount(op));
+
+  EXPECT_EQ(0, OperatorProperties::GetValueOutputCount(op));
+  EXPECT_EQ(1, OperatorProperties::GetEffectOutputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+}
+
+
+TEST_P(MachineStoreOperatorTest, OpcodeIsCorrect) {
+  MachineOperatorBuilder machine(type());
+  EXPECT_EQ(IrOpcode::kStore, machine.Store(GetParam())->opcode());
+}
+
+
+TEST_P(MachineStoreOperatorTest, ParameterIsCorrect) {
+  MachineOperatorBuilder machine(type());
+  EXPECT_EQ(GetParam(),
+            OpParameter<StoreRepresentation>(machine.Store(GetParam())));
+}
+
+
+INSTANTIATE_TEST_CASE_P(
+    MachineOperatorTest, MachineStoreOperatorTest,
+    ::testing::Combine(
+        ::testing::ValuesIn(kMachineReps),
+        ::testing::Combine(::testing::ValuesIn(kMachineTypes),
+                           ::testing::Values(kNoWriteBarrier,
+                                             kFullWriteBarrier))));
+
+
+// -----------------------------------------------------------------------------
+// Pure operators.
+
+
+namespace {
+
+struct PureOperator {
+  const Operator* (MachineOperatorBuilder::*constructor)();
+  IrOpcode::Value opcode;
+  int value_input_count;
+  int value_output_count;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const PureOperator& pop) {
+  return os << IrOpcode::Mnemonic(pop.opcode);
+}
+
+
+const PureOperator kPureOperators[] = {
+#define PURE(Name, input_count, output_count)                      \
+  {                                                                \
+    &MachineOperatorBuilder::Name, IrOpcode::k##Name, input_count, \
+        output_count                                               \
+  }
+    PURE(Word32And, 2, 1),                PURE(Word32Or, 2, 1),
+    PURE(Word32Xor, 2, 1),                PURE(Word32Shl, 2, 1),
+    PURE(Word32Shr, 2, 1),                PURE(Word32Sar, 2, 1),
+    PURE(Word32Ror, 2, 1),                PURE(Word32Equal, 2, 1),
+    PURE(Word64And, 2, 1),                PURE(Word64Or, 2, 1),
+    PURE(Word64Xor, 2, 1),                PURE(Word64Shl, 2, 1),
+    PURE(Word64Shr, 2, 1),                PURE(Word64Sar, 2, 1),
+    PURE(Word64Ror, 2, 1),                PURE(Word64Equal, 2, 1),
+    PURE(Int32Add, 2, 1),                 PURE(Int32AddWithOverflow, 2, 2),
+    PURE(Int32Sub, 2, 1),                 PURE(Int32SubWithOverflow, 2, 2),
+    PURE(Int32Mul, 2, 1),                 PURE(Int32Div, 2, 1),
+    PURE(Int32UDiv, 2, 1),                PURE(Int32Mod, 2, 1),
+    PURE(Int32UMod, 2, 1),                PURE(Int32LessThan, 2, 1),
+    PURE(Int32LessThanOrEqual, 2, 1),     PURE(Uint32LessThan, 2, 1),
+    PURE(Uint32LessThanOrEqual, 2, 1),    PURE(Int64Add, 2, 1),
+    PURE(Int64Sub, 2, 1),                 PURE(Int64Mul, 2, 1),
+    PURE(Int64Div, 2, 1),                 PURE(Int64UDiv, 2, 1),
+    PURE(Int64Mod, 2, 1),                 PURE(Int64UMod, 2, 1),
+    PURE(Int64LessThan, 2, 1),            PURE(Int64LessThanOrEqual, 2, 1),
+    PURE(ChangeFloat32ToFloat64, 1, 1),   PURE(ChangeFloat64ToInt32, 1, 1),
+    PURE(ChangeFloat64ToUint32, 1, 1),    PURE(ChangeInt32ToInt64, 1, 1),
+    PURE(ChangeUint32ToFloat64, 1, 1),    PURE(ChangeUint32ToUint64, 1, 1),
+    PURE(TruncateFloat64ToFloat32, 1, 1), PURE(TruncateFloat64ToInt32, 1, 1),
+    PURE(TruncateInt64ToInt32, 1, 1),     PURE(Float64Add, 2, 1),
+    PURE(Float64Sub, 2, 1),               PURE(Float64Mul, 2, 1),
+    PURE(Float64Div, 2, 1),               PURE(Float64Mod, 2, 1),
+    PURE(Float64Sqrt, 1, 1),              PURE(Float64Equal, 2, 1),
+    PURE(Float64LessThan, 2, 1),          PURE(Float64LessThanOrEqual, 2, 1)
+#undef PURE
+};
+
+
+typedef MachineOperatorTestWithParam<PureOperator> MachinePureOperatorTest;
+
+}  // namespace
+
+
+TEST_P(MachinePureOperatorTest, InstancesAreGloballyShared) {
+  const PureOperator& pop = GetParam();
+  MachineOperatorBuilder machine1(type());
+  MachineOperatorBuilder machine2(type());
+  EXPECT_EQ((machine1.*pop.constructor)(), (machine2.*pop.constructor)());
+}
+
+
+TEST_P(MachinePureOperatorTest, NumberOfInputsAndOutputs) {
+  MachineOperatorBuilder machine(type());
+  const PureOperator& pop = GetParam();
+  const Operator* op = (machine.*pop.constructor)();
+
+  EXPECT_EQ(pop.value_input_count, OperatorProperties::GetValueInputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetEffectInputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetControlInputCount(op));
+  EXPECT_EQ(pop.value_input_count, OperatorProperties::GetTotalInputCount(op));
+
+  EXPECT_EQ(pop.value_output_count,
+            OperatorProperties::GetValueOutputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+}
+
+
+TEST_P(MachinePureOperatorTest, MarkedAsPure) {
+  MachineOperatorBuilder machine(type());
+  const PureOperator& pop = GetParam();
+  const Operator* op = (machine.*pop.constructor)();
+  EXPECT_TRUE(op->HasProperty(Operator::kPure));
+}
+
+
+TEST_P(MachinePureOperatorTest, OpcodeIsCorrect) {
+  MachineOperatorBuilder machine(type());
+  const PureOperator& pop = GetParam();
+  const Operator* op = (machine.*pop.constructor)();
+  EXPECT_EQ(pop.opcode, op->opcode());
+}
+
+
+INSTANTIATE_TEST_CASE_P(
+    MachineOperatorTest, MachinePureOperatorTest,
+    ::testing::Combine(::testing::ValuesIn(kMachineReps),
+                       ::testing::ValuesIn(kPureOperators)));
+
+#endif  // GTEST_HAS_COMBINE
+
+
+// -----------------------------------------------------------------------------
+// Pseudo operators.
+
+
+TEST(MachineOperatorTest, PseudoOperatorsWhenWordSizeIs32Bit) {
+  MachineOperatorBuilder machine(kRepWord32);
+  EXPECT_EQ(machine.Word32And(), machine.WordAnd());
+  EXPECT_EQ(machine.Word32Or(), machine.WordOr());
+  EXPECT_EQ(machine.Word32Xor(), machine.WordXor());
+  EXPECT_EQ(machine.Word32Shl(), machine.WordShl());
+  EXPECT_EQ(machine.Word32Shr(), machine.WordShr());
+  EXPECT_EQ(machine.Word32Sar(), machine.WordSar());
+  EXPECT_EQ(machine.Word32Ror(), machine.WordRor());
+  EXPECT_EQ(machine.Word32Equal(), machine.WordEqual());
+  EXPECT_EQ(machine.Int32Add(), machine.IntAdd());
+  EXPECT_EQ(machine.Int32Sub(), machine.IntSub());
+  EXPECT_EQ(machine.Int32Mul(), machine.IntMul());
+  EXPECT_EQ(machine.Int32Div(), machine.IntDiv());
+  EXPECT_EQ(machine.Int32UDiv(), machine.IntUDiv());
+  EXPECT_EQ(machine.Int32Mod(), machine.IntMod());
+  EXPECT_EQ(machine.Int32UMod(), machine.IntUMod());
+  EXPECT_EQ(machine.Int32LessThan(), machine.IntLessThan());
+  EXPECT_EQ(machine.Int32LessThanOrEqual(), machine.IntLessThanOrEqual());
+}
+
+
+TEST(MachineOperatorTest, PseudoOperatorsWhenWordSizeIs64Bit) {
+  MachineOperatorBuilder machine(kRepWord64);
+  EXPECT_EQ(machine.Word64And(), machine.WordAnd());
+  EXPECT_EQ(machine.Word64Or(), machine.WordOr());
+  EXPECT_EQ(machine.Word64Xor(), machine.WordXor());
+  EXPECT_EQ(machine.Word64Shl(), machine.WordShl());
+  EXPECT_EQ(machine.Word64Shr(), machine.WordShr());
+  EXPECT_EQ(machine.Word64Sar(), machine.WordSar());
+  EXPECT_EQ(machine.Word64Ror(), machine.WordRor());
+  EXPECT_EQ(machine.Word64Equal(), machine.WordEqual());
+  EXPECT_EQ(machine.Int64Add(), machine.IntAdd());
+  EXPECT_EQ(machine.Int64Sub(), machine.IntSub());
+  EXPECT_EQ(machine.Int64Mul(), machine.IntMul());
+  EXPECT_EQ(machine.Int64Div(), machine.IntDiv());
+  EXPECT_EQ(machine.Int64UDiv(), machine.IntUDiv());
+  EXPECT_EQ(machine.Int64Mod(), machine.IntMod());
+  EXPECT_EQ(machine.Int64UMod(), machine.IntUMod());
+  EXPECT_EQ(machine.Int64LessThan(), machine.IntLessThan());
+  EXPECT_EQ(machine.Int64LessThanOrEqual(), machine.IntLessThanOrEqual());
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc
new file mode 100644
index 0000000..2f30bd2
--- /dev/null
+++ b/src/compiler/machine-operator.cc
@@ -0,0 +1,244 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/machine-operator.h"
+
+#include "src/base/lazy-instance.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+OStream& operator<<(OStream& os, const WriteBarrierKind& write_barrier_kind) {
+  switch (write_barrier_kind) {
+    case kNoWriteBarrier:
+      return os << "NoWriteBarrier";
+    case kFullWriteBarrier:
+      return os << "FullWriteBarrier";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+OStream& operator<<(OStream& os, const StoreRepresentation& rep) {
+  return os << "(" << rep.machine_type() << " : " << rep.write_barrier_kind()
+            << ")";
+}
+
+
+template <>
+struct StaticParameterTraits<StoreRepresentation> {
+  static OStream& PrintTo(OStream& os, const StoreRepresentation& rep) {
+    return os << rep;
+  }
+  static int HashCode(const StoreRepresentation& rep) {
+    return rep.machine_type() + rep.write_barrier_kind();
+  }
+  static bool Equals(const StoreRepresentation& rep1,
+                     const StoreRepresentation& rep2) {
+    return rep1 == rep2;
+  }
+};
+
+
+template <>
+struct StaticParameterTraits<LoadRepresentation> {
+  static OStream& PrintTo(OStream& os, LoadRepresentation type) {  // NOLINT
+    return os << type;
+  }
+  static int HashCode(LoadRepresentation type) { return type; }
+  static bool Equals(LoadRepresentation lhs, LoadRepresentation rhs) {
+    return lhs == rhs;
+  }
+};
+
+
+#define PURE_OP_LIST(V)                                                       \
+  V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 1)         \
+  V(Word32Or, Operator::kAssociative | Operator::kCommutative, 2, 1)          \
+  V(Word32Xor, Operator::kAssociative | Operator::kCommutative, 2, 1)         \
+  V(Word32Shl, Operator::kNoProperties, 2, 1)                                 \
+  V(Word32Shr, Operator::kNoProperties, 2, 1)                                 \
+  V(Word32Sar, Operator::kNoProperties, 2, 1)                                 \
+  V(Word32Ror, Operator::kNoProperties, 2, 1)                                 \
+  V(Word32Equal, Operator::kCommutative, 2, 1)                                \
+  V(Word64And, Operator::kAssociative | Operator::kCommutative, 2, 1)         \
+  V(Word64Or, Operator::kAssociative | Operator::kCommutative, 2, 1)          \
+  V(Word64Xor, Operator::kAssociative | Operator::kCommutative, 2, 1)         \
+  V(Word64Shl, Operator::kNoProperties, 2, 1)                                 \
+  V(Word64Shr, Operator::kNoProperties, 2, 1)                                 \
+  V(Word64Sar, Operator::kNoProperties, 2, 1)                                 \
+  V(Word64Ror, Operator::kNoProperties, 2, 1)                                 \
+  V(Word64Equal, Operator::kCommutative, 2, 1)                                \
+  V(Int32Add, Operator::kAssociative | Operator::kCommutative, 2, 1)          \
+  V(Int32AddWithOverflow, Operator::kAssociative | Operator::kCommutative, 2, \
+    2)                                                                        \
+  V(Int32Sub, Operator::kNoProperties, 2, 1)                                  \
+  V(Int32SubWithOverflow, Operator::kNoProperties, 2, 2)                      \
+  V(Int32Mul, Operator::kAssociative | Operator::kCommutative, 2, 1)          \
+  V(Int32Div, Operator::kNoProperties, 2, 1)                                  \
+  V(Int32UDiv, Operator::kNoProperties, 2, 1)                                 \
+  V(Int32Mod, Operator::kNoProperties, 2, 1)                                  \
+  V(Int32UMod, Operator::kNoProperties, 2, 1)                                 \
+  V(Int32LessThan, Operator::kNoProperties, 2, 1)                             \
+  V(Int32LessThanOrEqual, Operator::kNoProperties, 2, 1)                      \
+  V(Uint32LessThan, Operator::kNoProperties, 2, 1)                            \
+  V(Uint32LessThanOrEqual, Operator::kNoProperties, 2, 1)                     \
+  V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 1)          \
+  V(Int64Sub, Operator::kNoProperties, 2, 1)                                  \
+  V(Int64Mul, Operator::kAssociative | Operator::kCommutative, 2, 1)          \
+  V(Int64Div, Operator::kNoProperties, 2, 1)                                  \
+  V(Int64UDiv, Operator::kNoProperties, 2, 1)                                 \
+  V(Int64Mod, Operator::kNoProperties, 2, 1)                                  \
+  V(Int64UMod, Operator::kNoProperties, 2, 1)                                 \
+  V(Int64LessThan, Operator::kNoProperties, 2, 1)                             \
+  V(Int64LessThanOrEqual, Operator::kNoProperties, 2, 1)                      \
+  V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 1)                    \
+  V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 1)                      \
+  V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 1)                     \
+  V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 1)                      \
+  V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 1)                        \
+  V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 1)                     \
+  V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 1)                      \
+  V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 1)                  \
+  V(TruncateFloat64ToInt32, Operator::kNoProperties, 1, 1)                    \
+  V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 1)                      \
+  V(Float64Add, Operator::kCommutative, 2, 1)                                 \
+  V(Float64Sub, Operator::kNoProperties, 2, 1)                                \
+  V(Float64Mul, Operator::kCommutative, 2, 1)                                 \
+  V(Float64Div, Operator::kNoProperties, 2, 1)                                \
+  V(Float64Mod, Operator::kNoProperties, 2, 1)                                \
+  V(Float64Sqrt, Operator::kNoProperties, 1, 1)                               \
+  V(Float64Equal, Operator::kCommutative, 2, 1)                               \
+  V(Float64LessThan, Operator::kNoProperties, 2, 1)                           \
+  V(Float64LessThanOrEqual, Operator::kNoProperties, 2, 1)
+
+
+#define MACHINE_TYPE_LIST(V) \
+  V(MachFloat32)             \
+  V(MachFloat64)             \
+  V(MachInt8)                \
+  V(MachUint8)               \
+  V(MachInt16)               \
+  V(MachUint16)              \
+  V(MachInt32)               \
+  V(MachUint32)              \
+  V(MachInt64)               \
+  V(MachUint64)              \
+  V(MachAnyTagged)           \
+  V(RepBit)                  \
+  V(RepWord8)                \
+  V(RepWord16)               \
+  V(RepWord32)               \
+  V(RepWord64)               \
+  V(RepFloat32)              \
+  V(RepFloat64)              \
+  V(RepTagged)
+
+
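+// Holds a statically-allocated instance of every machine operator, shared via
+// the lazy instance below so that MachineOperatorBuilder hands out
+// pointer-comparable operators (see the InstancesAreGloballyShared tests).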
+struct MachineOperatorBuilderImpl {
+#define PURE(Name, properties, input_count, output_count)                 \
+  struct Name##Operator FINAL : public SimpleOperator {                   \
+    Name##Operator()                                                      \
+        : SimpleOperator(IrOpcode::k##Name, Operator::kPure | properties, \
+                         input_count, output_count, #Name) {}             \
+  };                                                                      \
+  Name##Operator k##Name;
+  PURE_OP_LIST(PURE)
+#undef PURE
+
+#define LOAD(Type)                                                            \
+  struct Load##Type##Operator FINAL : public Operator1<LoadRepresentation> {  \
+    Load##Type##Operator()                                                    \
+        : Operator1<LoadRepresentation>(                                      \
+              IrOpcode::kLoad, Operator::kNoThrow | Operator::kNoWrite, 2, 1, \
+              "Load", k##Type) {}                                             \
+  };                                                                          \
+  Load##Type##Operator k##Load##Type;
+  MACHINE_TYPE_LIST(LOAD)
+#undef LOAD
+
+#define STORE(Type)                                                           \
+  struct Store##Type##Operator : public Operator1<StoreRepresentation> {      \
+    explicit Store##Type##Operator(WriteBarrierKind write_barrier_kind)       \
+        : Operator1<StoreRepresentation>(                                     \
+              IrOpcode::kStore, Operator::kNoRead | Operator::kNoThrow, 3, 0, \
+              "Store", StoreRepresentation(k##Type, write_barrier_kind)) {}   \
+  };                                                                          \
+  struct Store##Type##NoWriteBarrier##Operator FINAL                          \
+      : public Store##Type##Operator {                                        \
+    Store##Type##NoWriteBarrier##Operator()                                   \
+        : Store##Type##Operator(kNoWriteBarrier) {}                           \
+  };                                                                          \
+  struct Store##Type##FullWriteBarrier##Operator FINAL                        \
+      : public Store##Type##Operator {                                        \
+    Store##Type##FullWriteBarrier##Operator()                                 \
+        : Store##Type##Operator(kFullWriteBarrier) {}                         \
+  };                                                                          \
+  Store##Type##NoWriteBarrier##Operator k##Store##Type##NoWriteBarrier;       \
+  Store##Type##FullWriteBarrier##Operator k##Store##Type##FullWriteBarrier;
+  MACHINE_TYPE_LIST(STORE)
+#undef STORE
+};
+
+
+static base::LazyInstance<MachineOperatorBuilderImpl>::type kImpl =
+    LAZY_INSTANCE_INITIALIZER;
+
+
+MachineOperatorBuilder::MachineOperatorBuilder(MachineType word)
+    : impl_(kImpl.Get()), word_(word) {
+  DCHECK(word == kRepWord32 || word == kRepWord64);
+}
+
+
+#define PURE(Name, properties, input_count, output_count) \
+  const Operator* MachineOperatorBuilder::Name() { return &impl_.k##Name; }
+PURE_OP_LIST(PURE)
+#undef PURE
+
+
+const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
+  switch (rep) {
+#define LOAD(Type) \
+  case k##Type:    \
+    return &impl_.k##Load##Type;
+    MACHINE_TYPE_LIST(LOAD)
+#undef LOAD
+
+    default:
+      break;
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+const Operator* MachineOperatorBuilder::Store(StoreRepresentation rep) {
+  switch (rep.machine_type()) {
+#define STORE(Type)                                     \
+  case k##Type:                                         \
+    switch (rep.write_barrier_kind()) {                 \
+      case kNoWriteBarrier:                             \
+        return &impl_.k##Store##Type##NoWriteBarrier;   \
+      case kFullWriteBarrier:                           \
+        return &impl_.k##Store##Type##FullWriteBarrier; \
+    }                                                   \
+    break;
+    MACHINE_TYPE_LIST(STORE)
+#undef STORE
+
+    default:
+      break;
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
new file mode 100644
index 0000000..92c8ac4
--- /dev/null
+++ b/src/compiler/machine-operator.h
@@ -0,0 +1,187 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MACHINE_OPERATOR_H_
+#define V8_COMPILER_MACHINE_OPERATOR_H_
+
+#include "src/compiler/machine-type.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+struct MachineOperatorBuilderImpl;
+class Operator;
+
+
+// Supported write barrier modes.
+enum WriteBarrierKind { kNoWriteBarrier, kFullWriteBarrier };
+
+OStream& operator<<(OStream& os, const WriteBarrierKind& write_barrier_kind);
+
+
+typedef MachineType LoadRepresentation;
+
+
+// A Store needs a MachineType and a WriteBarrierKind in order to emit the
+// correct write barrier.
+class StoreRepresentation FINAL {
+ public:
+  StoreRepresentation(MachineType machine_type,
+                      WriteBarrierKind write_barrier_kind)
+      : machine_type_(machine_type), write_barrier_kind_(write_barrier_kind) {}
+
+  MachineType machine_type() const { return machine_type_; }
+  WriteBarrierKind write_barrier_kind() const { return write_barrier_kind_; }
+
+ private:
+  MachineType machine_type_;
+  WriteBarrierKind write_barrier_kind_;
+};
+
+inline bool operator==(const StoreRepresentation& rep1,
+                       const StoreRepresentation& rep2) {
+  return rep1.machine_type() == rep2.machine_type() &&
+         rep1.write_barrier_kind() == rep2.write_barrier_kind();
+}
+
+inline bool operator!=(const StoreRepresentation& rep1,
+                       const StoreRepresentation& rep2) {
+  return !(rep1 == rep2);
+}
+
+OStream& operator<<(OStream& os, const StoreRepresentation& rep);
+
+
+// Interface for building machine-level operators. These operators are
+// machine-level but machine-independent and thus define a language suitable
+// for generating code to run on architectures such as ia32, x64, arm, etc.
+class MachineOperatorBuilder FINAL {
+ public:
+  explicit MachineOperatorBuilder(MachineType word = kMachPtr);
+
+  const Operator* Word32And();
+  const Operator* Word32Or();
+  const Operator* Word32Xor();
+  const Operator* Word32Shl();
+  const Operator* Word32Shr();
+  const Operator* Word32Sar();
+  const Operator* Word32Ror();
+  const Operator* Word32Equal();
+
+  const Operator* Word64And();
+  const Operator* Word64Or();
+  const Operator* Word64Xor();
+  const Operator* Word64Shl();
+  const Operator* Word64Shr();
+  const Operator* Word64Sar();
+  const Operator* Word64Ror();
+  const Operator* Word64Equal();
+
+  const Operator* Int32Add();
+  const Operator* Int32AddWithOverflow();
+  const Operator* Int32Sub();
+  const Operator* Int32SubWithOverflow();
+  const Operator* Int32Mul();
+  const Operator* Int32Div();
+  const Operator* Int32UDiv();
+  const Operator* Int32Mod();
+  const Operator* Int32UMod();
+  const Operator* Int32LessThan();
+  const Operator* Int32LessThanOrEqual();
+  const Operator* Uint32LessThan();
+  const Operator* Uint32LessThanOrEqual();
+
+  const Operator* Int64Add();
+  const Operator* Int64Sub();
+  const Operator* Int64Mul();
+  const Operator* Int64Div();
+  const Operator* Int64UDiv();
+  const Operator* Int64Mod();
+  const Operator* Int64UMod();
+  const Operator* Int64LessThan();
+  const Operator* Int64LessThanOrEqual();
+
+  // These operators change the representation of numbers while preserving the
+  // value of the number. Narrowing operators assume the input is representable
+  // in the target type and are *not* defined for other inputs.
+  // Use narrowing change operators only when there is a static guarantee that
+  // the input value is representable in the target type.
+  const Operator* ChangeFloat32ToFloat64();
+  const Operator* ChangeFloat64ToInt32();   // narrowing
+  const Operator* ChangeFloat64ToUint32();  // narrowing
+  const Operator* ChangeInt32ToFloat64();
+  const Operator* ChangeInt32ToInt64();
+  const Operator* ChangeUint32ToFloat64();
+  const Operator* ChangeUint32ToUint64();
+
+  // These operators truncate numbers, both changing the representation of
+  // the number and mapping multiple input values onto the same output value.
+  const Operator* TruncateFloat64ToFloat32();
+  const Operator* TruncateFloat64ToInt32();  // JavaScript semantics.
+  const Operator* TruncateInt64ToInt32();
+
+  // Floating point operators always operate with IEEE 754 round-to-nearest.
+  const Operator* Float64Add();
+  const Operator* Float64Sub();
+  const Operator* Float64Mul();
+  const Operator* Float64Div();
+  const Operator* Float64Mod();
+  const Operator* Float64Sqrt();
+
+  // Floating point comparisons complying to IEEE 754.
+  const Operator* Float64Equal();
+  const Operator* Float64LessThan();
+  const Operator* Float64LessThanOrEqual();
+
+  // load [base + index]
+  const Operator* Load(LoadRepresentation rep);
+
+  // store [base + index], value
+  const Operator* Store(StoreRepresentation rep);
+
+  // Target machine word-size assumed by this builder.
+  bool Is32() const { return word() == kRepWord32; }
+  bool Is64() const { return word() == kRepWord64; }
+  MachineType word() const { return word_; }
+
+// Pseudo operators that translate to 32/64-bit operators depending on the
+// word-size of the target machine assumed by this builder.
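+// For example, on a builder constructed with kRepWord64, WordShl() returns the
+// same operator as Word64Shl().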
+#define PSEUDO_OP_LIST(V) \
+  V(Word, And)            \
+  V(Word, Or)             \
+  V(Word, Xor)            \
+  V(Word, Shl)            \
+  V(Word, Shr)            \
+  V(Word, Sar)            \
+  V(Word, Ror)            \
+  V(Word, Equal)          \
+  V(Int, Add)             \
+  V(Int, Sub)             \
+  V(Int, Mul)             \
+  V(Int, Div)             \
+  V(Int, UDiv)            \
+  V(Int, Mod)             \
+  V(Int, UMod)            \
+  V(Int, LessThan)        \
+  V(Int, LessThanOrEqual)
+#define PSEUDO_OP(Prefix, Suffix)                                \
+  const Operator* Prefix##Suffix() {                             \
+    return Is32() ? Prefix##32##Suffix() : Prefix##64##Suffix(); \
+  }
+  PSEUDO_OP_LIST(PSEUDO_OP)
+#undef PSEUDO_OP
+#undef PSEUDO_OP_LIST
+
+ private:
+  const MachineOperatorBuilderImpl& impl_;
+  const MachineType word_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_MACHINE_OPERATOR_H_
diff --git a/src/compiler/machine-type.cc b/src/compiler/machine-type.cc
new file mode 100644
index 0000000..94aa124
--- /dev/null
+++ b/src/compiler/machine-type.cc
@@ -0,0 +1,46 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/machine-type.h"
+#include "src/ostreams.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define PRINT(bit)         \
+  if (type & bit) {        \
+    if (before) os << "|"; \
+    os << #bit;            \
+    before = true;         \
+  }
+
+
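+// Prints the set bits of {type} by name, separated by '|'; e.g. kMachInt32
+// prints as "kRepWord32|kTypeInt32".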
+OStream& operator<<(OStream& os, const MachineType& type) {
+  bool before = false;
+  PRINT(kRepBit);
+  PRINT(kRepWord8);
+  PRINT(kRepWord16);
+  PRINT(kRepWord32);
+  PRINT(kRepWord64);
+  PRINT(kRepFloat32);
+  PRINT(kRepFloat64);
+  PRINT(kRepTagged);
+
+  PRINT(kTypeBool);
+  PRINT(kTypeInt32);
+  PRINT(kTypeUint32);
+  PRINT(kTypeInt64);
+  PRINT(kTypeUint64);
+  PRINT(kTypeNumber);
+  PRINT(kTypeAny);
+  return os;
+}
+
+
+#undef PRINT
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/machine-type.h b/src/compiler/machine-type.h
new file mode 100644
index 0000000..88b482c
--- /dev/null
+++ b/src/compiler/machine-type.h
@@ -0,0 +1,173 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MACHINE_TYPE_H_
+#define V8_COMPILER_MACHINE_TYPE_H_
+
+#include "src/base/bits.h"
+#include "src/globals.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+class OStream;
+
+namespace compiler {
+
+// Machine-level types and representations.
+// TODO(titzer): Use the real type system instead of MachineType.
+enum MachineType {
+  // Representations.
+  kRepBit = 1 << 0,
+  kRepWord8 = 1 << 1,
+  kRepWord16 = 1 << 2,
+  kRepWord32 = 1 << 3,
+  kRepWord64 = 1 << 4,
+  kRepFloat32 = 1 << 5,
+  kRepFloat64 = 1 << 6,
+  kRepTagged = 1 << 7,
+
+  // Types.
+  kTypeBool = 1 << 8,
+  kTypeInt32 = 1 << 9,
+  kTypeUint32 = 1 << 10,
+  kTypeInt64 = 1 << 11,
+  kTypeUint64 = 1 << 12,
+  kTypeNumber = 1 << 13,
+  kTypeAny = 1 << 14,
+
+  // Machine types.
+  kMachNone = 0,
+  kMachFloat32 = kRepFloat32 | kTypeNumber,
+  kMachFloat64 = kRepFloat64 | kTypeNumber,
+  kMachInt8 = kRepWord8 | kTypeInt32,
+  kMachUint8 = kRepWord8 | kTypeUint32,
+  kMachInt16 = kRepWord16 | kTypeInt32,
+  kMachUint16 = kRepWord16 | kTypeUint32,
+  kMachInt32 = kRepWord32 | kTypeInt32,
+  kMachUint32 = kRepWord32 | kTypeUint32,
+  kMachInt64 = kRepWord64 | kTypeInt64,
+  kMachUint64 = kRepWord64 | kTypeUint64,
+  kMachPtr = (kPointerSize == 4) ? kRepWord32 : kRepWord64,
+  kMachAnyTagged = kRepTagged | kTypeAny
+};
+
+OStream& operator<<(OStream& os, const MachineType& type);
+
+typedef uint16_t MachineTypeUnion;
+
+// Globally useful machine types and constants.
+const MachineTypeUnion kRepMask = kRepBit | kRepWord8 | kRepWord16 |
+                                  kRepWord32 | kRepWord64 | kRepFloat32 |
+                                  kRepFloat64 | kRepTagged;
+const MachineTypeUnion kTypeMask = kTypeBool | kTypeInt32 | kTypeUint32 |
+                                   kTypeInt64 | kTypeUint64 | kTypeNumber |
+                                   kTypeAny;
+
+// Gets only the type component of the given machine type.
+inline MachineType TypeOf(MachineType machine_type) {
+  int result = machine_type & kTypeMask;
+  return static_cast<MachineType>(result);
+}
+
+// Gets only the representation component of the given machine type.
+inline MachineType RepresentationOf(MachineType machine_type) {
+  int result = machine_type & kRepMask;
+  CHECK(base::bits::IsPowerOfTwo32(result));
+  return static_cast<MachineType>(result);
+}
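+// For example, for kMachInt32 (== kRepWord32 | kTypeInt32 above), TypeOf()
+// returns kTypeInt32 and RepresentationOf() returns kRepWord32.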
+
+// Gets the element size in bytes of the machine type.
+inline int ElementSizeOf(MachineType machine_type) {
+  switch (RepresentationOf(machine_type)) {
+    case kRepBit:
+    case kRepWord8:
+      return 1;
+    case kRepWord16:
+      return 2;
+    case kRepWord32:
+    case kRepFloat32:
+      return 4;
+    case kRepWord64:
+    case kRepFloat64:
+      return 8;
+    case kRepTagged:
+      return kPointerSize;
+    default:
+      UNREACHABLE();
+      return kPointerSize;
+  }
+}
+
+// Describes the inputs and outputs of a function or call.
+template <typename T>
+class Signature : public ZoneObject {
+ public:
+  Signature(size_t return_count, size_t parameter_count, T* reps)
+      : return_count_(return_count),
+        parameter_count_(parameter_count),
+        reps_(reps) {}
+
+  size_t return_count() const { return return_count_; }
+  size_t parameter_count() const { return parameter_count_; }
+
+  T GetParam(size_t index) const {
+    DCHECK(index < parameter_count_);
+    return reps_[return_count_ + index];
+  }
+
+  T GetReturn(size_t index = 0) const {
+    DCHECK(index < return_count_);
+    return reps_[index];
+  }
+
+  // For incrementally building signatures.
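+  // For example (sketch), to describe an int32_t(int32_t, double) signature:
+  //   MachineSignature::Builder builder(zone, 1, 2);
+  //   builder.AddReturn(kMachInt32);
+  //   builder.AddParam(kMachInt32);
+  //   builder.AddParam(kMachFloat64);
+  //   MachineSignature* sig = builder.Build();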
+  class Builder {
+   public:
+    Builder(Zone* zone, size_t return_count, size_t parameter_count)
+        : return_count_(return_count),
+          parameter_count_(parameter_count),
+          zone_(zone),
+          rcursor_(0),
+          pcursor_(0),
+          buffer_(zone->NewArray<T>(
+              static_cast<int>(return_count + parameter_count))) {}
+
+    const size_t return_count_;
+    const size_t parameter_count_;
+
+    void AddReturn(T val) {
+      DCHECK(rcursor_ < return_count_);
+      buffer_[rcursor_++] = val;
+    }
+    void AddParam(T val) {
+      DCHECK(pcursor_ < parameter_count_);
+      buffer_[return_count_ + pcursor_++] = val;
+    }
+    Signature<T>* Build() {
+      DCHECK(rcursor_ == return_count_);
+      DCHECK(pcursor_ == parameter_count_);
+      return new (zone_) Signature<T>(return_count_, parameter_count_, buffer_);
+    }
+
+   private:
+    Zone* zone_;
+    size_t rcursor_;
+    size_t pcursor_;
+    T* buffer_;
+  };
+
+ protected:
+  size_t return_count_;
+  size_t parameter_count_;
+  T* reps_;
+};
+
+typedef Signature<MachineType> MachineSignature;
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_MACHINE_TYPE_H_
diff --git a/src/compiler/node-aux-data-inl.h b/src/compiler/node-aux-data-inl.h
new file mode 100644
index 0000000..79f1abf
--- /dev/null
+++ b/src/compiler/node-aux-data-inl.h
@@ -0,0 +1,43 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_AUX_DATA_INL_H_
+#define V8_COMPILER_NODE_AUX_DATA_INL_H_
+
+#include "src/compiler/graph.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-aux-data.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <class T>
+NodeAuxData<T>::NodeAuxData(Zone* zone)
+    : aux_data_(zone) {}
+
+
+template <class T>
+void NodeAuxData<T>::Set(Node* node, const T& data) {
+  int id = node->id();
+  if (id >= static_cast<int>(aux_data_.size())) {
+    aux_data_.resize(id + 1);
+  }
+  aux_data_[id] = data;
+}
+
+
+template <class T>
+T NodeAuxData<T>::Get(Node* node) {
+  int id = node->id();
+  if (id >= static_cast<int>(aux_data_.size())) {
+    return T();
+  }
+  return aux_data_[id];
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_NODE_AUX_DATA_INL_H_
diff --git a/src/compiler/node-aux-data.h b/src/compiler/node-aux-data.h
new file mode 100644
index 0000000..7acce33
--- /dev/null
+++ b/src/compiler/node-aux-data.h
@@ -0,0 +1,33 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_AUX_DATA_H_
+#define V8_COMPILER_NODE_AUX_DATA_H_
+
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class Graph;
+class Node;
+
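+// Maps nodes (by node id) to values of type T. The backing store grows on
+// demand; Get() returns a default-constructed T for nodes that were never
+// Set().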
+template <class T>
+class NodeAuxData {
+ public:
+  inline explicit NodeAuxData(Zone* zone);
+
+  inline void Set(Node* node, const T& data);
+  inline T Get(Node* node);
+
+ private:
+  ZoneVector<T> aux_data_;
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_NODE_AUX_DATA_H_
diff --git a/src/compiler/node-cache.cc b/src/compiler/node-cache.cc
new file mode 100644
index 0000000..7cda167
--- /dev/null
+++ b/src/compiler/node-cache.cc
@@ -0,0 +1,120 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node-cache.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define INITIAL_SIZE 16
+#define LINEAR_PROBE 5
+
+template <typename Key>
+int32_t NodeCacheHash(Key key) {
+  UNIMPLEMENTED();
+  return 0;
+}
+
+template <>
+inline int32_t NodeCacheHash(int32_t key) {
+  return ComputeIntegerHash(key, 0);
+}
+
+
+template <>
+inline int32_t NodeCacheHash(int64_t key) {
+  return ComputeLongHash(key);
+}
+
+
+template <>
+inline int32_t NodeCacheHash(double key) {
+  return ComputeLongHash(bit_cast<int64_t>(key));
+}
+
+
+template <>
+inline int32_t NodeCacheHash(void* key) {
+  return ComputePointerHash(key);
+}
+
+
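+// Grows the table to 4x its current size (up to {max_}) and re-inserts the old
+// entries; returns false once the maximum size has been reached.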
+template <typename Key>
+bool NodeCache<Key>::Resize(Zone* zone) {
+  if (size_ >= max_) return false;  // Don't grow past the maximum size.
+
+  // Allocate a new block of entries 4x the size.
+  Entry* old_entries = entries_;
+  int old_size = size_ + LINEAR_PROBE;
+  size_ = size_ * 4;
+  int num_entries = size_ + LINEAR_PROBE;
+  entries_ = zone->NewArray<Entry>(num_entries);
+  memset(entries_, 0, sizeof(Entry) * num_entries);
+
+  // Insert the old entries into the new block.
+  for (int i = 0; i < old_size; i++) {
+    Entry* old = &old_entries[i];
+    if (old->value_ != NULL) {
+      int hash = NodeCacheHash(old->key_);
+      int start = hash & (size_ - 1);
+      int end = start + LINEAR_PROBE;
+      for (int j = start; j < end; j++) {
+        Entry* entry = &entries_[j];
+        if (entry->value_ == NULL) {
+          entry->key_ = old->key_;
+          entry->value_ = old->value_;
+          break;
+        }
+      }
+    }
+  }
+  return true;
+}
+
+
+template <typename Key>
+Node** NodeCache<Key>::Find(Zone* zone, Key key) {
+  int32_t hash = NodeCacheHash(key);
+  if (entries_ == NULL) {
+    // Allocate the initial entries and insert the first entry.
+    int num_entries = INITIAL_SIZE + LINEAR_PROBE;
+    entries_ = zone->NewArray<Entry>(num_entries);
+    size_ = INITIAL_SIZE;
+    memset(entries_, 0, sizeof(Entry) * num_entries);
+    Entry* entry = &entries_[hash & (INITIAL_SIZE - 1)];
+    entry->key_ = key;
+    return &entry->value_;
+  }
+
+  while (true) {
+    // Search up to N entries after (linear probing).
+    int start = hash & (size_ - 1);
+    int end = start + LINEAR_PROBE;
+    for (int i = start; i < end; i++) {
+      Entry* entry = &entries_[i];
+      if (entry->key_ == key) return &entry->value_;
+      if (entry->value_ == NULL) {
+        entry->key_ = key;
+        return &entry->value_;
+      }
+    }
+
+    if (!Resize(zone)) break;  // Don't grow past the maximum size.
+  }
+
+  // If resized to maximum and still didn't find space, overwrite an entry.
+  Entry* entry = &entries_[hash & (size_ - 1)];
+  entry->key_ = key;
+  entry->value_ = NULL;
+  return &entry->value_;
+}
+
+
+template class NodeCache<int64_t>;
+template class NodeCache<int32_t>;
+template class NodeCache<void*>;
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/node-cache.h b/src/compiler/node-cache.h
new file mode 100644
index 0000000..35352ea
--- /dev/null
+++ b/src/compiler/node-cache.h
@@ -0,0 +1,53 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_CACHE_H_
+#define V8_COMPILER_NODE_CACHE_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A cache for nodes based on a key. Useful for implementing canonicalization
+// of nodes such as constants, parameters, etc.
+template <typename Key>
+class NodeCache {
+ public:
+  explicit NodeCache(int max = 256) : entries_(NULL), size_(0), max_(max) {}
+
+  // Search for node associated with {key} and return a pointer to a memory
+  // location in this cache that stores an entry for the key. If the location
+  // returned by this method contains a non-NULL node, the caller can use that
+  // node. Otherwise it is the responsibility of the caller to fill the entry
+  // with a new node.
+  // Note that a previous cache entry may be overwritten if the cache becomes
+  // too full or encounters too many hash collisions.
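+  //
+  // Illustrative usage sketch ({graph} and {common} stand for a caller's graph
+  // and common-operator builder; they are not members of this class):
+  //   Node** loc = cache.Find(zone, key);
+  //   if (*loc == NULL) *loc = graph->NewNode(common->Int32Constant(key));
+  //   return *loc;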
+  Node** Find(Zone* zone, Key key);
+
+ private:
+  struct Entry {
+    Key key_;
+    Node* value_;
+  };
+
+  Entry* entries_;  // lazily-allocated hash entries.
+  int32_t size_;
+  int32_t max_;
+
+  bool Resize(Zone* zone);
+};
+
+// Various default cache types.
+typedef NodeCache<int64_t> Int64NodeCache;
+typedef NodeCache<int32_t> Int32NodeCache;
+typedef NodeCache<void*> PtrNodeCache;
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_NODE_CACHE_H_
diff --git a/src/compiler/node-matchers.h b/src/compiler/node-matchers.h
new file mode 100644
index 0000000..e62eaee
--- /dev/null
+++ b/src/compiler/node-matchers.h
@@ -0,0 +1,146 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_MATCHERS_H_
+#define V8_COMPILER_NODE_MATCHERS_H_
+
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A pattern matcher for nodes.
+struct NodeMatcher {
+  explicit NodeMatcher(Node* node) : node_(node) {}
+
+  Node* node() const { return node_; }
+  const Operator* op() const { return node()->op(); }
+  IrOpcode::Value opcode() const { return node()->opcode(); }
+
+  bool HasProperty(Operator::Property property) const {
+    return op()->HasProperty(property);
+  }
+  Node* InputAt(int index) const { return node()->InputAt(index); }
+
+#define DEFINE_IS_OPCODE(Opcode) \
+  bool Is##Opcode() const { return opcode() == IrOpcode::k##Opcode; }
+  ALL_OP_LIST(DEFINE_IS_OPCODE)
+#undef DEFINE_IS_OPCODE
+
+ private:
+  Node* node_;
+};
+
+
+// A pattern matcher for arbitrary value constants.
+template <typename T, IrOpcode::Value kOpcode>
+struct ValueMatcher : public NodeMatcher {
+  explicit ValueMatcher(Node* node)
+      : NodeMatcher(node), value_(), has_value_(opcode() == kOpcode) {
+    if (has_value_) {
+      value_ = OpParameter<T>(node);
+    }
+  }
+
+  bool HasValue() const { return has_value_; }
+  const T& Value() const {
+    DCHECK(HasValue());
+    return value_;
+  }
+
+  bool Is(const T& value) const {
+    return this->HasValue() && this->Value() == value;
+  }
+
+  bool IsInRange(const T& low, const T& high) const {
+    return this->HasValue() && low <= this->Value() && this->Value() <= high;
+  }
+
+ private:
+  T value_;
+  bool has_value_;
+};
+
+
+// A pattern matcher for integer constants.
+template <typename T, IrOpcode::Value kOpcode>
+struct IntMatcher FINAL : public ValueMatcher<T, kOpcode> {
+  explicit IntMatcher(Node* node) : ValueMatcher<T, kOpcode>(node) {}
+
+  bool IsPowerOf2() const {
+    return this->HasValue() && this->Value() > 0 &&
+           (this->Value() & (this->Value() - 1)) == 0;
+  }
+};
+
+typedef IntMatcher<int32_t, IrOpcode::kInt32Constant> Int32Matcher;
+typedef IntMatcher<uint32_t, IrOpcode::kInt32Constant> Uint32Matcher;
+typedef IntMatcher<int64_t, IrOpcode::kInt64Constant> Int64Matcher;
+typedef IntMatcher<uint64_t, IrOpcode::kInt64Constant> Uint64Matcher;
+
+
+// A pattern matcher for floating point constants.
+template <typename T, IrOpcode::Value kOpcode>
+struct FloatMatcher FINAL : public ValueMatcher<T, kOpcode> {
+  explicit FloatMatcher(Node* node) : ValueMatcher<T, kOpcode>(node) {}
+
+  bool IsNaN() const { return this->HasValue() && std::isnan(this->Value()); }
+};
+
+typedef FloatMatcher<float, IrOpcode::kFloat32Constant> Float32Matcher;
+typedef FloatMatcher<double, IrOpcode::kFloat64Constant> Float64Matcher;
+typedef FloatMatcher<double, IrOpcode::kNumberConstant> NumberMatcher;
+
+
+// A pattern matcher for heap object constants.
+template <typename T>
+struct HeapObjectMatcher FINAL
+    : public ValueMatcher<Unique<T>, IrOpcode::kHeapConstant> {
+  explicit HeapObjectMatcher(Node* node)
+      : ValueMatcher<Unique<T>, IrOpcode::kHeapConstant>(node) {}
+};
+
+
+// For shorter pattern matching code, this struct matches both the left and
+// right hand sides of a binary operation and can put constants on the right
+// if they appear on the left hand side of a commutative operation.
+template <typename Left, typename Right>
+struct BinopMatcher FINAL : public NodeMatcher {
+  explicit BinopMatcher(Node* node)
+      : NodeMatcher(node), left_(InputAt(0)), right_(InputAt(1)) {
+    if (HasProperty(Operator::kCommutative)) PutConstantOnRight();
+  }
+
+  const Left& left() const { return left_; }
+  const Right& right() const { return right_; }
+
+  bool IsFoldable() const { return left().HasValue() && right().HasValue(); }
+  bool LeftEqualsRight() const { return left().node() == right().node(); }
+
+ private:
+  void PutConstantOnRight() {
+    if (left().HasValue() && !right().HasValue()) {
+      std::swap(left_, right_);
+      node()->ReplaceInput(0, left().node());
+      node()->ReplaceInput(1, right().node());
+    }
+  }
+
+  Left left_;
+  Right right_;
+};
+
+typedef BinopMatcher<Int32Matcher, Int32Matcher> Int32BinopMatcher;
+typedef BinopMatcher<Uint32Matcher, Uint32Matcher> Uint32BinopMatcher;
+typedef BinopMatcher<Int64Matcher, Int64Matcher> Int64BinopMatcher;
+typedef BinopMatcher<Uint64Matcher, Uint64Matcher> Uint64BinopMatcher;
+typedef BinopMatcher<Float64Matcher, Float64Matcher> Float64BinopMatcher;
+
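+// A small usage sketch (illustrative only): inside a reducer, {node} is
+// assumed to be an Int32Add node and Replace() stands in for the reducer's
+// own helper.
+//
+//   Int32BinopMatcher m(node);
+//   if (m.right().Is(0)) return Replace(m.left().node());  // x + 0 => x
+//   if (m.IsFoldable()) {                                  // K1 + K2 => K
+//     int32_t k = m.left().Value() + m.right().Value();
+//     // ... build and return a constant node for k ...
+//   }
+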
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_NODE_MATCHERS_H_
diff --git a/src/compiler/node-properties-inl.h b/src/compiler/node-properties-inl.h
new file mode 100644
index 0000000..3f6d531
--- /dev/null
+++ b/src/compiler/node-properties-inl.h
@@ -0,0 +1,212 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_PROPERTIES_INL_H_
+#define V8_COMPILER_NODE_PROPERTIES_INL_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/operator-properties-inl.h"
+#include "src/compiler/operator-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// -----------------------------------------------------------------------------
+// Input layout.
+// Inputs are always arranged in order as follows:
+//     0 [ values, context, frame state, effects, control ] node->InputCount()
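+//
+// As a worked example (hypothetical operator, for illustration only): a node
+// whose operator has 2 value inputs, a context input, no frame state, 1 effect
+// and 1 control input is laid out as
+//     index:  0      1      2        3       4
+//             value  value  context  effect  control
+// so FirstEffectIndex(node) == 3 and FirstControlIndex(node) == 4.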
+
+inline int NodeProperties::FirstValueIndex(Node* node) { return 0; }
+
+inline int NodeProperties::FirstContextIndex(Node* node) {
+  return PastValueIndex(node);
+}
+
+inline int NodeProperties::FirstFrameStateIndex(Node* node) {
+  return PastContextIndex(node);
+}
+
+inline int NodeProperties::FirstEffectIndex(Node* node) {
+  return PastFrameStateIndex(node);
+}
+
+inline int NodeProperties::FirstControlIndex(Node* node) {
+  return PastEffectIndex(node);
+}
+
+
+inline int NodeProperties::PastValueIndex(Node* node) {
+  return FirstValueIndex(node) +
+         OperatorProperties::GetValueInputCount(node->op());
+}
+
+inline int NodeProperties::PastContextIndex(Node* node) {
+  return FirstContextIndex(node) +
+         OperatorProperties::GetContextInputCount(node->op());
+}
+
+inline int NodeProperties::PastFrameStateIndex(Node* node) {
+  return FirstFrameStateIndex(node) +
+         OperatorProperties::GetFrameStateInputCount(node->op());
+}
+
+inline int NodeProperties::PastEffectIndex(Node* node) {
+  return FirstEffectIndex(node) +
+         OperatorProperties::GetEffectInputCount(node->op());
+}
+
+inline int NodeProperties::PastControlIndex(Node* node) {
+  return FirstControlIndex(node) +
+         OperatorProperties::GetControlInputCount(node->op());
+}
+
+
+// -----------------------------------------------------------------------------
+// Input accessors.
+
+inline Node* NodeProperties::GetValueInput(Node* node, int index) {
+  DCHECK(0 <= index &&
+         index < OperatorProperties::GetValueInputCount(node->op()));
+  return node->InputAt(FirstValueIndex(node) + index);
+}
+
+inline Node* NodeProperties::GetContextInput(Node* node) {
+  DCHECK(OperatorProperties::HasContextInput(node->op()));
+  return node->InputAt(FirstContextIndex(node));
+}
+
+inline Node* NodeProperties::GetFrameStateInput(Node* node) {
+  DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
+  return node->InputAt(FirstFrameStateIndex(node));
+}
+
+inline Node* NodeProperties::GetEffectInput(Node* node, int index) {
+  DCHECK(0 <= index &&
+         index < OperatorProperties::GetEffectInputCount(node->op()));
+  return node->InputAt(FirstEffectIndex(node) + index);
+}
+
+inline Node* NodeProperties::GetControlInput(Node* node, int index) {
+  DCHECK(0 <= index &&
+         index < OperatorProperties::GetControlInputCount(node->op()));
+  return node->InputAt(FirstControlIndex(node) + index);
+}
+
+inline int NodeProperties::GetFrameStateIndex(Node* node) {
+  DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
+  return FirstFrameStateIndex(node);
+}
+
+// -----------------------------------------------------------------------------
+// Edge kinds.
+
+inline bool NodeProperties::IsInputRange(Node::Edge edge, int first, int num) {
+  // TODO(titzer): edge.index() is linear time;
+  // edges may need to be marked as value/effect/control.
+  if (num == 0) return false;
+  int index = edge.index();
+  return first <= index && index < first + num;
+}
+
+inline bool NodeProperties::IsValueEdge(Node::Edge edge) {
+  Node* node = edge.from();
+  return IsInputRange(edge, FirstValueIndex(node),
+                      OperatorProperties::GetValueInputCount(node->op()));
+}
+
+inline bool NodeProperties::IsContextEdge(Node::Edge edge) {
+  Node* node = edge.from();
+  return IsInputRange(edge, FirstContextIndex(node),
+                      OperatorProperties::GetContextInputCount(node->op()));
+}
+
+inline bool NodeProperties::IsEffectEdge(Node::Edge edge) {
+  Node* node = edge.from();
+  return IsInputRange(edge, FirstEffectIndex(node),
+                      OperatorProperties::GetEffectInputCount(node->op()));
+}
+
+inline bool NodeProperties::IsControlEdge(Node::Edge edge) {
+  Node* node = edge.from();
+  return IsInputRange(edge, FirstControlIndex(node),
+                      OperatorProperties::GetControlInputCount(node->op()));
+}
+
+
+// -----------------------------------------------------------------------------
+// Miscellaneous predicates.
+
+inline bool NodeProperties::IsControl(Node* node) {
+  return IrOpcode::IsControlOpcode(node->opcode());
+}
+
+
+// -----------------------------------------------------------------------------
+// Miscellaneous mutators.
+
+inline void NodeProperties::ReplaceControlInput(Node* node, Node* control) {
+  node->ReplaceInput(FirstControlIndex(node), control);
+}
+
+inline void NodeProperties::ReplaceEffectInput(Node* node, Node* effect,
+                                               int index) {
+  DCHECK(index < OperatorProperties::GetEffectInputCount(node->op()));
+  return node->ReplaceInput(FirstEffectIndex(node) + index, effect);
+}
+
+inline void NodeProperties::ReplaceFrameStateInput(Node* node,
+                                                   Node* frame_state) {
+  DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
+  node->ReplaceInput(FirstFrameStateIndex(node), frame_state);
+}
+
+inline void NodeProperties::RemoveNonValueInputs(Node* node) {
+  node->TrimInputCount(OperatorProperties::GetValueInputCount(node->op()));
+}
+
+
+// Replace value uses of {node} with {value} and effect uses of {node} with
+// {effect}. If {effect == NULL}, then use the effect input to {node}.
+inline void NodeProperties::ReplaceWithValue(Node* node, Node* value,
+                                             Node* effect) {
+  DCHECK(!OperatorProperties::HasControlOutput(node->op()));
+  if (effect == NULL && OperatorProperties::HasEffectInput(node->op())) {
+    effect = NodeProperties::GetEffectInput(node);
+  }
+
+  // Requires distinguishing between value and effect edges.
+  UseIter iter = node->uses().begin();
+  while (iter != node->uses().end()) {
+    if (NodeProperties::IsEffectEdge(iter.edge())) {
+      DCHECK_NE(NULL, effect);
+      iter = iter.UpdateToAndIncrement(effect);
+    } else {
+      iter = iter.UpdateToAndIncrement(value);
+    }
+  }
+}
+
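+// A typical use (illustrative sketch): a lowering pass that has computed a
+// pure {replacement} for an effectful {node} can route all of its users to it
+// with
+//
+//   NodeProperties::ReplaceWithValue(node, replacement);
+//
+// value uses then point at {replacement} and effect uses are rewired to
+// {node}'s own effect input.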
+
+// -----------------------------------------------------------------------------
+// Type Bounds.
+
+inline Bounds NodeProperties::GetBounds(Node* node) { return node->bounds(); }
+
+inline void NodeProperties::SetBounds(Node* node, Bounds b) {
+  node->set_bounds(b);
+}
+
+
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_NODE_PROPERTIES_INL_H_
diff --git a/src/compiler/node-properties.h b/src/compiler/node-properties.h
new file mode 100644
index 0000000..94bd731
--- /dev/null
+++ b/src/compiler/node-properties.h
@@ -0,0 +1,64 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_PROPERTIES_H_
+#define V8_COMPILER_NODE_PROPERTIES_H_
+
+#include "src/compiler/node.h"
+#include "src/types.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Operator;
+
+// A facade that simplifies access to the different kinds of inputs to a node.
+class NodeProperties {
+ public:
+  static inline Node* GetValueInput(Node* node, int index);
+  static inline Node* GetContextInput(Node* node);
+  static inline Node* GetFrameStateInput(Node* node);
+  static inline Node* GetEffectInput(Node* node, int index = 0);
+  static inline Node* GetControlInput(Node* node, int index = 0);
+
+  static inline int GetFrameStateIndex(Node* node);
+
+  static inline bool IsValueEdge(Node::Edge edge);
+  static inline bool IsContextEdge(Node::Edge edge);
+  static inline bool IsEffectEdge(Node::Edge edge);
+  static inline bool IsControlEdge(Node::Edge edge);
+
+  static inline bool IsControl(Node* node);
+
+  static inline void ReplaceControlInput(Node* node, Node* control);
+  static inline void ReplaceEffectInput(Node* node, Node* effect,
+                                        int index = 0);
+  static inline void ReplaceFrameStateInput(Node* node, Node* frame_state);
+  static inline void RemoveNonValueInputs(Node* node);
+  static inline void ReplaceWithValue(Node* node, Node* value,
+                                      Node* effect = NULL);
+
+  static inline Bounds GetBounds(Node* node);
+  static inline void SetBounds(Node* node, Bounds bounds);
+
+  static inline int FirstValueIndex(Node* node);
+  static inline int FirstContextIndex(Node* node);
+  static inline int FirstFrameStateIndex(Node* node);
+  static inline int FirstEffectIndex(Node* node);
+  static inline int FirstControlIndex(Node* node);
+  static inline int PastValueIndex(Node* node);
+  static inline int PastContextIndex(Node* node);
+  static inline int PastFrameStateIndex(Node* node);
+  static inline int PastEffectIndex(Node* node);
+  static inline int PastControlIndex(Node* node);
+
+  static inline bool IsInputRange(Node::Edge edge, int first, int count);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_NODE_PROPERTIES_H_
diff --git a/src/compiler/node.cc b/src/compiler/node.cc
new file mode 100644
index 0000000..7df736e
--- /dev/null
+++ b/src/compiler/node.cc
@@ -0,0 +1,63 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node.h"
+
+#include "src/compiler/generic-node-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+void Node::Kill() {
+  DCHECK_NOT_NULL(op());
+  RemoveAllInputs();
+  DCHECK(uses().empty());
+}
+
+
+void Node::CollectProjections(NodeVector* projections) {
+  for (size_t i = 0; i < projections->size(); i++) {
+    (*projections)[i] = NULL;
+  }
+  for (UseIter i = uses().begin(); i != uses().end(); ++i) {
+    if ((*i)->opcode() != IrOpcode::kProjection) continue;
+    size_t index = OpParameter<size_t>(*i);
+    DCHECK_LT(index, projections->size());
+    DCHECK_EQ(NULL, (*projections)[index]);
+    (*projections)[index] = *i;
+  }
+}
+
+
+Node* Node::FindProjection(size_t projection_index) {
+  for (UseIter i = uses().begin(); i != uses().end(); ++i) {
+    if ((*i)->opcode() == IrOpcode::kProjection &&
+        OpParameter<size_t>(*i) == projection_index) {
+      return *i;
+    }
+  }
+  return NULL;
+}
+
+
+OStream& operator<<(OStream& os, const Operator& op) { return op.PrintTo(os); }
+
+
+OStream& operator<<(OStream& os, const Node& n) {
+  os << n.id() << ": " << *n.op();
+  if (n.op()->InputCount() != 0) {
+    os << "(";
+    for (int i = 0; i < n.op()->InputCount(); ++i) {
+      if (i != 0) os << ", ";
+      os << n.InputAt(i)->id();
+    }
+    os << ")";
+  }
+  return os;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/node.h b/src/compiler/node.h
new file mode 100644
index 0000000..c3f5a53
--- /dev/null
+++ b/src/compiler/node.h
@@ -0,0 +1,94 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_H_
+#define V8_COMPILER_NODE_H_
+
+#include <deque>
+#include <set>
+#include <vector>
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/types.h"
+#include "src/zone.h"
+#include "src/zone-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class NodeData {
+ public:
+  const Operator* op() const { return op_; }
+  void set_op(const Operator* op) { op_ = op; }
+
+  IrOpcode::Value opcode() const {
+    DCHECK(op_->opcode() <= IrOpcode::kLast);
+    return static_cast<IrOpcode::Value>(op_->opcode());
+  }
+
+  Bounds bounds() { return bounds_; }
+
+ protected:
+  const Operator* op_;
+  Bounds bounds_;
+  explicit NodeData(Zone* zone) : bounds_(Bounds(Type::None(zone))) {}
+
+  friend class NodeProperties;
+  void set_bounds(Bounds b) { bounds_ = b; }
+};
+
+// A Node is the basic primitive of an IR graph. In addition to the members
+// inherited from GenericNode, Nodes only contain a mutable Operator that may
+// change during compilation, e.g. during lowering passes. Other information
+// that needs to be associated with Nodes during compilation must be stored
+// out-of-line indexed by the Node's id.
+class Node FINAL : public GenericNode<NodeData, Node> {
+ public:
+  Node(GenericGraphBase* graph, int input_count)
+      : GenericNode<NodeData, Node>(graph, input_count) {}
+
+  void Initialize(const Operator* op) { set_op(op); }
+
+  bool IsDead() const { return InputCount() > 0 && InputAt(0) == NULL; }
+  void Kill();
+
+  void CollectProjections(ZoneVector<Node*>* projections);
+  Node* FindProjection(size_t projection_index);
+};
+
+OStream& operator<<(OStream& os, const Node& n);
+
+typedef GenericGraphVisit::NullNodeVisitor<NodeData, Node> NullNodeVisitor;
+
+typedef std::set<Node*, std::less<Node*>, zone_allocator<Node*> > NodeSet;
+typedef NodeSet::iterator NodeSetIter;
+typedef NodeSet::reverse_iterator NodeSetRIter;
+
+typedef ZoneVector<Node*> NodeVector;
+typedef NodeVector::iterator NodeVectorIter;
+typedef NodeVector::const_iterator NodeVectorConstIter;
+typedef NodeVector::reverse_iterator NodeVectorRIter;
+
+typedef ZoneVector<NodeVector> NodeVectorVector;
+typedef NodeVectorVector::iterator NodeVectorVectorIter;
+typedef NodeVectorVector::reverse_iterator NodeVectorVectorRIter;
+
+typedef Node::Uses::iterator UseIter;
+typedef Node::Inputs::iterator InputIter;
+
+// Helper to extract parameters from Operator1<*> nodes.
+template <typename T>
+static inline const T& OpParameter(const Node* node) {
+  return OpParameter<T>(node->op());
+}
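+
+// For example (sketch): the static parameter of a Projection node, i.e. its
+// projection index, can be read back as
+//
+//   size_t index = OpParameter<size_t>(projection_node);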
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_NODE_H_
diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h
new file mode 100644
index 0000000..e210abd
--- /dev/null
+++ b/src/compiler/opcodes.h
@@ -0,0 +1,310 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_OPCODES_H_
+#define V8_COMPILER_OPCODES_H_
+
+// Opcodes for control operators.
+#define INNER_CONTROL_OP_LIST(V) \
+  V(Dead)                  \
+  V(Loop)                  \
+  V(Branch)                \
+  V(IfTrue)                \
+  V(IfFalse)               \
+  V(Merge)                 \
+  V(Return)                \
+  V(Throw)
+
+#define CONTROL_OP_LIST(V) \
+  INNER_CONTROL_OP_LIST(V) \
+  V(Start)                 \
+  V(End)
+
+// Opcodes for common operators.
+#define LEAF_OP_LIST(V) \
+  V(Int32Constant)      \
+  V(Int64Constant)      \
+  V(Float32Constant)    \
+  V(Float64Constant)    \
+  V(ExternalConstant)   \
+  V(NumberConstant)     \
+  V(HeapConstant)
+
+#define INNER_OP_LIST(V) \
+  V(Phi)                 \
+  V(EffectPhi)           \
+  V(ControlEffect)       \
+  V(ValueEffect)         \
+  V(Finish)              \
+  V(FrameState)          \
+  V(StateValues)         \
+  V(Call)                \
+  V(Parameter)           \
+  V(Projection)
+
+#define COMMON_OP_LIST(V) \
+  LEAF_OP_LIST(V)         \
+  INNER_OP_LIST(V)
+
+// Opcodes for JavaScript operators.
+#define JS_COMPARE_BINOP_LIST(V) \
+  V(JSEqual)                     \
+  V(JSNotEqual)                  \
+  V(JSStrictEqual)               \
+  V(JSStrictNotEqual)            \
+  V(JSLessThan)                  \
+  V(JSGreaterThan)               \
+  V(JSLessThanOrEqual)           \
+  V(JSGreaterThanOrEqual)
+
+#define JS_BITWISE_BINOP_LIST(V) \
+  V(JSBitwiseOr)                 \
+  V(JSBitwiseXor)                \
+  V(JSBitwiseAnd)                \
+  V(JSShiftLeft)                 \
+  V(JSShiftRight)                \
+  V(JSShiftRightLogical)
+
+#define JS_ARITH_BINOP_LIST(V) \
+  V(JSAdd)                     \
+  V(JSSubtract)                \
+  V(JSMultiply)                \
+  V(JSDivide)                  \
+  V(JSModulus)
+
+#define JS_SIMPLE_BINOP_LIST(V) \
+  JS_COMPARE_BINOP_LIST(V)      \
+  JS_BITWISE_BINOP_LIST(V)      \
+  JS_ARITH_BINOP_LIST(V)
+
+#define JS_LOGIC_UNOP_LIST(V) V(JSUnaryNot)
+
+#define JS_CONVERSION_UNOP_LIST(V) \
+  V(JSToBoolean)                   \
+  V(JSToNumber)                    \
+  V(JSToString)                    \
+  V(JSToName)                      \
+  V(JSToObject)
+
+#define JS_OTHER_UNOP_LIST(V) \
+  V(JSTypeOf)
+
+#define JS_SIMPLE_UNOP_LIST(V) \
+  JS_LOGIC_UNOP_LIST(V)        \
+  JS_CONVERSION_UNOP_LIST(V)   \
+  JS_OTHER_UNOP_LIST(V)
+
+#define JS_OBJECT_OP_LIST(V) \
+  V(JSCreate)                \
+  V(JSLoadProperty)          \
+  V(JSLoadNamed)             \
+  V(JSStoreProperty)         \
+  V(JSStoreNamed)            \
+  V(JSDeleteProperty)        \
+  V(JSHasProperty)           \
+  V(JSInstanceOf)
+
+#define JS_CONTEXT_OP_LIST(V) \
+  V(JSLoadContext)            \
+  V(JSStoreContext)           \
+  V(JSCreateFunctionContext)  \
+  V(JSCreateCatchContext)     \
+  V(JSCreateWithContext)      \
+  V(JSCreateBlockContext)     \
+  V(JSCreateModuleContext)    \
+  V(JSCreateGlobalContext)
+
+#define JS_OTHER_OP_LIST(V) \
+  V(JSCallConstruct)        \
+  V(JSCallFunction)         \
+  V(JSCallRuntime)          \
+  V(JSYield)                \
+  V(JSDebugger)
+
+#define JS_OP_LIST(V)     \
+  JS_SIMPLE_BINOP_LIST(V) \
+  JS_SIMPLE_UNOP_LIST(V)  \
+  JS_OBJECT_OP_LIST(V)    \
+  JS_CONTEXT_OP_LIST(V)   \
+  JS_OTHER_OP_LIST(V)
+
+// Opcodes for VirtualMachine-level operators.
+#define SIMPLIFIED_OP_LIST(V) \
+  V(BooleanNot)               \
+  V(BooleanToNumber)          \
+  V(NumberEqual)              \
+  V(NumberLessThan)           \
+  V(NumberLessThanOrEqual)    \
+  V(NumberAdd)                \
+  V(NumberSubtract)           \
+  V(NumberMultiply)           \
+  V(NumberDivide)             \
+  V(NumberModulus)            \
+  V(NumberToInt32)            \
+  V(NumberToUint32)           \
+  V(ReferenceEqual)           \
+  V(StringEqual)              \
+  V(StringLessThan)           \
+  V(StringLessThanOrEqual)    \
+  V(StringAdd)                \
+  V(ChangeTaggedToInt32)      \
+  V(ChangeTaggedToUint32)     \
+  V(ChangeTaggedToFloat64)    \
+  V(ChangeInt32ToTagged)      \
+  V(ChangeUint32ToTagged)     \
+  V(ChangeFloat64ToTagged)    \
+  V(ChangeBoolToBit)          \
+  V(ChangeBitToBool)          \
+  V(LoadField)                \
+  V(LoadElement)              \
+  V(StoreField)               \
+  V(StoreElement)
+
+// Opcodes for Machine-level operators.
+#define MACHINE_OP_LIST(V)    \
+  V(Load)                     \
+  V(Store)                    \
+  V(Word32And)                \
+  V(Word32Or)                 \
+  V(Word32Xor)                \
+  V(Word32Shl)                \
+  V(Word32Shr)                \
+  V(Word32Sar)                \
+  V(Word32Ror)                \
+  V(Word32Equal)              \
+  V(Word64And)                \
+  V(Word64Or)                 \
+  V(Word64Xor)                \
+  V(Word64Shl)                \
+  V(Word64Shr)                \
+  V(Word64Sar)                \
+  V(Word64Ror)                \
+  V(Word64Equal)              \
+  V(Int32Add)                 \
+  V(Int32AddWithOverflow)     \
+  V(Int32Sub)                 \
+  V(Int32SubWithOverflow)     \
+  V(Int32Mul)                 \
+  V(Int32Div)                 \
+  V(Int32UDiv)                \
+  V(Int32Mod)                 \
+  V(Int32UMod)                \
+  V(Int32LessThan)            \
+  V(Int32LessThanOrEqual)     \
+  V(Uint32LessThan)           \
+  V(Uint32LessThanOrEqual)    \
+  V(Int64Add)                 \
+  V(Int64Sub)                 \
+  V(Int64Mul)                 \
+  V(Int64Div)                 \
+  V(Int64UDiv)                \
+  V(Int64Mod)                 \
+  V(Int64UMod)                \
+  V(Int64LessThan)            \
+  V(Int64LessThanOrEqual)     \
+  V(ChangeFloat32ToFloat64)   \
+  V(ChangeFloat64ToInt32)     \
+  V(ChangeFloat64ToUint32)    \
+  V(ChangeInt32ToFloat64)     \
+  V(ChangeInt32ToInt64)       \
+  V(ChangeUint32ToFloat64)    \
+  V(ChangeUint32ToUint64)     \
+  V(TruncateFloat64ToFloat32) \
+  V(TruncateFloat64ToInt32)   \
+  V(TruncateInt64ToInt32)     \
+  V(Float64Add)               \
+  V(Float64Sub)               \
+  V(Float64Mul)               \
+  V(Float64Div)               \
+  V(Float64Mod)               \
+  V(Float64Sqrt)              \
+  V(Float64Equal)             \
+  V(Float64LessThan)          \
+  V(Float64LessThanOrEqual)
+
+#define VALUE_OP_LIST(V) \
+  COMMON_OP_LIST(V)      \
+  SIMPLIFIED_OP_LIST(V)  \
+  MACHINE_OP_LIST(V)     \
+  JS_OP_LIST(V)
+
+// The combination of all operators at all levels and the common operators.
+#define ALL_OP_LIST(V) \
+  CONTROL_OP_LIST(V)   \
+  VALUE_OP_LIST(V)
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Declare an enumeration with all the opcodes at all levels so that they
+// can be globally, uniquely numbered.
+class IrOpcode {
+ public:
+  enum Value {
+#define DECLARE_OPCODE(x) k##x,
+    ALL_OP_LIST(DECLARE_OPCODE)
+#undef DECLARE_OPCODE
+    kLast = -1
+#define COUNT_OPCODE(x) +1
+            ALL_OP_LIST(COUNT_OPCODE)
+#undef COUNT_OPCODE
+  };
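+
+  // The two macro passes above expand roughly to (sketch, list abbreviated):
+  //
+  //   enum Value { kDead, kLoop, ..., kLast = -1 + 1 + 1 /* ... */ };
+  //
+  // so {kLast} ends up equal to the value of the last opcode, which is what
+  // makes range checks such as {opcode <= IrOpcode::kLast} work.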
+
+  // Returns the mnemonic name of an opcode.
+  static const char* Mnemonic(Value val) {
+    switch (val) {
+#define RETURN_NAME(x) \
+  case k##x:           \
+    return #x;
+      ALL_OP_LIST(RETURN_NAME)
+#undef RETURN_NAME
+      default:
+        return "UnknownOpcode";
+    }
+  }
+
+  static bool IsJsOpcode(Value val) {
+    switch (val) {
+#define RETURN_NAME(x) \
+  case k##x:           \
+    return true;
+      JS_OP_LIST(RETURN_NAME)
+#undef RETURN_NAME
+      default:
+        return false;
+    }
+  }
+
+  static bool IsControlOpcode(Value val) {
+    switch (val) {
+#define RETURN_NAME(x) \
+  case k##x:           \
+    return true;
+      CONTROL_OP_LIST(RETURN_NAME)
+#undef RETURN_NAME
+      default:
+        return false;
+    }
+  }
+
+  static bool IsCommonOpcode(Value val) {
+    switch (val) {
+#define RETURN_NAME(x) \
+  case k##x:           \
+    return true;
+      CONTROL_OP_LIST(RETURN_NAME)
+      COMMON_OP_LIST(RETURN_NAME)
+#undef RETURN_NAME
+      default:
+        return false;
+    }
+  }
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_OPCODES_H_
diff --git a/src/compiler/operator-properties-inl.h b/src/compiler/operator-properties-inl.h
new file mode 100644
index 0000000..9dae106
--- /dev/null
+++ b/src/compiler/operator-properties-inl.h
@@ -0,0 +1,183 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_OPERATOR_PROPERTIES_INL_H_
+#define V8_COMPILER_OPERATOR_PROPERTIES_INL_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+inline bool OperatorProperties::HasValueInput(const Operator* op) {
+  return OperatorProperties::GetValueInputCount(op) > 0;
+}
+
+inline bool OperatorProperties::HasContextInput(const Operator* op) {
+  IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
+  return IrOpcode::IsJsOpcode(opcode);
+}
+
+inline bool OperatorProperties::HasEffectInput(const Operator* op) {
+  return OperatorProperties::GetEffectInputCount(op) > 0;
+}
+
+inline bool OperatorProperties::HasControlInput(const Operator* op) {
+  return OperatorProperties::GetControlInputCount(op) > 0;
+}
+
+inline bool OperatorProperties::HasFrameStateInput(const Operator* op) {
+  if (!FLAG_turbo_deoptimization) {
+    return false;
+  }
+
+  switch (op->opcode()) {
+    case IrOpcode::kFrameState:
+      return true;
+    case IrOpcode::kJSCallRuntime: {
+      Runtime::FunctionId function = OpParameter<Runtime::FunctionId>(op);
+      return Linkage::NeedsFrameState(function);
+    }
+
+    // Strict equality cannot lazily deoptimize.
+    case IrOpcode::kJSStrictEqual:
+    case IrOpcode::kJSStrictNotEqual:
+      return false;
+
+    // Calls
+    case IrOpcode::kJSCallFunction:
+    case IrOpcode::kJSCallConstruct:
+
+    // Compare operations
+    case IrOpcode::kJSEqual:
+    case IrOpcode::kJSNotEqual:
+    case IrOpcode::kJSLessThan:
+    case IrOpcode::kJSGreaterThan:
+    case IrOpcode::kJSLessThanOrEqual:
+    case IrOpcode::kJSGreaterThanOrEqual:
+
+    // Binary operations
+    case IrOpcode::kJSBitwiseOr:
+    case IrOpcode::kJSBitwiseXor:
+    case IrOpcode::kJSBitwiseAnd:
+    case IrOpcode::kJSShiftLeft:
+    case IrOpcode::kJSShiftRight:
+    case IrOpcode::kJSShiftRightLogical:
+    case IrOpcode::kJSAdd:
+    case IrOpcode::kJSSubtract:
+    case IrOpcode::kJSMultiply:
+    case IrOpcode::kJSDivide:
+    case IrOpcode::kJSModulus:
+    case IrOpcode::kJSLoadProperty:
+    case IrOpcode::kJSStoreProperty:
+    case IrOpcode::kJSLoadNamed:
+    case IrOpcode::kJSStoreNamed:
+      return true;
+
+    default:
+      return false;
+  }
+}
+
+inline int OperatorProperties::GetValueInputCount(const Operator* op) {
+  return op->InputCount();
+}
+
+inline int OperatorProperties::GetContextInputCount(const Operator* op) {
+  return OperatorProperties::HasContextInput(op) ? 1 : 0;
+}
+
+inline int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
+  return OperatorProperties::HasFrameStateInput(op) ? 1 : 0;
+}
+
+inline int OperatorProperties::GetEffectInputCount(const Operator* op) {
+  if (op->opcode() == IrOpcode::kEffectPhi ||
+      op->opcode() == IrOpcode::kFinish) {
+    return OpParameter<int>(op);
+  }
+  if (op->HasProperty(Operator::kNoRead) && op->HasProperty(Operator::kNoWrite))
+    return 0;  // no effects.
+  return 1;
+}
+
+inline int OperatorProperties::GetControlInputCount(const Operator* op) {
+  switch (op->opcode()) {
+    case IrOpcode::kPhi:
+    case IrOpcode::kEffectPhi:
+    case IrOpcode::kControlEffect:
+      return 1;
+#define OPCODE_CASE(x) case IrOpcode::k##x:
+      CONTROL_OP_LIST(OPCODE_CASE)
+#undef OPCODE_CASE
+      // Control operators are Operator1<int>.
+      return OpParameter<int>(op);
+    default:
+      // Operators that have write effects must have a control
+      // dependency. Effect dependencies only ensure the correct order of
+      // write/read operations without consideration of control flow. Without an
+      // explicit control dependency writes can float in the schedule too
+      // early along a path that shouldn't generate a side-effect.
+      return op->HasProperty(Operator::kNoWrite) ? 0 : 1;
+  }
+  return 0;
+}
+
+inline int OperatorProperties::GetTotalInputCount(const Operator* op) {
+  return GetValueInputCount(op) + GetContextInputCount(op) +
+         GetFrameStateInputCount(op) + GetEffectInputCount(op) +
+         GetControlInputCount(op);
+}
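+
+// As a worked example (sketch): with --turbo-deoptimization enabled, a JSAdd
+// operator has 2 value inputs, 1 context input, 1 frame state input, 1 effect
+// input and 1 control input, so GetTotalInputCount() returns 6; without the
+// flag the frame state slot is omitted and the total is 5.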
+
+// -----------------------------------------------------------------------------
+// Output properties.
+
+inline bool OperatorProperties::HasValueOutput(const Operator* op) {
+  return GetValueOutputCount(op) > 0;
+}
+
+inline bool OperatorProperties::HasEffectOutput(const Operator* op) {
+  return op->opcode() == IrOpcode::kStart ||
+         op->opcode() == IrOpcode::kControlEffect ||
+         op->opcode() == IrOpcode::kValueEffect ||
+         (op->opcode() != IrOpcode::kFinish && GetEffectInputCount(op) > 0);
+}
+
+inline bool OperatorProperties::HasControlOutput(const Operator* op) {
+  IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
+  return (opcode != IrOpcode::kEnd && IrOpcode::IsControlOpcode(opcode));
+}
+
+
+inline int OperatorProperties::GetValueOutputCount(const Operator* op) {
+  return op->OutputCount();
+}
+
+inline int OperatorProperties::GetEffectOutputCount(const Operator* op) {
+  return HasEffectOutput(op) ? 1 : 0;
+}
+
+inline int OperatorProperties::GetControlOutputCount(const Operator* node) {
+  return node->opcode() == IrOpcode::kBranch ? 2 : HasControlOutput(node) ? 1
+                                                                          : 0;
+}
+
+
+inline bool OperatorProperties::IsBasicBlockBegin(const Operator* op) {
+  uint8_t opcode = op->opcode();
+  return opcode == IrOpcode::kStart || opcode == IrOpcode::kEnd ||
+         opcode == IrOpcode::kDead || opcode == IrOpcode::kLoop ||
+         opcode == IrOpcode::kMerge || opcode == IrOpcode::kIfTrue ||
+         opcode == IrOpcode::kIfFalse;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_OPERATOR_PROPERTIES_INL_H_
diff --git a/src/compiler/operator-properties.h b/src/compiler/operator-properties.h
new file mode 100644
index 0000000..718eea0
--- /dev/null
+++ b/src/compiler/operator-properties.h
@@ -0,0 +1,44 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_OPERATOR_PROPERTIES_H_
+#define V8_COMPILER_OPERATOR_PROPERTIES_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Operator;
+
+class OperatorProperties {
+ public:
+  static inline bool HasValueInput(const Operator* op);
+  static inline bool HasContextInput(const Operator* op);
+  static inline bool HasEffectInput(const Operator* op);
+  static inline bool HasControlInput(const Operator* op);
+  static inline bool HasFrameStateInput(const Operator* op);
+
+  static inline int GetValueInputCount(const Operator* op);
+  static inline int GetContextInputCount(const Operator* op);
+  static inline int GetEffectInputCount(const Operator* op);
+  static inline int GetControlInputCount(const Operator* op);
+  static inline int GetFrameStateInputCount(const Operator* op);
+  static inline int GetTotalInputCount(const Operator* op);
+
+  static inline bool HasValueOutput(const Operator* op);
+  static inline bool HasEffectOutput(const Operator* op);
+  static inline bool HasControlOutput(const Operator* op);
+
+  static inline int GetValueOutputCount(const Operator* op);
+  static inline int GetEffectOutputCount(const Operator* op);
+  static inline int GetControlOutputCount(const Operator* op);
+
+  static inline bool IsBasicBlockBegin(const Operator* op);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_OPERATOR_PROPERTIES_H_
diff --git a/src/compiler/operator.cc b/src/compiler/operator.cc
new file mode 100644
index 0000000..35f9c88
--- /dev/null
+++ b/src/compiler/operator.cc
@@ -0,0 +1,26 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Operator::~Operator() {}
+
+
+SimpleOperator::SimpleOperator(Opcode opcode, Properties properties,
+                               int input_count, int output_count,
+                               const char* mnemonic)
+    : Operator(opcode, properties, mnemonic),
+      input_count_(input_count),
+      output_count_(output_count) {}
+
+
+SimpleOperator::~SimpleOperator() {}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/operator.h b/src/compiler/operator.h
new file mode 100644
index 0000000..5137806
--- /dev/null
+++ b/src/compiler/operator.h
@@ -0,0 +1,262 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_OPERATOR_H_
+#define V8_COMPILER_OPERATOR_H_
+
+#include "src/base/flags.h"
+#include "src/ostreams.h"
+#include "src/unique.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// An operator represents description of the "computation" of a node in the
+// compiler IR. A computation takes values (i.e. data) as input and produces
+// zero or more values as output. The side-effects of a computation must be
+// captured by additional control and data dependencies which are part of the
+// IR graph.
+// Operators are immutable and describe the statically-known parts of a
+// computation. Thus they can be safely shared by many different nodes in the
+// IR graph, or even globally between graphs. Operators can have "static
+// parameters" which are compile-time constant parameters to the operator, such
+// as the name for a named field access, the ID of a runtime function, etc.
+// Static parameters are private to the operator and only semantically
+// meaningful to the operator itself.
+class Operator : public ZoneObject {
+ public:
+  typedef uint8_t Opcode;
+
+  // Properties inform the operator-independent optimizer about legal
+  // transformations for nodes that have this operator.
+  enum Property {
+    kNoProperties = 0,
+    kReducible = 1 << 0,    // Participates in strength reduction.
+    kCommutative = 1 << 1,  // OP(a, b) == OP(b, a) for all inputs.
+    kAssociative = 1 << 2,  // OP(a, OP(b,c)) == OP(OP(a,b), c) for all inputs.
+    kIdempotent = 1 << 3,   // OP(a); OP(a) == OP(a).
+    kNoRead = 1 << 4,       // Has no scheduling dependency on Effects
+    kNoWrite = 1 << 5,      // Does not modify any Effects and thereby
+                            // create new scheduling dependencies.
+    kNoThrow = 1 << 6,      // Can never generate an exception.
+    kFoldable = kNoRead | kNoWrite,
+    kEliminatable = kNoWrite | kNoThrow,
+    kPure = kNoRead | kNoWrite | kNoThrow | kIdempotent
+  };
+  typedef base::Flags<Property, uint8_t> Properties;
+
+  Operator(Opcode opcode, Properties properties, const char* mnemonic)
+      : opcode_(opcode), properties_(properties), mnemonic_(mnemonic) {}
+  virtual ~Operator();
+
+  // A small integer unique to all instances of a particular kind of operator,
+  // useful for quick matching for specific kinds of operators. For fast access
+  // the opcode is stored directly in the operator object.
+  Opcode opcode() const { return opcode_; }
+
+  // Returns a constant string representing the mnemonic of the operator,
+  // without the static parameters. Useful for debugging.
+  const char* mnemonic() const { return mnemonic_; }
+
+  // Check if this operator equals another operator. Equivalent operators can
+  // be merged, and nodes with equivalent operators and equivalent inputs
+  // can be merged.
+  virtual bool Equals(const Operator* other) const = 0;
+
+  // Compute a hashcode to speed up equivalence-set checking.
+  // Equal operators should always have equal hashcodes, and unequal operators
+  // should have unequal hashcodes with high probability.
+  virtual int HashCode() const = 0;
+
+  // Check whether this operator has the given property.
+  bool HasProperty(Property property) const {
+    return (properties() & property) == property;
+  }
+
+  // Number of data inputs to the operator, for verifying graph structure.
+  virtual int InputCount() const = 0;
+
+  // Number of data outputs from the operator, for verifying graph structure.
+  virtual int OutputCount() const = 0;
+
+  Properties properties() const { return properties_; }
+
+  // TODO(titzer): API for input and output types, for typechecking graph.
+ protected:
+  // Print the full operator into the given stream, including any
+  // static parameters. Useful for debugging and visualizing the IR.
+  virtual OStream& PrintTo(OStream& os) const = 0;  // NOLINT
+  friend OStream& operator<<(OStream& os, const Operator& op);
+
+ private:
+  Opcode opcode_;
+  Properties properties_;
+  const char* mnemonic_;
+
+  DISALLOW_COPY_AND_ASSIGN(Operator);
+};
+
+DEFINE_OPERATORS_FOR_FLAGS(Operator::Properties)
+
+OStream& operator<<(OStream& os, const Operator& op);
+
+// An implementation of Operator that has no static parameters. Such operators
+// have just a name, an opcode, and a fixed number of inputs and outputs.
+// They can be represented by singletons and shared globally.
+class SimpleOperator : public Operator {
+ public:
+  SimpleOperator(Opcode opcode, Properties properties, int input_count,
+                 int output_count, const char* mnemonic);
+  ~SimpleOperator();
+
+  virtual bool Equals(const Operator* that) const FINAL {
+    return opcode() == that->opcode();
+  }
+  virtual int HashCode() const FINAL { return opcode(); }
+  virtual int InputCount() const FINAL { return input_count_; }
+  virtual int OutputCount() const FINAL { return output_count_; }
+
+ private:
+  virtual OStream& PrintTo(OStream& os) const FINAL {  // NOLINT
+    return os << mnemonic();
+  }
+
+  int input_count_;
+  int output_count_;
+
+  DISALLOW_COPY_AND_ASSIGN(SimpleOperator);
+};
+
+// Template specialization implements a kind of type class for dealing with the
+// static parameters of Operator1 automatically.
+template <typename T>
+struct StaticParameterTraits {
+  static OStream& PrintTo(OStream& os, T val) {  // NOLINT
+    return os << "??";
+  }
+  static int HashCode(T a) { return 0; }
+  static bool Equals(T a, T b) {
+    return false;  // Not every T has a ==. By default, be conservative.
+  }
+};
+
+// Specialization for static parameters of type {int}.
+template <>
+struct StaticParameterTraits<int> {
+  static OStream& PrintTo(OStream& os, int val) {  // NOLINT
+    return os << val;
+  }
+  static int HashCode(int a) { return a; }
+  static bool Equals(int a, int b) { return a == b; }
+};
+
+// Specialization for static parameters of type {double}.
+template <>
+struct StaticParameterTraits<double> {
+  static OStream& PrintTo(OStream& os, double val) {  // NOLINT
+    return os << val;
+  }
+  static int HashCode(double a) {
+    return static_cast<int>(bit_cast<int64_t>(a));
+  }
+  static bool Equals(double a, double b) {
+    return bit_cast<int64_t>(a) == bit_cast<int64_t>(b);
+  }
+};
+
+// Specialization for static parameters of type {Unique<Object>}.
+template <>
+struct StaticParameterTraits<Unique<Object> > {
+  static OStream& PrintTo(OStream& os, Unique<Object> val) {  // NOLINT
+    return os << Brief(*val.handle());
+  }
+  static int HashCode(Unique<Object> a) {
+    return static_cast<int>(a.Hashcode());
+  }
+  static bool Equals(Unique<Object> a, Unique<Object> b) { return a == b; }
+};
+
+// Specialization for static parameters of type {Unique<Name>}.
+template <>
+struct StaticParameterTraits<Unique<Name> > {
+  static OStream& PrintTo(OStream& os, Unique<Name> val) {  // NOLINT
+    return os << Brief(*val.handle());
+  }
+  static int HashCode(Unique<Name> a) { return static_cast<int>(a.Hashcode()); }
+  static bool Equals(Unique<Name> a, Unique<Name> b) { return a == b; }
+};
+
+#if DEBUG
+// Specialization for static parameters of type {Handle<Object>} to prevent any
+// direct usage of Handles in constants.
+template <>
+struct StaticParameterTraits<Handle<Object> > {
+  static OStream& PrintTo(OStream& os, Handle<Object> val) {  // NOLINT
+    UNREACHABLE();  // Should use Unique<Object> instead
+    return os;
+  }
+  static int HashCode(Handle<Object> a) {
+    UNREACHABLE();  // Should use Unique<Object> instead
+    return 0;
+  }
+  static bool Equals(Handle<Object> a, Handle<Object> b) {
+    UNREACHABLE();  // Should use Unique<Object> instead
+    return false;
+  }
+};
+#endif
+
+// A templatized implementation of Operator that has one static parameter of
+// type {T}. If a specialization of StaticParameterTraits<{T}> exists, then
+// operators of this kind can automatically be hashed, compared, and printed.
+template <typename T>
+class Operator1 : public Operator {
+ public:
+  Operator1(Opcode opcode, Properties properties, int input_count,
+            int output_count, const char* mnemonic, T parameter)
+      : Operator(opcode, properties, mnemonic),
+        input_count_(input_count),
+        output_count_(output_count),
+        parameter_(parameter) {}
+
+  const T& parameter() const { return parameter_; }
+
+  virtual bool Equals(const Operator* other) const OVERRIDE {
+    if (opcode() != other->opcode()) return false;
+    const Operator1<T>* that = static_cast<const Operator1<T>*>(other);
+    return StaticParameterTraits<T>::Equals(this->parameter_, that->parameter_);
+  }
+  virtual int HashCode() const OVERRIDE {
+    return opcode() + 33 * StaticParameterTraits<T>::HashCode(this->parameter_);
+  }
+  virtual int InputCount() const OVERRIDE { return input_count_; }
+  virtual int OutputCount() const OVERRIDE { return output_count_; }
+  virtual OStream& PrintParameter(OStream& os) const {  // NOLINT
+    return StaticParameterTraits<T>::PrintTo(os << "[", parameter_) << "]";
+  }
+
+ protected:
+  virtual OStream& PrintTo(OStream& os) const FINAL {  // NOLINT
+    return PrintParameter(os << mnemonic());
+  }
+
+ private:
+  int input_count_;
+  int output_count_;
+  T parameter_;
+};
+
+
+// Helper to extract parameters from an Operator1<*> operator.
+template <typename T>
+static inline const T& OpParameter(const Operator* op) {
+  return reinterpret_cast<const Operator1<T>*>(op)->parameter();
+}
+
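+// A minimal sketch of how Operator1 and OpParameter fit together (the counts
+// and the parameter value below are illustrative; builders normally allocate
+// such operators in a zone and share them):
+//
+//   Operator1<int> param_op(IrOpcode::kParameter, Operator::kPure, 1, 1,
+//                           "Parameter", 3);
+//   int index = OpParameter<int>(&param_op);  // index == 3
+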
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_OPERATOR_H_
diff --git a/src/compiler/phi-reducer.h b/src/compiler/phi-reducer.h
new file mode 100644
index 0000000..5870d04
--- /dev/null
+++ b/src/compiler/phi-reducer.h
@@ -0,0 +1,42 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_PHI_REDUCER_H_
+#define V8_COMPILER_PHI_REDUCER_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Replaces redundant phis if all the inputs are the same or the phi itself.
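+//
+// For example (informal sketch, not real node syntax):
+//
+//   p = Phi(x, x, p)   =>   p is replaced by x
+//   p = Phi(x, y)      =>   no change (two distinct inputs)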
+class PhiReducer FINAL : public Reducer {
+ public:
+  virtual Reduction Reduce(Node* node) OVERRIDE {
+    if (node->opcode() != IrOpcode::kPhi &&
+        node->opcode() != IrOpcode::kEffectPhi)
+      return NoChange();
+
+    int n = node->op()->InputCount();
+    if (n == 1) return Replace(node->InputAt(0));
+
+    Node* replacement = NULL;
+    Node::Inputs inputs = node->inputs();
+    for (InputIter it = inputs.begin(); n > 0; --n, ++it) {
+      Node* input = *it;
+      if (input != node && input != replacement) {
+        if (replacement != NULL) return NoChange();
+        replacement = input;
+      }
+    }
+    DCHECK_NE(node, replacement);
+    return Replace(replacement);
+  }
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_PHI_REDUCER_H_
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
new file mode 100644
index 0000000..9889b6a
--- /dev/null
+++ b/src/compiler/pipeline.cc
@@ -0,0 +1,422 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/pipeline.h"
+
+#include "src/base/platform/elapsed-timer.h"
+#include "src/compiler/ast-graph-builder.h"
+#include "src/compiler/change-lowering.h"
+#include "src/compiler/code-generator.h"
+#include "src/compiler/graph-replay.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/instruction.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/js-context-specialization.h"
+#include "src/compiler/js-generic-lowering.h"
+#include "src/compiler/js-inlining.h"
+#include "src/compiler/js-typed-lowering.h"
+#include "src/compiler/machine-operator-reducer.h"
+#include "src/compiler/phi-reducer.h"
+#include "src/compiler/register-allocator.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/scheduler.h"
+#include "src/compiler/simplified-lowering.h"
+#include "src/compiler/simplified-operator-reducer.h"
+#include "src/compiler/typer.h"
+#include "src/compiler/value-numbering-reducer.h"
+#include "src/compiler/verifier.h"
+#include "src/hydrogen.h"
+#include "src/ostreams.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class PhaseStats {
+ public:
+  enum PhaseKind { CREATE_GRAPH, OPTIMIZATION, CODEGEN };
+
+  PhaseStats(CompilationInfo* info, PhaseKind kind, const char* name)
+      : info_(info),
+        kind_(kind),
+        name_(name),
+        size_(info->zone()->allocation_size()) {
+    if (FLAG_turbo_stats) {
+      timer_.Start();
+    }
+  }
+
+  ~PhaseStats() {
+    if (FLAG_turbo_stats) {
+      base::TimeDelta delta = timer_.Elapsed();
+      size_t bytes = info_->zone()->allocation_size() - size_;
+      HStatistics* stats = info_->isolate()->GetTStatistics();
+      stats->SaveTiming(name_, delta, static_cast<int>(bytes));
+
+      switch (kind_) {
+        case CREATE_GRAPH:
+          stats->IncrementCreateGraph(delta);
+          break;
+        case OPTIMIZATION:
+          stats->IncrementOptimizeGraph(delta);
+          break;
+        case CODEGEN:
+          stats->IncrementGenerateCode(delta);
+          break;
+      }
+    }
+  }
+
+ private:
+  CompilationInfo* info_;
+  PhaseKind kind_;
+  const char* name_;
+  size_t size_;
+  base::ElapsedTimer timer_;
+};
+
+
+static inline bool VerifyGraphs() {
+#ifdef DEBUG
+  return true;
+#else
+  return FLAG_turbo_verify;
+#endif
+}
+
+
+void Pipeline::VerifyAndPrintGraph(Graph* graph, const char* phase) {
+  if (FLAG_trace_turbo) {
+    char buffer[256];
+    Vector<char> filename(buffer, sizeof(buffer));
+    if (!info_->shared_info().is_null()) {
+      SmartArrayPointer<char> functionname =
+          info_->shared_info()->DebugName()->ToCString();
+      if (strlen(functionname.get()) > 0) {
+        SNPrintF(filename, "turbo-%s-%s.dot", functionname.get(), phase);
+      } else {
+        SNPrintF(filename, "turbo-%p-%s.dot", static_cast<void*>(info_), phase);
+      }
+    } else {
+      SNPrintF(filename, "turbo-none-%s.dot", phase);
+    }
+    std::replace(filename.start(), filename.start() + filename.length(), ' ',
+                 '_');
+    FILE* file = base::OS::FOpen(filename.start(), "w+");
+    OFStream of(file);
+    of << AsDOT(*graph);
+    fclose(file);
+
+    OFStream os(stdout);
+    os << "-- " << phase << " graph printed to file " << filename.start()
+       << "\n";
+  }
+  if (VerifyGraphs()) Verifier::Run(graph);
+}
+
+
+class AstGraphBuilderWithPositions : public AstGraphBuilder {
+ public:
+  explicit AstGraphBuilderWithPositions(CompilationInfo* info, JSGraph* jsgraph,
+                                        SourcePositionTable* source_positions)
+      : AstGraphBuilder(info, jsgraph), source_positions_(source_positions) {}
+
+  bool CreateGraph() {
+    SourcePositionTable::Scope pos(source_positions_,
+                                   SourcePosition::Unknown());
+    return AstGraphBuilder::CreateGraph();
+  }
+
+#define DEF_VISIT(type)                                               \
+  virtual void Visit##type(type* node) OVERRIDE {                  \
+    SourcePositionTable::Scope pos(source_positions_,                 \
+                                   SourcePosition(node->position())); \
+    AstGraphBuilder::Visit##type(node);                               \
+  }
+  AST_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+ private:
+  SourcePositionTable* source_positions_;
+};
+
+
+static void TraceSchedule(Schedule* schedule) {
+  if (!FLAG_trace_turbo) return;
+  OFStream os(stdout);
+  os << "-- Schedule --------------------------------------\n" << *schedule;
+}
+
+
+Handle<Code> Pipeline::GenerateCode() {
+  if (info()->function()->dont_optimize_reason() == kTryCatchStatement ||
+      info()->function()->dont_optimize_reason() == kTryFinallyStatement ||
+      // TODO(turbofan): Make ES6 for-of work and remove this bailout.
+      info()->function()->dont_optimize_reason() == kForOfStatement ||
+      // TODO(turbofan): Make super work and remove this bailout.
+      info()->function()->dont_optimize_reason() == kSuperReference ||
+      // TODO(turbofan): Make OSR work and remove this bailout.
+      info()->is_osr()) {
+    return Handle<Code>::null();
+  }
+
+  if (FLAG_turbo_stats) isolate()->GetTStatistics()->Initialize(info_);
+
+  if (FLAG_trace_turbo) {
+    OFStream os(stdout);
+    os << "---------------------------------------------------\n"
+       << "Begin compiling method "
+       << info()->function()->debug_name()->ToCString().get()
+       << " using Turbofan" << endl;
+  }
+
+  // Build the graph.
+  Graph graph(zone());
+  SourcePositionTable source_positions(&graph);
+  source_positions.AddDecorator();
+  // TODO(turbofan): there is no need to type anything during initial graph
+  // construction.  This is currently only needed for the node cache, which the
+  // typer could sweep over later.
+  Typer typer(zone());
+  MachineOperatorBuilder machine;
+  CommonOperatorBuilder common(zone());
+  JSOperatorBuilder javascript(zone());
+  JSGraph jsgraph(&graph, &common, &javascript, &typer, &machine);
+  Node* context_node;
+  {
+    PhaseStats graph_builder_stats(info(), PhaseStats::CREATE_GRAPH,
+                                   "graph builder");
+    AstGraphBuilderWithPositions graph_builder(info(), &jsgraph,
+                                               &source_positions);
+    graph_builder.CreateGraph();
+    context_node = graph_builder.GetFunctionContext();
+  }
+  {
+    PhaseStats phi_reducer_stats(info(), PhaseStats::CREATE_GRAPH,
+                                 "phi reduction");
+    PhiReducer phi_reducer;
+    GraphReducer graph_reducer(&graph);
+    graph_reducer.AddReducer(&phi_reducer);
+    graph_reducer.ReduceGraph();
+    // TODO(mstarzinger): Running reducer once ought to be enough for everyone.
+    graph_reducer.ReduceGraph();
+    graph_reducer.ReduceGraph();
+  }
+
+  VerifyAndPrintGraph(&graph, "Initial untyped");
+
+  if (info()->is_context_specializing()) {
+    SourcePositionTable::Scope pos(&source_positions,
+                                   SourcePosition::Unknown());
+    // Specialize the code to the context as aggressively as possible.
+    JSContextSpecializer spec(info(), &jsgraph, context_node);
+    spec.SpecializeToContext();
+    VerifyAndPrintGraph(&graph, "Context specialized");
+  }
+
+  if (info()->is_inlining_enabled()) {
+    SourcePositionTable::Scope pos(&source_positions,
+                                   SourcePosition::Unknown());
+    JSInliner inliner(info(), &jsgraph);
+    inliner.Inline();
+    VerifyAndPrintGraph(&graph, "Inlined");
+  }
+
+  // Print a replay of the initial graph.
+  if (FLAG_print_turbo_replay) {
+    GraphReplayPrinter::PrintReplay(&graph);
+  }
+
+  if (info()->is_typing_enabled()) {
+    {
+      // Type the graph.
+      PhaseStats typer_stats(info(), PhaseStats::CREATE_GRAPH, "typer");
+      typer.Run(&graph, info()->context());
+      VerifyAndPrintGraph(&graph, "Typed");
+    }
+    // All new nodes must be typed.
+    typer.DecorateGraph(&graph);
+    {
+      // Lower JSOperators where we can determine types.
+      PhaseStats lowering_stats(info(), PhaseStats::CREATE_GRAPH,
+                                "typed lowering");
+      SourcePositionTable::Scope pos(&source_positions,
+                                     SourcePosition::Unknown());
+      JSTypedLowering lowering(&jsgraph);
+      GraphReducer graph_reducer(&graph);
+      graph_reducer.AddReducer(&lowering);
+      graph_reducer.ReduceGraph();
+
+      VerifyAndPrintGraph(&graph, "Lowered typed");
+    }
+    {
+      // Lower simplified operators and insert changes.
+      PhaseStats lowering_stats(info(), PhaseStats::CREATE_GRAPH,
+                                "simplified lowering");
+      SourcePositionTable::Scope pos(&source_positions,
+                                     SourcePosition::Unknown());
+      SimplifiedLowering lowering(&jsgraph);
+      lowering.LowerAllNodes();
+
+      VerifyAndPrintGraph(&graph, "Lowered simplified");
+    }
+    {
+      // Lower changes that have been inserted before.
+      PhaseStats lowering_stats(info(), PhaseStats::OPTIMIZATION,
+                                "change lowering");
+      SourcePositionTable::Scope pos(&source_positions,
+                                     SourcePosition::Unknown());
+      Linkage linkage(info());
+      // TODO(turbofan): Value numbering disabled for now.
+      // ValueNumberingReducer vn_reducer(zone());
+      SimplifiedOperatorReducer simple_reducer(&jsgraph);
+      ChangeLowering lowering(&jsgraph, &linkage);
+      MachineOperatorReducer mach_reducer(&jsgraph);
+      GraphReducer graph_reducer(&graph);
+      // TODO(titzer): Figure out if we should run all reducers at once here.
+      // graph_reducer.AddReducer(&vn_reducer);
+      graph_reducer.AddReducer(&simple_reducer);
+      graph_reducer.AddReducer(&lowering);
+      graph_reducer.AddReducer(&mach_reducer);
+      graph_reducer.ReduceGraph();
+
+      VerifyAndPrintGraph(&graph, "Lowered changes");
+    }
+  }
+
+  Handle<Code> code = Handle<Code>::null();
+  if (SupportedTarget()) {
+    {
+      // Lower any remaining generic JSOperators.
+      PhaseStats lowering_stats(info(), PhaseStats::CREATE_GRAPH,
+                                "generic lowering");
+      SourcePositionTable::Scope pos(&source_positions,
+                                     SourcePosition::Unknown());
+      JSGenericLowering lowering(info(), &jsgraph);
+      GraphReducer graph_reducer(&graph);
+      graph_reducer.AddReducer(&lowering);
+      graph_reducer.ReduceGraph();
+
+      VerifyAndPrintGraph(&graph, "Lowered generic");
+    }
+
+    {
+      // Compute a schedule.
+      Schedule* schedule = ComputeSchedule(&graph);
+      // Generate optimized code.
+      PhaseStats codegen_stats(info(), PhaseStats::CODEGEN, "codegen");
+      Linkage linkage(info());
+      code = GenerateCode(&linkage, &graph, schedule, &source_positions);
+      info()->SetCode(code);
+    }
+
+    // Print optimized code.
+    v8::internal::CodeGenerator::PrintCode(code, info());
+  }
+
+  if (FLAG_trace_turbo) {
+    OFStream os(stdout);
+    os << "--------------------------------------------------\n"
+       << "Finished compiling method "
+       << info()->function()->debug_name()->ToCString().get()
+       << " using Turbofan" << endl;
+  }
+
+  return code;
+}
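+// In short, GenerateCode runs: graph building (with source positions), phi
+// reduction, optional context specialization and inlining, then, when typing
+// is enabled, typing, typed lowering, simplified lowering and change
+// lowering, and finally, on supported targets, generic lowering, scheduling
+// and code generation.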
+
+
+Schedule* Pipeline::ComputeSchedule(Graph* graph) {
+  PhaseStats schedule_stats(info(), PhaseStats::CODEGEN, "scheduling");
+  Schedule* schedule = Scheduler::ComputeSchedule(graph);
+  TraceSchedule(schedule);
+  if (VerifyGraphs()) ScheduleVerifier::Run(schedule);
+  return schedule;
+}
+
+
+Handle<Code> Pipeline::GenerateCodeForMachineGraph(Linkage* linkage,
+                                                   Graph* graph,
+                                                   Schedule* schedule) {
+  CHECK(SupportedBackend());
+  if (schedule == NULL) {
+    VerifyAndPrintGraph(graph, "Machine");
+    schedule = ComputeSchedule(graph);
+  }
+  TraceSchedule(schedule);
+
+  SourcePositionTable source_positions(graph);
+  Handle<Code> code = GenerateCode(linkage, graph, schedule, &source_positions);
+#if ENABLE_DISASSEMBLER
+  if (!code.is_null() && FLAG_print_opt_code) {
+    CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+    OFStream os(tracing_scope.file());
+    code->Disassemble("test code", os);
+  }
+#endif
+  return code;
+}
+
+
+Handle<Code> Pipeline::GenerateCode(Linkage* linkage, Graph* graph,
+                                    Schedule* schedule,
+                                    SourcePositionTable* source_positions) {
+  DCHECK_NOT_NULL(graph);
+  DCHECK_NOT_NULL(linkage);
+  DCHECK_NOT_NULL(schedule);
+  CHECK(SupportedBackend());
+
+  InstructionSequence sequence(linkage, graph, schedule);
+
+  // Select and schedule instructions covering the scheduled graph.
+  {
+    InstructionSelector selector(&sequence, source_positions);
+    selector.SelectInstructions();
+  }
+
+  if (FLAG_trace_turbo) {
+    OFStream os(stdout);
+    os << "----- Instruction sequence before register allocation -----\n"
+       << sequence;
+  }
+
+  // Allocate registers.
+  {
+    int node_count = graph->NodeCount();
+    if (node_count > UnallocatedOperand::kMaxVirtualRegisters) {
+      linkage->info()->AbortOptimization(kNotEnoughVirtualRegistersForValues);
+      return Handle<Code>::null();
+    }
+    RegisterAllocator allocator(&sequence);
+    if (!allocator.Allocate()) {
+      linkage->info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
+      return Handle<Code>::null();
+    }
+  }
+
+  if (FLAG_trace_turbo) {
+    OFStream os(stdout);
+    os << "----- Instruction sequence after register allocation -----\n"
+       << sequence;
+  }
+
+  // Generate native sequence.
+  CodeGenerator generator(&sequence);
+  return generator.GenerateCode();
+}
+
+
+void Pipeline::SetUp() {
+  InstructionOperand::SetUpCaches();
+}
+
+
+void Pipeline::TearDown() {
+  InstructionOperand::TearDownCaches();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/pipeline.h b/src/compiler/pipeline.h
new file mode 100644
index 0000000..9f8241a
--- /dev/null
+++ b/src/compiler/pipeline.h
@@ -0,0 +1,59 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_PIPELINE_H_
+#define V8_COMPILER_PIPELINE_H_
+
+#include "src/v8.h"
+
+#include "src/compiler.h"
+
+// Note: TODO(turbofan) implies a performance improvement opportunity,
+//   and TODO(name) implies an incomplete implementation
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Clients of this interface shouldn't depend on lots of compiler internals.
+class Graph;
+class Schedule;
+class SourcePositionTable;
+class Linkage;
+
+class Pipeline {
+ public:
+  explicit Pipeline(CompilationInfo* info) : info_(info) {}
+
+  // Run the entire pipeline and generate a handle to a code object.
+  Handle<Code> GenerateCode();
+
+  // Run the pipeline on a machine graph and generate code. If {schedule}
+  // is {NULL}, then compute a new schedule for code generation.
+  Handle<Code> GenerateCodeForMachineGraph(Linkage* linkage, Graph* graph,
+                                           Schedule* schedule = NULL);
+
+  static inline bool SupportedBackend() { return V8_TURBOFAN_BACKEND != 0; }
+  static inline bool SupportedTarget() { return V8_TURBOFAN_TARGET != 0; }
+
+  static void SetUp();
+  static void TearDown();
+
+ private:
+  CompilationInfo* info_;
+
+  CompilationInfo* info() const { return info_; }
+  Isolate* isolate() { return info_->isolate(); }
+  Zone* zone() { return info_->zone(); }
+
+  Schedule* ComputeSchedule(Graph* graph);
+  void VerifyAndPrintGraph(Graph* graph, const char* phase);
+  Handle<Code> GenerateCode(Linkage* linkage, Graph* graph, Schedule* schedule,
+                            SourcePositionTable* source_positions);
+};
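+// A minimal usage sketch (illustrative only; the CompilationInfo is assumed
+// to be set up by the caller):
+//
+//   Pipeline pipeline(info);
+//   Handle<Code> code = pipeline.GenerateCode();
+//   if (code.is_null()) {
+//     // Unsupported target or aborted optimization; fall back elsewhere.
+//   }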
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_PIPELINE_H_
diff --git a/src/compiler/raw-machine-assembler.cc b/src/compiler/raw-machine-assembler.cc
new file mode 100644
index 0000000..7f45eb9
--- /dev/null
+++ b/src/compiler/raw-machine-assembler.cc
@@ -0,0 +1,165 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/code-factory.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/compiler/scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+RawMachineAssembler::RawMachineAssembler(Graph* graph,
+                                         MachineSignature* machine_sig,
+                                         MachineType word)
+    : GraphBuilder(graph),
+      schedule_(new (zone()) Schedule(zone())),
+      machine_(word),
+      common_(zone()),
+      machine_sig_(machine_sig),
+      call_descriptor_(
+          Linkage::GetSimplifiedCDescriptor(graph->zone(), machine_sig)),
+      parameters_(NULL),
+      exit_label_(schedule()->end()),
+      current_block_(schedule()->start()) {
+  int param_count = static_cast<int>(parameter_count());
+  Node* s = graph->NewNode(common_.Start(param_count));
+  graph->SetStart(s);
+  if (parameter_count() == 0) return;
+  parameters_ = zone()->NewArray<Node*>(param_count);
+  for (size_t i = 0; i < parameter_count(); ++i) {
+    parameters_[i] =
+        NewNode(common()->Parameter(static_cast<int>(i)), graph->start());
+  }
+}
+
+
+Schedule* RawMachineAssembler::Export() {
+  // Compute the correct codegen order.
+  DCHECK(schedule_->rpo_order()->empty());
+  Scheduler::ComputeSpecialRPO(schedule_);
+  // Invalidate MachineAssembler.
+  Schedule* schedule = schedule_;
+  schedule_ = NULL;
+  return schedule;
+}
+
+
+Node* RawMachineAssembler::Parameter(size_t index) {
+  DCHECK(index < parameter_count());
+  return parameters_[index];
+}
+
+
+RawMachineAssembler::Label* RawMachineAssembler::Exit() {
+  exit_label_.used_ = true;
+  return &exit_label_;
+}
+
+
+void RawMachineAssembler::Goto(Label* label) {
+  DCHECK(current_block_ != schedule()->end());
+  schedule()->AddGoto(CurrentBlock(), Use(label));
+  current_block_ = NULL;
+}
+
+
+void RawMachineAssembler::Branch(Node* condition, Label* true_val,
+                                 Label* false_val) {
+  DCHECK(current_block_ != schedule()->end());
+  Node* branch = NewNode(common()->Branch(), condition);
+  schedule()->AddBranch(CurrentBlock(), branch, Use(true_val), Use(false_val));
+  current_block_ = NULL;
+}
+
+
+void RawMachineAssembler::Return(Node* value) {
+  schedule()->AddReturn(CurrentBlock(), value);
+  current_block_ = NULL;
+}
+
+
+Node* RawMachineAssembler::CallFunctionStub0(Node* function, Node* receiver,
+                                             Node* context, Node* frame_state,
+                                             CallFunctionFlags flags) {
+  Callable callable = CodeFactory::CallFunction(isolate(), 0, flags);
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      callable.descriptor(), 1, CallDescriptor::kNeedsFrameState, zone());
+  Node* stub_code = HeapConstant(callable.code());
+  Node* call = graph()->NewNode(common()->Call(desc), stub_code, function,
+                                receiver, context, frame_state);
+  schedule()->AddNode(CurrentBlock(), call);
+  return call;
+}
+
+
+Node* RawMachineAssembler::CallJS0(Node* function, Node* receiver,
+                                   Node* context, Node* frame_state) {
+  CallDescriptor* descriptor = Linkage::GetJSCallDescriptor(1, zone());
+  Node* call = graph()->NewNode(common()->Call(descriptor), function, receiver,
+                                context, frame_state);
+  schedule()->AddNode(CurrentBlock(), call);
+  return call;
+}
+
+
+Node* RawMachineAssembler::CallRuntime1(Runtime::FunctionId function,
+                                        Node* arg0, Node* context,
+                                        Node* frame_state) {
+  CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
+      function, 1, Operator::kNoProperties, zone());
+
+  Node* centry = HeapConstant(CEntryStub(isolate(), 1).GetCode());
+  Node* ref = NewNode(
+      common()->ExternalConstant(ExternalReference(function, isolate())));
+  Node* arity = Int32Constant(1);
+
+  Node* call = graph()->NewNode(common()->Call(descriptor), centry, arg0, ref,
+                                arity, context, frame_state);
+  schedule()->AddNode(CurrentBlock(), call);
+  return call;
+}
+
+
+void RawMachineAssembler::Bind(Label* label) {
+  DCHECK(current_block_ == NULL);
+  DCHECK(!label->bound_);
+  label->bound_ = true;
+  current_block_ = EnsureBlock(label);
+}
+
+
+BasicBlock* RawMachineAssembler::Use(Label* label) {
+  label->used_ = true;
+  return EnsureBlock(label);
+}
+
+
+BasicBlock* RawMachineAssembler::EnsureBlock(Label* label) {
+  if (label->block_ == NULL) label->block_ = schedule()->NewBasicBlock();
+  return label->block_;
+}
+
+
+BasicBlock* RawMachineAssembler::CurrentBlock() {
+  DCHECK(current_block_);
+  return current_block_;
+}
+
+
+Node* RawMachineAssembler::MakeNode(const Operator* op, int input_count,
+                                    Node** inputs) {
+  DCHECK(ScheduleValid());
+  DCHECK(current_block_ != NULL);
+  Node* node = graph()->NewNode(op, input_count, inputs);
+  BasicBlock* block = op->opcode() == IrOpcode::kParameter ? schedule()->start()
+                                                           : CurrentBlock();
+  schedule()->AddNode(block, node);
+  return node;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h
new file mode 100644
index 0000000..a4af55a
--- /dev/null
+++ b/src/compiler/raw-machine-assembler.h
@@ -0,0 +1,438 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
+#define V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-builder.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class BasicBlock;
+class Schedule;
+
+
+class RawMachineAssembler : public GraphBuilder {
+ public:
+  class Label {
+   public:
+    Label() : block_(NULL), used_(false), bound_(false) {}
+    ~Label() { DCHECK(bound_ || !used_); }
+
+    BasicBlock* block() { return block_; }
+
+   private:
+    // Private constructor for exit label.
+    explicit Label(BasicBlock* block)
+        : block_(block), used_(false), bound_(false) {}
+
+    BasicBlock* block_;
+    bool used_;
+    bool bound_;
+    friend class RawMachineAssembler;
+    DISALLOW_COPY_AND_ASSIGN(Label);
+  };
+
+  RawMachineAssembler(Graph* graph, MachineSignature* machine_sig,
+                      MachineType word = kMachPtr);
+  virtual ~RawMachineAssembler() {}
+
+  Isolate* isolate() const { return zone()->isolate(); }
+  Zone* zone() const { return graph()->zone(); }
+  MachineOperatorBuilder* machine() { return &machine_; }
+  CommonOperatorBuilder* common() { return &common_; }
+  CallDescriptor* call_descriptor() const { return call_descriptor_; }
+  size_t parameter_count() const { return machine_sig_->parameter_count(); }
+  MachineSignature* machine_sig() const { return machine_sig_; }
+
+  Node* UndefinedConstant() {
+    Unique<Object> unique = Unique<Object>::CreateImmovable(
+        isolate()->factory()->undefined_value());
+    return NewNode(common()->HeapConstant(unique));
+  }
+
+  // Constants.
+  Node* PointerConstant(void* value) {
+    return IntPtrConstant(reinterpret_cast<intptr_t>(value));
+  }
+  Node* IntPtrConstant(intptr_t value) {
+    // TODO(dcarney): mark generated code as unserializable if value != 0.
+    return kPointerSize == 8 ? Int64Constant(value)
+                             : Int32Constant(static_cast<int>(value));
+  }
+  Node* Int32Constant(int32_t value) {
+    return NewNode(common()->Int32Constant(value));
+  }
+  Node* Int64Constant(int64_t value) {
+    return NewNode(common()->Int64Constant(value));
+  }
+  Node* NumberConstant(double value) {
+    return NewNode(common()->NumberConstant(value));
+  }
+  Node* Float64Constant(double value) {
+    return NewNode(common()->Float64Constant(value));
+  }
+  Node* HeapConstant(Handle<Object> object) {
+    Unique<Object> val = Unique<Object>::CreateUninitialized(object);
+    return NewNode(common()->HeapConstant(val));
+  }
+
+  Node* Projection(int index, Node* a) {
+    return NewNode(common()->Projection(index), a);
+  }
+
+  // Memory Operations.
+  Node* Load(MachineType rep, Node* base) {
+    return Load(rep, base, Int32Constant(0));
+  }
+  Node* Load(MachineType rep, Node* base, Node* index) {
+    return NewNode(machine()->Load(rep), base, index);
+  }
+  void Store(MachineType rep, Node* base, Node* value) {
+    Store(rep, base, Int32Constant(0), value);
+  }
+  void Store(MachineType rep, Node* base, Node* index, Node* value) {
+    NewNode(machine()->Store(StoreRepresentation(rep, kNoWriteBarrier)), base,
+            index, value);
+  }
+  // Arithmetic Operations.
+  Node* WordAnd(Node* a, Node* b) {
+    return NewNode(machine()->WordAnd(), a, b);
+  }
+  Node* WordOr(Node* a, Node* b) { return NewNode(machine()->WordOr(), a, b); }
+  Node* WordXor(Node* a, Node* b) {
+    return NewNode(machine()->WordXor(), a, b);
+  }
+  Node* WordShl(Node* a, Node* b) {
+    return NewNode(machine()->WordShl(), a, b);
+  }
+  Node* WordShr(Node* a, Node* b) {
+    return NewNode(machine()->WordShr(), a, b);
+  }
+  Node* WordSar(Node* a, Node* b) {
+    return NewNode(machine()->WordSar(), a, b);
+  }
+  Node* WordRor(Node* a, Node* b) {
+    return NewNode(machine()->WordRor(), a, b);
+  }
+  Node* WordEqual(Node* a, Node* b) {
+    return NewNode(machine()->WordEqual(), a, b);
+  }
+  Node* WordNotEqual(Node* a, Node* b) {
+    return WordBinaryNot(WordEqual(a, b));
+  }
+  Node* WordNot(Node* a) {
+    if (machine()->Is32()) {
+      return Word32Not(a);
+    } else {
+      return Word64Not(a);
+    }
+  }
+  Node* WordBinaryNot(Node* a) {
+    if (machine()->Is32()) {
+      return Word32BinaryNot(a);
+    } else {
+      return Word64BinaryNot(a);
+    }
+  }
+
+  Node* Word32And(Node* a, Node* b) {
+    return NewNode(machine()->Word32And(), a, b);
+  }
+  Node* Word32Or(Node* a, Node* b) {
+    return NewNode(machine()->Word32Or(), a, b);
+  }
+  Node* Word32Xor(Node* a, Node* b) {
+    return NewNode(machine()->Word32Xor(), a, b);
+  }
+  Node* Word32Shl(Node* a, Node* b) {
+    return NewNode(machine()->Word32Shl(), a, b);
+  }
+  Node* Word32Shr(Node* a, Node* b) {
+    return NewNode(machine()->Word32Shr(), a, b);
+  }
+  Node* Word32Sar(Node* a, Node* b) {
+    return NewNode(machine()->Word32Sar(), a, b);
+  }
+  Node* Word32Ror(Node* a, Node* b) {
+    return NewNode(machine()->Word32Ror(), a, b);
+  }
+  Node* Word32Equal(Node* a, Node* b) {
+    return NewNode(machine()->Word32Equal(), a, b);
+  }
+  Node* Word32NotEqual(Node* a, Node* b) {
+    return Word32BinaryNot(Word32Equal(a, b));
+  }
+  Node* Word32Not(Node* a) { return Word32Xor(a, Int32Constant(-1)); }
+  Node* Word32BinaryNot(Node* a) { return Word32Equal(a, Int32Constant(0)); }
+
+  Node* Word64And(Node* a, Node* b) {
+    return NewNode(machine()->Word64And(), a, b);
+  }
+  Node* Word64Or(Node* a, Node* b) {
+    return NewNode(machine()->Word64Or(), a, b);
+  }
+  Node* Word64Xor(Node* a, Node* b) {
+    return NewNode(machine()->Word64Xor(), a, b);
+  }
+  Node* Word64Shl(Node* a, Node* b) {
+    return NewNode(machine()->Word64Shl(), a, b);
+  }
+  Node* Word64Shr(Node* a, Node* b) {
+    return NewNode(machine()->Word64Shr(), a, b);
+  }
+  Node* Word64Sar(Node* a, Node* b) {
+    return NewNode(machine()->Word64Sar(), a, b);
+  }
+  Node* Word64Ror(Node* a, Node* b) {
+    return NewNode(machine()->Word64Ror(), a, b);
+  }
+  Node* Word64Equal(Node* a, Node* b) {
+    return NewNode(machine()->Word64Equal(), a, b);
+  }
+  Node* Word64NotEqual(Node* a, Node* b) {
+    return Word64BinaryNot(Word64Equal(a, b));
+  }
+  Node* Word64Not(Node* a) { return Word64Xor(a, Int64Constant(-1)); }
+  Node* Word64BinaryNot(Node* a) { return Word64Equal(a, Int64Constant(0)); }
+
+  Node* Int32Add(Node* a, Node* b) {
+    return NewNode(machine()->Int32Add(), a, b);
+  }
+  Node* Int32AddWithOverflow(Node* a, Node* b) {
+    return NewNode(machine()->Int32AddWithOverflow(), a, b);
+  }
+  Node* Int32Sub(Node* a, Node* b) {
+    return NewNode(machine()->Int32Sub(), a, b);
+  }
+  Node* Int32SubWithOverflow(Node* a, Node* b) {
+    return NewNode(machine()->Int32SubWithOverflow(), a, b);
+  }
+  Node* Int32Mul(Node* a, Node* b) {
+    return NewNode(machine()->Int32Mul(), a, b);
+  }
+  Node* Int32Div(Node* a, Node* b) {
+    return NewNode(machine()->Int32Div(), a, b);
+  }
+  Node* Int32UDiv(Node* a, Node* b) {
+    return NewNode(machine()->Int32UDiv(), a, b);
+  }
+  Node* Int32Mod(Node* a, Node* b) {
+    return NewNode(machine()->Int32Mod(), a, b);
+  }
+  Node* Int32UMod(Node* a, Node* b) {
+    return NewNode(machine()->Int32UMod(), a, b);
+  }
+  Node* Int32LessThan(Node* a, Node* b) {
+    return NewNode(machine()->Int32LessThan(), a, b);
+  }
+  Node* Int32LessThanOrEqual(Node* a, Node* b) {
+    return NewNode(machine()->Int32LessThanOrEqual(), a, b);
+  }
+  Node* Uint32LessThan(Node* a, Node* b) {
+    return NewNode(machine()->Uint32LessThan(), a, b);
+  }
+  Node* Uint32LessThanOrEqual(Node* a, Node* b) {
+    return NewNode(machine()->Uint32LessThanOrEqual(), a, b);
+  }
+  Node* Int32GreaterThan(Node* a, Node* b) { return Int32LessThan(b, a); }
+  Node* Int32GreaterThanOrEqual(Node* a, Node* b) {
+    return Int32LessThanOrEqual(b, a);
+  }
+  Node* Int32Neg(Node* a) { return Int32Sub(Int32Constant(0), a); }
+
+  Node* Int64Add(Node* a, Node* b) {
+    return NewNode(machine()->Int64Add(), a, b);
+  }
+  Node* Int64Sub(Node* a, Node* b) {
+    return NewNode(machine()->Int64Sub(), a, b);
+  }
+  Node* Int64Mul(Node* a, Node* b) {
+    return NewNode(machine()->Int64Mul(), a, b);
+  }
+  Node* Int64Div(Node* a, Node* b) {
+    return NewNode(machine()->Int64Div(), a, b);
+  }
+  Node* Int64UDiv(Node* a, Node* b) {
+    return NewNode(machine()->Int64UDiv(), a, b);
+  }
+  Node* Int64Mod(Node* a, Node* b) {
+    return NewNode(machine()->Int64Mod(), a, b);
+  }
+  Node* Int64UMod(Node* a, Node* b) {
+    return NewNode(machine()->Int64UMod(), a, b);
+  }
+  Node* Int64Neg(Node* a) { return Int64Sub(Int64Constant(0), a); }
+  Node* Int64LessThan(Node* a, Node* b) {
+    return NewNode(machine()->Int64LessThan(), a, b);
+  }
+  Node* Int64LessThanOrEqual(Node* a, Node* b) {
+    return NewNode(machine()->Int64LessThanOrEqual(), a, b);
+  }
+  Node* Int64GreaterThan(Node* a, Node* b) { return Int64LessThan(b, a); }
+  Node* Int64GreaterThanOrEqual(Node* a, Node* b) {
+    return Int64LessThanOrEqual(b, a);
+  }
+
+  // TODO(turbofan): What is this used for?
+  Node* ConvertIntPtrToInt32(Node* a) {
+    return kPointerSize == 8 ? NewNode(machine()->TruncateInt64ToInt32(), a)
+                             : a;
+  }
+  Node* ConvertInt32ToIntPtr(Node* a) {
+    return kPointerSize == 8 ? NewNode(machine()->ChangeInt32ToInt64(), a) : a;
+  }
+
+#define INTPTR_BINOP(prefix, name)                     \
+  Node* IntPtr##name(Node* a, Node* b) {               \
+    return kPointerSize == 8 ? prefix##64##name(a, b)  \
+                             : prefix##32##name(a, b); \
+  }
+
+  INTPTR_BINOP(Int, Add);
+  INTPTR_BINOP(Int, Sub);
+  INTPTR_BINOP(Int, LessThan);
+  INTPTR_BINOP(Int, LessThanOrEqual);
+  INTPTR_BINOP(Word, Equal);
+  INTPTR_BINOP(Word, NotEqual);
+  INTPTR_BINOP(Int, GreaterThanOrEqual);
+  INTPTR_BINOP(Int, GreaterThan);
+
+#undef INTPTR_BINOP
+
+  Node* Float64Add(Node* a, Node* b) {
+    return NewNode(machine()->Float64Add(), a, b);
+  }
+  Node* Float64Sub(Node* a, Node* b) {
+    return NewNode(machine()->Float64Sub(), a, b);
+  }
+  Node* Float64Mul(Node* a, Node* b) {
+    return NewNode(machine()->Float64Mul(), a, b);
+  }
+  Node* Float64Div(Node* a, Node* b) {
+    return NewNode(machine()->Float64Div(), a, b);
+  }
+  Node* Float64Mod(Node* a, Node* b) {
+    return NewNode(machine()->Float64Mod(), a, b);
+  }
+  Node* Float64Equal(Node* a, Node* b) {
+    return NewNode(machine()->Float64Equal(), a, b);
+  }
+  Node* Float64NotEqual(Node* a, Node* b) {
+    return WordBinaryNot(Float64Equal(a, b));
+  }
+  Node* Float64LessThan(Node* a, Node* b) {
+    return NewNode(machine()->Float64LessThan(), a, b);
+  }
+  Node* Float64LessThanOrEqual(Node* a, Node* b) {
+    return NewNode(machine()->Float64LessThanOrEqual(), a, b);
+  }
+  Node* Float64GreaterThan(Node* a, Node* b) { return Float64LessThan(b, a); }
+  Node* Float64GreaterThanOrEqual(Node* a, Node* b) {
+    return Float64LessThanOrEqual(b, a);
+  }
+
+  // Conversions.
+  Node* ChangeInt32ToFloat64(Node* a) {
+    return NewNode(machine()->ChangeInt32ToFloat64(), a);
+  }
+  Node* ChangeUint32ToFloat64(Node* a) {
+    return NewNode(machine()->ChangeUint32ToFloat64(), a);
+  }
+  Node* ChangeFloat64ToInt32(Node* a) {
+    return NewNode(machine()->ChangeFloat64ToInt32(), a);
+  }
+  Node* ChangeFloat64ToUint32(Node* a) {
+    return NewNode(machine()->ChangeFloat64ToUint32(), a);
+  }
+  Node* ChangeInt32ToInt64(Node* a) {
+    return NewNode(machine()->ChangeInt32ToInt64(), a);
+  }
+  Node* ChangeUint32ToUint64(Node* a) {
+    return NewNode(machine()->ChangeUint32ToUint64(), a);
+  }
+  Node* TruncateFloat64ToInt32(Node* a) {
+    return NewNode(machine()->TruncateFloat64ToInt32(), a);
+  }
+  Node* TruncateInt64ToInt32(Node* a) {
+    return NewNode(machine()->TruncateInt64ToInt32(), a);
+  }
+
+  // Parameters.
+  Node* Parameter(size_t index);
+
+  // Control flow.
+  Label* Exit();
+  void Goto(Label* label);
+  void Branch(Node* condition, Label* true_val, Label* false_val);
+  // Call through CallFunctionStub with lazy deopt and frame-state.
+  Node* CallFunctionStub0(Node* function, Node* receiver, Node* context,
+                          Node* frame_state, CallFunctionFlags flags);
+  // Call to a JS function with zero parameters.
+  Node* CallJS0(Node* function, Node* receiver, Node* context,
+                Node* frame_state);
+  // Call to a runtime function with zero parameters.
+  Node* CallRuntime1(Runtime::FunctionId function, Node* arg0, Node* context,
+                     Node* frame_state);
+  void Return(Node* value);
+  void Bind(Label* label);
+  void Deoptimize(Node* state);
+
+  // Variables.
+  Node* Phi(MachineType type, Node* n1, Node* n2) {
+    return NewNode(common()->Phi(type, 2), n1, n2);
+  }
+  Node* Phi(MachineType type, Node* n1, Node* n2, Node* n3) {
+    return NewNode(common()->Phi(type, 3), n1, n2, n3);
+  }
+  Node* Phi(MachineType type, Node* n1, Node* n2, Node* n3, Node* n4) {
+    return NewNode(common()->Phi(type, 4), n1, n2, n3, n4);
+  }
+
+  // MachineAssembler is invalid after export.
+  Schedule* Export();
+
+ protected:
+  virtual Node* MakeNode(const Operator* op, int input_count,
+                         Node** inputs) FINAL;
+
+  bool ScheduleValid() { return schedule_ != NULL; }
+
+  Schedule* schedule() {
+    DCHECK(ScheduleValid());
+    return schedule_;
+  }
+
+ private:
+  BasicBlock* Use(Label* label);
+  BasicBlock* EnsureBlock(Label* label);
+  BasicBlock* CurrentBlock();
+
+  Schedule* schedule_;
+  MachineOperatorBuilder machine_;
+  CommonOperatorBuilder common_;
+  MachineSignature* machine_sig_;
+  CallDescriptor* call_descriptor_;
+  Node** parameters_;
+  Label exit_label_;
+  BasicBlock* current_block_;
+
+  DISALLOW_COPY_AND_ASSIGN(RawMachineAssembler);
+};
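+// Illustrative usage sketch (assumptions: |zone| and a one-parameter
+// |machine_sig| are provided by the caller): build |Parameter(0) + 1|,
+// return it and export the schedule.
+//
+//   Graph graph(zone);
+//   RawMachineAssembler m(&graph, machine_sig);
+//   m.Return(m.Int32Add(m.Parameter(0), m.Int32Constant(1)));
+//   Schedule* schedule = m.Export();  // The assembler is invalid afterwards.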
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
new file mode 100644
index 0000000..972a904
--- /dev/null
+++ b/src/compiler/register-allocator.cc
@@ -0,0 +1,2232 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/register-allocator.h"
+
+#include "src/compiler/linkage.h"
+#include "src/hydrogen.h"
+#include "src/string-stream.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
+  return a.Value() < b.Value() ? a : b;
+}
+
+
+static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) {
+  return a.Value() > b.Value() ? a : b;
+}
+
+
+UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
+                         InstructionOperand* hint)
+    : operand_(operand),
+      hint_(hint),
+      pos_(pos),
+      next_(NULL),
+      requires_reg_(false),
+      register_beneficial_(true) {
+  if (operand_ != NULL && operand_->IsUnallocated()) {
+    const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
+    requires_reg_ = unalloc->HasRegisterPolicy();
+    register_beneficial_ = !unalloc->HasAnyPolicy();
+  }
+  DCHECK(pos_.IsValid());
+}
+
+
+bool UsePosition::HasHint() const {
+  return hint_ != NULL && !hint_->IsUnallocated();
+}
+
+
+bool UsePosition::RequiresRegister() const { return requires_reg_; }
+
+
+bool UsePosition::RegisterIsBeneficial() const { return register_beneficial_; }
+
+
+void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
+  DCHECK(Contains(pos) && pos.Value() != start().Value());
+  UseInterval* after = new (zone) UseInterval(pos, end_);
+  after->next_ = next_;
+  next_ = after;
+  end_ = pos;
+}
+
+
+#ifdef DEBUG
+
+
+void LiveRange::Verify() const {
+  UsePosition* cur = first_pos_;
+  while (cur != NULL) {
+    DCHECK(Start().Value() <= cur->pos().Value() &&
+           cur->pos().Value() <= End().Value());
+    cur = cur->next();
+  }
+}
+
+
+bool LiveRange::HasOverlap(UseInterval* target) const {
+  UseInterval* current_interval = first_interval_;
+  while (current_interval != NULL) {
+    // Intervals overlap if the start of one is contained in the other.
+    if (current_interval->Contains(target->start()) ||
+        target->Contains(current_interval->start())) {
+      return true;
+    }
+    current_interval = current_interval->next();
+  }
+  return false;
+}
+
+
+#endif
+
+
+LiveRange::LiveRange(int id, Zone* zone)
+    : id_(id),
+      spilled_(false),
+      is_phi_(false),
+      is_non_loop_phi_(false),
+      kind_(UNALLOCATED_REGISTERS),
+      assigned_register_(kInvalidAssignment),
+      last_interval_(NULL),
+      first_interval_(NULL),
+      first_pos_(NULL),
+      parent_(NULL),
+      next_(NULL),
+      current_interval_(NULL),
+      last_processed_use_(NULL),
+      current_hint_operand_(NULL),
+      spill_operand_(new (zone) InstructionOperand()),
+      spill_start_index_(kMaxInt) {}
+
+
+void LiveRange::set_assigned_register(int reg, Zone* zone) {
+  DCHECK(!HasRegisterAssigned() && !IsSpilled());
+  assigned_register_ = reg;
+  ConvertOperands(zone);
+}
+
+
+void LiveRange::MakeSpilled(Zone* zone) {
+  DCHECK(!IsSpilled());
+  DCHECK(TopLevel()->HasAllocatedSpillOperand());
+  spilled_ = true;
+  assigned_register_ = kInvalidAssignment;
+  ConvertOperands(zone);
+}
+
+
+bool LiveRange::HasAllocatedSpillOperand() const {
+  DCHECK(spill_operand_ != NULL);
+  return !spill_operand_->IsIgnored();
+}
+
+
+void LiveRange::SetSpillOperand(InstructionOperand* operand) {
+  DCHECK(!operand->IsUnallocated());
+  DCHECK(spill_operand_ != NULL);
+  DCHECK(spill_operand_->IsIgnored());
+  spill_operand_->ConvertTo(operand->kind(), operand->index());
+}
+
+
+UsePosition* LiveRange::NextUsePosition(LifetimePosition start) {
+  UsePosition* use_pos = last_processed_use_;
+  if (use_pos == NULL) use_pos = first_pos();
+  while (use_pos != NULL && use_pos->pos().Value() < start.Value()) {
+    use_pos = use_pos->next();
+  }
+  last_processed_use_ = use_pos;
+  return use_pos;
+}
+
+
+UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
+    LifetimePosition start) {
+  UsePosition* pos = NextUsePosition(start);
+  while (pos != NULL && !pos->RegisterIsBeneficial()) {
+    pos = pos->next();
+  }
+  return pos;
+}
+
+
+UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
+    LifetimePosition start) {
+  UsePosition* pos = first_pos();
+  UsePosition* prev = NULL;
+  while (pos != NULL && pos->pos().Value() < start.Value()) {
+    if (pos->RegisterIsBeneficial()) prev = pos;
+    pos = pos->next();
+  }
+  return prev;
+}
+
+
+UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) {
+  UsePosition* pos = NextUsePosition(start);
+  while (pos != NULL && !pos->RequiresRegister()) {
+    pos = pos->next();
+  }
+  return pos;
+}
+
+
+bool LiveRange::CanBeSpilled(LifetimePosition pos) {
+  // We cannot spill a live range that has a use requiring a register
+  // at the current or the immediate next position.
+  UsePosition* use_pos = NextRegisterPosition(pos);
+  if (use_pos == NULL) return true;
+  return use_pos->pos().Value() >
+         pos.NextInstruction().InstructionEnd().Value();
+}
+
+
+InstructionOperand* LiveRange::CreateAssignedOperand(Zone* zone) {
+  InstructionOperand* op = NULL;
+  if (HasRegisterAssigned()) {
+    DCHECK(!IsSpilled());
+    switch (Kind()) {
+      case GENERAL_REGISTERS:
+        op = RegisterOperand::Create(assigned_register(), zone);
+        break;
+      case DOUBLE_REGISTERS:
+        op = DoubleRegisterOperand::Create(assigned_register(), zone);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else if (IsSpilled()) {
+    DCHECK(!HasRegisterAssigned());
+    op = TopLevel()->GetSpillOperand();
+    DCHECK(!op->IsUnallocated());
+  } else {
+    UnallocatedOperand* unalloc =
+        new (zone) UnallocatedOperand(UnallocatedOperand::NONE);
+    unalloc->set_virtual_register(id_);
+    op = unalloc;
+  }
+  return op;
+}
+
+
+UseInterval* LiveRange::FirstSearchIntervalForPosition(
+    LifetimePosition position) const {
+  if (current_interval_ == NULL) return first_interval_;
+  if (current_interval_->start().Value() > position.Value()) {
+    current_interval_ = NULL;
+    return first_interval_;
+  }
+  return current_interval_;
+}
+
+
+void LiveRange::AdvanceLastProcessedMarker(
+    UseInterval* to_start_of, LifetimePosition but_not_past) const {
+  if (to_start_of == NULL) return;
+  if (to_start_of->start().Value() > but_not_past.Value()) return;
+  LifetimePosition start = current_interval_ == NULL
+                               ? LifetimePosition::Invalid()
+                               : current_interval_->start();
+  if (to_start_of->start().Value() > start.Value()) {
+    current_interval_ = to_start_of;
+  }
+}
+
+
+void LiveRange::SplitAt(LifetimePosition position, LiveRange* result,
+                        Zone* zone) {
+  DCHECK(Start().Value() < position.Value());
+  DCHECK(result->IsEmpty());
+  // Find the last interval that ends before the position. If the
+  // position is contained in one of the intervals in the chain, we
+  // split that interval and use the first part.
+  UseInterval* current = FirstSearchIntervalForPosition(position);
+
+  // If the split position coincides with the beginning of a use interval
+  // we need to split use positions in a special way.
+  bool split_at_start = false;
+
+  if (current->start().Value() == position.Value()) {
+    // When splitting at start we need to locate the previous use interval.
+    current = first_interval_;
+  }
+
+  while (current != NULL) {
+    if (current->Contains(position)) {
+      current->SplitAt(position, zone);
+      break;
+    }
+    UseInterval* next = current->next();
+    if (next->start().Value() >= position.Value()) {
+      split_at_start = (next->start().Value() == position.Value());
+      break;
+    }
+    current = next;
+  }
+
+  // Partition original use intervals to the two live ranges.
+  UseInterval* before = current;
+  UseInterval* after = before->next();
+  result->last_interval_ =
+      (last_interval_ == before)
+          ? after            // Only interval in the range after split.
+          : last_interval_;  // Last interval of the original range.
+  result->first_interval_ = after;
+  last_interval_ = before;
+
+  // Find the last use position before the split and the first use
+  // position after it.
+  UsePosition* use_after = first_pos_;
+  UsePosition* use_before = NULL;
+  if (split_at_start) {
+    // The split position coincides with the beginning of a use interval (the
+    // end of a lifetime hole). A use at this position should be attributed
+    // to the split child, because the split child owns the use interval
+    // covering it.
+    while (use_after != NULL && use_after->pos().Value() < position.Value()) {
+      use_before = use_after;
+      use_after = use_after->next();
+    }
+  } else {
+    while (use_after != NULL && use_after->pos().Value() <= position.Value()) {
+      use_before = use_after;
+      use_after = use_after->next();
+    }
+  }
+
+  // Partition original use positions to the two live ranges.
+  if (use_before != NULL) {
+    use_before->next_ = NULL;
+  } else {
+    first_pos_ = NULL;
+  }
+  result->first_pos_ = use_after;
+
+  // Discard cached iteration state. It might be pointing
+  // to the use that no longer belongs to this live range.
+  last_processed_use_ = NULL;
+  current_interval_ = NULL;
+
+  // Link the new live range in the chain before any of the other
+  // ranges linked from the range before the split.
+  result->parent_ = (parent_ == NULL) ? this : parent_;
+  result->kind_ = result->parent_->kind_;
+  result->next_ = next_;
+  next_ = result;
+
+#ifdef DEBUG
+  Verify();
+  result->Verify();
+#endif
+}
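+// For example (illustrative only): splitting a range whose single interval is
+// [10, 30) at position 20 leaves [10, 20) on this range and moves [20, 30) to
+// |result|; use positions at or before 20 stay here, later ones go to
+// |result|, which is linked in as the next range of the same parent.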
+
+
+// This implements an ordering on live ranges so that they are ordered by their
+// start positions.  This is needed for the correctness of the register
+// allocation algorithm.  If two live ranges start at the same offset then there
+// is a tie breaker based on where the value is first used.  This part of the
+// ordering is merely a heuristic.
+bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
+  LifetimePosition start = Start();
+  LifetimePosition other_start = other->Start();
+  if (start.Value() == other_start.Value()) {
+    UsePosition* pos = first_pos();
+    if (pos == NULL) return false;
+    UsePosition* other_pos = other->first_pos();
+    if (other_pos == NULL) return true;
+    return pos->pos().Value() < other_pos->pos().Value();
+  }
+  return start.Value() < other_start.Value();
+}
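+// For example (illustrative only): if two ranges both start at position 8,
+// the one whose first use comes earlier (say at 10 rather than 14) is
+// allocated first; a range with no use positions at all sorts after one that
+// has them.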
+
+
+void LiveRange::ShortenTo(LifetimePosition start) {
+  RegisterAllocator::TraceAlloc("Shorten live range %d to [%d\n", id_,
+                                start.Value());
+  DCHECK(first_interval_ != NULL);
+  DCHECK(first_interval_->start().Value() <= start.Value());
+  DCHECK(start.Value() < first_interval_->end().Value());
+  first_interval_->set_start(start);
+}
+
+
+void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end,
+                               Zone* zone) {
+  RegisterAllocator::TraceAlloc("Ensure live range %d in interval [%d %d[\n",
+                                id_, start.Value(), end.Value());
+  LifetimePosition new_end = end;
+  while (first_interval_ != NULL &&
+         first_interval_->start().Value() <= end.Value()) {
+    if (first_interval_->end().Value() > end.Value()) {
+      new_end = first_interval_->end();
+    }
+    first_interval_ = first_interval_->next();
+  }
+
+  UseInterval* new_interval = new (zone) UseInterval(start, new_end);
+  new_interval->next_ = first_interval_;
+  first_interval_ = new_interval;
+  if (new_interval->next() == NULL) {
+    last_interval_ = new_interval;
+  }
+}
+
+
+void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end,
+                               Zone* zone) {
+  RegisterAllocator::TraceAlloc("Add to live range %d interval [%d %d[\n", id_,
+                                start.Value(), end.Value());
+  if (first_interval_ == NULL) {
+    UseInterval* interval = new (zone) UseInterval(start, end);
+    first_interval_ = interval;
+    last_interval_ = interval;
+  } else {
+    if (end.Value() == first_interval_->start().Value()) {
+      first_interval_->set_start(start);
+    } else if (end.Value() < first_interval_->start().Value()) {
+      UseInterval* interval = new (zone) UseInterval(start, end);
+      interval->set_next(first_interval_);
+      first_interval_ = interval;
+    } else {
+      // The order of instruction processing (see ProcessInstructions)
+      // guarantees that each new use interval either precedes or intersects
+      // with the last added interval.
+      DCHECK(start.Value() < first_interval_->end().Value());
+      first_interval_->start_ = Min(start, first_interval_->start_);
+      first_interval_->end_ = Max(end, first_interval_->end_);
+    }
+  }
+}
+
+
+void LiveRange::AddUsePosition(LifetimePosition pos,
+                               InstructionOperand* operand,
+                               InstructionOperand* hint, Zone* zone) {
+  RegisterAllocator::TraceAlloc("Add to live range %d use position %d\n", id_,
+                                pos.Value());
+  UsePosition* use_pos = new (zone) UsePosition(pos, operand, hint);
+  UsePosition* prev_hint = NULL;
+  UsePosition* prev = NULL;
+  UsePosition* current = first_pos_;
+  while (current != NULL && current->pos().Value() < pos.Value()) {
+    prev_hint = current->HasHint() ? current : prev_hint;
+    prev = current;
+    current = current->next();
+  }
+
+  if (prev == NULL) {
+    use_pos->set_next(first_pos_);
+    first_pos_ = use_pos;
+  } else {
+    use_pos->next_ = prev->next_;
+    prev->next_ = use_pos;
+  }
+
+  if (prev_hint == NULL && use_pos->HasHint()) {
+    current_hint_operand_ = hint;
+  }
+}
+
+
+void LiveRange::ConvertOperands(Zone* zone) {
+  InstructionOperand* op = CreateAssignedOperand(zone);
+  UsePosition* use_pos = first_pos();
+  while (use_pos != NULL) {
+    DCHECK(Start().Value() <= use_pos->pos().Value() &&
+           use_pos->pos().Value() <= End().Value());
+
+    if (use_pos->HasOperand()) {
+      DCHECK(op->IsRegister() || op->IsDoubleRegister() ||
+             !use_pos->RequiresRegister());
+      use_pos->operand()->ConvertTo(op->kind(), op->index());
+    }
+    use_pos = use_pos->next();
+  }
+}
+
+
+bool LiveRange::CanCover(LifetimePosition position) const {
+  if (IsEmpty()) return false;
+  return Start().Value() <= position.Value() &&
+         position.Value() < End().Value();
+}
+
+
+bool LiveRange::Covers(LifetimePosition position) {
+  if (!CanCover(position)) return false;
+  UseInterval* start_search = FirstSearchIntervalForPosition(position);
+  for (UseInterval* interval = start_search; interval != NULL;
+       interval = interval->next()) {
+    DCHECK(interval->next() == NULL ||
+           interval->next()->start().Value() >= interval->start().Value());
+    AdvanceLastProcessedMarker(interval, position);
+    if (interval->Contains(position)) return true;
+    if (interval->start().Value() > position.Value()) return false;
+  }
+  return false;
+}
+
+
+LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
+  UseInterval* b = other->first_interval();
+  if (b == NULL) return LifetimePosition::Invalid();
+  LifetimePosition advance_last_processed_up_to = b->start();
+  UseInterval* a = FirstSearchIntervalForPosition(b->start());
+  while (a != NULL && b != NULL) {
+    if (a->start().Value() > other->End().Value()) break;
+    if (b->start().Value() > End().Value()) break;
+    LifetimePosition cur_intersection = a->Intersect(b);
+    if (cur_intersection.IsValid()) {
+      return cur_intersection;
+    }
+    if (a->start().Value() < b->start().Value()) {
+      a = a->next();
+      if (a == NULL || a->start().Value() > other->End().Value()) break;
+      AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
+    } else {
+      b = b->next();
+    }
+  }
+  return LifetimePosition::Invalid();
+}
+
+
+RegisterAllocator::RegisterAllocator(InstructionSequence* code)
+    : zone_(code->isolate()),
+      code_(code),
+      live_in_sets_(code->BasicBlockCount(), zone()),
+      live_ranges_(code->VirtualRegisterCount() * 2, zone()),
+      fixed_live_ranges_(NULL),
+      fixed_double_live_ranges_(NULL),
+      unhandled_live_ranges_(code->VirtualRegisterCount() * 2, zone()),
+      active_live_ranges_(8, zone()),
+      inactive_live_ranges_(8, zone()),
+      reusable_slots_(8, zone()),
+      mode_(UNALLOCATED_REGISTERS),
+      num_registers_(-1),
+      allocation_ok_(true) {}
+
+
+void RegisterAllocator::InitializeLivenessAnalysis() {
+  // Initialize the live_in sets for each block to NULL.
+  int block_count = code()->BasicBlockCount();
+  live_in_sets_.Initialize(block_count, zone());
+  live_in_sets_.AddBlock(NULL, block_count, zone());
+}
+
+
+BitVector* RegisterAllocator::ComputeLiveOut(BasicBlock* block) {
+  // Compute live out for the given block, except not including backward
+  // successor edges.
+  BitVector* live_out =
+      new (zone()) BitVector(code()->VirtualRegisterCount(), zone());
+
+  // Process all successor blocks.
+  BasicBlock::Successors successors = block->successors();
+  for (BasicBlock::Successors::iterator i = successors.begin();
+       i != successors.end(); ++i) {
+    // Add values live on entry to the successor. Note the successor's
+    // live_in will not be computed yet for backwards edges.
+    BasicBlock* successor = *i;
+    BitVector* live_in = live_in_sets_[successor->rpo_number_];
+    if (live_in != NULL) live_out->Union(*live_in);
+
+    // All phi input operands corresponding to this successor edge are live
+    // out from this block.
+    int index = successor->PredecessorIndexOf(block);
+    DCHECK(index >= 0);
+    DCHECK(index < static_cast<int>(successor->PredecessorCount()));
+    for (BasicBlock::const_iterator j = successor->begin();
+         j != successor->end(); ++j) {
+      Node* phi = *j;
+      if (phi->opcode() != IrOpcode::kPhi) continue;
+      Node* input = phi->InputAt(index);
+      live_out->Add(input->id());
+    }
+  }
+
+  return live_out;
+}
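+// For example (illustrative only): if |block| is predecessor number 1 of a
+// successor containing |phi(v0, v1)|, then |v1| is added to |block|'s
+// live-out set, together with everything already known to be live on entry
+// to that successor.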
+
+
+void RegisterAllocator::AddInitialIntervals(BasicBlock* block,
+                                            BitVector* live_out) {
+  // Add an interval that includes the entire block to the live range for
+  // each live_out value.
+  LifetimePosition start =
+      LifetimePosition::FromInstructionIndex(block->first_instruction_index());
+  LifetimePosition end = LifetimePosition::FromInstructionIndex(
+                             block->last_instruction_index()).NextInstruction();
+  BitVector::Iterator iterator(live_out);
+  while (!iterator.Done()) {
+    int operand_index = iterator.Current();
+    LiveRange* range = LiveRangeFor(operand_index);
+    range->AddUseInterval(start, end, zone());
+    iterator.Advance();
+  }
+}
+
+
+int RegisterAllocator::FixedDoubleLiveRangeID(int index) {
+  return -index - 1 - Register::kMaxNumAllocatableRegisters;
+}
+
+
+InstructionOperand* RegisterAllocator::AllocateFixed(
+    UnallocatedOperand* operand, int pos, bool is_tagged) {
+  TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
+  DCHECK(operand->HasFixedPolicy());
+  if (operand->HasFixedSlotPolicy()) {
+    operand->ConvertTo(InstructionOperand::STACK_SLOT,
+                       operand->fixed_slot_index());
+  } else if (operand->HasFixedRegisterPolicy()) {
+    int reg_index = operand->fixed_register_index();
+    operand->ConvertTo(InstructionOperand::REGISTER, reg_index);
+  } else if (operand->HasFixedDoubleRegisterPolicy()) {
+    int reg_index = operand->fixed_register_index();
+    operand->ConvertTo(InstructionOperand::DOUBLE_REGISTER, reg_index);
+  } else {
+    UNREACHABLE();
+  }
+  if (is_tagged) {
+    TraceAlloc("Fixed reg is tagged at %d\n", pos);
+    Instruction* instr = InstructionAt(pos);
+    if (instr->HasPointerMap()) {
+      instr->pointer_map()->RecordPointer(operand, code_zone());
+    }
+  }
+  return operand;
+}
+
+
+LiveRange* RegisterAllocator::FixedLiveRangeFor(int index) {
+  DCHECK(index < Register::kMaxNumAllocatableRegisters);
+  LiveRange* result = fixed_live_ranges_[index];
+  if (result == NULL) {
+    // TODO(titzer): add a utility method to allocate a new LiveRange: the
+    // LiveRange object itself can go in this zone, but the InstructionOperand
+    // needs to go in the code zone, since it may survive register allocation.
+    result = new (zone()) LiveRange(FixedLiveRangeID(index), code_zone());
+    DCHECK(result->IsFixed());
+    result->kind_ = GENERAL_REGISTERS;
+    SetLiveRangeAssignedRegister(result, index);
+    fixed_live_ranges_[index] = result;
+  }
+  return result;
+}
+
+
+LiveRange* RegisterAllocator::FixedDoubleLiveRangeFor(int index) {
+  DCHECK(index < DoubleRegister::NumAllocatableRegisters());
+  LiveRange* result = fixed_double_live_ranges_[index];
+  if (result == NULL) {
+    result = new (zone()) LiveRange(FixedDoubleLiveRangeID(index), code_zone());
+    DCHECK(result->IsFixed());
+    result->kind_ = DOUBLE_REGISTERS;
+    SetLiveRangeAssignedRegister(result, index);
+    fixed_double_live_ranges_[index] = result;
+  }
+  return result;
+}
+
+
+LiveRange* RegisterAllocator::LiveRangeFor(int index) {
+  if (index >= live_ranges_.length()) {
+    live_ranges_.AddBlock(NULL, index - live_ranges_.length() + 1, zone());
+  }
+  LiveRange* result = live_ranges_[index];
+  if (result == NULL) {
+    result = new (zone()) LiveRange(index, code_zone());
+    live_ranges_[index] = result;
+  }
+  return result;
+}
+
+
+GapInstruction* RegisterAllocator::GetLastGap(BasicBlock* block) {
+  int last_instruction = block->last_instruction_index();
+  return code()->GapAt(last_instruction - 1);
+}
+
+
+LiveRange* RegisterAllocator::LiveRangeFor(InstructionOperand* operand) {
+  if (operand->IsUnallocated()) {
+    return LiveRangeFor(UnallocatedOperand::cast(operand)->virtual_register());
+  } else if (operand->IsRegister()) {
+    return FixedLiveRangeFor(operand->index());
+  } else if (operand->IsDoubleRegister()) {
+    return FixedDoubleLiveRangeFor(operand->index());
+  } else {
+    return NULL;
+  }
+}
+
+
+void RegisterAllocator::Define(LifetimePosition position,
+                               InstructionOperand* operand,
+                               InstructionOperand* hint) {
+  LiveRange* range = LiveRangeFor(operand);
+  if (range == NULL) return;
+
+  if (range->IsEmpty() || range->Start().Value() > position.Value()) {
+    // Can happen if there is a definition without use.
+    range->AddUseInterval(position, position.NextInstruction(), zone());
+    range->AddUsePosition(position.NextInstruction(), NULL, NULL, zone());
+  } else {
+    range->ShortenTo(position);
+  }
+
+  if (operand->IsUnallocated()) {
+    UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
+    range->AddUsePosition(position, unalloc_operand, hint, zone());
+  }
+}
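+// For example (illustrative only): a value that is live out of the block
+// already has a whole-block interval (added by AddInitialIntervals); the
+// ShortenTo() call above trims that interval so the range begins at the
+// defining position. A value that is defined but never used only gets the
+// tiny one-instruction interval that Define creates.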
+
+
+void RegisterAllocator::Use(LifetimePosition block_start,
+                            LifetimePosition position,
+                            InstructionOperand* operand,
+                            InstructionOperand* hint) {
+  LiveRange* range = LiveRangeFor(operand);
+  if (range == NULL) return;
+  if (operand->IsUnallocated()) {
+    UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
+    range->AddUsePosition(position, unalloc_operand, hint, zone());
+  }
+  range->AddUseInterval(block_start, position, zone());
+}
+
+
+void RegisterAllocator::AddConstraintsGapMove(int index,
+                                              InstructionOperand* from,
+                                              InstructionOperand* to) {
+  GapInstruction* gap = code()->GapAt(index);
+  ParallelMove* move =
+      gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
+  if (from->IsUnallocated()) {
+    const ZoneList<MoveOperands>* move_operands = move->move_operands();
+    for (int i = 0; i < move_operands->length(); ++i) {
+      MoveOperands cur = move_operands->at(i);
+      InstructionOperand* cur_to = cur.destination();
+      if (cur_to->IsUnallocated()) {
+        if (UnallocatedOperand::cast(cur_to)->virtual_register() ==
+            UnallocatedOperand::cast(from)->virtual_register()) {
+          move->AddMove(cur.source(), to, code_zone());
+          return;
+        }
+      }
+    }
+  }
+  move->AddMove(from, to, code_zone());
+}
+
+
+void RegisterAllocator::MeetRegisterConstraints(BasicBlock* block) {
+  int start = block->first_instruction_index();
+  int end = block->last_instruction_index();
+  DCHECK_NE(-1, start);
+  for (int i = start; i <= end; ++i) {
+    if (code()->IsGapAt(i)) {
+      Instruction* instr = NULL;
+      Instruction* prev_instr = NULL;
+      if (i < end) instr = InstructionAt(i + 1);
+      if (i > start) prev_instr = InstructionAt(i - 1);
+      MeetConstraintsBetween(prev_instr, instr, i);
+      if (!AllocationOk()) return;
+    }
+  }
+
+  // Meet register constraints for the instruction in the end.
+  if (!code()->IsGapAt(end)) {
+    MeetRegisterConstraintsForLastInstructionInBlock(block);
+  }
+}
+
+
+void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
+    BasicBlock* block) {
+  int end = block->last_instruction_index();
+  Instruction* last_instruction = InstructionAt(end);
+  for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
+    InstructionOperand* output_operand = last_instruction->OutputAt(i);
+    DCHECK(!output_operand->IsConstant());
+    UnallocatedOperand* output = UnallocatedOperand::cast(output_operand);
+    int output_vreg = output->virtual_register();
+    LiveRange* range = LiveRangeFor(output_vreg);
+    bool assigned = false;
+    if (output->HasFixedPolicy()) {
+      AllocateFixed(output, -1, false);
+      // This value is produced on the stack; we never need to spill it.
+      if (output->IsStackSlot()) {
+        range->SetSpillOperand(output);
+        range->SetSpillStartIndex(end);
+        assigned = true;
+      }
+
+      BasicBlock::Successors successors = block->successors();
+      for (BasicBlock::Successors::iterator succ = successors.begin();
+           succ != successors.end(); ++succ) {
+        DCHECK((*succ)->PredecessorCount() == 1);
+        int gap_index = (*succ)->first_instruction_index() + 1;
+        DCHECK(code()->IsGapAt(gap_index));
+
+        // Create an unconstrained operand for the same virtual register
+        // and insert a gap move from the fixed output to the operand.
+        UnallocatedOperand* output_copy =
+            new (code_zone()) UnallocatedOperand(UnallocatedOperand::ANY);
+        output_copy->set_virtual_register(output_vreg);
+
+        code()->AddGapMove(gap_index, output, output_copy);
+      }
+    }
+
+    if (!assigned) {
+      BasicBlock::Successors successors = block->successors();
+      for (BasicBlock::Successors::iterator succ = successors.begin();
+           succ != successors.end(); ++succ) {
+        DCHECK((*succ)->PredecessorCount() == 1);
+        int gap_index = (*succ)->first_instruction_index() + 1;
+        range->SetSpillStartIndex(gap_index);
+
+        // This move to the spill operand is not a real use. Liveness analysis
+        // and splitting of live ranges do not account for it.
+        // Thus it should be inserted at a lifetime position corresponding to
+        // the instruction end.
+        GapInstruction* gap = code()->GapAt(gap_index);
+        ParallelMove* move =
+            gap->GetOrCreateParallelMove(GapInstruction::BEFORE, code_zone());
+        move->AddMove(output, range->GetSpillOperand(), code_zone());
+      }
+    }
+  }
+}
+
+
+void RegisterAllocator::MeetConstraintsBetween(Instruction* first,
+                                               Instruction* second,
+                                               int gap_index) {
+  if (first != NULL) {
+    // Handle fixed temporaries.
+    for (size_t i = 0; i < first->TempCount(); i++) {
+      UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i));
+      if (temp->HasFixedPolicy()) {
+        AllocateFixed(temp, gap_index - 1, false);
+      }
+    }
+
+    // Handle constant/fixed output operands.
+    for (size_t i = 0; i < first->OutputCount(); i++) {
+      InstructionOperand* output = first->OutputAt(i);
+      if (output->IsConstant()) {
+        int output_vreg = output->index();
+        LiveRange* range = LiveRangeFor(output_vreg);
+        range->SetSpillStartIndex(gap_index - 1);
+        range->SetSpillOperand(output);
+      } else {
+        UnallocatedOperand* first_output = UnallocatedOperand::cast(output);
+        LiveRange* range = LiveRangeFor(first_output->virtual_register());
+        bool assigned = false;
+        if (first_output->HasFixedPolicy()) {
+          UnallocatedOperand* output_copy =
+              first_output->CopyUnconstrained(code_zone());
+          bool is_tagged = HasTaggedValue(first_output->virtual_register());
+          AllocateFixed(first_output, gap_index, is_tagged);
+
+          // This value is produced on the stack; we never need to spill it.
+          if (first_output->IsStackSlot()) {
+            range->SetSpillOperand(first_output);
+            range->SetSpillStartIndex(gap_index - 1);
+            assigned = true;
+          }
+          code()->AddGapMove(gap_index, first_output, output_copy);
+        }
+
+        // Make sure we add a gap move for spilling (if we have not done
+        // so already).
+        if (!assigned) {
+          range->SetSpillStartIndex(gap_index);
+
+          // This move to spill operand is not a real use. Liveness analysis
+          // and splitting of live ranges do not account for it.
+          // Thus it should be inserted to a lifetime position corresponding to
+          // the instruction end.
+          GapInstruction* gap = code()->GapAt(gap_index);
+          ParallelMove* move =
+              gap->GetOrCreateParallelMove(GapInstruction::BEFORE, code_zone());
+          move->AddMove(first_output, range->GetSpillOperand(), code_zone());
+        }
+      }
+    }
+  }
+
+  if (second != NULL) {
+    // Handle fixed input operands of second instruction.
+    for (size_t i = 0; i < second->InputCount(); i++) {
+      InstructionOperand* input = second->InputAt(i);
+      if (input->IsImmediate()) continue;  // Ignore immediates.
+      UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
+      if (cur_input->HasFixedPolicy()) {
+        UnallocatedOperand* input_copy =
+            cur_input->CopyUnconstrained(code_zone());
+        bool is_tagged = HasTaggedValue(cur_input->virtual_register());
+        AllocateFixed(cur_input, gap_index + 1, is_tagged);
+        AddConstraintsGapMove(gap_index, input_copy, cur_input);
+      }
+    }
+
+    // Handle "output same as input" for second instruction.
+    for (size_t i = 0; i < second->OutputCount(); i++) {
+      InstructionOperand* output = second->OutputAt(i);
+      if (!output->IsUnallocated()) continue;
+      UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
+      if (second_output->HasSameAsInputPolicy()) {
+        DCHECK(i == 0);  // Only valid for first output.
+        UnallocatedOperand* cur_input =
+            UnallocatedOperand::cast(second->InputAt(0));
+        int output_vreg = second_output->virtual_register();
+        int input_vreg = cur_input->virtual_register();
+
+        UnallocatedOperand* input_copy =
+            cur_input->CopyUnconstrained(code_zone());
+        cur_input->set_virtual_register(second_output->virtual_register());
+        AddConstraintsGapMove(gap_index, input_copy, cur_input);
+
+        if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
+          int index = gap_index + 1;
+          Instruction* instr = InstructionAt(index);
+          if (instr->HasPointerMap()) {
+            instr->pointer_map()->RecordPointer(input_copy, code_zone());
+          }
+        } else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
+          // The input is assumed to immediately have a tagged representation,
+          // before the pointer map can be used. I.e. the pointer map at the
+          // instruction will include the output operand (whose value at the
+          // beginning of the instruction is equal to the input operand). If
+          // this is not desired, then the pointer map at this instruction needs
+          // to be adjusted manually.
+        }
+      }
+    }
+  }
+}
+
+
+bool RegisterAllocator::IsOutputRegisterOf(Instruction* instr, int index) {
+  for (size_t i = 0; i < instr->OutputCount(); i++) {
+    InstructionOperand* output = instr->OutputAt(i);
+    if (output->IsRegister() && output->index() == index) return true;
+  }
+  return false;
+}
+
+
+bool RegisterAllocator::IsOutputDoubleRegisterOf(Instruction* instr,
+                                                 int index) {
+  for (size_t i = 0; i < instr->OutputCount(); i++) {
+    InstructionOperand* output = instr->OutputAt(i);
+    if (output->IsDoubleRegister() && output->index() == index) return true;
+  }
+  return false;
+}
+
+
+void RegisterAllocator::ProcessInstructions(BasicBlock* block,
+                                            BitVector* live) {
+  int block_start = block->first_instruction_index();
+
+  LifetimePosition block_start_position =
+      LifetimePosition::FromInstructionIndex(block_start);
+
+  for (int index = block->last_instruction_index(); index >= block_start;
+       index--) {
+    LifetimePosition curr_position =
+        LifetimePosition::FromInstructionIndex(index);
+
+    Instruction* instr = InstructionAt(index);
+    DCHECK(instr != NULL);
+    if (instr->IsGapMoves()) {
+      // Process the moves of the gap instruction, making their sources live.
+      GapInstruction* gap = code()->GapAt(index);
+
+      // TODO(titzer): no need to create the parallel move if it doesn't exist.
+      ParallelMove* move =
+          gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
+      const ZoneList<MoveOperands>* move_operands = move->move_operands();
+      for (int i = 0; i < move_operands->length(); ++i) {
+        MoveOperands* cur = &move_operands->at(i);
+        if (cur->IsIgnored()) continue;
+        InstructionOperand* from = cur->source();
+        InstructionOperand* to = cur->destination();
+        InstructionOperand* hint = to;
+        if (to->IsUnallocated()) {
+          int to_vreg = UnallocatedOperand::cast(to)->virtual_register();
+          LiveRange* to_range = LiveRangeFor(to_vreg);
+          if (to_range->is_phi()) {
+            if (to_range->is_non_loop_phi()) {
+              hint = to_range->current_hint_operand();
+            }
+          } else {
+            if (live->Contains(to_vreg)) {
+              Define(curr_position, to, from);
+              live->Remove(to_vreg);
+            } else {
+              cur->Eliminate();
+              continue;
+            }
+          }
+        } else {
+          Define(curr_position, to, from);
+        }
+        Use(block_start_position, curr_position, from, hint);
+        if (from->IsUnallocated()) {
+          live->Add(UnallocatedOperand::cast(from)->virtual_register());
+        }
+      }
+    } else {
+      // Process output, inputs, and temps of this non-gap instruction.
+      for (size_t i = 0; i < instr->OutputCount(); i++) {
+        InstructionOperand* output = instr->OutputAt(i);
+        if (output->IsUnallocated()) {
+          int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
+          live->Remove(out_vreg);
+        } else if (output->IsConstant()) {
+          int out_vreg = output->index();
+          live->Remove(out_vreg);
+        }
+        Define(curr_position, output, NULL);
+      }
+
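+      // Instructions that clobber registers (such as calls) get a short
+      // fixed use interval for every allocatable register they do not define
+      // as an output, so no other value can live in those registers across
+      // the instruction.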
+      if (instr->ClobbersRegisters()) {
+        for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
+          if (!IsOutputRegisterOf(instr, i)) {
+            LiveRange* range = FixedLiveRangeFor(i);
+            range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
+                                  zone());
+          }
+        }
+      }
+
+      if (instr->ClobbersDoubleRegisters()) {
+        for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
+          if (!IsOutputDoubleRegisterOf(instr, i)) {
+            LiveRange* range = FixedDoubleLiveRangeFor(i);
+            range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
+                                  zone());
+          }
+        }
+      }
+
+      for (size_t i = 0; i < instr->InputCount(); i++) {
+        InstructionOperand* input = instr->InputAt(i);
+        if (input->IsImmediate()) continue;  // Ignore immediates.
+        LifetimePosition use_pos;
+        if (input->IsUnallocated() &&
+            UnallocatedOperand::cast(input)->IsUsedAtStart()) {
+          use_pos = curr_position;
+        } else {
+          use_pos = curr_position.InstructionEnd();
+        }
+
+        Use(block_start_position, use_pos, input, NULL);
+        if (input->IsUnallocated()) {
+          live->Add(UnallocatedOperand::cast(input)->virtual_register());
+        }
+      }
+
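+      // Temps are both used and defined at this instruction. Register temps
+      // and fixed-policy temps of instructions that clobber their temps are
+      // skipped; they do not need separate use intervals here.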
+      for (size_t i = 0; i < instr->TempCount(); i++) {
+        InstructionOperand* temp = instr->TempAt(i);
+        if (instr->ClobbersTemps()) {
+          if (temp->IsRegister()) continue;
+          if (temp->IsUnallocated()) {
+            UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp);
+            if (temp_unalloc->HasFixedPolicy()) {
+              continue;
+            }
+          }
+        }
+        Use(block_start_position, curr_position.InstructionEnd(), temp, NULL);
+        Define(curr_position, temp, NULL);
+      }
+    }
+  }
+}
+
+
+void RegisterAllocator::ResolvePhis(BasicBlock* block) {
+  for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
+    Node* phi = *i;
+    if (phi->opcode() != IrOpcode::kPhi) continue;
+
+    UnallocatedOperand* phi_operand =
+        new (code_zone()) UnallocatedOperand(UnallocatedOperand::NONE);
+    phi_operand->set_virtual_register(phi->id());
+
+    int j = 0;
+    Node::Inputs inputs = phi->inputs();
+    for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+         ++iter, ++j) {
+      Node* op = *iter;
+      // TODO(mstarzinger): Use a ValueInputIterator instead.
+      if (j >= block->PredecessorCount()) continue;
+      UnallocatedOperand* operand =
+          new (code_zone()) UnallocatedOperand(UnallocatedOperand::ANY);
+      operand->set_virtual_register(op->id());
+      BasicBlock* cur_block = block->PredecessorAt(j);
+      // The gap move must be added without the special processing done in
+      // AddConstraintsGapMove.
+      code()->AddGapMove(cur_block->last_instruction_index() - 1, operand,
+                         phi_operand);
+
+      Instruction* branch = InstructionAt(cur_block->last_instruction_index());
+      DCHECK(!branch->HasPointerMap());
+      USE(branch);
+    }
+
+    LiveRange* live_range = LiveRangeFor(phi->id());
+    BlockStartInstruction* block_start = code()->GetBlockStart(block);
+    block_start->GetOrCreateParallelMove(GapInstruction::START, code_zone())
+        ->AddMove(phi_operand, live_range->GetSpillOperand(), code_zone());
+    live_range->SetSpillStartIndex(block->first_instruction_index());
+
+    // We use the phi-ness of some nodes in some later heuristics.
+    live_range->set_is_phi(true);
+    if (!block->IsLoopHeader()) {
+      live_range->set_is_non_loop_phi(true);
+    }
+  }
+}
+
+
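+// Runs the full allocation pipeline: meet fixed-register constraints, turn
+// phis into explicit gap moves, build live ranges, run linear-scan allocation
+// for general and then for double registers, record GC pointers at safe
+// points, and finally insert the moves that connect split ranges and resolve
+// control flow. Returns false if allocation bailed out.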
+bool RegisterAllocator::Allocate() {
+  assigned_registers_ = new (code_zone())
+      BitVector(Register::NumAllocatableRegisters(), code_zone());
+  assigned_double_registers_ = new (code_zone())
+      BitVector(DoubleRegister::NumAllocatableRegisters(), code_zone());
+  MeetRegisterConstraints();
+  if (!AllocationOk()) return false;
+  ResolvePhis();
+  BuildLiveRanges();
+  AllocateGeneralRegisters();
+  if (!AllocationOk()) return false;
+  AllocateDoubleRegisters();
+  if (!AllocationOk()) return false;
+  PopulatePointerMaps();
+  ConnectRanges();
+  ResolveControlFlow();
+  code()->frame()->SetAllocatedRegisters(assigned_registers_);
+  code()->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
+  return true;
+}
+
+
+void RegisterAllocator::MeetRegisterConstraints() {
+  RegisterAllocatorPhase phase("L_Register constraints", this);
+  for (int i = 0; i < code()->BasicBlockCount(); ++i) {
+    MeetRegisterConstraints(code()->BlockAt(i));
+    if (!AllocationOk()) return;
+  }
+}
+
+
+void RegisterAllocator::ResolvePhis() {
+  RegisterAllocatorPhase phase("L_Resolve phis", this);
+
+  // Process the blocks in reverse order.
+  for (int i = code()->BasicBlockCount() - 1; i >= 0; --i) {
+    ResolvePhis(code()->BlockAt(i));
+  }
+}
+
+
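+// Inserts a move on the control-flow edge from pred to block if the value
+// represented by range lives in different locations at the end of the
+// predecessor and at the start of the block. No move is needed if the value
+// is spilled on entry to the block.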
+void RegisterAllocator::ResolveControlFlow(LiveRange* range, BasicBlock* block,
+                                           BasicBlock* pred) {
+  LifetimePosition pred_end =
+      LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
+  LifetimePosition cur_start =
+      LifetimePosition::FromInstructionIndex(block->first_instruction_index());
+  LiveRange* pred_cover = NULL;
+  LiveRange* cur_cover = NULL;
+  LiveRange* cur_range = range;
+  while (cur_range != NULL && (cur_cover == NULL || pred_cover == NULL)) {
+    if (cur_range->CanCover(cur_start)) {
+      DCHECK(cur_cover == NULL);
+      cur_cover = cur_range;
+    }
+    if (cur_range->CanCover(pred_end)) {
+      DCHECK(pred_cover == NULL);
+      pred_cover = cur_range;
+    }
+    cur_range = cur_range->next();
+  }
+
+  if (cur_cover->IsSpilled()) return;
+  DCHECK(pred_cover != NULL && cur_cover != NULL);
+  if (pred_cover != cur_cover) {
+    InstructionOperand* pred_op =
+        pred_cover->CreateAssignedOperand(code_zone());
+    InstructionOperand* cur_op = cur_cover->CreateAssignedOperand(code_zone());
+    if (!pred_op->Equals(cur_op)) {
+      GapInstruction* gap = NULL;
+      if (block->PredecessorCount() == 1) {
+        gap = code()->GapAt(block->first_instruction_index());
+      } else {
+        DCHECK(pred->SuccessorCount() == 1);
+        gap = GetLastGap(pred);
+
+        Instruction* branch = InstructionAt(pred->last_instruction_index());
+        DCHECK(!branch->HasPointerMap());
+        USE(branch);
+      }
+      gap->GetOrCreateParallelMove(GapInstruction::START, code_zone())
+          ->AddMove(pred_op, cur_op, code_zone());
+    }
+  }
+}
+
+
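+// Picks the parallel move into which a connecting move for a range split at
+// pos should be inserted: the START or END move of the gap at pos if that
+// index is a gap, otherwise the AFTER move of the preceding gap (for start
+// positions) or the BEFORE move of the following gap (for end positions).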
+ParallelMove* RegisterAllocator::GetConnectingParallelMove(
+    LifetimePosition pos) {
+  int index = pos.InstructionIndex();
+  if (code()->IsGapAt(index)) {
+    GapInstruction* gap = code()->GapAt(index);
+    return gap->GetOrCreateParallelMove(
+        pos.IsInstructionStart() ? GapInstruction::START : GapInstruction::END,
+        code_zone());
+  }
+  int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
+  return code()->GapAt(gap_pos)->GetOrCreateParallelMove(
+      (gap_pos < index) ? GapInstruction::AFTER : GapInstruction::BEFORE,
+      code_zone());
+}
+
+
+BasicBlock* RegisterAllocator::GetBlock(LifetimePosition pos) {
+  return code()->GetBasicBlock(pos.InstructionIndex());
+}
+
+
+void RegisterAllocator::ConnectRanges() {
+  RegisterAllocatorPhase phase("L_Connect ranges", this);
+  for (int i = 0; i < live_ranges()->length(); ++i) {
+    LiveRange* first_range = live_ranges()->at(i);
+    if (first_range == NULL || first_range->parent() != NULL) continue;
+
+    LiveRange* second_range = first_range->next();
+    while (second_range != NULL) {
+      LifetimePosition pos = second_range->Start();
+
+      if (!second_range->IsSpilled()) {
+        // Add a gap move if the two live ranges touch and there is no block
+        // boundary.
+        if (first_range->End().Value() == pos.Value()) {
+          bool should_insert = true;
+          if (IsBlockBoundary(pos)) {
+            should_insert = CanEagerlyResolveControlFlow(GetBlock(pos));
+          }
+          if (should_insert) {
+            ParallelMove* move = GetConnectingParallelMove(pos);
+            InstructionOperand* prev_operand =
+                first_range->CreateAssignedOperand(code_zone());
+            InstructionOperand* cur_operand =
+                second_range->CreateAssignedOperand(code_zone());
+            move->AddMove(prev_operand, cur_operand, code_zone());
+          }
+        }
+      }
+
+      first_range = second_range;
+      second_range = second_range->next();
+    }
+  }
+}
+
+
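+// An edge into a block with a single predecessor that immediately precedes
+// it in RPO order is a fall-through edge; its connecting moves are inserted
+// eagerly by ConnectRanges, so ResolveControlFlow can skip such blocks.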
+bool RegisterAllocator::CanEagerlyResolveControlFlow(BasicBlock* block) const {
+  if (block->PredecessorCount() != 1) return false;
+  return block->PredecessorAt(0)->rpo_number_ == block->rpo_number_ - 1;
+}
+
+
+void RegisterAllocator::ResolveControlFlow() {
+  RegisterAllocatorPhase phase("L_Resolve control flow", this);
+  for (int block_id = 1; block_id < code()->BasicBlockCount(); ++block_id) {
+    BasicBlock* block = code()->BlockAt(block_id);
+    if (CanEagerlyResolveControlFlow(block)) continue;
+    BitVector* live = live_in_sets_[block->rpo_number_];
+    BitVector::Iterator iterator(live);
+    while (!iterator.Done()) {
+      int operand_index = iterator.Current();
+      BasicBlock::Predecessors predecessors = block->predecessors();
+      for (BasicBlock::Predecessors::iterator i = predecessors.begin();
+           i != predecessors.end(); ++i) {
+        BasicBlock* cur = *i;
+        LiveRange* cur_range = LiveRangeFor(operand_index);
+        ResolveControlFlow(cur_range, block, cur);
+      }
+      iterator.Advance();
+    }
+  }
+}
+
+
+void RegisterAllocator::BuildLiveRanges() {
+  RegisterAllocatorPhase phase("L_Build live ranges", this);
+  InitializeLivenessAnalysis();
+  // Process the blocks in reverse order.
+  for (int block_id = code()->BasicBlockCount() - 1; block_id >= 0;
+       --block_id) {
+    BasicBlock* block = code()->BlockAt(block_id);
+    BitVector* live = ComputeLiveOut(block);
+    // Initially consider all live_out values live for the entire block. We
+    // will shorten these intervals if necessary.
+    AddInitialIntervals(block, live);
+
+    // Process the instructions in reverse order, generating and killing
+    // live values.
+    ProcessInstructions(block, live);
+    // All phi output operands are killed by this block.
+    for (BasicBlock::const_iterator i = block->begin(); i != block->end();
+         ++i) {
+      Node* phi = *i;
+      if (phi->opcode() != IrOpcode::kPhi) continue;
+
+      // The live range interval already ends at the first instruction of the
+      // block.
+      live->Remove(phi->id());
+
+      InstructionOperand* hint = NULL;
+      InstructionOperand* phi_operand = NULL;
+      GapInstruction* gap = GetLastGap(block->PredecessorAt(0));
+
+      // TODO(titzer): no need to create the parallel move if it doesn't exist.
+      ParallelMove* move =
+          gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
+      for (int j = 0; j < move->move_operands()->length(); ++j) {
+        InstructionOperand* to = move->move_operands()->at(j).destination();
+        if (to->IsUnallocated() &&
+            UnallocatedOperand::cast(to)->virtual_register() == phi->id()) {
+          hint = move->move_operands()->at(j).source();
+          phi_operand = to;
+          break;
+        }
+      }
+      DCHECK(hint != NULL);
+
+      LifetimePosition block_start = LifetimePosition::FromInstructionIndex(
+          block->first_instruction_index());
+      Define(block_start, phi_operand, hint);
+    }
+
+    // Now live is the live_in set for this block, except that it does not
+    // include values that are live out on backward successor edges.
+    live_in_sets_[block_id] = live;
+
+    if (block->IsLoopHeader()) {
+      // Add a live range stretching from the first loop instruction to the last
+      // for each value live on entry to the header.
+      BitVector::Iterator iterator(live);
+      LifetimePosition start = LifetimePosition::FromInstructionIndex(
+          block->first_instruction_index());
+      int end_index =
+          code()->BlockAt(block->loop_end_)->last_instruction_index();
+      LifetimePosition end =
+          LifetimePosition::FromInstructionIndex(end_index).NextInstruction();
+      while (!iterator.Done()) {
+        int operand_index = iterator.Current();
+        LiveRange* range = LiveRangeFor(operand_index);
+        range->EnsureInterval(start, end, zone());
+        iterator.Advance();
+      }
+
+      // Insert all values into the live in sets of all blocks in the loop.
+      for (int i = block->rpo_number_ + 1; i < block->loop_end_; ++i) {
+        live_in_sets_[i]->Union(*live);
+      }
+    }
+
+#ifdef DEBUG
+    if (block_id == 0) {
+      BitVector::Iterator iterator(live);
+      bool found = false;
+      while (!iterator.Done()) {
+        found = true;
+        int operand_index = iterator.Current();
+        PrintF("Register allocator error: live v%d reached first block.\n",
+               operand_index);
+        LiveRange* range = LiveRangeFor(operand_index);
+        PrintF("  (first use is at %d)\n", range->first_pos()->pos().Value());
+        CompilationInfo* info = code()->linkage()->info();
+        if (info->IsStub()) {
+          if (info->code_stub() == NULL) {
+            PrintF("\n");
+          } else {
+            CodeStub::Major major_key = info->code_stub()->MajorKey();
+            PrintF("  (function: %s)\n", CodeStub::MajorName(major_key, false));
+          }
+        } else {
+          DCHECK(info->IsOptimizing());
+          AllowHandleDereference allow_deref;
+          PrintF("  (function: %s)\n",
+                 info->function()->debug_name()->ToCString().get());
+        }
+        iterator.Advance();
+      }
+      DCHECK(!found);
+    }
+#endif
+  }
+
+  for (int i = 0; i < live_ranges_.length(); ++i) {
+    if (live_ranges_[i] != NULL) {
+      live_ranges_[i]->kind_ = RequiredRegisterKind(live_ranges_[i]->id());
+
+      // TODO(bmeurer): This is a horrible hack to make sure that for constant
+      // live ranges, every use requires the constant to be in a register.
+      // Without this hack, all uses with "any" policy would get the constant
+      // operand assigned.
+      LiveRange* range = live_ranges_[i];
+      if (range->HasAllocatedSpillOperand() &&
+          range->GetSpillOperand()->IsConstant()) {
+        for (UsePosition* pos = range->first_pos(); pos != NULL;
+             pos = pos->next_) {
+          pos->register_beneficial_ = true;
+          pos->requires_reg_ = true;
+        }
+      }
+    }
+  }
+}
+
+
+bool RegisterAllocator::SafePointsAreInOrder() const {
+  int safe_point = 0;
+  const PointerMapDeque* pointer_maps = code()->pointer_maps();
+  for (PointerMapDeque::const_iterator it = pointer_maps->begin();
+       it != pointer_maps->end(); ++it) {
+    PointerMap* map = *it;
+    if (safe_point > map->instruction_position()) return false;
+    safe_point = map->instruction_position();
+  }
+  return true;
+}
+
+
+void RegisterAllocator::PopulatePointerMaps() {
+  RegisterAllocatorPhase phase("L_Populate pointer maps", this);
+
+  DCHECK(SafePointsAreInOrder());
+
+  // Iterate over all safe point positions and record a pointer
+  // for all spilled live ranges at this point.
+  int last_range_start = 0;
+  const PointerMapDeque* pointer_maps = code()->pointer_maps();
+  PointerMapDeque::const_iterator first_it = pointer_maps->begin();
+  for (int range_idx = 0; range_idx < live_ranges()->length(); ++range_idx) {
+    LiveRange* range = live_ranges()->at(range_idx);
+    if (range == NULL) continue;
+    // Iterate over the first parts of multi-part live ranges.
+    if (range->parent() != NULL) continue;
+    // Skip non-reference values.
+    if (!HasTaggedValue(range->id())) continue;
+    // Skip empty live ranges.
+    if (range->IsEmpty()) continue;
+
+    // Find the extent of the range and its children.
+    int start = range->Start().InstructionIndex();
+    int end = 0;
+    for (LiveRange* cur = range; cur != NULL; cur = cur->next()) {
+      LifetimePosition this_end = cur->End();
+      if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex();
+      DCHECK(cur->Start().InstructionIndex() >= start);
+    }
+
+    // Most of the ranges are in order, but not all.  Keep an eye on when they
+    // step backwards and reset the first_it so we don't miss any safe points.
+    if (start < last_range_start) first_it = pointer_maps->begin();
+    last_range_start = start;
+
+    // Step across all the safe points that are before the start of this range,
+    // recording how far we step so that we don't repeat this work for the
+    // next range.
+    for (; first_it != pointer_maps->end(); ++first_it) {
+      PointerMap* map = *first_it;
+      if (map->instruction_position() >= start) break;
+    }
+
+    // Step through the safe points to see whether they are in the range.
+    for (PointerMapDeque::const_iterator it = first_it;
+         it != pointer_maps->end(); ++it) {
+      PointerMap* map = *it;
+      int safe_point = map->instruction_position();
+
+      // The safe points are sorted so we can stop searching here.
+      if (safe_point - 1 > end) break;
+
+      // Advance to the next active range that covers the current
+      // safe point position.
+      LifetimePosition safe_point_pos =
+          LifetimePosition::FromInstructionIndex(safe_point);
+      LiveRange* cur = range;
+      while (cur != NULL && !cur->Covers(safe_point_pos)) {
+        cur = cur->next();
+      }
+      if (cur == NULL) continue;
+
+      // Check if the live range is spilled and the safe point is after
+      // the spill position.
+      if (range->HasAllocatedSpillOperand() &&
+          safe_point >= range->spill_start_index() &&
+          !range->GetSpillOperand()->IsConstant()) {
+        TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
+                   range->id(), range->spill_start_index(), safe_point);
+        map->RecordPointer(range->GetSpillOperand(), code_zone());
+      }
+
+      if (!cur->IsSpilled()) {
+        TraceAlloc(
+            "Pointer in register for range %d (start at %d) "
+            "at safe point %d\n",
+            cur->id(), cur->Start().Value(), safe_point);
+        InstructionOperand* operand = cur->CreateAssignedOperand(code_zone());
+        DCHECK(!operand->IsStackSlot());
+        map->RecordPointer(operand, code_zone());
+      }
+    }
+  }
+}
+
+
+void RegisterAllocator::AllocateGeneralRegisters() {
+  RegisterAllocatorPhase phase("L_Allocate general registers", this);
+  num_registers_ = Register::NumAllocatableRegisters();
+  mode_ = GENERAL_REGISTERS;
+  AllocateRegisters();
+}
+
+
+void RegisterAllocator::AllocateDoubleRegisters() {
+  RegisterAllocatorPhase phase("L_Allocate double registers", this);
+  num_registers_ = DoubleRegister::NumAllocatableRegisters();
+  mode_ = DOUBLE_REGISTERS;
+  AllocateRegisters();
+}
+
+
+void RegisterAllocator::AllocateRegisters() {
+  DCHECK(unhandled_live_ranges_.is_empty());
+
+  for (int i = 0; i < live_ranges_.length(); ++i) {
+    if (live_ranges_[i] != NULL) {
+      if (live_ranges_[i]->Kind() == mode_) {
+        AddToUnhandledUnsorted(live_ranges_[i]);
+      }
+    }
+  }
+  SortUnhandled();
+  DCHECK(UnhandledIsSorted());
+
+  DCHECK(reusable_slots_.is_empty());
+  DCHECK(active_live_ranges_.is_empty());
+  DCHECK(inactive_live_ranges_.is_empty());
+
+  if (mode_ == DOUBLE_REGISTERS) {
+    for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
+      LiveRange* current = fixed_double_live_ranges_.at(i);
+      if (current != NULL) {
+        AddToInactive(current);
+      }
+    }
+  } else {
+    DCHECK(mode_ == GENERAL_REGISTERS);
+    for (int i = 0; i < fixed_live_ranges_.length(); ++i) {
+      LiveRange* current = fixed_live_ranges_.at(i);
+      if (current != NULL) {
+        AddToInactive(current);
+      }
+    }
+  }
+
+  while (!unhandled_live_ranges_.is_empty()) {
+    DCHECK(UnhandledIsSorted());
+    LiveRange* current = unhandled_live_ranges_.RemoveLast();
+    DCHECK(UnhandledIsSorted());
+    LifetimePosition position = current->Start();
+#ifdef DEBUG
+    allocation_finger_ = position;
+#endif
+    TraceAlloc("Processing interval %d start=%d\n", current->id(),
+               position.Value());
+
+    if (current->HasAllocatedSpillOperand()) {
+      TraceAlloc("Live range %d already has a spill operand\n", current->id());
+      LifetimePosition next_pos = position;
+      if (code()->IsGapAt(next_pos.InstructionIndex())) {
+        next_pos = next_pos.NextInstruction();
+      }
+      UsePosition* pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
+      // If the range already has a spill operand and it doesn't need a
+      // register immediately, split it and spill the first part of the range.
+      if (pos == NULL) {
+        Spill(current);
+        continue;
+      } else if (pos->pos().Value() >
+                 current->Start().NextInstruction().Value()) {
+        // Do not spill the live range eagerly if the use position that can
+        // benefit from the register is too close to the start of the live
+        // range.
+        SpillBetween(current, current->Start(), pos->pos());
+        if (!AllocationOk()) return;
+        DCHECK(UnhandledIsSorted());
+        continue;
+      }
+    }
+
+    for (int i = 0; i < active_live_ranges_.length(); ++i) {
+      LiveRange* cur_active = active_live_ranges_.at(i);
+      if (cur_active->End().Value() <= position.Value()) {
+        ActiveToHandled(cur_active);
+        --i;  // The live range was removed from the list of active live ranges.
+      } else if (!cur_active->Covers(position)) {
+        ActiveToInactive(cur_active);
+        --i;  // The live range was removed from the list of active live ranges.
+      }
+    }
+
+    for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+      LiveRange* cur_inactive = inactive_live_ranges_.at(i);
+      if (cur_inactive->End().Value() <= position.Value()) {
+        InactiveToHandled(cur_inactive);
+        --i;  // Live range was removed from the list of inactive live ranges.
+      } else if (cur_inactive->Covers(position)) {
+        InactiveToActive(cur_inactive);
+        --i;  // Live range was removed from the list of inactive live ranges.
+      }
+    }
+
+    DCHECK(!current->HasRegisterAssigned() && !current->IsSpilled());
+
+    bool result = TryAllocateFreeReg(current);
+    if (!AllocationOk()) return;
+
+    if (!result) AllocateBlockedReg(current);
+    if (!AllocationOk()) return;
+
+    if (current->HasRegisterAssigned()) {
+      AddToActive(current);
+    }
+  }
+
+  reusable_slots_.Rewind(0);
+  active_live_ranges_.Rewind(0);
+  inactive_live_ranges_.Rewind(0);
+}
+
+
+const char* RegisterAllocator::RegisterName(int allocation_index) {
+  if (mode_ == GENERAL_REGISTERS) {
+    return Register::AllocationIndexToString(allocation_index);
+  } else {
+    return DoubleRegister::AllocationIndexToString(allocation_index);
+  }
+}
+
+
+void RegisterAllocator::TraceAlloc(const char* msg, ...) {
+  if (FLAG_trace_alloc) {
+    va_list arguments;
+    va_start(arguments, msg);
+    base::OS::VPrint(msg, arguments);
+    va_end(arguments);
+  }
+}
+
+
+bool RegisterAllocator::HasTaggedValue(int virtual_register) const {
+  return code()->IsReference(virtual_register);
+}
+
+
+RegisterKind RegisterAllocator::RequiredRegisterKind(
+    int virtual_register) const {
+  return (code()->IsDouble(virtual_register)) ? DOUBLE_REGISTERS
+                                              : GENERAL_REGISTERS;
+}
+
+
+void RegisterAllocator::AddToActive(LiveRange* range) {
+  TraceAlloc("Add live range %d to active\n", range->id());
+  active_live_ranges_.Add(range, zone());
+}
+
+
+void RegisterAllocator::AddToInactive(LiveRange* range) {
+  TraceAlloc("Add live range %d to inactive\n", range->id());
+  inactive_live_ranges_.Add(range, zone());
+}
+
+
+void RegisterAllocator::AddToUnhandledSorted(LiveRange* range) {
+  if (range == NULL || range->IsEmpty()) return;
+  DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
+  DCHECK(allocation_finger_.Value() <= range->Start().Value());
+  for (int i = unhandled_live_ranges_.length() - 1; i >= 0; --i) {
+    LiveRange* cur_range = unhandled_live_ranges_.at(i);
+    if (range->ShouldBeAllocatedBefore(cur_range)) {
+      TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
+      unhandled_live_ranges_.InsertAt(i + 1, range, zone());
+      DCHECK(UnhandledIsSorted());
+      return;
+    }
+  }
+  TraceAlloc("Add live range %d to unhandled at start\n", range->id());
+  unhandled_live_ranges_.InsertAt(0, range, zone());
+  DCHECK(UnhandledIsSorted());
+}
+
+
+void RegisterAllocator::AddToUnhandledUnsorted(LiveRange* range) {
+  if (range == NULL || range->IsEmpty()) return;
+  DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
+  TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id());
+  unhandled_live_ranges_.Add(range, zone());
+}
+
+
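+// Comparator used by SortUnhandled. Ranges that should be allocated earlier
+// compare as greater, so after sorting they end up at the end of the list,
+// from which the allocation loop pops them.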
+static int UnhandledSortHelper(LiveRange* const* a, LiveRange* const* b) {
+  DCHECK(!(*a)->ShouldBeAllocatedBefore(*b) ||
+         !(*b)->ShouldBeAllocatedBefore(*a));
+  if ((*a)->ShouldBeAllocatedBefore(*b)) return 1;
+  if ((*b)->ShouldBeAllocatedBefore(*a)) return -1;
+  return (*a)->id() - (*b)->id();
+}
+
+
+// Sort the unhandled live ranges so that the ranges to be processed first are
+// at the end of the array list.  This is convenient for the register allocation
+// algorithm because it is efficient to remove elements from the end.
+void RegisterAllocator::SortUnhandled() {
+  TraceAlloc("Sort unhandled\n");
+  unhandled_live_ranges_.Sort(&UnhandledSortHelper);
+}
+
+
+bool RegisterAllocator::UnhandledIsSorted() {
+  int len = unhandled_live_ranges_.length();
+  for (int i = 1; i < len; i++) {
+    LiveRange* a = unhandled_live_ranges_.at(i - 1);
+    LiveRange* b = unhandled_live_ranges_.at(i);
+    if (a->Start().Value() < b->Start().Value()) return false;
+  }
+  return true;
+}
+
+
+void RegisterAllocator::FreeSpillSlot(LiveRange* range) {
+  // Check that we are the last range.
+  if (range->next() != NULL) return;
+
+  if (!range->TopLevel()->HasAllocatedSpillOperand()) return;
+
+  InstructionOperand* spill_operand = range->TopLevel()->GetSpillOperand();
+  if (spill_operand->IsConstant()) return;
+  if (spill_operand->index() >= 0) {
+    reusable_slots_.Add(range, zone());
+  }
+}
+
+
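+// Reuses the spill slot of a previously handled range, but only if that
+// range ends before the current range starts, so the two never need the
+// slot at the same time.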
+InstructionOperand* RegisterAllocator::TryReuseSpillSlot(LiveRange* range) {
+  if (reusable_slots_.is_empty()) return NULL;
+  if (reusable_slots_.first()->End().Value() >
+      range->TopLevel()->Start().Value()) {
+    return NULL;
+  }
+  InstructionOperand* result =
+      reusable_slots_.first()->TopLevel()->GetSpillOperand();
+  reusable_slots_.Remove(0);
+  return result;
+}
+
+
+void RegisterAllocator::ActiveToHandled(LiveRange* range) {
+  DCHECK(active_live_ranges_.Contains(range));
+  active_live_ranges_.RemoveElement(range);
+  TraceAlloc("Moving live range %d from active to handled\n", range->id());
+  FreeSpillSlot(range);
+}
+
+
+void RegisterAllocator::ActiveToInactive(LiveRange* range) {
+  DCHECK(active_live_ranges_.Contains(range));
+  active_live_ranges_.RemoveElement(range);
+  inactive_live_ranges_.Add(range, zone());
+  TraceAlloc("Moving live range %d from active to inactive\n", range->id());
+}
+
+
+void RegisterAllocator::InactiveToHandled(LiveRange* range) {
+  DCHECK(inactive_live_ranges_.Contains(range));
+  inactive_live_ranges_.RemoveElement(range);
+  TraceAlloc("Moving live range %d from inactive to handled\n", range->id());
+  FreeSpillSlot(range);
+}
+
+
+void RegisterAllocator::InactiveToActive(LiveRange* range) {
+  DCHECK(inactive_live_ranges_.Contains(range));
+  inactive_live_ranges_.RemoveElement(range);
+  active_live_ranges_.Add(range, zone());
+  TraceAlloc("Moving live range %d from inactive to active\n", range->id());
+}
+
+
+// TryAllocateFreeReg and AllocateBlockedReg assume this
+// when allocating local arrays.
+STATIC_ASSERT(DoubleRegister::kMaxNumAllocatableRegisters >=
+              Register::kMaxNumAllocatableRegisters);
+
+
+bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) {
+  LifetimePosition free_until_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+
+  for (int i = 0; i < num_registers_; i++) {
+    free_until_pos[i] = LifetimePosition::MaxPosition();
+  }
+
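+  // A register held by an active range is not free at all; a register held
+  // by an inactive range is free until that range first intersects the
+  // current one.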
+  for (int i = 0; i < active_live_ranges_.length(); ++i) {
+    LiveRange* cur_active = active_live_ranges_.at(i);
+    free_until_pos[cur_active->assigned_register()] =
+        LifetimePosition::FromInstructionIndex(0);
+  }
+
+  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+    LiveRange* cur_inactive = inactive_live_ranges_.at(i);
+    DCHECK(cur_inactive->End().Value() > current->Start().Value());
+    LifetimePosition next_intersection =
+        cur_inactive->FirstIntersection(current);
+    if (!next_intersection.IsValid()) continue;
+    int cur_reg = cur_inactive->assigned_register();
+    free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
+  }
+
+  InstructionOperand* hint = current->FirstHint();
+  if (hint != NULL && (hint->IsRegister() || hint->IsDoubleRegister())) {
+    int register_index = hint->index();
+    TraceAlloc(
+        "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
+        RegisterName(register_index), free_until_pos[register_index].Value(),
+        current->id(), current->End().Value());
+
+    // The desired register is free until the end of the current live range.
+    if (free_until_pos[register_index].Value() >= current->End().Value()) {
+      TraceAlloc("Assigning preferred reg %s to live range %d\n",
+                 RegisterName(register_index), current->id());
+      SetLiveRangeAssignedRegister(current, register_index);
+      return true;
+    }
+  }
+
+  // Find the register which stays free for the longest time.
+  int reg = 0;
+  for (int i = 1; i < RegisterCount(); ++i) {
+    if (free_until_pos[i].Value() > free_until_pos[reg].Value()) {
+      reg = i;
+    }
+  }
+
+  LifetimePosition pos = free_until_pos[reg];
+
+  if (pos.Value() <= current->Start().Value()) {
+    // All registers are blocked.
+    return false;
+  }
+
+  if (pos.Value() < current->End().Value()) {
+    // Register reg is available at the range start but becomes blocked before
+    // the range end. Split current at the position where it becomes blocked.
+    LiveRange* tail = SplitRangeAt(current, pos);
+    if (!AllocationOk()) return false;
+    AddToUnhandledSorted(tail);
+  }
+
+
+  // Register reg is available at the range start and is free until
+  // the range end.
+  DCHECK(pos.Value() >= current->End().Value());
+  TraceAlloc("Assigning free reg %s to live range %d\n", RegisterName(reg),
+             current->id());
+  SetLiveRangeAssignedRegister(current, reg);
+
+  return true;
+}
+
+
+void RegisterAllocator::AllocateBlockedReg(LiveRange* current) {
+  UsePosition* register_use = current->NextRegisterPosition(current->Start());
+  if (register_use == NULL) {
+    // There is no use in the current live range that requires a register.
+    // We can just spill it.
+    Spill(current);
+    return;
+  }
+
+
+  LifetimePosition use_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+  LifetimePosition block_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+
+  for (int i = 0; i < num_registers_; i++) {
+    use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
+  }
+
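+  // use_pos[r] is the next position at which the range occupying register r
+  // benefits from having it (or the range's end if there is no such use);
+  // block_pos[r] is the position from which r cannot be used at all, either
+  // because a fixed range occupies it or because the occupying range cannot
+  // be spilled.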
+  for (int i = 0; i < active_live_ranges_.length(); ++i) {
+    LiveRange* range = active_live_ranges_[i];
+    int cur_reg = range->assigned_register();
+    if (range->IsFixed() || !range->CanBeSpilled(current->Start())) {
+      block_pos[cur_reg] = use_pos[cur_reg] =
+          LifetimePosition::FromInstructionIndex(0);
+    } else {
+      UsePosition* next_use =
+          range->NextUsePositionRegisterIsBeneficial(current->Start());
+      if (next_use == NULL) {
+        use_pos[cur_reg] = range->End();
+      } else {
+        use_pos[cur_reg] = next_use->pos();
+      }
+    }
+  }
+
+  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+    LiveRange* range = inactive_live_ranges_.at(i);
+    DCHECK(range->End().Value() > current->Start().Value());
+    LifetimePosition next_intersection = range->FirstIntersection(current);
+    if (!next_intersection.IsValid()) continue;
+    int cur_reg = range->assigned_register();
+    if (range->IsFixed()) {
+      block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
+      use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
+    } else {
+      use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
+    }
+  }
+
+  int reg = 0;
+  for (int i = 1; i < RegisterCount(); ++i) {
+    if (use_pos[i].Value() > use_pos[reg].Value()) {
+      reg = i;
+    }
+  }
+
+  LifetimePosition pos = use_pos[reg];
+
+  if (pos.Value() < register_use->pos().Value()) {
+    // All registers are blocked before the first use that requires a register.
+    // Spill the starting part of the live range up to that use.
+    SpillBetween(current, current->Start(), register_use->pos());
+    return;
+  }
+
+  if (block_pos[reg].Value() < current->End().Value()) {
+    // Register becomes blocked before the current range end. Split before that
+    // position.
+    LiveRange* tail = SplitBetween(current, current->Start(),
+                                   block_pos[reg].InstructionStart());
+    if (!AllocationOk()) return;
+    AddToUnhandledSorted(tail);
+  }
+
+  // Register reg is not blocked for the whole range.
+  DCHECK(block_pos[reg].Value() >= current->End().Value());
+  TraceAlloc("Assigning blocked reg %s to live range %d\n", RegisterName(reg),
+             current->id());
+  SetLiveRangeAssignedRegister(current, reg);
+
+  // This register was not free. Thus we need to find and spill
+  // parts of active and inactive live ranges that use the same register
+  // at the same lifetime positions as current.
+  SplitAndSpillIntersecting(current);
+}
+
+
+LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
+    LiveRange* range, LifetimePosition pos) {
+  BasicBlock* block = GetBlock(pos.InstructionStart());
+  BasicBlock* loop_header =
+      block->IsLoopHeader() ? block : code()->GetContainingLoop(block);
+
+  if (loop_header == NULL) return pos;
+
+  UsePosition* prev_use = range->PreviousUsePositionRegisterIsBeneficial(pos);
+
+  while (loop_header != NULL) {
+    // We are going to spill the live range inside the loop.
+    // If possible, try to move the spilling position backwards to the loop
+    // header. This will reduce the number of memory moves on the back edge.
+    LifetimePosition loop_start = LifetimePosition::FromInstructionIndex(
+        loop_header->first_instruction_index());
+
+    if (range->Covers(loop_start)) {
+      if (prev_use == NULL || prev_use->pos().Value() < loop_start.Value()) {
+        // No register-beneficial use inside the loop before pos.
+        pos = loop_start;
+      }
+    }
+
+    // Try hoisting out to an outer loop.
+    loop_header = code()->GetContainingLoop(loop_header);
+  }
+
+  return pos;
+}
+
+
+void RegisterAllocator::SplitAndSpillIntersecting(LiveRange* current) {
+  DCHECK(current->HasRegisterAssigned());
+  int reg = current->assigned_register();
+  LifetimePosition split_pos = current->Start();
+  for (int i = 0; i < active_live_ranges_.length(); ++i) {
+    LiveRange* range = active_live_ranges_[i];
+    if (range->assigned_register() == reg) {
+      UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+      LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
+      if (next_pos == NULL) {
+        SpillAfter(range, spill_pos);
+      } else {
+        // When spilling between spill_pos and next_pos ensure that the range
+        // remains spilled at least until the start of the current live range.
+        // This guarantees that we will not introduce new unhandled ranges that
+        // start before the current range, as that would violate the allocation
+        // invariant and lead to an inconsistent state of active and inactive
+        // live ranges: ranges are allocated in order of their start positions,
+        // and ranges are retired from active/inactive when the start of the
+        // current live range is larger than their end.
+        SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
+      }
+      if (!AllocationOk()) return;
+      ActiveToHandled(range);
+      --i;
+    }
+  }
+
+  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+    LiveRange* range = inactive_live_ranges_[i];
+    DCHECK(range->End().Value() > current->Start().Value());
+    if (range->assigned_register() == reg && !range->IsFixed()) {
+      LifetimePosition next_intersection = range->FirstIntersection(current);
+      if (next_intersection.IsValid()) {
+        UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+        if (next_pos == NULL) {
+          SpillAfter(range, split_pos);
+        } else {
+          next_intersection = Min(next_intersection, next_pos->pos());
+          SpillBetween(range, split_pos, next_intersection);
+        }
+        if (!AllocationOk()) return;
+        InactiveToHandled(range);
+        --i;
+      }
+    }
+  }
+}
+
+
+bool RegisterAllocator::IsBlockBoundary(LifetimePosition pos) {
+  return pos.IsInstructionStart() &&
+         InstructionAt(pos.InstructionIndex())->IsBlockStart();
+}
+
+
+LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
+                                           LifetimePosition pos) {
+  DCHECK(!range->IsFixed());
+  TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
+
+  if (pos.Value() <= range->Start().Value()) return range;
+
+  // We can't properly connect live ranges if the split occurred at the end
+  // of a control instruction.
+  DCHECK(pos.IsInstructionStart() ||
+         !InstructionAt(pos.InstructionIndex())->IsControl());
+
+  int vreg = GetVirtualRegister();
+  if (!AllocationOk()) return NULL;
+  LiveRange* result = LiveRangeFor(vreg);
+  range->SplitAt(pos, result, zone());
+  return result;
+}
+
+
+LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
+                                           LifetimePosition start,
+                                           LifetimePosition end) {
+  DCHECK(!range->IsFixed());
+  TraceAlloc("Splitting live range %d in position between [%d, %d]\n",
+             range->id(), start.Value(), end.Value());
+
+  LifetimePosition split_pos = FindOptimalSplitPos(start, end);
+  DCHECK(split_pos.Value() >= start.Value());
+  return SplitRangeAt(range, split_pos);
+}
+
+
+LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
+                                                        LifetimePosition end) {
+  int start_instr = start.InstructionIndex();
+  int end_instr = end.InstructionIndex();
+  DCHECK(start_instr <= end_instr);
+
+  // We have no choice.
+  if (start_instr == end_instr) return end;
+
+  BasicBlock* start_block = GetBlock(start);
+  BasicBlock* end_block = GetBlock(end);
+
+  if (end_block == start_block) {
+    // The interval is split in the same basic block. Split at the latest
+    // possible position.
+    return end;
+  }
+
+  BasicBlock* block = end_block;
+  // Find header of outermost loop.
+  // TODO(titzer): fix redundancy below.
+  while (code()->GetContainingLoop(block) != NULL &&
+         code()->GetContainingLoop(block)->rpo_number_ >
+             start_block->rpo_number_) {
+    block = code()->GetContainingLoop(block);
+  }
+
+  // We did not find any suitable outer loop. Split at the latest possible
+  // position unless end_block is a loop header itself.
+  if (block == end_block && !end_block->IsLoopHeader()) return end;
+
+  return LifetimePosition::FromInstructionIndex(
+      block->first_instruction_index());
+}
+
+
+void RegisterAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
+  LiveRange* second_part = SplitRangeAt(range, pos);
+  if (!AllocationOk()) return;
+  Spill(second_part);
+}
+
+
+void RegisterAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
+                                     LifetimePosition end) {
+  SpillBetweenUntil(range, start, start, end);
+}
+
+
+void RegisterAllocator::SpillBetweenUntil(LiveRange* range,
+                                          LifetimePosition start,
+                                          LifetimePosition until,
+                                          LifetimePosition end) {
+  CHECK(start.Value() < end.Value());
+  LiveRange* second_part = SplitRangeAt(range, start);
+  if (!AllocationOk()) return;
+
+  if (second_part->Start().Value() < end.Value()) {
+    // The split result intersects with [start, end[.
+    // Split it at a position between ]start+1, end[, spill the middle part
+    // and put the rest back on the unhandled list.
+    LiveRange* third_part = SplitBetween(
+        second_part, Max(second_part->Start().InstructionEnd(), until),
+        end.PrevInstruction().InstructionEnd());
+    if (!AllocationOk()) return;
+
+    DCHECK(third_part != second_part);
+
+    Spill(second_part);
+    AddToUnhandledSorted(third_part);
+  } else {
+    // The split result does not intersect with [start, end[.
+    // Nothing to spill. Just put it back on the unhandled list as a whole.
+    AddToUnhandledSorted(second_part);
+  }
+}
+
+
+void RegisterAllocator::Spill(LiveRange* range) {
+  DCHECK(!range->IsSpilled());
+  TraceAlloc("Spilling live range %d\n", range->id());
+  LiveRange* first = range->TopLevel();
+
+  if (!first->HasAllocatedSpillOperand()) {
+    InstructionOperand* op = TryReuseSpillSlot(range);
+    if (op == NULL) {
+      // Allocate a new operand referring to the spill slot.
+      RegisterKind kind = range->Kind();
+      int index = code()->frame()->AllocateSpillSlot(kind == DOUBLE_REGISTERS);
+      if (kind == DOUBLE_REGISTERS) {
+        op = DoubleStackSlotOperand::Create(index, zone());
+      } else {
+        DCHECK(kind == GENERAL_REGISTERS);
+        op = StackSlotOperand::Create(index, zone());
+      }
+    }
+    first->SetSpillOperand(op);
+  }
+  range->MakeSpilled(code_zone());
+}
+
+
+int RegisterAllocator::RegisterCount() const { return num_registers_; }
+
+
+#ifdef DEBUG
+
+
+void RegisterAllocator::Verify() const {
+  for (int i = 0; i < live_ranges()->length(); ++i) {
+    LiveRange* current = live_ranges()->at(i);
+    if (current != NULL) current->Verify();
+  }
+}
+
+
+#endif
+
+
+void RegisterAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
+                                                     int reg) {
+  if (range->Kind() == DOUBLE_REGISTERS) {
+    assigned_double_registers_->Add(reg);
+  } else {
+    DCHECK(range->Kind() == GENERAL_REGISTERS);
+    assigned_registers_->Add(reg);
+  }
+  range->set_assigned_register(reg, code_zone());
+}
+
+
+RegisterAllocatorPhase::RegisterAllocatorPhase(const char* name,
+                                               RegisterAllocator* allocator)
+    : CompilationPhase(name, allocator->code()->linkage()->info()),
+      allocator_(allocator) {
+  if (FLAG_turbo_stats) {
+    allocator_zone_start_allocation_size_ =
+        allocator->zone()->allocation_size();
+  }
+}
+
+
+RegisterAllocatorPhase::~RegisterAllocatorPhase() {
+  if (FLAG_turbo_stats) {
+    unsigned size = allocator_->zone()->allocation_size() -
+                    allocator_zone_start_allocation_size_;
+    isolate()->GetTStatistics()->SaveTiming(name(), base::TimeDelta(), size);
+  }
+#ifdef DEBUG
+  if (allocator_ != NULL) allocator_->Verify();
+#endif
+}
+}
+}
+}  // namespace v8::internal::compiler
diff --git a/src/compiler/register-allocator.h b/src/compiler/register-allocator.h
new file mode 100644
index 0000000..881ce37
--- /dev/null
+++ b/src/compiler/register-allocator.h
@@ -0,0 +1,548 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGISTER_ALLOCATOR_H_
+#define V8_REGISTER_ALLOCATOR_H_
+
+#include "src/allocation.h"
+#include "src/compiler/instruction.h"
+#include "src/compiler/node.h"
+#include "src/compiler/schedule.h"
+#include "src/macro-assembler.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class BitVector;
+class InstructionOperand;
+class UnallocatedOperand;
+class ParallelMove;
+class PointerMap;
+
+namespace compiler {
+
+enum RegisterKind {
+  UNALLOCATED_REGISTERS,
+  GENERAL_REGISTERS,
+  DOUBLE_REGISTERS
+};
+
+
+// This class represents a single point of an InstructionOperand's lifetime. For
+// each instruction there are exactly two lifetime positions: the beginning and
+// the end of the instruction. Lifetime positions for different instructions are
+// disjoint.
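+// With kStep == 2, instruction i starts at position 2 * i and ends at
+// position 2 * i + 1; for example, instruction 3 occupies positions 6 and 7.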
+class LifetimePosition {
+ public:
+  // Return the lifetime position that corresponds to the beginning of
+  // the instruction with the given index.
+  static LifetimePosition FromInstructionIndex(int index) {
+    return LifetimePosition(index * kStep);
+  }
+
+  // Returns a numeric representation of this lifetime position.
+  int Value() const { return value_; }
+
+  // Returns the index of the instruction to which this lifetime position
+  // corresponds.
+  int InstructionIndex() const {
+    DCHECK(IsValid());
+    return value_ / kStep;
+  }
+
+  // Returns true if this lifetime position corresponds to the instruction
+  // start.
+  bool IsInstructionStart() const { return (value_ & (kStep - 1)) == 0; }
+
+  // Returns the lifetime position for the start of the instruction which
+  // corresponds to this lifetime position.
+  LifetimePosition InstructionStart() const {
+    DCHECK(IsValid());
+    return LifetimePosition(value_ & ~(kStep - 1));
+  }
+
+  // Returns the lifetime position for the end of the instruction which
+  // corresponds to this lifetime position.
+  LifetimePosition InstructionEnd() const {
+    DCHECK(IsValid());
+    return LifetimePosition(InstructionStart().Value() + kStep / 2);
+  }
+
+  // Returns the lifetime position for the beginning of the next instruction.
+  LifetimePosition NextInstruction() const {
+    DCHECK(IsValid());
+    return LifetimePosition(InstructionStart().Value() + kStep);
+  }
+
+  // Returns the lifetime position for the beginning of the previous
+  // instruction.
+  LifetimePosition PrevInstruction() const {
+    DCHECK(IsValid());
+    DCHECK(value_ > 1);
+    return LifetimePosition(InstructionStart().Value() - kStep);
+  }
+
+  // Constructs the lifetime position which does not correspond to any
+  // instruction.
+  LifetimePosition() : value_(-1) {}
+
+  // Returns true if this lifetime position corresponds to some
+  // instruction.
+  bool IsValid() const { return value_ != -1; }
+
+  static inline LifetimePosition Invalid() { return LifetimePosition(); }
+
+  static inline LifetimePosition MaxPosition() {
+    // We have to use this kind of getter instead of a static member due to
+    // a crash bug in GDB.
+    return LifetimePosition(kMaxInt);
+  }
+
+ private:
+  static const int kStep = 2;
+
+  // Code relies on kStep being a power of two.
+  STATIC_ASSERT(IS_POWER_OF_TWO(kStep));
+
+  explicit LifetimePosition(int value) : value_(value) {}
+
+  int value_;
+};
+
+
+// Representation of the non-empty interval [start,end[.
+class UseInterval : public ZoneObject {
+ public:
+  UseInterval(LifetimePosition start, LifetimePosition end)
+      : start_(start), end_(end), next_(NULL) {
+    DCHECK(start.Value() < end.Value());
+  }
+
+  LifetimePosition start() const { return start_; }
+  LifetimePosition end() const { return end_; }
+  UseInterval* next() const { return next_; }
+
+  // Split this interval at the given position without affecting the
+  // live range that owns it. The interval must contain the position.
+  void SplitAt(LifetimePosition pos, Zone* zone);
+
+  // If this interval intersects with the other, return the smallest position
+  // that belongs to both of them.
+  LifetimePosition Intersect(const UseInterval* other) const {
+    if (other->start().Value() < start_.Value()) return other->Intersect(this);
+    if (other->start().Value() < end_.Value()) return other->start();
+    return LifetimePosition::Invalid();
+  }
+
+  bool Contains(LifetimePosition point) const {
+    return start_.Value() <= point.Value() && point.Value() < end_.Value();
+  }
+
+  void set_start(LifetimePosition start) { start_ = start; }
+  void set_next(UseInterval* next) { next_ = next; }
+
+  LifetimePosition start_;
+  LifetimePosition end_;
+  UseInterval* next_;
+};
+
+// Representation of a use position.
+class UsePosition : public ZoneObject {
+ public:
+  UsePosition(LifetimePosition pos, InstructionOperand* operand,
+              InstructionOperand* hint);
+
+  InstructionOperand* operand() const { return operand_; }
+  bool HasOperand() const { return operand_ != NULL; }
+
+  InstructionOperand* hint() const { return hint_; }
+  bool HasHint() const;
+  bool RequiresRegister() const;
+  bool RegisterIsBeneficial() const;
+
+  LifetimePosition pos() const { return pos_; }
+  UsePosition* next() const { return next_; }
+
+  void set_next(UsePosition* next) { next_ = next; }
+
+  InstructionOperand* const operand_;
+  InstructionOperand* const hint_;
+  LifetimePosition const pos_;
+  UsePosition* next_;
+  bool requires_reg_;
+  bool register_beneficial_;
+};
+
+// Representation of SSA values' live ranges as a collection of (continuous)
+// intervals over the instruction ordering.
+class LiveRange : public ZoneObject {
+ public:
+  static const int kInvalidAssignment = 0x7fffffff;
+
+  LiveRange(int id, Zone* zone);
+
+  UseInterval* first_interval() const { return first_interval_; }
+  UsePosition* first_pos() const { return first_pos_; }
+  LiveRange* parent() const { return parent_; }
+  LiveRange* TopLevel() { return (parent_ == NULL) ? this : parent_; }
+  LiveRange* next() const { return next_; }
+  bool IsChild() const { return parent() != NULL; }
+  int id() const { return id_; }
+  bool IsFixed() const { return id_ < 0; }
+  bool IsEmpty() const { return first_interval() == NULL; }
+  InstructionOperand* CreateAssignedOperand(Zone* zone);
+  int assigned_register() const { return assigned_register_; }
+  int spill_start_index() const { return spill_start_index_; }
+  void set_assigned_register(int reg, Zone* zone);
+  void MakeSpilled(Zone* zone);
+  bool is_phi() const { return is_phi_; }
+  void set_is_phi(bool is_phi) { is_phi_ = is_phi; }
+  bool is_non_loop_phi() const { return is_non_loop_phi_; }
+  void set_is_non_loop_phi(bool is_non_loop_phi) {
+    is_non_loop_phi_ = is_non_loop_phi;
+  }
+
+  // Returns the use position in this live range that follows both {start}
+  // and the last processed use position.
+  // Modifies the internal state of the live range!
+  UsePosition* NextUsePosition(LifetimePosition start);
+
+  // Returns the use position for which a register is required in this live
+  // range and which follows both {start} and the last processed use position.
+  // Modifies the internal state of the live range!
+  UsePosition* NextRegisterPosition(LifetimePosition start);
+
+  // Returns the use position for which a register is beneficial in this live
+  // range and which follows both {start} and the last processed use position.
+  // Modifies the internal state of the live range!
+  UsePosition* NextUsePositionRegisterIsBeneficial(LifetimePosition start);
+
+  // Returns the use position for which a register is beneficial in this live
+  // range and which precedes {start}.
+  UsePosition* PreviousUsePositionRegisterIsBeneficial(LifetimePosition start);
+
+  // Returns true if this live range can be spilled at the given position.
+  bool CanBeSpilled(LifetimePosition pos);
+
+  // Split this live range at the given position which must follow the start of
+  // the range.
+  // All uses following the given position will be moved from this
+  // live range to the result live range.
+  void SplitAt(LifetimePosition position, LiveRange* result, Zone* zone);
+
+  RegisterKind Kind() const { return kind_; }
+  bool HasRegisterAssigned() const {
+    return assigned_register_ != kInvalidAssignment;
+  }
+  bool IsSpilled() const { return spilled_; }
+
+  InstructionOperand* current_hint_operand() const {
+    DCHECK(current_hint_operand_ == FirstHint());
+    return current_hint_operand_;
+  }
+  InstructionOperand* FirstHint() const {
+    UsePosition* pos = first_pos_;
+    while (pos != NULL && !pos->HasHint()) pos = pos->next();
+    if (pos != NULL) return pos->hint();
+    return NULL;
+  }
+
+  LifetimePosition Start() const {
+    DCHECK(!IsEmpty());
+    return first_interval()->start();
+  }
+
+  LifetimePosition End() const {
+    DCHECK(!IsEmpty());
+    return last_interval_->end();
+  }
+
+  bool HasAllocatedSpillOperand() const;
+  InstructionOperand* GetSpillOperand() const { return spill_operand_; }
+  void SetSpillOperand(InstructionOperand* operand);
+
+  void SetSpillStartIndex(int start) {
+    spill_start_index_ = Min(start, spill_start_index_);
+  }
+
+  bool ShouldBeAllocatedBefore(const LiveRange* other) const;
+  bool CanCover(LifetimePosition position) const;
+  bool Covers(LifetimePosition position);
+  LifetimePosition FirstIntersection(LiveRange* other);
+
+  // Add a new interval or a new use position to this live range.
+  void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
+  void AddUseInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
+  void AddUsePosition(LifetimePosition pos, InstructionOperand* operand,
+                      InstructionOperand* hint, Zone* zone);
+
+  // Shorten the most recently added interval by setting a new start.
+  void ShortenTo(LifetimePosition start);
+
+#ifdef DEBUG
+  // True if target overlaps an existing interval.
+  bool HasOverlap(UseInterval* target) const;
+  void Verify() const;
+#endif
+
+ private:
+  void ConvertOperands(Zone* zone);
+  UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
+  void AdvanceLastProcessedMarker(UseInterval* to_start_of,
+                                  LifetimePosition but_not_past) const;
+
+  int id_;
+  bool spilled_;
+  bool is_phi_;
+  bool is_non_loop_phi_;
+  RegisterKind kind_;
+  int assigned_register_;
+  UseInterval* last_interval_;
+  UseInterval* first_interval_;
+  UsePosition* first_pos_;
+  LiveRange* parent_;
+  LiveRange* next_;
+  // This is used as a cache, it doesn't affect correctness.
+  mutable UseInterval* current_interval_;
+  UsePosition* last_processed_use_;
+  // This is used as a cache, it's invalid outside of BuildLiveRanges.
+  InstructionOperand* current_hint_operand_;
+  InstructionOperand* spill_operand_;
+  int spill_start_index_;
+
+  friend class RegisterAllocator;  // Assigns to kind_.
+};
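
To make the splitting contract above concrete, here is a hedged sketch using plain vectors instead of the linked use-position lists: uses following the split position move to the child range, uses at or before it stay with the parent.

    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> uses = {2, 6, 10, 14};  // Use positions of one live range.
      int split_pos = 6;
      std::vector<int> parent, child;
      for (int use : uses) (use > split_pos ? child : parent).push_back(use);
      assert(parent == std::vector<int>({2, 6}));   // Uses at or before the split.
      assert(child == std::vector<int>({10, 14}));  // Uses following the split.
      return 0;
    }
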
+
+
+class RegisterAllocator BASE_EMBEDDED {
+ public:
+  explicit RegisterAllocator(InstructionSequence* code);
+
+  static void TraceAlloc(const char* msg, ...);
+
+  // Checks whether the value of a given virtual register is a reference.
+  // TODO(titzer): rename this to IsReference.
+  bool HasTaggedValue(int virtual_register) const;
+
+  // Returns the register kind required by the given virtual register.
+  RegisterKind RequiredRegisterKind(int virtual_register) const;
+
+  bool Allocate();
+
+  const ZoneList<LiveRange*>* live_ranges() const { return &live_ranges_; }
+  const Vector<LiveRange*>* fixed_live_ranges() const {
+    return &fixed_live_ranges_;
+  }
+  const Vector<LiveRange*>* fixed_double_live_ranges() const {
+    return &fixed_double_live_ranges_;
+  }
+
+  inline InstructionSequence* code() const { return code_; }
+
+  // This zone is for data structures only needed during register allocation.
+  inline Zone* zone() { return &zone_; }
+
+  // This zone is for InstructionOperands and moves that live beyond register
+  // allocation.
+  inline Zone* code_zone() { return code()->zone(); }
+
+  int GetVirtualRegister() {
+    int vreg = code()->NextVirtualRegister();
+    if (vreg >= UnallocatedOperand::kMaxVirtualRegisters) {
+      allocation_ok_ = false;
+      // Maintain the invariant that we return something below the maximum.
+      return 0;
+    }
+    return vreg;
+  }
+
+  bool AllocationOk() { return allocation_ok_; }
+
+#ifdef DEBUG
+  void Verify() const;
+#endif
+
+  BitVector* assigned_registers() { return assigned_registers_; }
+  BitVector* assigned_double_registers() { return assigned_double_registers_; }
+
+ private:
+  void MeetRegisterConstraints();
+  void ResolvePhis();
+  void BuildLiveRanges();
+  void AllocateGeneralRegisters();
+  void AllocateDoubleRegisters();
+  void ConnectRanges();
+  void ResolveControlFlow();
+  void PopulatePointerMaps();  // TODO(titzer): rename to PopulateReferenceMaps.
+  void AllocateRegisters();
+  bool CanEagerlyResolveControlFlow(BasicBlock* block) const;
+  inline bool SafePointsAreInOrder() const;
+
+  // Liveness analysis support.
+  void InitializeLivenessAnalysis();
+  BitVector* ComputeLiveOut(BasicBlock* block);
+  void AddInitialIntervals(BasicBlock* block, BitVector* live_out);
+  bool IsOutputRegisterOf(Instruction* instr, int index);
+  bool IsOutputDoubleRegisterOf(Instruction* instr, int index);
+  void ProcessInstructions(BasicBlock* block, BitVector* live);
+  void MeetRegisterConstraints(BasicBlock* block);
+  void MeetConstraintsBetween(Instruction* first, Instruction* second,
+                              int gap_index);
+  void MeetRegisterConstraintsForLastInstructionInBlock(BasicBlock* block);
+  void ResolvePhis(BasicBlock* block);
+
+  // Helper methods for building intervals.
+  InstructionOperand* AllocateFixed(UnallocatedOperand* operand, int pos,
+                                    bool is_tagged);
+  LiveRange* LiveRangeFor(InstructionOperand* operand);
+  void Define(LifetimePosition position, InstructionOperand* operand,
+              InstructionOperand* hint);
+  void Use(LifetimePosition block_start, LifetimePosition position,
+           InstructionOperand* operand, InstructionOperand* hint);
+  void AddConstraintsGapMove(int index, InstructionOperand* from,
+                             InstructionOperand* to);
+
+  // Helper methods for updating the live range lists.
+  void AddToActive(LiveRange* range);
+  void AddToInactive(LiveRange* range);
+  void AddToUnhandledSorted(LiveRange* range);
+  void AddToUnhandledUnsorted(LiveRange* range);
+  void SortUnhandled();
+  bool UnhandledIsSorted();
+  void ActiveToHandled(LiveRange* range);
+  void ActiveToInactive(LiveRange* range);
+  void InactiveToHandled(LiveRange* range);
+  void InactiveToActive(LiveRange* range);
+  void FreeSpillSlot(LiveRange* range);
+  InstructionOperand* TryReuseSpillSlot(LiveRange* range);
+
+  // Helper methods for allocating registers.
+  bool TryAllocateFreeReg(LiveRange* range);
+  void AllocateBlockedReg(LiveRange* range);
+
+  // Live range splitting helpers.
+
+  // Split the given range at the given position.
+  // If range starts at or after the given position then the
+  // original range is returned.
+  // Otherwise returns the live range that starts at pos and contains
+  // all uses from the original range that follow pos. Uses at pos will
+  // still be owned by the original range after splitting.
+  LiveRange* SplitRangeAt(LiveRange* range, LifetimePosition pos);
+
+  // Split the given range at a position from the interval [start, end].
+  LiveRange* SplitBetween(LiveRange* range, LifetimePosition start,
+                          LifetimePosition end);
+
+  // Find a lifetime position in the interval [start, end] which
+  // is optimal for splitting: it is either the header of the outermost
+  // loop covered by this interval or the latest possible position.
+  LifetimePosition FindOptimalSplitPos(LifetimePosition start,
+                                       LifetimePosition end);
+
+  // Spill the given live range after position pos.
+  void SpillAfter(LiveRange* range, LifetimePosition pos);
+
+  // Spill the given live range after position [start] and up to position [end].
+  void SpillBetween(LiveRange* range, LifetimePosition start,
+                    LifetimePosition end);
+
+  // Spill the given live range after position [start] and up to position [end].
+  // The range is guaranteed to be spilled at least until position [until].
+  void SpillBetweenUntil(LiveRange* range, LifetimePosition start,
+                         LifetimePosition until, LifetimePosition end);
+
+  void SplitAndSpillIntersecting(LiveRange* range);
+
+  // If we are trying to spill a range inside a loop, try to
+  // hoist the spill position out to the point just before the loop.
+  LifetimePosition FindOptimalSpillingPos(LiveRange* range,
+                                          LifetimePosition pos);
+
+  void Spill(LiveRange* range);
+  bool IsBlockBoundary(LifetimePosition pos);
+
+  // Helper methods for resolving control flow.
+  void ResolveControlFlow(LiveRange* range, BasicBlock* block,
+                          BasicBlock* pred);
+
+  inline void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
+
+  // Return the parallel move that should be used to connect ranges split at
+  // the given position.
+  ParallelMove* GetConnectingParallelMove(LifetimePosition pos);
+
+  // Return the block which contains the given lifetime position.
+  BasicBlock* GetBlock(LifetimePosition pos);
+
+  // Helper methods for the fixed registers.
+  int RegisterCount() const;
+  static int FixedLiveRangeID(int index) { return -index - 1; }
+  static int FixedDoubleLiveRangeID(int index);
+  LiveRange* FixedLiveRangeFor(int index);
+  LiveRange* FixedDoubleLiveRangeFor(int index);
+  LiveRange* LiveRangeFor(int index);
+  GapInstruction* GetLastGap(BasicBlock* block);
+
+  const char* RegisterName(int allocation_index);
+
+  inline Instruction* InstructionAt(int index) {
+    return code()->InstructionAt(index);
+  }
+
+  Zone zone_;
+  InstructionSequence* code_;
+
+  // During liveness analysis keep a mapping from block id to live_in sets
+  // for blocks already analyzed.
+  ZoneList<BitVector*> live_in_sets_;
+
+  // Liveness analysis results.
+  ZoneList<LiveRange*> live_ranges_;
+
+  // Lists of live ranges.
+  EmbeddedVector<LiveRange*, Register::kMaxNumAllocatableRegisters>
+      fixed_live_ranges_;
+  EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumAllocatableRegisters>
+      fixed_double_live_ranges_;
+  ZoneList<LiveRange*> unhandled_live_ranges_;
+  ZoneList<LiveRange*> active_live_ranges_;
+  ZoneList<LiveRange*> inactive_live_ranges_;
+  ZoneList<LiveRange*> reusable_slots_;
+
+  RegisterKind mode_;
+  int num_registers_;
+
+  BitVector* assigned_registers_;
+  BitVector* assigned_double_registers_;
+
+  // Indicates success or failure during register allocation.
+  bool allocation_ok_;
+
+#ifdef DEBUG
+  LifetimePosition allocation_finger_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
+};
+
+
+class RegisterAllocatorPhase : public CompilationPhase {
+ public:
+  RegisterAllocatorPhase(const char* name, RegisterAllocator* allocator);
+  ~RegisterAllocatorPhase();
+
+ private:
+  RegisterAllocator* allocator_;
+  unsigned allocator_zone_start_allocation_size_;
+
+  DISALLOW_COPY_AND_ASSIGN(RegisterAllocatorPhase);
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_REGISTER_ALLOCATOR_H_
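
The active/inactive/unhandled lists above drive a linear-scan style allocation. As a rough, stand-alone sketch of that loop (one interval per range, no splitting, no use positions; all names are invented for illustration and this is not the allocator in this patch):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct Range { int start, end, reg; };  // reg == -1 means "spilled".

    void LinearScan(std::vector<Range>& ranges, int num_registers) {
      std::sort(ranges.begin(), ranges.end(),
                [](const Range& a, const Range& b) { return a.start < b.start; });
      std::vector<Range*> active;
      std::vector<bool> free_reg(num_registers, true);
      for (Range& current : ranges) {
        // Expire ranges that ended before the current range starts.
        for (auto it = active.begin(); it != active.end();) {
          if ((*it)->end <= current.start) {
            free_reg[(*it)->reg] = true;
            it = active.erase(it);
          } else {
            ++it;
          }
        }
        // Try to find a free register; otherwise leave the range spilled.
        current.reg = -1;
        for (int r = 0; r < num_registers; ++r) {
          if (free_reg[r]) {
            free_reg[r] = false;
            current.reg = r;
            active.push_back(&current);
            break;
          }
        }
      }
    }

    int main() {
      std::vector<Range> ranges = {{0, 6, -1}, {2, 4, -1}, {3, 8, -1}};
      LinearScan(ranges, 2);
      assert(ranges[0].reg != -1 && ranges[1].reg != -1);  // Two registers suffice.
      assert(ranges[2].reg == -1);  // Third overlaps both live ranges -> spilled.
      return 0;
    }
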
diff --git a/src/compiler/representation-change.h b/src/compiler/representation-change.h
new file mode 100644
index 0000000..aaa248e
--- /dev/null
+++ b/src/compiler/representation-change.h
@@ -0,0 +1,360 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_REPRESENTATION_CHANGE_H_
+#define V8_COMPILER_REPRESENTATION_CHANGE_H_
+
+#include "src/base/bits.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Contains logic related to changing the representation of values for constants
+// and other nodes, as well as lowering Simplified->Machine operators.
+// Eagerly folds any representation changes for constants.
+class RepresentationChanger {
+ public:
+  RepresentationChanger(JSGraph* jsgraph, SimplifiedOperatorBuilder* simplified,
+                        Isolate* isolate)
+      : jsgraph_(jsgraph),
+        simplified_(simplified),
+        isolate_(isolate),
+        testing_type_errors_(false),
+        type_error_(false) {}
+
+  // TODO(titzer): should Word64 also be implicitly convertible to others?
+  static const MachineTypeUnion rWord =
+      kRepBit | kRepWord8 | kRepWord16 | kRepWord32;
+
+  Node* GetRepresentationFor(Node* node, MachineTypeUnion output_type,
+                             MachineTypeUnion use_type) {
+    if (!base::bits::IsPowerOfTwo32(output_type & kRepMask)) {
+      // There should be only one output representation.
+      return TypeError(node, output_type, use_type);
+    }
+    if ((use_type & kRepMask) == (output_type & kRepMask)) {
+      // Representations are the same. That's a no-op.
+      return node;
+    }
+    if ((use_type & rWord) && (output_type & rWord)) {
+      // Both are words less than or equal to 32 bits.
+      // Since loads of integers from memory implicitly sign or zero extend the
+      // value to the full machine word size and stores implicitly truncate,
+      // no representation change is necessary.
+      return node;
+    }
+    if (use_type & kRepTagged) {
+      return GetTaggedRepresentationFor(node, output_type);
+    } else if (use_type & kRepFloat64) {
+      return GetFloat64RepresentationFor(node, output_type);
+    } else if (use_type & kRepFloat32) {
+      return TypeError(node, output_type, use_type);  // TODO(titzer): handle
+    } else if (use_type & kRepBit) {
+      return GetBitRepresentationFor(node, output_type);
+    } else if (use_type & rWord) {
+      return GetWord32RepresentationFor(node, output_type,
+                                        use_type & kTypeUint32);
+    } else if (use_type & kRepWord64) {
+      return GetWord64RepresentationFor(node, output_type);
+    } else {
+      return node;
+    }
+  }
+
+  Node* GetTaggedRepresentationFor(Node* node, MachineTypeUnion output_type) {
+    // Eagerly fold representation changes for constants.
+    switch (node->opcode()) {
+      case IrOpcode::kNumberConstant:
+      case IrOpcode::kHeapConstant:
+        return node;  // No change necessary.
+      case IrOpcode::kInt32Constant:
+        if (output_type & kTypeUint32) {
+          uint32_t value = OpParameter<uint32_t>(node);
+          return jsgraph()->Constant(static_cast<double>(value));
+        } else if (output_type & kTypeInt32) {
+          int32_t value = OpParameter<int32_t>(node);
+          return jsgraph()->Constant(value);
+        } else if (output_type & kRepBit) {
+          return OpParameter<int32_t>(node) == 0 ? jsgraph()->FalseConstant()
+                                                 : jsgraph()->TrueConstant();
+        } else {
+          return TypeError(node, output_type, kRepTagged);
+        }
+      case IrOpcode::kFloat64Constant:
+        return jsgraph()->Constant(OpParameter<double>(node));
+      default:
+        break;
+    }
+    // Select the correct X -> Tagged operator.
+    const Operator* op;
+    if (output_type & kRepBit) {
+      op = simplified()->ChangeBitToBool();
+    } else if (output_type & rWord) {
+      if (output_type & kTypeUint32) {
+        op = simplified()->ChangeUint32ToTagged();
+      } else if (output_type & kTypeInt32) {
+        op = simplified()->ChangeInt32ToTagged();
+      } else {
+        return TypeError(node, output_type, kRepTagged);
+      }
+    } else if (output_type & kRepFloat64) {
+      op = simplified()->ChangeFloat64ToTagged();
+    } else {
+      return TypeError(node, output_type, kRepTagged);
+    }
+    return jsgraph()->graph()->NewNode(op, node);
+  }
+
+  Node* GetFloat64RepresentationFor(Node* node, MachineTypeUnion output_type) {
+    // Eagerly fold representation changes for constants.
+    switch (node->opcode()) {
+      case IrOpcode::kNumberConstant:
+        return jsgraph()->Float64Constant(OpParameter<double>(node));
+      case IrOpcode::kInt32Constant:
+        if (output_type & kTypeUint32) {
+          uint32_t value = OpParameter<uint32_t>(node);
+          return jsgraph()->Float64Constant(static_cast<double>(value));
+        } else {
+          int32_t value = OpParameter<int32_t>(node);
+          return jsgraph()->Float64Constant(value);
+        }
+      case IrOpcode::kFloat64Constant:
+        return node;  // No change necessary.
+      default:
+        break;
+    }
+    // Select the correct X -> Float64 operator.
+    const Operator* op;
+    if (output_type & kRepBit) {
+      return TypeError(node, output_type, kRepFloat64);
+    } else if (output_type & rWord) {
+      if (output_type & kTypeUint32) {
+        op = machine()->ChangeUint32ToFloat64();
+      } else {
+        op = machine()->ChangeInt32ToFloat64();
+      }
+    } else if (output_type & kRepTagged) {
+      op = simplified()->ChangeTaggedToFloat64();
+    } else {
+      return TypeError(node, output_type, kRepFloat64);
+    }
+    return jsgraph()->graph()->NewNode(op, node);
+  }
+
+  Node* GetWord32RepresentationFor(Node* node, MachineTypeUnion output_type,
+                                   bool use_unsigned) {
+    // Eagerly fold representation changes for constants.
+    switch (node->opcode()) {
+      case IrOpcode::kInt32Constant:
+        return node;  // No change necessary.
+      case IrOpcode::kNumberConstant:
+      case IrOpcode::kFloat64Constant: {
+        double value = OpParameter<double>(node);
+        if (value < 0) {
+          DCHECK(IsInt32Double(value));
+          int32_t iv = static_cast<int32_t>(value);
+          return jsgraph()->Int32Constant(iv);
+        } else {
+          DCHECK(IsUint32Double(value));
+          int32_t iv = static_cast<int32_t>(static_cast<uint32_t>(value));
+          return jsgraph()->Int32Constant(iv);
+        }
+      }
+      default:
+        break;
+    }
+    // Select the correct X -> Word32 operator.
+    const Operator* op = NULL;
+    if (output_type & kRepFloat64) {
+      if (output_type & kTypeUint32 || use_unsigned) {
+        op = machine()->ChangeFloat64ToUint32();
+      } else {
+        op = machine()->ChangeFloat64ToInt32();
+      }
+    } else if (output_type & kRepTagged) {
+      if (output_type & kTypeUint32 || use_unsigned) {
+        op = simplified()->ChangeTaggedToUint32();
+      } else {
+        op = simplified()->ChangeTaggedToInt32();
+      }
+    } else {
+      return TypeError(node, output_type, kRepWord32);
+    }
+    return jsgraph()->graph()->NewNode(op, node);
+  }
+
+  Node* GetBitRepresentationFor(Node* node, MachineTypeUnion output_type) {
+    // Eagerly fold representation changes for constants.
+    switch (node->opcode()) {
+      case IrOpcode::kInt32Constant: {
+        int32_t value = OpParameter<int32_t>(node);
+        if (value == 0 || value == 1) return node;
+        return jsgraph()->OneConstant();  // value != 0
+      }
+      case IrOpcode::kHeapConstant: {
+        Handle<Object> handle = OpParameter<Unique<Object> >(node).handle();
+        DCHECK(*handle == isolate()->heap()->true_value() ||
+               *handle == isolate()->heap()->false_value());
+        return jsgraph()->Int32Constant(
+            *handle == isolate()->heap()->true_value() ? 1 : 0);
+      }
+      default:
+        break;
+    }
+    // Select the correct X -> Bit operator.
+    const Operator* op;
+    if (output_type & rWord) {
+      return node;  // No change necessary.
+    } else if (output_type & kRepWord64) {
+      return node;  // TODO(titzer): No change necessary, on 64-bit.
+    } else if (output_type & kRepTagged) {
+      op = simplified()->ChangeBoolToBit();
+    } else {
+      return TypeError(node, output_type, kRepBit);
+    }
+    return jsgraph()->graph()->NewNode(op, node);
+  }
+
+  Node* GetWord64RepresentationFor(Node* node, MachineTypeUnion output_type) {
+    if (output_type & kRepBit) {
+      return node;  // Sloppy comparison -> word64
+    }
+    // Can't really convert Word64 to anything else. Purported to be internal.
+    return TypeError(node, output_type, kRepWord64);
+  }
+
+  const Operator* Int32OperatorFor(IrOpcode::Value opcode) {
+    switch (opcode) {
+      case IrOpcode::kNumberAdd:
+        return machine()->Int32Add();
+      case IrOpcode::kNumberSubtract:
+        return machine()->Int32Sub();
+      case IrOpcode::kNumberMultiply:
+        return machine()->Int32Mul();
+      case IrOpcode::kNumberDivide:
+        return machine()->Int32Div();
+      case IrOpcode::kNumberModulus:
+        return machine()->Int32Mod();
+      case IrOpcode::kNumberEqual:
+        return machine()->Word32Equal();
+      case IrOpcode::kNumberLessThan:
+        return machine()->Int32LessThan();
+      case IrOpcode::kNumberLessThanOrEqual:
+        return machine()->Int32LessThanOrEqual();
+      default:
+        UNREACHABLE();
+        return NULL;
+    }
+  }
+
+  const Operator* Uint32OperatorFor(IrOpcode::Value opcode) {
+    switch (opcode) {
+      case IrOpcode::kNumberAdd:
+        return machine()->Int32Add();
+      case IrOpcode::kNumberSubtract:
+        return machine()->Int32Sub();
+      case IrOpcode::kNumberMultiply:
+        return machine()->Int32Mul();
+      case IrOpcode::kNumberDivide:
+        return machine()->Int32UDiv();
+      case IrOpcode::kNumberModulus:
+        return machine()->Int32UMod();
+      case IrOpcode::kNumberEqual:
+        return machine()->Word32Equal();
+      case IrOpcode::kNumberLessThan:
+        return machine()->Uint32LessThan();
+      case IrOpcode::kNumberLessThanOrEqual:
+        return machine()->Uint32LessThanOrEqual();
+      default:
+        UNREACHABLE();
+        return NULL;
+    }
+  }
+
+  const Operator* Float64OperatorFor(IrOpcode::Value opcode) {
+    switch (opcode) {
+      case IrOpcode::kNumberAdd:
+        return machine()->Float64Add();
+      case IrOpcode::kNumberSubtract:
+        return machine()->Float64Sub();
+      case IrOpcode::kNumberMultiply:
+        return machine()->Float64Mul();
+      case IrOpcode::kNumberDivide:
+        return machine()->Float64Div();
+      case IrOpcode::kNumberModulus:
+        return machine()->Float64Mod();
+      case IrOpcode::kNumberEqual:
+        return machine()->Float64Equal();
+      case IrOpcode::kNumberLessThan:
+        return machine()->Float64LessThan();
+      case IrOpcode::kNumberLessThanOrEqual:
+        return machine()->Float64LessThanOrEqual();
+      default:
+        UNREACHABLE();
+        return NULL;
+    }
+  }
+
+  MachineType TypeForBasePointer(const FieldAccess& access) {
+    return access.tag() != 0 ? kMachAnyTagged : kMachPtr;
+  }
+
+  MachineType TypeForBasePointer(const ElementAccess& access) {
+    return access.tag() != 0 ? kMachAnyTagged : kMachPtr;
+  }
+
+  MachineType TypeFromUpperBound(Type* type) {
+    if (type->Is(Type::None()))
+      return kTypeAny;  // TODO(titzer): should be an error
+    if (type->Is(Type::Signed32())) return kTypeInt32;
+    if (type->Is(Type::Unsigned32())) return kTypeUint32;
+    if (type->Is(Type::Number())) return kTypeNumber;
+    if (type->Is(Type::Boolean())) return kTypeBool;
+    return kTypeAny;
+  }
+
+ private:
+  JSGraph* jsgraph_;
+  SimplifiedOperatorBuilder* simplified_;
+  Isolate* isolate_;
+
+  friend class RepresentationChangerTester;  // accesses the below fields.
+
+  bool testing_type_errors_;  // If {true}, don't abort on a type error.
+  bool type_error_;           // Set when a type error is detected.
+
+  Node* TypeError(Node* node, MachineTypeUnion output_type,
+                  MachineTypeUnion use) {
+    type_error_ = true;
+    if (!testing_type_errors_) {
+      OStringStream out_str;
+      out_str << static_cast<MachineType>(output_type);
+
+      OStringStream use_str;
+      use_str << static_cast<MachineType>(use);
+
+      V8_Fatal(__FILE__, __LINE__,
+               "RepresentationChangerError: node #%d:%s of "
+               "%s cannot be changed to %s",
+               node->id(), node->op()->mnemonic(), out_str.c_str(),
+               use_str.c_str());
+    }
+    return node;
+  }
+
+  JSGraph* jsgraph() { return jsgraph_; }
+  Isolate* isolate() { return isolate_; }
+  SimplifiedOperatorBuilder* simplified() { return simplified_; }
+  MachineOperatorBuilder* machine() { return jsgraph()->machine(); }
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_REPRESENTATION_CHANGE_H_
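
GetRepresentationFor dispatches on bit masks: a MachineTypeUnion combines representation bits and type bits, and a change is only needed when the single output representation differs from the representation the use expects. A hedged sketch of that check with made-up flag values (these are not V8's kRep*/kType* constants):

    #include <cassert>
    #include <cstdint>

    enum Flags : uint32_t {
      kRepBit = 1 << 0, kRepWord32 = 1 << 1, kRepFloat64 = 1 << 2,
      kRepTagged = 1 << 3,
      kRepMask = kRepBit | kRepWord32 | kRepFloat64 | kRepTagged,
      kTypeInt32 = 1 << 8, kTypeUint32 = 1 << 9,
    };

    bool NeedsChange(uint32_t output_type, uint32_t use_type) {
      // Exactly one output representation is expected (a single power of two).
      uint32_t out_rep = output_type & kRepMask;
      assert(out_rep != 0 && (out_rep & (out_rep - 1)) == 0);
      return (use_type & kRepMask) != out_rep;
    }

    int main() {
      assert(!NeedsChange(kRepWord32 | kTypeInt32, kRepWord32));   // Same rep: no-op.
      assert(NeedsChange(kRepWord32 | kTypeUint32, kRepFloat64));  // Word32 -> Float64.
      return 0;
    }
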
diff --git a/src/compiler/schedule.cc b/src/compiler/schedule.cc
new file mode 100644
index 0000000..a3b5ed3
--- /dev/null
+++ b/src/compiler/schedule.cc
@@ -0,0 +1,88 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/schedule.h"
+#include "src/ostreams.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+OStream& operator<<(OStream& os, const BasicBlockData::Control& c) {
+  switch (c) {
+    case BasicBlockData::kNone:
+      return os << "none";
+    case BasicBlockData::kGoto:
+      return os << "goto";
+    case BasicBlockData::kBranch:
+      return os << "branch";
+    case BasicBlockData::kReturn:
+      return os << "return";
+    case BasicBlockData::kThrow:
+      return os << "throw";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+OStream& operator<<(OStream& os, const Schedule& s) {
+  // TODO(svenpanne) Const-correct the RPO stuff/iterators.
+  BasicBlockVector* rpo = const_cast<Schedule*>(&s)->rpo_order();
+  for (BasicBlockVectorIter i = rpo->begin(); i != rpo->end(); ++i) {
+    BasicBlock* block = *i;
+    os << "--- BLOCK B" << block->id();
+    if (block->PredecessorCount() != 0) os << " <- ";
+    BasicBlock::Predecessors predecessors = block->predecessors();
+    bool comma = false;
+    for (BasicBlock::Predecessors::iterator j = predecessors.begin();
+         j != predecessors.end(); ++j) {
+      if (comma) os << ", ";
+      comma = true;
+      os << "B" << (*j)->id();
+    }
+    os << " ---\n";
+    for (BasicBlock::const_iterator j = block->begin(); j != block->end();
+         ++j) {
+      Node* node = *j;
+      os << "  " << *node;
+      if (!NodeProperties::IsControl(node)) {
+        Bounds bounds = NodeProperties::GetBounds(node);
+        os << " : ";
+        bounds.lower->PrintTo(os);
+        if (!bounds.upper->Is(bounds.lower)) {
+          os << "..";
+          bounds.upper->PrintTo(os);
+        }
+      }
+      os << "\n";
+    }
+    BasicBlock::Control control = block->control_;
+    if (control != BasicBlock::kNone) {
+      os << "  ";
+      if (block->control_input_ != NULL) {
+        os << *block->control_input_;
+      } else {
+        os << "Goto";
+      }
+      os << " -> ";
+      BasicBlock::Successors successors = block->successors();
+      comma = false;
+      for (BasicBlock::Successors::iterator j = successors.begin();
+           j != successors.end(); ++j) {
+        if (comma) os << ", ";
+        comma = true;
+        os << "B" << (*j)->id();
+      }
+      os << "\n";
+    }
+  }
+  return os;
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/schedule.h b/src/compiler/schedule.h
new file mode 100644
index 0000000..070691e
--- /dev/null
+++ b/src/compiler/schedule.h
@@ -0,0 +1,306 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SCHEDULE_H_
+#define V8_COMPILER_SCHEDULE_H_
+
+#include <vector>
+
+#include "src/v8.h"
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/opcodes.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class BasicBlock;
+class Graph;
+class ConstructScheduleData;
+class CodeGenerator;  // Because of a namespace bug in clang.
+
+class BasicBlockData {
+ public:
+  // Possible control nodes that can end a block.
+  enum Control {
+    kNone,    // Control not initialized yet.
+    kGoto,    // Goto a single successor block.
+    kBranch,  // Branch if true to first successor, otherwise second.
+    kReturn,  // Return a value from this method.
+    kThrow    // Throw an exception.
+  };
+
+  int32_t rpo_number_;       // Special RPO number of the block.
+  BasicBlock* dominator_;    // Immediate dominator of the block.
+  BasicBlock* loop_header_;  // Pointer to dominating loop header basic block,
+                             // NULL if none. For loop headers, this points to
+                             // the enclosing loop header.
+  int32_t loop_depth_;       // Loop nesting, 0 is top-level.
+  int32_t loop_end_;         // End of the loop, if this block is a loop header.
+  int32_t code_start_;       // Start index of arch-specific code.
+  int32_t code_end_;         // End index of arch-specific code.
+  bool deferred_;            // {true} if this block is considered the slow
+                             // path.
+  Control control_;          // Control at the end of the block.
+  Node* control_input_;      // Input value for control.
+  NodeVector nodes_;         // Nodes of this block in forward order.
+
+  explicit BasicBlockData(Zone* zone)
+      : rpo_number_(-1),
+        dominator_(NULL),
+        loop_header_(NULL),
+        loop_depth_(0),
+        loop_end_(-1),
+        code_start_(-1),
+        code_end_(-1),
+        deferred_(false),
+        control_(kNone),
+        control_input_(NULL),
+        nodes_(zone) {}
+
+  inline bool IsLoopHeader() const { return loop_end_ >= 0; }
+  inline bool LoopContains(BasicBlockData* block) const {
+    // RPO numbers must be initialized.
+    DCHECK(rpo_number_ >= 0);
+    DCHECK(block->rpo_number_ >= 0);
+    if (loop_end_ < 0) return false;  // This is not a loop.
+    return block->rpo_number_ >= rpo_number_ && block->rpo_number_ < loop_end_;
+  }
+  int first_instruction_index() {
+    DCHECK(code_start_ >= 0);
+    DCHECK(code_end_ > 0);
+    DCHECK(code_end_ >= code_start_);
+    return code_start_;
+  }
+  int last_instruction_index() {
+    DCHECK(code_start_ >= 0);
+    DCHECK(code_end_ > 0);
+    DCHECK(code_end_ >= code_start_);
+    return code_end_ - 1;
+  }
+};
+
+OStream& operator<<(OStream& os, const BasicBlockData::Control& c);
+
+// A basic block contains an ordered list of nodes and ends with a control
+// node. Note that if a basic block has phis, then all phis must appear as the
+// first nodes in the block.
+class BasicBlock FINAL : public GenericNode<BasicBlockData, BasicBlock> {
+ public:
+  BasicBlock(GenericGraphBase* graph, int input_count)
+      : GenericNode<BasicBlockData, BasicBlock>(graph, input_count) {}
+
+  typedef Uses Successors;
+  typedef Inputs Predecessors;
+
+  Successors successors() { return static_cast<Successors>(uses()); }
+  Predecessors predecessors() { return static_cast<Predecessors>(inputs()); }
+
+  int PredecessorCount() { return InputCount(); }
+  BasicBlock* PredecessorAt(int index) { return InputAt(index); }
+
+  int SuccessorCount() { return UseCount(); }
+  BasicBlock* SuccessorAt(int index) { return UseAt(index); }
+
+  int PredecessorIndexOf(BasicBlock* predecessor) {
+    BasicBlock::Predecessors predecessors = this->predecessors();
+    for (BasicBlock::Predecessors::iterator i = predecessors.begin();
+         i != predecessors.end(); ++i) {
+      if (*i == predecessor) return i.index();
+    }
+    return -1;
+  }
+
+  inline BasicBlock* loop_header() {
+    return static_cast<BasicBlock*>(loop_header_);
+  }
+  inline BasicBlock* ContainingLoop() {
+    if (IsLoopHeader()) return this;
+    return static_cast<BasicBlock*>(loop_header_);
+  }
+
+  typedef NodeVector::iterator iterator;
+  iterator begin() { return nodes_.begin(); }
+  iterator end() { return nodes_.end(); }
+
+  typedef NodeVector::const_iterator const_iterator;
+  const_iterator begin() const { return nodes_.begin(); }
+  const_iterator end() const { return nodes_.end(); }
+
+  typedef NodeVector::reverse_iterator reverse_iterator;
+  reverse_iterator rbegin() { return nodes_.rbegin(); }
+  reverse_iterator rend() { return nodes_.rend(); }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(BasicBlock);
+};
+
+typedef GenericGraphVisit::NullNodeVisitor<BasicBlockData, BasicBlock>
+    NullBasicBlockVisitor;
+
+typedef ZoneVector<BasicBlock*> BasicBlockVector;
+typedef BasicBlockVector::iterator BasicBlockVectorIter;
+typedef BasicBlockVector::reverse_iterator BasicBlockVectorRIter;
+
+// A schedule represents the result of assigning nodes to basic blocks
+// and ordering them within basic blocks. Prior to computing a schedule,
+// a graph has no notion of control flow ordering other than that induced
+// by the graph's dependencies. A schedule is required to generate code.
+class Schedule : public GenericGraph<BasicBlock> {
+ public:
+  explicit Schedule(Zone* zone)
+      : GenericGraph<BasicBlock>(zone),
+        zone_(zone),
+        all_blocks_(zone),
+        nodeid_to_block_(zone),
+        rpo_order_(zone) {
+    SetStart(NewBasicBlock());  // entry.
+    SetEnd(NewBasicBlock());    // exit.
+  }
+
+  // Return the block which contains {node}, if any.
+  BasicBlock* block(Node* node) const {
+    if (node->id() < static_cast<NodeId>(nodeid_to_block_.size())) {
+      return nodeid_to_block_[node->id()];
+    }
+    return NULL;
+  }
+
+  bool IsScheduled(Node* node) {
+    int length = static_cast<int>(nodeid_to_block_.size());
+    if (node->id() >= length) return false;
+    return nodeid_to_block_[node->id()] != NULL;
+  }
+
+  BasicBlock* GetBlockById(int block_id) { return all_blocks_[block_id]; }
+
+  int BasicBlockCount() const { return NodeCount(); }
+  int RpoBlockCount() const { return static_cast<int>(rpo_order_.size()); }
+
+  typedef ContainerPointerWrapper<BasicBlockVector> BasicBlocks;
+
+  // Return a list of all the blocks in the schedule, in arbitrary order.
+  BasicBlocks all_blocks() { return BasicBlocks(&all_blocks_); }
+
+  // Check if nodes {a} and {b} are in the same block.
+  inline bool SameBasicBlock(Node* a, Node* b) const {
+    BasicBlock* block = this->block(a);
+    return block != NULL && block == this->block(b);
+  }
+
+  // BasicBlock building: create a new block.
+  inline BasicBlock* NewBasicBlock() {
+    BasicBlock* block =
+        BasicBlock::New(this, 0, static_cast<BasicBlock**>(NULL));
+    all_blocks_.push_back(block);
+    return block;
+  }
+
+  // BasicBlock building: records that a node will later be added to a block
+  // but doesn't actually add the node to the block.
+  inline void PlanNode(BasicBlock* block, Node* node) {
+    if (FLAG_trace_turbo_scheduler) {
+      PrintF("Planning #%d:%s for future add to B%d\n", node->id(),
+             node->op()->mnemonic(), block->id());
+    }
+    DCHECK(this->block(node) == NULL);
+    SetBlockForNode(block, node);
+  }
+
+  // BasicBlock building: add a node to the end of the block.
+  inline void AddNode(BasicBlock* block, Node* node) {
+    if (FLAG_trace_turbo_scheduler) {
+      PrintF("Adding #%d:%s to B%d\n", node->id(), node->op()->mnemonic(),
+             block->id());
+    }
+    DCHECK(this->block(node) == NULL || this->block(node) == block);
+    block->nodes_.push_back(node);
+    SetBlockForNode(block, node);
+  }
+
+  // BasicBlock building: add a goto to the end of {block}.
+  void AddGoto(BasicBlock* block, BasicBlock* succ) {
+    DCHECK(block->control_ == BasicBlock::kNone);
+    block->control_ = BasicBlock::kGoto;
+    AddSuccessor(block, succ);
+  }
+
+  // BasicBlock building: add a branch at the end of {block}.
+  void AddBranch(BasicBlock* block, Node* branch, BasicBlock* tblock,
+                 BasicBlock* fblock) {
+    DCHECK(block->control_ == BasicBlock::kNone);
+    DCHECK(branch->opcode() == IrOpcode::kBranch);
+    block->control_ = BasicBlock::kBranch;
+    AddSuccessor(block, tblock);
+    AddSuccessor(block, fblock);
+    SetControlInput(block, branch);
+    if (branch->opcode() == IrOpcode::kBranch) {
+      // TODO(titzer): require a Branch node here. (sloppy tests).
+      SetBlockForNode(block, branch);
+    }
+  }
+
+  // BasicBlock building: add a return at the end of {block}.
+  void AddReturn(BasicBlock* block, Node* input) {
+    DCHECK(block->control_ == BasicBlock::kNone);
+    block->control_ = BasicBlock::kReturn;
+    SetControlInput(block, input);
+    if (block != end()) AddSuccessor(block, end());
+    if (input->opcode() == IrOpcode::kReturn) {
+      // TODO(titzer): require a Return node here. (sloppy tests).
+      SetBlockForNode(block, input);
+    }
+  }
+
+  // BasicBlock building: add a throw at the end of {block}.
+  void AddThrow(BasicBlock* block, Node* input) {
+    DCHECK(block->control_ == BasicBlock::kNone);
+    block->control_ = BasicBlock::kThrow;
+    SetControlInput(block, input);
+    if (block != end()) AddSuccessor(block, end());
+  }
+
+  friend class Scheduler;
+  friend class CodeGenerator;
+
+  void AddSuccessor(BasicBlock* block, BasicBlock* succ) {
+    succ->AppendInput(zone_, block);
+  }
+
+  BasicBlockVector* rpo_order() { return &rpo_order_; }
+
+ private:
+  friend class ScheduleVisualizer;
+
+  void SetControlInput(BasicBlock* block, Node* node) {
+    block->control_input_ = node;
+    SetBlockForNode(block, node);
+  }
+
+  void SetBlockForNode(BasicBlock* block, Node* node) {
+    int length = static_cast<int>(nodeid_to_block_.size());
+    if (node->id() >= length) {
+      nodeid_to_block_.resize(node->id() + 1);
+    }
+    nodeid_to_block_[node->id()] = block;
+  }
+
+  Zone* zone_;
+  BasicBlockVector all_blocks_;           // All basic blocks in the schedule.
+  BasicBlockVector nodeid_to_block_;      // Map from node to containing block.
+  BasicBlockVector rpo_order_;            // Reverse-post-order block list.
+};
+
+OStream& operator<<(OStream& os, const Schedule& s);
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_SCHEDULE_H_
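
As a usage-shaped sketch of the block-building API (NewBasicBlock, AddBranch, AddGoto), here is a stand-alone miniature built from plain structs; it is illustrative only and does not use the classes in this patch:

    #include <cassert>
    #include <deque>
    #include <vector>

    struct Block {
      int id;
      enum Control { kNone, kGoto, kBranch } control;
      std::vector<Block*> successors;
    };

    struct MiniSchedule {
      std::deque<Block> blocks;  // A deque keeps block pointers stable.
      Block* NewBasicBlock() {
        blocks.push_back(Block{static_cast<int>(blocks.size()), Block::kNone, {}});
        return &blocks.back();
      }
      void AddGoto(Block* from, Block* to) {
        assert(from->control == Block::kNone);  // Control set exactly once.
        from->control = Block::kGoto;
        from->successors.push_back(to);
      }
      void AddBranch(Block* from, Block* tblock, Block* fblock) {
        assert(from->control == Block::kNone);
        from->control = Block::kBranch;
        from->successors.push_back(tblock);
        from->successors.push_back(fblock);
      }
    };

    int main() {
      MiniSchedule s;
      Block* entry = s.NewBasicBlock();
      Block* then_block = s.NewBasicBlock();
      Block* else_block = s.NewBasicBlock();
      Block* exit = s.NewBasicBlock();
      s.AddBranch(entry, then_block, else_block);
      s.AddGoto(then_block, exit);
      s.AddGoto(else_block, exit);
      assert(entry->successors.size() == 2 && exit->successors.empty());
      return 0;
    }
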
diff --git a/src/compiler/scheduler.cc b/src/compiler/scheduler.cc
new file mode 100644
index 0000000..4029950
--- /dev/null
+++ b/src/compiler/scheduler.cc
@@ -0,0 +1,1125 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <deque>
+#include <queue>
+
+#include "src/compiler/scheduler.h"
+
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/data-flow.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+static inline void Trace(const char* msg, ...) {
+  if (FLAG_trace_turbo_scheduler) {
+    va_list arguments;
+    va_start(arguments, msg);
+    base::OS::VPrint(msg, arguments);
+    va_end(arguments);
+  }
+}
+
+
+// Internal class to build a control flow graph (i.e. the basic blocks and
+// edges between them within a Schedule) from the node graph.
+// Visits the control edges of the graph backwards from end in order to find
+// the connected control subgraph, needed for scheduling.
+class CFGBuilder {
+ public:
+  Scheduler* scheduler_;
+  Schedule* schedule_;
+  ZoneQueue<Node*> queue_;
+  NodeVector control_;
+
+  CFGBuilder(Zone* zone, Scheduler* scheduler)
+      : scheduler_(scheduler),
+        schedule_(scheduler->schedule_),
+        queue_(zone),
+        control_(zone) {}
+
+  // Run the control flow graph construction algorithm by walking the graph
+  // backwards from end through control edges, building and connecting the
+  // basic blocks for control nodes.
+  void Run() {
+    Graph* graph = scheduler_->graph_;
+    FixNode(schedule_->start(), graph->start());
+    Queue(graph->end());
+
+    while (!queue_.empty()) {  // Breadth-first backwards traversal.
+      Node* node = queue_.front();
+      queue_.pop();
+      int max = NodeProperties::PastControlIndex(node);
+      for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
+        Queue(node->InputAt(i));
+      }
+    }
+
+    for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
+      ConnectBlocks(*i);  // Connect block to its predecessor/successors.
+    }
+
+    FixNode(schedule_->end(), graph->end());
+  }
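
The traversal in Run() above is a backwards breadth-first walk over control inputs starting from the end node; each control node is visited once. A hedged stand-alone sketch over a made-up adjacency list (not a TurboFan graph):

    #include <cassert>
    #include <queue>
    #include <set>
    #include <vector>

    int main() {
      // control_inputs[n] lists the control inputs of node n; node 3 is "end".
      std::vector<std::vector<int>> control_inputs = {{}, {0}, {1}, {2, 1}};
      std::queue<int> queue;
      std::set<int> connected;  // Control nodes reachable backwards from end.
      queue.push(3);
      connected.insert(3);
      while (!queue.empty()) {
        int node = queue.front();
        queue.pop();
        for (int input : control_inputs[node]) {
          if (connected.insert(input).second) queue.push(input);
        }
      }
      assert(connected.size() == 4);  // All control nodes are connected to end.
      return 0;
    }
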
+
+  void FixNode(BasicBlock* block, Node* node) {
+    schedule_->AddNode(block, node);
+    scheduler_->GetData(node)->is_connected_control_ = true;
+    scheduler_->GetData(node)->placement_ = Scheduler::kFixed;
+  }
+
+  void Queue(Node* node) {
+    // Mark the connected control nodes as they are queued.
+    Scheduler::SchedulerData* data = scheduler_->GetData(node);
+    if (!data->is_connected_control_) {
+      BuildBlocks(node);
+      queue_.push(node);
+      control_.push_back(node);
+      data->is_connected_control_ = true;
+    }
+  }
+
+  void BuildBlocks(Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kLoop:
+      case IrOpcode::kMerge:
+        BuildBlockForNode(node);
+        break;
+      case IrOpcode::kBranch:
+        BuildBlocksForSuccessors(node, IrOpcode::kIfTrue, IrOpcode::kIfFalse);
+        break;
+      default:
+        break;
+    }
+  }
+
+  void ConnectBlocks(Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kLoop:
+      case IrOpcode::kMerge:
+        ConnectMerge(node);
+        break;
+      case IrOpcode::kBranch:
+        scheduler_->schedule_root_nodes_.push_back(node);
+        ConnectBranch(node);
+        break;
+      case IrOpcode::kReturn:
+        scheduler_->schedule_root_nodes_.push_back(node);
+        ConnectReturn(node);
+        break;
+      default:
+        break;
+    }
+  }
+
+  void BuildBlockForNode(Node* node) {
+    if (schedule_->block(node) == NULL) {
+      BasicBlock* block = schedule_->NewBasicBlock();
+      Trace("Create block B%d for #%d:%s\n", block->id(), node->id(),
+            node->op()->mnemonic());
+      FixNode(block, node);
+    }
+  }
+
+  void BuildBlocksForSuccessors(Node* node, IrOpcode::Value a,
+                                IrOpcode::Value b) {
+    Node* successors[2];
+    CollectSuccessorProjections(node, successors, a, b);
+    BuildBlockForNode(successors[0]);
+    BuildBlockForNode(successors[1]);
+  }
+
+  // Collect the branch-related projections from a node, such as IfTrue,
+  // IfFalse.
+  // TODO(titzer): consider moving this to node.h
+  void CollectSuccessorProjections(Node* node, Node** buffer,
+                                   IrOpcode::Value true_opcode,
+                                   IrOpcode::Value false_opcode) {
+    buffer[0] = NULL;
+    buffer[1] = NULL;
+    for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
+      if ((*i)->opcode() == true_opcode) {
+        DCHECK_EQ(NULL, buffer[0]);
+        buffer[0] = *i;
+      }
+      if ((*i)->opcode() == false_opcode) {
+        DCHECK_EQ(NULL, buffer[1]);
+        buffer[1] = *i;
+      }
+    }
+    DCHECK_NE(NULL, buffer[0]);
+    DCHECK_NE(NULL, buffer[1]);
+  }
+
+  void CollectSuccessorBlocks(Node* node, BasicBlock** buffer,
+                              IrOpcode::Value true_opcode,
+                              IrOpcode::Value false_opcode) {
+    Node* successors[2];
+    CollectSuccessorProjections(node, successors, true_opcode, false_opcode);
+    buffer[0] = schedule_->block(successors[0]);
+    buffer[1] = schedule_->block(successors[1]);
+  }
+
+  void ConnectBranch(Node* branch) {
+    Node* branch_block_node = NodeProperties::GetControlInput(branch);
+    BasicBlock* branch_block = schedule_->block(branch_block_node);
+    DCHECK(branch_block != NULL);
+
+    BasicBlock* successor_blocks[2];
+    CollectSuccessorBlocks(branch, successor_blocks, IrOpcode::kIfTrue,
+                           IrOpcode::kIfFalse);
+
+    TraceConnect(branch, branch_block, successor_blocks[0]);
+    TraceConnect(branch, branch_block, successor_blocks[1]);
+
+    schedule_->AddBranch(branch_block, branch, successor_blocks[0],
+                         successor_blocks[1]);
+  }
+
+  void ConnectMerge(Node* merge) {
+    BasicBlock* block = schedule_->block(merge);
+    DCHECK(block != NULL);
+    // For all of the merge's control inputs, add a goto at the end of the
+    // corresponding predecessor block to the merge's basic block.
+    for (InputIter j = merge->inputs().begin(); j != merge->inputs().end();
+         ++j) {
+      BasicBlock* predecessor_block = schedule_->block(*j);
+      if ((*j)->opcode() != IrOpcode::kReturn) {
+        TraceConnect(merge, predecessor_block, block);
+        schedule_->AddGoto(predecessor_block, block);
+      }
+    }
+  }
+
+  void ConnectReturn(Node* ret) {
+    Node* return_block_node = NodeProperties::GetControlInput(ret);
+    BasicBlock* return_block = schedule_->block(return_block_node);
+    TraceConnect(ret, return_block, NULL);
+    schedule_->AddReturn(return_block, ret);
+  }
+
+  void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
+    DCHECK_NE(NULL, block);
+    if (succ == NULL) {
+      Trace("Connect #%d:%s, B%d -> end\n", node->id(), node->op()->mnemonic(),
+            block->id());
+    } else {
+      Trace("Connect #%d:%s, B%d -> B%d\n", node->id(), node->op()->mnemonic(),
+            block->id(), succ->id());
+    }
+  }
+};
+
+
+Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
+  SchedulerData def = {0, 0, false, false, kUnknown};
+  return def;
+}
+
+
+Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
+    : zone_(zone),
+      graph_(graph),
+      schedule_(schedule),
+      scheduled_nodes_(zone),
+      schedule_root_nodes_(zone),
+      node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone),
+      has_floating_control_(false) {}
+
+
+Schedule* Scheduler::ComputeSchedule(Graph* graph) {
+  Schedule* schedule;
+  bool had_floating_control = false;
+  do {
+    Zone tmp_zone(graph->zone()->isolate());
+    schedule = new (graph->zone()) Schedule(graph->zone());
+    Scheduler scheduler(&tmp_zone, graph, schedule);
+
+    scheduler.BuildCFG();
+
+    Scheduler::ComputeSpecialRPO(schedule);
+    scheduler.GenerateImmediateDominatorTree();
+
+    scheduler.PrepareUses();
+    scheduler.ScheduleEarly();
+    scheduler.ScheduleLate();
+
+    had_floating_control = scheduler.ConnectFloatingControl();
+  } while (had_floating_control);
+
+  return schedule;
+}
+
+
+Scheduler::Placement Scheduler::GetPlacement(Node* node) {
+  SchedulerData* data = GetData(node);
+  if (data->placement_ == kUnknown) {  // Compute placement, once, on demand.
+    switch (node->opcode()) {
+      case IrOpcode::kParameter:
+        // Parameters are always fixed to the start node.
+        data->placement_ = kFixed;
+        break;
+      case IrOpcode::kPhi:
+      case IrOpcode::kEffectPhi: {
+        // Phis and effect phis are fixed if their control inputs are.
+        data->placement_ = GetPlacement(NodeProperties::GetControlInput(node));
+        break;
+      }
+#define DEFINE_FLOATING_CONTROL_CASE(V) case IrOpcode::k##V:
+        CONTROL_OP_LIST(DEFINE_FLOATING_CONTROL_CASE)
+#undef DEFINE_FLOATING_CONTROL_CASE
+        {
+          // Control nodes that were not control-reachable from end may float.
+          data->placement_ = kSchedulable;
+          if (!data->is_connected_control_) {
+            data->is_floating_control_ = true;
+            has_floating_control_ = true;
+            Trace("Floating control found: #%d:%s\n", node->id(),
+                  node->op()->mnemonic());
+          }
+          break;
+        }
+      default:
+        data->placement_ = kSchedulable;
+        break;
+    }
+  }
+  return data->placement_;
+}
+
+
+void Scheduler::BuildCFG() {
+  Trace("---------------- CREATING CFG ------------------\n");
+  CFGBuilder cfg_builder(zone_, this);
+  cfg_builder.Run();
+  // Initialize per-block data.
+  scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
+}
+
+
+BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
+  while (b1 != b2) {
+    int b1_rpo = GetRPONumber(b1);
+    int b2_rpo = GetRPONumber(b2);
+    DCHECK(b1_rpo != b2_rpo);
+    if (b1_rpo < b2_rpo) {
+      b2 = b2->dominator_;
+    } else {
+      b1 = b1->dominator_;
+    }
+  }
+  return b1;
+}
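
The common-dominator walk above repeatedly moves whichever block has the larger RPO number up to its immediate dominator until the two blocks meet. A hedged stand-alone sketch with plain structs standing in for BasicBlock:

    #include <cassert>

    struct Blk { int rpo; Blk* idom; };

    Blk* CommonDominator(Blk* b1, Blk* b2) {
      while (b1 != b2) {
        if (b1->rpo < b2->rpo) b2 = b2->idom; else b1 = b1->idom;
      }
      return b1;
    }

    int main() {
      // entry -> a -> {b, c}; b and c are both immediately dominated by a.
      Blk entry{0, nullptr}, a{1, &entry}, b{2, &a}, c{3, &a};
      assert(CommonDominator(&b, &c) == &a);
      assert(CommonDominator(&a, &c) == &a);
      return 0;
    }
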
+
+
+void Scheduler::GenerateImmediateDominatorTree() {
+  // Build the dominator graph.  TODO(danno): consider using Lengauer &
+  // Tarjan's algorithm if this becomes really slow.
+  Trace("------------ IMMEDIATE BLOCK DOMINATORS -----------\n");
+  for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) {
+    BasicBlock* current_rpo = schedule_->rpo_order_[i];
+    if (current_rpo != schedule_->start()) {
+      BasicBlock::Predecessors::iterator current_pred =
+          current_rpo->predecessors().begin();
+      BasicBlock::Predecessors::iterator end =
+          current_rpo->predecessors().end();
+      DCHECK(current_pred != end);
+      BasicBlock* dominator = *current_pred;
+      ++current_pred;
+      // For multiple predecessors, walk up the rpo ordering until a common
+      // dominator is found.
+      int current_rpo_pos = GetRPONumber(current_rpo);
+      while (current_pred != end) {
+        // Don't examine backwards edges.
+        BasicBlock* pred = *current_pred;
+        if (GetRPONumber(pred) < current_rpo_pos) {
+          dominator = GetCommonDominator(dominator, *current_pred);
+        }
+        ++current_pred;
+      }
+      current_rpo->dominator_ = dominator;
+      Trace("Block %d's idom is %d\n", current_rpo->id(), dominator->id());
+    }
+  }
+}
+
+
+class ScheduleEarlyNodeVisitor : public NullNodeVisitor {
+ public:
+  explicit ScheduleEarlyNodeVisitor(Scheduler* scheduler)
+      : has_changed_rpo_constraints_(true),
+        scheduler_(scheduler),
+        schedule_(scheduler->schedule_) {}
+
+  GenericGraphVisit::Control Pre(Node* node) {
+    int max_rpo = 0;
+    // Fixed nodes already know their schedule early position.
+    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
+      BasicBlock* block = schedule_->block(node);
+      DCHECK(block != NULL);
+      max_rpo = block->rpo_number_;
+      if (scheduler_->GetData(node)->minimum_rpo_ != max_rpo) {
+        has_changed_rpo_constraints_ = true;
+      }
+      scheduler_->GetData(node)->minimum_rpo_ = max_rpo;
+      Trace("Preschedule #%d:%s minimum_rpo = %d\n", node->id(),
+            node->op()->mnemonic(), max_rpo);
+    }
+    return GenericGraphVisit::CONTINUE;
+  }
+
+  GenericGraphVisit::Control Post(Node* node) {
+    int max_rpo = 0;
+    // Otherwise, the minimum RPO for the node is the max of its inputs'
+    // minimum RPOs.
+    if (scheduler_->GetPlacement(node) != Scheduler::kFixed) {
+      for (InputIter i = node->inputs().begin(); i != node->inputs().end();
+           ++i) {
+        int control_rpo = scheduler_->GetData(*i)->minimum_rpo_;
+        if (control_rpo > max_rpo) {
+          max_rpo = control_rpo;
+        }
+      }
+      if (scheduler_->GetData(node)->minimum_rpo_ != max_rpo) {
+        has_changed_rpo_constraints_ = true;
+      }
+      scheduler_->GetData(node)->minimum_rpo_ = max_rpo;
+      Trace("Postschedule #%d:%s minimum_rpo = %d\n", node->id(),
+            node->op()->mnemonic(), max_rpo);
+    }
+    return GenericGraphVisit::CONTINUE;
+  }
+
+  // TODO(mstarzinger): Dirty hack to unblock others; schedule early should be
+  // rewritten to use a pre-order traversal from the start instead.
+  bool has_changed_rpo_constraints_;
+
+ private:
+  Scheduler* scheduler_;
+  Schedule* schedule_;
+};
+
+
+void Scheduler::ScheduleEarly() {
+  Trace("------------------- SCHEDULE EARLY ----------------\n");
+
+  int fixpoint_count = 0;
+  ScheduleEarlyNodeVisitor visitor(this);
+  while (visitor.has_changed_rpo_constraints_) {
+    visitor.has_changed_rpo_constraints_ = false;
+    graph_->VisitNodeInputsFromEnd(&visitor);
+    fixpoint_count++;
+  }
+
+  Trace("It took %d iterations to determine fixpoint\n", fixpoint_count);
+}
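
The "schedule early" pass above iterates to a fixpoint in which a schedulable node's minimum RPO is the maximum of its inputs' minimum RPOs, seeded by the fixed nodes. A hedged stand-alone sketch over a hypothetical four-node graph:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main() {
      // Nodes 0 and 1 are fixed to blocks with RPO 0 and 2; node 2 uses both,
      // node 3 uses node 2.
      std::vector<std::vector<int>> inputs = {{}, {}, {0, 1}, {2}};
      std::vector<int> min_rpo = {0, 2, 0, 0};
      std::vector<bool> fixed = {true, true, false, false};

      bool changed = true;
      while (changed) {  // Iterate to a fixpoint, as ScheduleEarly does.
        changed = false;
        for (size_t n = 0; n < inputs.size(); ++n) {
          if (fixed[n]) continue;
          int rpo = 0;
          for (int in : inputs[n]) rpo = std::max(rpo, min_rpo[in]);
          if (rpo != min_rpo[n]) { min_rpo[n] = rpo; changed = true; }
        }
      }
      assert(min_rpo[2] == 2);  // Cannot be scheduled earlier than block RPO 2.
      assert(min_rpo[3] == 2);
      return 0;
    }
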
+
+
+class PrepareUsesVisitor : public NullNodeVisitor {
+ public:
+  explicit PrepareUsesVisitor(Scheduler* scheduler)
+      : scheduler_(scheduler), schedule_(scheduler->schedule_) {}
+
+  GenericGraphVisit::Control Pre(Node* node) {
+    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
+      // Fixed nodes are always roots for schedule late.
+      scheduler_->schedule_root_nodes_.push_back(node);
+      if (!schedule_->IsScheduled(node)) {
+        // Make sure root nodes are scheduled in their respective blocks.
+        Trace("  Scheduling fixed position node #%d:%s\n", node->id(),
+              node->op()->mnemonic());
+        IrOpcode::Value opcode = node->opcode();
+        BasicBlock* block =
+            opcode == IrOpcode::kParameter
+                ? schedule_->start()
+                : schedule_->block(NodeProperties::GetControlInput(node));
+        DCHECK(block != NULL);
+        schedule_->AddNode(block, node);
+      }
+    }
+
+    return GenericGraphVisit::CONTINUE;
+  }
+
+  void PostEdge(Node* from, int index, Node* to) {
+    // If the edge is from an unscheduled node, then tally it in the use count
+    // for all of its inputs. The same criterion will be used in ScheduleLate
+    // for decrementing use counts.
+    if (!schedule_->IsScheduled(from)) {
+      DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
+      ++(scheduler_->GetData(to)->unscheduled_count_);
+      Trace("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", to->id(),
+            to->op()->mnemonic(), from->id(), from->op()->mnemonic(),
+            scheduler_->GetData(to)->unscheduled_count_);
+    }
+  }
+
+ private:
+  Scheduler* scheduler_;
+  Schedule* schedule_;
+};
+
+
+void Scheduler::PrepareUses() {
+  Trace("------------------- PREPARE USES ------------------\n");
+  // Count the uses of every node; the counts are used to ensure that all of
+  // a node's uses are scheduled before the node itself.
+  PrepareUsesVisitor prepare_uses(this);
+  graph_->VisitNodeInputsFromEnd(&prepare_uses);
+}
+
+
+class ScheduleLateNodeVisitor : public NullNodeVisitor {
+ public:
+  explicit ScheduleLateNodeVisitor(Scheduler* scheduler)
+      : scheduler_(scheduler), schedule_(scheduler_->schedule_) {}
+
+  GenericGraphVisit::Control Pre(Node* node) {
+    // Don't schedule nodes that are already scheduled.
+    if (schedule_->IsScheduled(node)) {
+      return GenericGraphVisit::CONTINUE;
+    }
+    Scheduler::SchedulerData* data = scheduler_->GetData(node);
+    DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
+
+    // If all the uses of a node have been scheduled, then the node itself can
+    // be scheduled.
+    bool eligible = data->unscheduled_count_ == 0;
+    Trace("Testing for schedule eligibility for #%d:%s = %s\n", node->id(),
+          node->op()->mnemonic(), eligible ? "true" : "false");
+    if (!eligible) return GenericGraphVisit::DEFER;
+
+    // Determine the dominating block for all of the uses of this node. It is
+    // the latest block that this node can be scheduled in.
+    BasicBlock* block = NULL;
+    for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end();
+         ++i) {
+      BasicBlock* use_block = GetBlockForUse(i.edge());
+      block = block == NULL ? use_block : use_block == NULL
+                                              ? block
+                                              : scheduler_->GetCommonDominator(
+                                                    block, use_block);
+    }
+    DCHECK(block != NULL);
+
+    int min_rpo = data->minimum_rpo_;
+    Trace(
+        "Schedule late conservative for #%d:%s is B%d at loop depth %d, "
+        "minimum_rpo = %d\n",
+        node->id(), node->op()->mnemonic(), block->id(), block->loop_depth_,
+        min_rpo);
+    // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
+    // into enclosing loop pre-headers until they would precede their
+    // ScheduleEarly position.
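+    // For example, a loop-invariant value whose uses are all inside a loop is
+    // first assigned to a block within the loop by the dominator computation
+    // above; the walk below then moves it into an enclosing loop pre-header,
+    // as long as that pre-header does not precede the value's ScheduleEarly
+    // position (minimum_rpo_).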
+    BasicBlock* hoist_block = block;
+    while (hoist_block != NULL && hoist_block->rpo_number_ >= min_rpo) {
+      if (hoist_block->loop_depth_ < block->loop_depth_) {
+        block = hoist_block;
+        Trace("  hoisting #%d:%s to block %d\n", node->id(),
+              node->op()->mnemonic(), block->id());
+      }
+      // Try to hoist to the pre-header of the loop header.
+      hoist_block = hoist_block->loop_header();
+      if (hoist_block != NULL) {
+        BasicBlock* pre_header = hoist_block->dominator_;
+        DCHECK(pre_header == NULL ||
+               *hoist_block->predecessors().begin() == pre_header);
+        Trace(
+            "  hoist to pre-header B%d of loop header B%d, depth would be %d\n",
+            pre_header->id(), hoist_block->id(), pre_header->loop_depth_);
+        hoist_block = pre_header;
+      }
+    }
+
+    ScheduleNode(block, node);
+
+    return GenericGraphVisit::CONTINUE;
+  }
+
+ private:
+  BasicBlock* GetBlockForUse(Node::Edge edge) {
+    Node* use = edge.from();
+    IrOpcode::Value opcode = use->opcode();
+    if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
+      // If the use is from a fixed (i.e. non-floating) phi, use the block
+      // of the corresponding control input to the merge.
+      int index = edge.index();
+      if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
+        Trace("  input@%d into a fixed phi #%d:%s\n", index, use->id(),
+              use->op()->mnemonic());
+        Node* merge = NodeProperties::GetControlInput(use, 0);
+        opcode = merge->opcode();
+        DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
+        use = NodeProperties::GetControlInput(merge, index);
+      }
+    }
+    BasicBlock* result = schedule_->block(use);
+    if (result == NULL) return NULL;
+    Trace("  must dominate use #%d:%s in B%d\n", use->id(),
+          use->op()->mnemonic(), result->id());
+    return result;
+  }
+
+  void ScheduleNode(BasicBlock* block, Node* node) {
+    schedule_->PlanNode(block, node);
+    scheduler_->scheduled_nodes_[block->id()].push_back(node);
+
+    // Reduce the use count of the node's inputs to potentially make them
+    // schedulable.
+    for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
+      Scheduler::SchedulerData* data = scheduler_->GetData(*i);
+      DCHECK(data->unscheduled_count_ > 0);
+      --data->unscheduled_count_;
+      if (FLAG_trace_turbo_scheduler) {
+        Trace("  Use count for #%d:%s (used by #%d:%s)-- = %d\n", (*i)->id(),
+              (*i)->op()->mnemonic(), i.edge().from()->id(),
+              i.edge().from()->op()->mnemonic(), data->unscheduled_count_);
+        if (data->unscheduled_count_ == 0) {
+          Trace("  newly eligible #%d:%s\n", (*i)->id(),
+                (*i)->op()->mnemonic());
+        }
+      }
+    }
+  }
+
+  Scheduler* scheduler_;
+  Schedule* schedule_;
+};
+
+
+void Scheduler::ScheduleLate() {
+  Trace("------------------- SCHEDULE LATE -----------------\n");
+  if (FLAG_trace_turbo_scheduler) {
+    Trace("roots: ");
+    for (NodeVectorIter i = schedule_root_nodes_.begin();
+         i != schedule_root_nodes_.end(); ++i) {
+      Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic());
+    }
+    Trace("\n");
+  }
+
+  // Schedule late: place nodes in the common dominator block of their uses.
+  ScheduleLateNodeVisitor schedule_late_visitor(this);
+
+  {
+    Zone zone(zone_->isolate());
+    GenericGraphVisit::Visit<ScheduleLateNodeVisitor,
+                             NodeInputIterationTraits<Node> >(
+        graph_, &zone, schedule_root_nodes_.begin(), schedule_root_nodes_.end(),
+        &schedule_late_visitor);
+  }
+
+  // Add collected nodes for basic blocks to their blocks in the right order.
+  int block_num = 0;
+  for (NodeVectorVectorIter i = scheduled_nodes_.begin();
+       i != scheduled_nodes_.end(); ++i) {
+    for (NodeVectorRIter j = i->rbegin(); j != i->rend(); ++j) {
+      schedule_->AddNode(schedule_->all_blocks_.at(block_num), *j);
+    }
+    block_num++;
+  }
+}
+
+
+bool Scheduler::ConnectFloatingControl() {
+  if (!has_floating_control_) return false;
+
+  Trace("Connecting floating control...\n");
+
+  // Process blocks and instructions backwards to find and connect floating
+  // control nodes into the control graph according to the block they were
+  // scheduled into.
+  int max = static_cast<int>(schedule_->rpo_order()->size());
+  for (int i = max - 1; i >= 0; i--) {
+    BasicBlock* block = schedule_->rpo_order()->at(i);
+    // TODO(titzer): we place at most one floating control structure per
+    // basic block because scheduling currently can interleave phis from
+    // one subgraph with the merges from another subgraph.
+    bool one_placed = false;
+    for (int j = static_cast<int>(block->nodes_.size()) - 1; j >= 0; j--) {
+      Node* node = block->nodes_[j];
+      SchedulerData* data = GetData(node);
+      if (data->is_floating_control_ && !data->is_connected_control_ &&
+          !one_placed) {
+        Trace("  Floating control #%d:%s was scheduled in B%d\n", node->id(),
+              node->op()->mnemonic(), block->id());
+        ConnectFloatingControlSubgraph(block, node);
+        one_placed = true;
+      }
+    }
+  }
+
+  return true;
+}
+
+
+void Scheduler::ConnectFloatingControlSubgraph(BasicBlock* block, Node* end) {
+  Node* block_start = block->nodes_[0];
+  DCHECK(IrOpcode::IsControlOpcode(block_start->opcode()));
+  // Find the current "control successor" of the node that starts the block
+  // by searching the control uses for a control input edge from a connected
+  // control node.
+  Node* control_succ = NULL;
+  for (UseIter i = block_start->uses().begin(); i != block_start->uses().end();
+       ++i) {
+    Node::Edge edge = i.edge();
+    if (NodeProperties::IsControlEdge(edge) &&
+        GetData(edge.from())->is_connected_control_) {
+      DCHECK_EQ(NULL, control_succ);
+      control_succ = edge.from();
+      control_succ->ReplaceInput(edge.index(), end);
+    }
+  }
+  DCHECK_NE(NULL, control_succ);
+  Trace("  Inserting floating control end %d:%s between %d:%s -> %d:%s\n",
+        end->id(), end->op()->mnemonic(), control_succ->id(),
+        control_succ->op()->mnemonic(), block_start->id(),
+        block_start->op()->mnemonic());
+
+  // Find the "start" node of the control subgraph, which should be the
+  // unique node that is itself floating control but has a control input that
+  // is not floating.
+  Node* start = NULL;
+  ZoneQueue<Node*> queue(zone_);
+  queue.push(end);
+  GetData(end)->is_connected_control_ = true;
+  while (!queue.empty()) {
+    Node* node = queue.front();
+    queue.pop();
+    Trace("  Search #%d:%s for control subgraph start\n", node->id(),
+          node->op()->mnemonic());
+    int max = NodeProperties::PastControlIndex(node);
+    for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
+      Node* input = node->InputAt(i);
+      SchedulerData* data = GetData(input);
+      if (data->is_floating_control_) {
+        // {input} is floating control.
+        if (!data->is_connected_control_) {
+          // First time seeing {input} during this traversal, queue it.
+          queue.push(input);
+          data->is_connected_control_ = true;
+        }
+      } else {
+        // Otherwise, {node} is the start node, because it is floating control
+        // but is connected to {input} that is not floating control.
+        DCHECK_EQ(NULL, start);  // There can be only one.
+        start = node;
+      }
+    }
+  }
+
+  DCHECK_NE(NULL, start);
+  start->ReplaceInput(NodeProperties::FirstControlIndex(start), block_start);
+
+  Trace("  Connecting floating control start %d:%s to %d:%s\n", start->id(),
+        start->op()->mnemonic(), block_start->id(),
+        block_start->op()->mnemonic());
+}
+
+
+// Numbering for BasicBlockData.rpo_number_ for this block traversal:
+static const int kBlockOnStack = -2;
+static const int kBlockVisited1 = -3;
+static const int kBlockVisited2 = -4;
+static const int kBlockUnvisited1 = -1;
+static const int kBlockUnvisited2 = kBlockVisited1;
+
+struct SpecialRPOStackFrame {
+  BasicBlock* block;
+  int index;
+};
+
+struct BlockList {
+  BasicBlock* block;
+  BlockList* next;
+
+  BlockList* Add(Zone* zone, BasicBlock* b) {
+    BlockList* list = static_cast<BlockList*>(zone->New(sizeof(BlockList)));
+    list->block = b;
+    list->next = this;
+    return list;
+  }
+
+  void Serialize(BasicBlockVector* final_order) {
+    for (BlockList* l = this; l != NULL; l = l->next) {
+      l->block->rpo_number_ = static_cast<int>(final_order->size());
+      final_order->push_back(l->block);
+    }
+  }
+};
+
+struct LoopInfo {
+  BasicBlock* header;
+  ZoneList<BasicBlock*>* outgoing;
+  BitVector* members;
+  LoopInfo* prev;
+  BlockList* end;
+  BlockList* start;
+
+  void AddOutgoing(Zone* zone, BasicBlock* block) {
+    if (outgoing == NULL) outgoing = new (zone) ZoneList<BasicBlock*>(2, zone);
+    outgoing->Add(block, zone);
+  }
+};
+
+
+static int Push(SpecialRPOStackFrame* stack, int depth, BasicBlock* child,
+                int unvisited) {
+  if (child->rpo_number_ == unvisited) {
+    stack[depth].block = child;
+    stack[depth].index = 0;
+    child->rpo_number_ = kBlockOnStack;
+    return depth + 1;
+  }
+  return depth;
+}
+
+
+// Computes loop membership from the backedges of the control flow graph.
+static LoopInfo* ComputeLoopInfo(
+    Zone* zone, SpecialRPOStackFrame* queue, int num_loops, int num_blocks,
+    ZoneList<std::pair<BasicBlock*, int> >* backedges) {
+  LoopInfo* loops = zone->NewArray<LoopInfo>(num_loops);
+  memset(loops, 0, num_loops * sizeof(LoopInfo));
+
+  // Compute loop membership starting from backedges.
+  // O(max(loop_depth) * max(|loop|))
+  for (int i = 0; i < backedges->length(); i++) {
+    BasicBlock* member = backedges->at(i).first;
+    BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
+    int loop_num = header->loop_end_;
+    if (loops[loop_num].header == NULL) {
+      loops[loop_num].header = header;
+      loops[loop_num].members = new (zone) BitVector(num_blocks, zone);
+    }
+
+    int queue_length = 0;
+    if (member != header) {
+      // As long as the header doesn't have a backedge to itself,
+      // push the member onto the queue and process its predecessors.
+      if (!loops[loop_num].members->Contains(member->id())) {
+        loops[loop_num].members->Add(member->id());
+      }
+      queue[queue_length++].block = member;
+    }
+
+    // Propagate loop membership backwards. All predecessors of M up to the
+    // loop header H are members of the loop too. O(|blocks between M and H|).
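+    // For example, for a backedge B3 -> B1 with predecessor chain
+    // B1 -> B2 -> B3, the walk below marks B3 and B2 as members of the loop
+    // headed at B1; the header itself is never added to its own member set.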
+    while (queue_length > 0) {
+      BasicBlock* block = queue[--queue_length].block;
+      for (int i = 0; i < block->PredecessorCount(); i++) {
+        BasicBlock* pred = block->PredecessorAt(i);
+        if (pred != header) {
+          if (!loops[loop_num].members->Contains(pred->id())) {
+            loops[loop_num].members->Add(pred->id());
+            queue[queue_length++].block = pred;
+          }
+        }
+      }
+    }
+  }
+  return loops;
+}
+
+
+#if DEBUG
+static void PrintRPO(int num_loops, LoopInfo* loops, BasicBlockVector* order) {
+  PrintF("-- RPO with %d loops ", num_loops);
+  if (num_loops > 0) {
+    PrintF("(");
+    for (int i = 0; i < num_loops; i++) {
+      if (i > 0) PrintF(" ");
+      PrintF("B%d", loops[i].header->id());
+    }
+    PrintF(") ");
+  }
+  PrintF("-- \n");
+
+  for (int i = 0; i < static_cast<int>(order->size()); i++) {
+    BasicBlock* block = (*order)[i];
+    int bid = block->id();
+    PrintF("%5d:", i);
+    for (int i = 0; i < num_loops; i++) {
+      bool membership = loops[i].members->Contains(bid);
+      bool range = loops[i].header->LoopContains(block);
+      PrintF(membership ? " |" : "  ");
+      PrintF(range ? "x" : " ");
+    }
+    PrintF("  B%d: ", bid);
+    if (block->loop_end_ >= 0) {
+      PrintF(" range: [%d, %d)", block->rpo_number_, block->loop_end_);
+    }
+    PrintF("\n");
+  }
+}
+
+
+static void VerifySpecialRPO(int num_loops, LoopInfo* loops,
+                             BasicBlockVector* order) {
+  DCHECK(order->size() > 0);
+  DCHECK((*order)[0]->id() == 0);  // entry should be first.
+
+  for (int i = 0; i < num_loops; i++) {
+    LoopInfo* loop = &loops[i];
+    BasicBlock* header = loop->header;
+
+    DCHECK(header != NULL);
+    DCHECK(header->rpo_number_ >= 0);
+    DCHECK(header->rpo_number_ < static_cast<int>(order->size()));
+    DCHECK(header->loop_end_ >= 0);
+    DCHECK(header->loop_end_ <= static_cast<int>(order->size()));
+    DCHECK(header->loop_end_ > header->rpo_number_);
+
+    // Verify the start ... end list relationship.
+    int links = 0;
+    BlockList* l = loop->start;
+    DCHECK(l != NULL && l->block == header);
+    bool end_found;
+    while (true) {
+      if (l == NULL || l == loop->end) {
+        end_found = (loop->end == l);
+        break;
+      }
+      // The list should be in same order as the final result.
+      DCHECK(l->block->rpo_number_ == links + loop->header->rpo_number_);
+      links++;
+      l = l->next;
+      DCHECK(links < static_cast<int>(2 * order->size()));  // cycle?
+    }
+    DCHECK(links > 0);
+    DCHECK(links == (header->loop_end_ - header->rpo_number_));
+    DCHECK(end_found);
+
+    // Check the contiguousness of loops.
+    int count = 0;
+    for (int j = 0; j < static_cast<int>(order->size()); j++) {
+      BasicBlock* block = order->at(j);
+      DCHECK(block->rpo_number_ == j);
+      if (j < header->rpo_number_ || j >= header->loop_end_) {
+        DCHECK(!loop->members->Contains(block->id()));
+      } else {
+        if (block == header) {
+          DCHECK(!loop->members->Contains(block->id()));
+        } else {
+          DCHECK(loop->members->Contains(block->id()));
+        }
+        count++;
+      }
+    }
+    DCHECK(links == count);
+  }
+}
+#endif  // DEBUG
+
+
+// Compute the special reverse-post-order block ordering, which is essentially
+// an RPO of the graph where loop bodies are contiguous. Properties:
+// 1. If block A is a predecessor of B, then A appears before B in the order,
+//    unless B is a loop header and A is in the loop headed at B
+//    (i.e. A -> B is a backedge).
+// => If block A dominates block B, then A appears before B in the order.
+// => If block A is a loop header, A appears before all blocks in the loop
+//    headed at A.
+// 2. All loops are contiguous in the order (i.e. no intervening blocks that
+//    do not belong to the loop.)
+// Note that a simple RPO traversal satisfies (1) but not (2).
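+// For example, for a CFG B0 -> B1, B1 -> B2, B2 -> B1 (backedge), B1 -> B3,
+// the special RPO is B0 B1 B2 B3: the loop {B1, B2} stays contiguous and the
+// loop exit B3 is emitted only after the loop body.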
+BasicBlockVector* Scheduler::ComputeSpecialRPO(Schedule* schedule) {
+  Zone tmp_zone(schedule->zone()->isolate());
+  Zone* zone = &tmp_zone;
+  Trace("------------- COMPUTING SPECIAL RPO ---------------\n");
+  // RPO should not have been computed for this schedule yet.
+  CHECK_EQ(kBlockUnvisited1, schedule->start()->rpo_number_);
+  CHECK_EQ(0, static_cast<int>(schedule->rpo_order_.size()));
+
+  // Perform an iterative RPO traversal using an explicit stack,
+  // recording backedges that form cycles. O(|B|).
+  ZoneList<std::pair<BasicBlock*, int> > backedges(1, zone);
+  SpecialRPOStackFrame* stack =
+      zone->NewArray<SpecialRPOStackFrame>(schedule->BasicBlockCount());
+  BasicBlock* entry = schedule->start();
+  BlockList* order = NULL;
+  int stack_depth = Push(stack, 0, entry, kBlockUnvisited1);
+  int num_loops = 0;
+
+  while (stack_depth > 0) {
+    int current = stack_depth - 1;
+    SpecialRPOStackFrame* frame = stack + current;
+
+    if (frame->index < frame->block->SuccessorCount()) {
+      // Process the next successor.
+      BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
+      if (succ->rpo_number_ == kBlockVisited1) continue;
+      if (succ->rpo_number_ == kBlockOnStack) {
+        // The successor is on the stack, so this is a backedge (cycle).
+        backedges.Add(
+            std::pair<BasicBlock*, int>(frame->block, frame->index - 1), zone);
+        if (succ->loop_end_ < 0) {
+          // Assign a new loop number to the header if it doesn't have one.
+          succ->loop_end_ = num_loops++;
+        }
+      } else {
+        // Push the successor onto the stack.
+        DCHECK(succ->rpo_number_ == kBlockUnvisited1);
+        stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited1);
+      }
+    } else {
+      // Finished with all successors; pop the stack and add the block.
+      order = order->Add(zone, frame->block);
+      frame->block->rpo_number_ = kBlockVisited1;
+      stack_depth--;
+    }
+  }
+
+  // If no loops were encountered, then the order we computed was correct.
+  LoopInfo* loops = NULL;
+  if (num_loops != 0) {
+    // Otherwise, compute the loop information from the backedges in order
+    // to perform a traversal that groups loop bodies together.
+    loops = ComputeLoopInfo(zone, stack, num_loops, schedule->BasicBlockCount(),
+                            &backedges);
+
+    // Initialize the "loop stack". Note the entry could be a loop header.
+    LoopInfo* loop = entry->IsLoopHeader() ? &loops[entry->loop_end_] : NULL;
+    order = NULL;
+
+    // Perform an iterative post-order traversal, visiting loop bodies before
+    // edges that lead out of loops. Visits each block once, but linking loop
+    // sections together is linear in the loop size, so the overall complexity
+    // is O(|B| + max(loop_depth) * max(|loop|)).
+    stack_depth = Push(stack, 0, entry, kBlockUnvisited2);
+    while (stack_depth > 0) {
+      SpecialRPOStackFrame* frame = stack + (stack_depth - 1);
+      BasicBlock* block = frame->block;
+      BasicBlock* succ = NULL;
+
+      if (frame->index < block->SuccessorCount()) {
+        // Process the next normal successor.
+        succ = block->SuccessorAt(frame->index++);
+      } else if (block->IsLoopHeader()) {
+        // Process additional outgoing edges from the loop header.
+        if (block->rpo_number_ == kBlockOnStack) {
+          // Finish the loop body the first time the header is left on the
+          // stack.
+          DCHECK(loop != NULL && loop->header == block);
+          loop->start = order->Add(zone, block);
+          order = loop->end;
+          block->rpo_number_ = kBlockVisited2;
+          // Pop the loop stack and continue visiting outgoing edges within the
+          // context of the outer loop, if any.
+          loop = loop->prev;
+          // We leave the loop header on the stack; the rest of this iteration
+          // and later iterations will go through its outgoing edges list.
+        }
+
+        // Use the next outgoing edge if there are any.
+        int outgoing_index = frame->index - block->SuccessorCount();
+        LoopInfo* info = &loops[block->loop_end_];
+        DCHECK(loop != info);
+        if (info->outgoing != NULL &&
+            outgoing_index < info->outgoing->length()) {
+          succ = info->outgoing->at(outgoing_index);
+          frame->index++;
+        }
+      }
+
+      if (succ != NULL) {
+        // Process the next successor.
+        if (succ->rpo_number_ == kBlockOnStack) continue;
+        if (succ->rpo_number_ == kBlockVisited2) continue;
+        DCHECK(succ->rpo_number_ == kBlockUnvisited2);
+        if (loop != NULL && !loop->members->Contains(succ->id())) {
+          // The successor is not in the current loop or any nested loop.
+          // Add it to the outgoing edges of this loop and visit it later.
+          loop->AddOutgoing(zone, succ);
+        } else {
+          // Push the successor onto the stack.
+          stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited2);
+          if (succ->IsLoopHeader()) {
+            // Push the inner loop onto the loop stack.
+            DCHECK(succ->loop_end_ >= 0 && succ->loop_end_ < num_loops);
+            LoopInfo* next = &loops[succ->loop_end_];
+            next->end = order;
+            next->prev = loop;
+            loop = next;
+          }
+        }
+      } else {
+        // Finished with all successors of the current block.
+        if (block->IsLoopHeader()) {
+          // If we are going to pop a loop header, then add its entire body.
+          LoopInfo* info = &loops[block->loop_end_];
+          for (BlockList* l = info->start; true; l = l->next) {
+            if (l->next == info->end) {
+              l->next = order;
+              info->end = order;
+              break;
+            }
+          }
+          order = info->start;
+        } else {
+          // Pop a single node off the stack and add it to the order.
+          order = order->Add(zone, block);
+          block->rpo_number_ = kBlockVisited2;
+        }
+        stack_depth--;
+      }
+    }
+  }
+
+  // Construct the final order from the list.
+  BasicBlockVector* final_order = &schedule->rpo_order_;
+  order->Serialize(final_order);
+
+  // Compute the correct loop header for every block and set the correct loop
+  // ends.
+  LoopInfo* current_loop = NULL;
+  BasicBlock* current_header = NULL;
+  int loop_depth = 0;
+  for (BasicBlockVectorIter i = final_order->begin(); i != final_order->end();
+       ++i) {
+    BasicBlock* current = *i;
+    current->loop_header_ = current_header;
+    if (current->IsLoopHeader()) {
+      loop_depth++;
+      current_loop = &loops[current->loop_end_];
+      BlockList* end = current_loop->end;
+      current->loop_end_ = end == NULL ? static_cast<int>(final_order->size())
+                                       : end->block->rpo_number_;
+      current_header = current_loop->header;
+      Trace("B%d is a loop header, increment loop depth to %d\n", current->id(),
+            loop_depth);
+    } else {
+      while (current_header != NULL &&
+             current->rpo_number_ >= current_header->loop_end_) {
+        DCHECK(current_header->IsLoopHeader());
+        DCHECK(current_loop != NULL);
+        current_loop = current_loop->prev;
+        current_header = current_loop == NULL ? NULL : current_loop->header;
+        --loop_depth;
+      }
+    }
+    current->loop_depth_ = loop_depth;
+    if (current->loop_header_ == NULL) {
+      Trace("B%d is not in a loop (depth == %d)\n", current->id(),
+            current->loop_depth_);
+    } else {
+      Trace("B%d has loop header B%d, (depth == %d)\n", current->id(),
+            current->loop_header_->id(), current->loop_depth_);
+    }
+  }
+
+#if DEBUG
+  if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order);
+  VerifySpecialRPO(num_loops, loops, final_order);
+#endif
+  return final_order;
+}
+}
+}
+}  // namespace v8::internal::compiler
diff --git a/src/compiler/scheduler.h b/src/compiler/scheduler.h
new file mode 100644
index 0000000..b21662f
--- /dev/null
+++ b/src/compiler/scheduler.h
@@ -0,0 +1,97 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SCHEDULER_H_
+#define V8_COMPILER_SCHEDULER_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/opcodes.h"
+#include "src/compiler/schedule.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Computes a schedule from a graph, placing nodes into basic blocks and
+// ordering the basic blocks in the special RPO order.
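+// The scheduler works in several phases: CFG construction (BuildCFG), special
+// RPO computation and immediate dominator tree generation, ScheduleEarly
+// (propagation of minimum RPO constraints), PrepareUses (use counting),
+// ScheduleLate (late placement with hoisting out of loops), and
+// ConnectFloatingControl for control nodes not yet connected to the end node.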
+class Scheduler {
+ public:
+  // The complete scheduling algorithm.
+  // Create a new schedule and place all nodes from the graph into it.
+  static Schedule* ComputeSchedule(Graph* graph);
+
+  // Compute the RPO of blocks in an existing schedule.
+  static BasicBlockVector* ComputeSpecialRPO(Schedule* schedule);
+
+  // (Exposed for testing only)
+  // Build and connect the CFG for a node graph, but don't schedule nodes.
+  static void ComputeCFG(Graph* graph, Schedule* schedule);
+
+ private:
+  enum Placement { kUnknown, kSchedulable, kFixed };
+
+  // Per-node data tracked during scheduling.
+  struct SchedulerData {
+    int unscheduled_count_;      // Number of unscheduled uses of this node.
+    int minimum_rpo_;            // Minimum legal RPO placement.
+    bool is_connected_control_;  // {true} if control-connected to the end node.
+    bool is_floating_control_;   // {true} if control, but not control-connected
+                                 // to the end node.
+    Placement placement_ : 3;    // Whether the node is fixed, schedulable,
+                                 // or not yet known.
+  };
+
+  Zone* zone_;
+  Graph* graph_;
+  Schedule* schedule_;
+  NodeVectorVector scheduled_nodes_;
+  NodeVector schedule_root_nodes_;
+  ZoneVector<SchedulerData> node_data_;
+  bool has_floating_control_;
+
+  Scheduler(Zone* zone, Graph* graph, Schedule* schedule);
+
+  SchedulerData DefaultSchedulerData();
+
+  SchedulerData* GetData(Node* node) {
+    DCHECK(node->id() < static_cast<int>(node_data_.size()));
+    return &node_data_[node->id()];
+  }
+
+  void BuildCFG();
+
+  Placement GetPlacement(Node* node);
+
+  int GetRPONumber(BasicBlock* block) {
+    DCHECK(block->rpo_number_ >= 0 &&
+           block->rpo_number_ < static_cast<int>(schedule_->rpo_order_.size()));
+    DCHECK(schedule_->rpo_order_[block->rpo_number_] == block);
+    return block->rpo_number_;
+  }
+
+  void GenerateImmediateDominatorTree();
+  BasicBlock* GetCommonDominator(BasicBlock* b1, BasicBlock* b2);
+
+  friend class CFGBuilder;
+
+  friend class ScheduleEarlyNodeVisitor;
+  void ScheduleEarly();
+
+  friend class PrepareUsesVisitor;
+  void PrepareUses();
+
+  friend class ScheduleLateNodeVisitor;
+  void ScheduleLate();
+
+  bool ConnectFloatingControl();
+
+  void ConnectFloatingControlSubgraph(BasicBlock* block, Node* node);
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_SCHEDULER_H_
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
new file mode 100644
index 0000000..f794525
--- /dev/null
+++ b/src/compiler/simplified-lowering.cc
@@ -0,0 +1,945 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/simplified-lowering.h"
+
+#include "src/base/bits.h"
+#include "src/code-factory.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/representation-change.h"
+#include "src/compiler/simplified-lowering.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Macro for outputting trace information from representation inference.
+#define TRACE(x) \
+  if (FLAG_trace_representation) PrintF x
+
+// Representation selection and lowering of {Simplified} operators to machine
+// operators are intertwined. We use a fixpoint calculation to compute both the
+// output representation and the best possible lowering for {Simplified} nodes.
+// Representation change insertion ensures that all values are in the correct
+// machine representation after this phase, as dictated by the machine
+// operators themselves.
+enum Phase {
+  // 1.) PROPAGATE: Traverse the graph from the end, pushing usage information
+  //     backwards from uses to definitions, around cycles in phis, according
+  //     to local rules for each operator.
+  //     During this phase, the usage information for a node determines the best
+  //     possible lowering for each operator so far, and that in turn determines
+  //     the output representation.
+  //     Therefore, to be correct, this phase must iterate to a fixpoint before
+  //     the next phase can begin.
+  PROPAGATE,
+
+  // 2.) LOWER: perform lowering for all {Simplified} nodes by replacing some
+  //     operators for some nodes, expanding some nodes to multiple nodes, or
+  //     removing some (redundant) nodes.
+  //     During this phase, use the {RepresentationChanger} to insert
+  //     representation changes between uses that demand a particular
+  //     representation and nodes that produce a different representation.
+  LOWER
+};
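+// For example, a NumberAdd whose inputs are both typed Signed32 and whose
+// uses never demand kTypeUint32, kTypeNumber or kTypeAny is treated as a
+// word32 binop and its operator is replaced with the corresponding Int32
+// operator during the LOWER phase; in the general case it is lowered to a
+// Float64 operation instead.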
+
+
+class RepresentationSelector {
+ public:
+  // Information for each node tracked during the fixpoint.
+  struct NodeInfo {
+    MachineTypeUnion use : 15;     // Union of all usages for the node.
+    bool queued : 1;               // Bookkeeping for the traversal.
+    bool visited : 1;              // Bookkeeping for the traversal.
+    MachineTypeUnion output : 15;  // Output type of the node.
+  };
+
+  RepresentationSelector(JSGraph* jsgraph, Zone* zone,
+                         RepresentationChanger* changer)
+      : jsgraph_(jsgraph),
+        count_(jsgraph->graph()->NodeCount()),
+        info_(zone->NewArray<NodeInfo>(count_)),
+        nodes_(zone),
+        replacements_(zone),
+        contains_js_nodes_(false),
+        phase_(PROPAGATE),
+        changer_(changer),
+        queue_(zone) {
+    memset(info_, 0, sizeof(NodeInfo) * count_);
+  }
+
+  void Run(SimplifiedLowering* lowering) {
+    // Run propagation phase to a fixpoint.
+    TRACE(("--{Propagation phase}--\n"));
+    phase_ = PROPAGATE;
+    Enqueue(jsgraph_->graph()->end());
+    // Process nodes from the queue until it is empty.
+    while (!queue_.empty()) {
+      Node* node = queue_.front();
+      NodeInfo* info = GetInfo(node);
+      queue_.pop();
+      info->queued = false;
+      TRACE((" visit #%d: %s\n", node->id(), node->op()->mnemonic()));
+      VisitNode(node, info->use, NULL);
+      TRACE(("  ==> output "));
+      PrintInfo(info->output);
+      TRACE(("\n"));
+    }
+
+    // Run lowering and change insertion phase.
+    TRACE(("--{Simplified lowering phase}--\n"));
+    phase_ = LOWER;
+    // Process nodes from the collected {nodes_} vector.
+    for (NodeVector::iterator i = nodes_.begin(); i != nodes_.end(); ++i) {
+      Node* node = *i;
+      TRACE((" visit #%d: %s\n", node->id(), node->op()->mnemonic()));
+      // Reuse {VisitNode()} so the representation rules are in one place.
+      VisitNode(node, GetUseInfo(node), lowering);
+    }
+
+    // Perform the final replacements.
+    for (NodeVector::iterator i = replacements_.begin();
+         i != replacements_.end(); ++i) {
+      Node* node = *i;
+      Node* replacement = *(++i);
+      node->ReplaceUses(replacement);
+    }
+  }
+
+  // Enqueue {node} if the {use} contains new information for that node.
+  // Add {node} to {nodes_} if this is the first time it's been visited.
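+  // Because use bits only ever accumulate (info->use |= use), a node is
+  // re-queued only when a genuinely new use bit appears; since there are
+  // finitely many MachineType bits, the PROPAGATE phase terminates.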
+  void Enqueue(Node* node, MachineTypeUnion use = 0) {
+    if (phase_ != PROPAGATE) return;
+    NodeInfo* info = GetInfo(node);
+    if (!info->visited) {
+      // First visit of this node.
+      info->visited = true;
+      info->queued = true;
+      nodes_.push_back(node);
+      queue_.push(node);
+      TRACE(("  initial: "));
+      info->use |= use;
+      PrintUseInfo(node);
+      return;
+    }
+    TRACE(("   queue?: "));
+    PrintUseInfo(node);
+    if ((info->use & use) != use) {
+      // New usage information for the node is available.
+      if (!info->queued) {
+        queue_.push(node);
+        info->queued = true;
+        TRACE(("   added: "));
+      } else {
+        TRACE((" inqueue: "));
+      }
+      info->use |= use;
+      PrintUseInfo(node);
+    }
+  }
+
+  bool lower() { return phase_ == LOWER; }
+
+  void Enqueue(Node* node, MachineType use) {
+    Enqueue(node, static_cast<MachineTypeUnion>(use));
+  }
+
+  void SetOutput(Node* node, MachineTypeUnion output) {
+    // Every node should have at most one output representation. Note that
+    // phis can have 0, if they have not been used in a representation-inducing
+    // instruction.
+    DCHECK((output & kRepMask) == 0 ||
+           base::bits::IsPowerOfTwo32(output & kRepMask));
+    GetInfo(node)->output = output;
+  }
+
+  bool BothInputsAre(Node* node, Type* type) {
+    DCHECK_EQ(2, node->InputCount());
+    return NodeProperties::GetBounds(node->InputAt(0)).upper->Is(type) &&
+           NodeProperties::GetBounds(node->InputAt(1)).upper->Is(type);
+  }
+
+  void ProcessInput(Node* node, int index, MachineTypeUnion use) {
+    Node* input = node->InputAt(index);
+    if (phase_ == PROPAGATE) {
+      // In the propagate phase, propagate the usage information backward.
+      Enqueue(input, use);
+    } else {
+      // In the change phase, insert a change before the use if necessary.
+      if ((use & kRepMask) == 0) return;  // No input requirement on the use.
+      MachineTypeUnion output = GetInfo(input)->output;
+      if ((output & kRepMask & use) == 0) {
+        // Output representation doesn't match usage.
+        TRACE(("  change: #%d:%s(@%d #%d:%s) ", node->id(),
+               node->op()->mnemonic(), index, input->id(),
+               input->op()->mnemonic()));
+        TRACE((" from "));
+        PrintInfo(output);
+        TRACE((" to "));
+        PrintInfo(use);
+        TRACE(("\n"));
+        Node* n = changer_->GetRepresentationFor(input, output, use);
+        node->ReplaceInput(index, n);
+      }
+    }
+  }
+
+  void ProcessRemainingInputs(Node* node, int index) {
+    DCHECK_GE(index, NodeProperties::PastValueIndex(node));
+    DCHECK_GE(index, NodeProperties::PastContextIndex(node));
+    for (int i = std::max(index, NodeProperties::FirstEffectIndex(node));
+         i < NodeProperties::PastEffectIndex(node); ++i) {
+      Enqueue(node->InputAt(i));  // Effect inputs: just visit
+    }
+    for (int i = std::max(index, NodeProperties::FirstControlIndex(node));
+         i < NodeProperties::PastControlIndex(node); ++i) {
+      Enqueue(node->InputAt(i));  // Control inputs: just visit
+    }
+  }
+
+  // The default, most general visitation case. For {node}, process all value,
+  // context, effect, and control inputs, assuming that value inputs should have
+  // {kRepTagged} representation and can observe all output values {kTypeAny}.
+  void VisitInputs(Node* node) {
+    InputIter i = node->inputs().begin();
+    for (int j = OperatorProperties::GetValueInputCount(node->op()); j > 0;
+         ++i, j--) {
+      ProcessInput(node, i.index(), kMachAnyTagged);  // Value inputs
+    }
+    for (int j = OperatorProperties::GetContextInputCount(node->op()); j > 0;
+         ++i, j--) {
+      ProcessInput(node, i.index(), kMachAnyTagged);  // Context inputs
+    }
+    for (int j = OperatorProperties::GetEffectInputCount(node->op()); j > 0;
+         ++i, j--) {
+      Enqueue(*i);  // Effect inputs: just visit
+    }
+    for (int j = OperatorProperties::GetControlInputCount(node->op()); j > 0;
+         ++i, j--) {
+      Enqueue(*i);  // Control inputs: just visit
+    }
+    SetOutput(node, kMachAnyTagged);
+  }
+
+  // Helper for binops of the I x I -> O variety.
+  void VisitBinop(Node* node, MachineTypeUnion input_use,
+                  MachineTypeUnion output) {
+    DCHECK_EQ(2, node->InputCount());
+    ProcessInput(node, 0, input_use);
+    ProcessInput(node, 1, input_use);
+    SetOutput(node, output);
+  }
+
+  // Helper for unops of the I -> O variety.
+  void VisitUnop(Node* node, MachineTypeUnion input_use,
+                 MachineTypeUnion output) {
+    DCHECK_EQ(1, node->InputCount());
+    ProcessInput(node, 0, input_use);
+    SetOutput(node, output);
+  }
+
+  // Helper for leaf nodes.
+  void VisitLeaf(Node* node, MachineTypeUnion output) {
+    DCHECK_EQ(0, node->InputCount());
+    SetOutput(node, output);
+  }
+
+  // Helpers for specific types of binops.
+  void VisitFloat64Binop(Node* node) {
+    VisitBinop(node, kMachFloat64, kMachFloat64);
+  }
+  void VisitInt32Binop(Node* node) { VisitBinop(node, kMachInt32, kMachInt32); }
+  void VisitUint32Binop(Node* node) {
+    VisitBinop(node, kMachUint32, kMachUint32);
+  }
+  void VisitInt64Binop(Node* node) { VisitBinop(node, kMachInt64, kMachInt64); }
+  void VisitUint64Binop(Node* node) {
+    VisitBinop(node, kMachUint64, kMachUint64);
+  }
+  void VisitFloat64Cmp(Node* node) { VisitBinop(node, kMachFloat64, kRepBit); }
+  void VisitInt32Cmp(Node* node) { VisitBinop(node, kMachInt32, kRepBit); }
+  void VisitUint32Cmp(Node* node) { VisitBinop(node, kMachUint32, kRepBit); }
+  void VisitInt64Cmp(Node* node) { VisitBinop(node, kMachInt64, kRepBit); }
+  void VisitUint64Cmp(Node* node) { VisitBinop(node, kMachUint64, kRepBit); }
+
+  // Helper for handling phis.
+  void VisitPhi(Node* node, MachineTypeUnion use,
+                SimplifiedLowering* lowering) {
+    // First, propagate the usage information to inputs of the phi.
+    if (!lower()) {
+      int values = OperatorProperties::GetValueInputCount(node->op());
+      // Propagate {use} of the phi to value inputs, and 0 to control.
+      Node::Inputs inputs = node->inputs();
+      for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+           ++iter, --values) {
+        // TODO(titzer): it'd be nice to have distinguished edge kinds here.
+        ProcessInput(node, iter.index(), values > 0 ? use : 0);
+      }
+    }
+    // Phis adapt to whatever output representation their uses demand,
+    // pushing representation changes to their inputs.
+    MachineTypeUnion use_rep = GetUseInfo(node) & kRepMask;
+    MachineTypeUnion use_type = GetUseInfo(node) & kTypeMask;
+    MachineTypeUnion rep = 0;
+    if (use_rep & kRepTagged) {
+      rep = kRepTagged;  // Tagged overrides everything.
+    } else if (use_rep & kRepFloat64) {
+      rep = kRepFloat64;
+    } else if (use_rep & kRepWord64) {
+      rep = kRepWord64;
+    } else if (use_rep & kRepWord32) {
+      rep = kRepWord32;
+    } else if (use_rep & kRepBit) {
+      rep = kRepBit;
+    } else {
+      // There was no representation associated with any of the uses.
+      // TODO(titzer): Select the best rep using phi's type, not the usage type?
+      if (use_type & kTypeAny) {
+        rep = kRepTagged;
+      } else if (use_type & kTypeNumber) {
+        rep = kRepFloat64;
+      } else if (use_type & kTypeInt64 || use_type & kTypeUint64) {
+        rep = kRepWord64;
+      } else if (use_type & kTypeInt32 || use_type & kTypeUint32) {
+        rep = kRepWord32;
+      } else if (use_type & kTypeBool) {
+        rep = kRepBit;
+      } else {
+        UNREACHABLE();  // should have at least a usage type!
+      }
+    }
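+    // For example, a phi with one use demanding kRepFloat64 and another
+    // demanding kRepWord32 gets kRepFloat64 as its output representation; the
+    // word32 use then has a representation change inserted in front of it
+    // when that use is processed during the LOWER phase.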
+    // Preserve the usage type, but set the representation.
+    Type* upper = NodeProperties::GetBounds(node).upper;
+    MachineTypeUnion output_type = rep | changer_->TypeFromUpperBound(upper);
+    SetOutput(node, output_type);
+
+    if (lower()) {
+      int values = OperatorProperties::GetValueInputCount(node->op());
+
+      // Update the phi operator.
+      MachineType type = static_cast<MachineType>(output_type);
+      if (type != OpParameter<MachineType>(node)) {
+        node->set_op(lowering->common()->Phi(type, values));
+      }
+
+      // Convert inputs to the output representation of this phi.
+      Node::Inputs inputs = node->inputs();
+      for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+           ++iter, --values) {
+        // TODO(titzer): it'd be nice to have distinguished edge kinds here.
+        ProcessInput(node, iter.index(), values > 0 ? output_type : 0);
+      }
+    }
+  }
+
+  const Operator* Int32Op(Node* node) {
+    return changer_->Int32OperatorFor(node->opcode());
+  }
+
+  const Operator* Uint32Op(Node* node) {
+    return changer_->Uint32OperatorFor(node->opcode());
+  }
+
+  const Operator* Float64Op(Node* node) {
+    return changer_->Float64OperatorFor(node->opcode());
+  }
+
+  static MachineType AssumeImplicitFloat32Change(MachineType type) {
+    // TODO(titzer): Assume loads of float32 change representation to float64.
+    // Fix this with full support for float32 representations.
+    if (type & kRepFloat32) {
+      return static_cast<MachineType>((type & ~kRepFloat32) | kRepFloat64);
+    }
+    return type;
+  }
+
+  // Dispatching routine for visiting the node {node} with the usage {use}.
+  // Depending on the operator, propagate new usage info to the inputs.
+  void VisitNode(Node* node, MachineTypeUnion use,
+                 SimplifiedLowering* lowering) {
+    switch (node->opcode()) {
+      //------------------------------------------------------------------
+      // Common operators.
+      //------------------------------------------------------------------
+      case IrOpcode::kStart:
+      case IrOpcode::kDead:
+        return VisitLeaf(node, 0);
+      case IrOpcode::kParameter: {
+        // TODO(titzer): use representation from linkage.
+        Type* upper = NodeProperties::GetBounds(node).upper;
+        ProcessInput(node, 0, 0);
+        SetOutput(node, kRepTagged | changer_->TypeFromUpperBound(upper));
+        return;
+      }
+      case IrOpcode::kInt32Constant:
+        return VisitLeaf(node, kRepWord32);
+      case IrOpcode::kInt64Constant:
+        return VisitLeaf(node, kRepWord64);
+      case IrOpcode::kFloat64Constant:
+        return VisitLeaf(node, kRepFloat64);
+      case IrOpcode::kExternalConstant:
+        return VisitLeaf(node, kMachPtr);
+      case IrOpcode::kNumberConstant:
+        return VisitLeaf(node, kRepTagged);
+      case IrOpcode::kHeapConstant:
+        return VisitLeaf(node, kRepTagged);
+
+      case IrOpcode::kEnd:
+      case IrOpcode::kIfTrue:
+      case IrOpcode::kIfFalse:
+      case IrOpcode::kReturn:
+      case IrOpcode::kMerge:
+      case IrOpcode::kThrow:
+        return VisitInputs(node);  // default visit for all node inputs.
+
+      case IrOpcode::kBranch:
+        ProcessInput(node, 0, kRepBit);
+        Enqueue(NodeProperties::GetControlInput(node, 0));
+        break;
+      case IrOpcode::kPhi:
+        return VisitPhi(node, use, lowering);
+
+//------------------------------------------------------------------
+// JavaScript operators.
+//------------------------------------------------------------------
+// For now, we assume that all JS operators are too complex to lower
+// to Simplified and that they will always require tagged value inputs
+// and produce tagged value outputs.
+// TODO(turbofan): it might be possible to lower some JSOperators here,
+// but that responsibility really lies in the typed lowering phase.
+#define DEFINE_JS_CASE(x) case IrOpcode::k##x:
+        JS_OP_LIST(DEFINE_JS_CASE)
+#undef DEFINE_JS_CASE
+        contains_js_nodes_ = true;
+        VisitInputs(node);
+        return SetOutput(node, kRepTagged);
+
+      //------------------------------------------------------------------
+      // Simplified operators.
+      //------------------------------------------------------------------
+      case IrOpcode::kBooleanNot: {
+        if (lower()) {
+          MachineTypeUnion input = GetInfo(node->InputAt(0))->output;
+          if (input & kRepBit) {
+            // BooleanNot(x: kRepBit) => WordEqual(x, #0)
+            node->set_op(lowering->machine()->WordEqual());
+            node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0));
+          } else {
+            // BooleanNot(x: kRepTagged) => WordEqual(x, #false)
+            node->set_op(lowering->machine()->WordEqual());
+            node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
+          }
+        } else {
+          // No input representation requirement; adapt during lowering.
+          ProcessInput(node, 0, kTypeBool);
+          SetOutput(node, kRepBit);
+        }
+        break;
+      }
+      case IrOpcode::kBooleanToNumber: {
+        if (lower()) {
+          MachineTypeUnion input = GetInfo(node->InputAt(0))->output;
+          if (input & kRepBit) {
+            // BooleanToNumber(x: kRepBit) => x
+            DeferReplacement(node, node->InputAt(0));
+          } else {
+            // BooleanToNumber(x: kRepTagged) => WordEqual(x, #true)
+            node->set_op(lowering->machine()->WordEqual());
+            node->AppendInput(jsgraph_->zone(), jsgraph_->TrueConstant());
+          }
+        } else {
+          // No input representation requirement; adapt during lowering.
+          ProcessInput(node, 0, kTypeBool);
+          SetOutput(node, kMachInt32);
+        }
+        break;
+      }
+      case IrOpcode::kNumberEqual:
+      case IrOpcode::kNumberLessThan:
+      case IrOpcode::kNumberLessThanOrEqual: {
+        // Number comparisons reduce to integer comparisons for integer inputs.
+        if (BothInputsAre(node, Type::Signed32())) {
+          // => signed Int32Cmp
+          VisitInt32Cmp(node);
+          if (lower()) node->set_op(Int32Op(node));
+        } else if (BothInputsAre(node, Type::Unsigned32())) {
+          // => unsigned Int32Cmp
+          VisitUint32Cmp(node);
+          if (lower()) node->set_op(Uint32Op(node));
+        } else {
+          // => Float64Cmp
+          VisitFloat64Cmp(node);
+          if (lower()) node->set_op(Float64Op(node));
+        }
+        break;
+      }
+      case IrOpcode::kNumberAdd:
+      case IrOpcode::kNumberSubtract: {
+        // Add and subtract reduce to Int32Add/Sub if the inputs
+        // are already integers and all uses are truncating.
+        if (BothInputsAre(node, Type::Signed32()) &&
+            (use & (kTypeUint32 | kTypeNumber | kTypeAny)) == 0) {
+          // => signed Int32Add/Sub
+          VisitInt32Binop(node);
+          if (lower()) node->set_op(Int32Op(node));
+        } else if (BothInputsAre(node, Type::Unsigned32()) &&
+                   (use & (kTypeInt32 | kTypeNumber | kTypeAny)) == 0) {
+          // => unsigned Int32Add/Sub
+          VisitUint32Binop(node);
+          if (lower()) node->set_op(Uint32Op(node));
+        } else {
+          // => Float64Add/Sub
+          VisitFloat64Binop(node);
+          if (lower()) node->set_op(Float64Op(node));
+        }
+        break;
+      }
+      case IrOpcode::kNumberMultiply:
+      case IrOpcode::kNumberDivide:
+      case IrOpcode::kNumberModulus: {
+        // Float64Mul/Div/Mod
+        VisitFloat64Binop(node);
+        if (lower()) node->set_op(Float64Op(node));
+        break;
+      }
+      case IrOpcode::kNumberToInt32: {
+        MachineTypeUnion use_rep = use & kRepMask;
+        if (lower()) {
+          MachineTypeUnion in = GetInfo(node->InputAt(0))->output;
+          if ((in & kTypeMask) == kTypeInt32 || (in & kRepMask) == kRepWord32) {
+            // If the input has type int32, or is already a word32, just change
+            // representation if necessary.
+            VisitUnop(node, kTypeInt32 | use_rep, kTypeInt32 | use_rep);
+            DeferReplacement(node, node->InputAt(0));
+          } else {
+            // Require the input in float64 format and perform truncation.
+            // TODO(turbofan): avoid a truncation with a smi check.
+            VisitUnop(node, kTypeInt32 | kRepFloat64, kTypeInt32 | kRepWord32);
+            node->set_op(lowering->machine()->TruncateFloat64ToInt32());
+          }
+        } else {
+          // Propagate a type to the input, but pass through representation.
+          VisitUnop(node, kTypeInt32, kTypeInt32 | use_rep);
+        }
+        break;
+      }
+      case IrOpcode::kNumberToUint32: {
+        MachineTypeUnion use_rep = use & kRepMask;
+        if (lower()) {
+          MachineTypeUnion in = GetInfo(node->InputAt(0))->output;
+          if ((in & kTypeMask) == kTypeUint32 ||
+              (in & kRepMask) == kRepWord32) {
+            // If the input has type uint32, or is already a word32, just
+            // change representation if necessary.
+            VisitUnop(node, kTypeUint32 | use_rep, kTypeUint32 | use_rep);
+            DeferReplacement(node, node->InputAt(0));
+          } else {
+            // Require the input in float64 format to perform truncation.
+            // TODO(turbofan): avoid the truncation with a smi check.
+            VisitUnop(node, kTypeUint32 | kRepFloat64,
+                      kTypeUint32 | kRepWord32);
+            node->set_op(lowering->machine()->TruncateFloat64ToInt32());
+          }
+        } else {
+          // Propagate a type to the input, but pass through representation.
+          VisitUnop(node, kTypeUint32, kTypeUint32 | use_rep);
+        }
+        break;
+      }
+      case IrOpcode::kReferenceEqual: {
+        VisitBinop(node, kMachAnyTagged, kRepBit);
+        if (lower()) node->set_op(lowering->machine()->WordEqual());
+        break;
+      }
+      case IrOpcode::kStringEqual: {
+        VisitBinop(node, kMachAnyTagged, kRepBit);
+        if (lower()) lowering->DoStringEqual(node);
+        break;
+      }
+      case IrOpcode::kStringLessThan: {
+        VisitBinop(node, kMachAnyTagged, kRepBit);
+        if (lower()) lowering->DoStringLessThan(node);
+        break;
+      }
+      case IrOpcode::kStringLessThanOrEqual: {
+        VisitBinop(node, kMachAnyTagged, kRepBit);
+        if (lower()) lowering->DoStringLessThanOrEqual(node);
+        break;
+      }
+      case IrOpcode::kStringAdd: {
+        VisitBinop(node, kMachAnyTagged, kMachAnyTagged);
+        if (lower()) lowering->DoStringAdd(node);
+        break;
+      }
+      case IrOpcode::kLoadField: {
+        FieldAccess access = FieldAccessOf(node->op());
+        ProcessInput(node, 0, changer_->TypeForBasePointer(access));
+        ProcessRemainingInputs(node, 1);
+        SetOutput(node, AssumeImplicitFloat32Change(access.machine_type));
+        if (lower()) lowering->DoLoadField(node);
+        break;
+      }
+      case IrOpcode::kStoreField: {
+        FieldAccess access = FieldAccessOf(node->op());
+        ProcessInput(node, 0, changer_->TypeForBasePointer(access));
+        ProcessInput(node, 1, AssumeImplicitFloat32Change(access.machine_type));
+        ProcessRemainingInputs(node, 2);
+        SetOutput(node, 0);
+        if (lower()) lowering->DoStoreField(node);
+        break;
+      }
+      case IrOpcode::kLoadElement: {
+        ElementAccess access = ElementAccessOf(node->op());
+        ProcessInput(node, 0, changer_->TypeForBasePointer(access));
+        ProcessInput(node, 1, kMachInt32);  // element index
+        ProcessInput(node, 2, kMachInt32);  // length
+        ProcessRemainingInputs(node, 3);
+        SetOutput(node, AssumeImplicitFloat32Change(access.machine_type));
+        if (lower()) lowering->DoLoadElement(node);
+        break;
+      }
+      case IrOpcode::kStoreElement: {
+        ElementAccess access = ElementAccessOf(node->op());
+        ProcessInput(node, 0, changer_->TypeForBasePointer(access));
+        ProcessInput(node, 1, kMachInt32);  // element index
+        ProcessInput(node, 2, kMachInt32);  // length
+        ProcessInput(node, 3, AssumeImplicitFloat32Change(access.machine_type));
+        ProcessRemainingInputs(node, 4);
+        SetOutput(node, 0);
+        if (lower()) lowering->DoStoreElement(node);
+        break;
+      }
+
+      //------------------------------------------------------------------
+      // Machine-level operators.
+      //------------------------------------------------------------------
+      case IrOpcode::kLoad: {
+        // TODO(titzer): machine loads/stores need to know BaseTaggedness!?
+        MachineType tBase = kRepTagged;
+        LoadRepresentation rep = OpParameter<LoadRepresentation>(node);
+        ProcessInput(node, 0, tBase);       // pointer or object
+        ProcessInput(node, 1, kMachInt32);  // index
+        ProcessRemainingInputs(node, 2);
+        SetOutput(node, rep);
+        break;
+      }
+      case IrOpcode::kStore: {
+        // TODO(titzer): machine loads/stores need to know BaseTaggedness!?
+        MachineType tBase = kRepTagged;
+        StoreRepresentation rep = OpParameter<StoreRepresentation>(node);
+        ProcessInput(node, 0, tBase);       // pointer or object
+        ProcessInput(node, 1, kMachInt32);  // index
+        ProcessInput(node, 2, rep.machine_type());
+        ProcessRemainingInputs(node, 3);
+        SetOutput(node, 0);
+        break;
+      }
+      case IrOpcode::kWord32Shr:
+        // We output unsigned int32 for shift right because JavaScript's >>>
+        // operator produces an unsigned result.
+        return VisitBinop(node, kRepWord32, kRepWord32 | kTypeUint32);
+      case IrOpcode::kWord32And:
+      case IrOpcode::kWord32Or:
+      case IrOpcode::kWord32Xor:
+      case IrOpcode::kWord32Shl:
+      case IrOpcode::kWord32Sar:
+        // We use signed int32 as the output type for these word32 operations,
+        // though the machine bits are the same for either signed or unsigned,
+        // because JavaScript considers the result from these operations signed.
+        return VisitBinop(node, kRepWord32, kRepWord32 | kTypeInt32);
+      case IrOpcode::kWord32Equal:
+        return VisitBinop(node, kRepWord32, kRepBit);
+
+      case IrOpcode::kInt32Add:
+      case IrOpcode::kInt32Sub:
+      case IrOpcode::kInt32Mul:
+      case IrOpcode::kInt32Div:
+      case IrOpcode::kInt32Mod:
+        return VisitInt32Binop(node);
+      case IrOpcode::kInt32UDiv:
+      case IrOpcode::kInt32UMod:
+        return VisitUint32Binop(node);
+      case IrOpcode::kInt32LessThan:
+      case IrOpcode::kInt32LessThanOrEqual:
+        return VisitInt32Cmp(node);
+
+      case IrOpcode::kUint32LessThan:
+      case IrOpcode::kUint32LessThanOrEqual:
+        return VisitUint32Cmp(node);
+
+      case IrOpcode::kInt64Add:
+      case IrOpcode::kInt64Sub:
+      case IrOpcode::kInt64Mul:
+      case IrOpcode::kInt64Div:
+      case IrOpcode::kInt64Mod:
+        return VisitInt64Binop(node);
+      case IrOpcode::kInt64LessThan:
+      case IrOpcode::kInt64LessThanOrEqual:
+        return VisitInt64Cmp(node);
+
+      case IrOpcode::kInt64UDiv:
+      case IrOpcode::kInt64UMod:
+        return VisitUint64Binop(node);
+
+      case IrOpcode::kWord64And:
+      case IrOpcode::kWord64Or:
+      case IrOpcode::kWord64Xor:
+      case IrOpcode::kWord64Shl:
+      case IrOpcode::kWord64Shr:
+      case IrOpcode::kWord64Sar:
+        return VisitBinop(node, kRepWord64, kRepWord64);
+      case IrOpcode::kWord64Equal:
+        return VisitBinop(node, kRepWord64, kRepBit);
+
+      case IrOpcode::kChangeInt32ToInt64:
+        return VisitUnop(node, kTypeInt32 | kRepWord32,
+                         kTypeInt32 | kRepWord64);
+      case IrOpcode::kChangeUint32ToUint64:
+        return VisitUnop(node, kTypeUint32 | kRepWord32,
+                         kTypeUint32 | kRepWord64);
+      case IrOpcode::kTruncateInt64ToInt32:
+        // TODO(titzer): Is kTypeInt32 correct here?
+        return VisitUnop(node, kTypeInt32 | kRepWord64,
+                         kTypeInt32 | kRepWord32);
+
+      case IrOpcode::kChangeInt32ToFloat64:
+        return VisitUnop(node, kTypeInt32 | kRepWord32,
+                         kTypeInt32 | kRepFloat64);
+      case IrOpcode::kChangeUint32ToFloat64:
+        return VisitUnop(node, kTypeUint32 | kRepWord32,
+                         kTypeUint32 | kRepFloat64);
+      case IrOpcode::kChangeFloat64ToInt32:
+        return VisitUnop(node, kTypeInt32 | kRepFloat64,
+                         kTypeInt32 | kRepWord32);
+      case IrOpcode::kChangeFloat64ToUint32:
+        return VisitUnop(node, kTypeUint32 | kRepFloat64,
+                         kTypeUint32 | kRepWord32);
+
+      case IrOpcode::kFloat64Add:
+      case IrOpcode::kFloat64Sub:
+      case IrOpcode::kFloat64Mul:
+      case IrOpcode::kFloat64Div:
+      case IrOpcode::kFloat64Mod:
+        return VisitFloat64Binop(node);
+      case IrOpcode::kFloat64Sqrt:
+        return VisitUnop(node, kMachFloat64, kMachFloat64);
+      case IrOpcode::kFloat64Equal:
+      case IrOpcode::kFloat64LessThan:
+      case IrOpcode::kFloat64LessThanOrEqual:
+        return VisitFloat64Cmp(node);
+      default:
+        VisitInputs(node);
+        break;
+    }
+  }
+
+  void DeferReplacement(Node* node, Node* replacement) {
+    if (replacement->id() < count_) {
+      // Replace with a previously existing node eagerly.
+      node->ReplaceUses(replacement);
+    } else {
+      // Otherwise, we are replacing a node with a representation change.
+      // Such a substitution must be done after all lowering is done, because
+      // new nodes do not have {NodeInfo} entries, which would confuse the
+      // representation change insertion for their uses.
+      replacements_.push_back(node);
+      replacements_.push_back(replacement);
+    }
+    // TODO(titzer): node->RemoveAllInputs();  // Node is now dead.
+  }
+
+  void PrintUseInfo(Node* node) {
+    TRACE(("#%d:%-20s ", node->id(), node->op()->mnemonic()));
+    PrintInfo(GetUseInfo(node));
+    TRACE(("\n"));
+  }
+
+  void PrintInfo(MachineTypeUnion info) {
+    if (FLAG_trace_representation) {
+      OFStream os(stdout);
+      os << static_cast<MachineType>(info);
+    }
+  }
+
+ private:
+  JSGraph* jsgraph_;
+  int count_;                       // number of nodes in the graph
+  NodeInfo* info_;                  // node id -> usage information
+  NodeVector nodes_;                // collected nodes
+  NodeVector replacements_;         // replacements to be done after lowering
+  bool contains_js_nodes_;          // {true} if a JS operator was seen
+  Phase phase_;                     // current phase of algorithm
+  RepresentationChanger* changer_;  // for inserting representation changes
+  ZoneQueue<Node*> queue_;          // queue for traversing the graph
+
+  NodeInfo* GetInfo(Node* node) {
+    DCHECK(node->id() >= 0);
+    DCHECK(node->id() < count_);
+    return &info_[node->id()];
+  }
+
+  MachineTypeUnion GetUseInfo(Node* node) { return GetInfo(node)->use; }
+};
+
+
+Node* SimplifiedLowering::IsTagged(Node* node) {
+  // TODO(titzer): factor this out to a TaggingScheme abstraction.
+  STATIC_ASSERT(kSmiTagMask == 1);  // Only works if tag is the low bit.
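+  // With this tagging scheme the low bit is 0 for Smis and 1 for heap object
+  // pointers, so the masked value is non-zero exactly for heap objects.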
+  return graph()->NewNode(machine()->WordAnd(), node,
+                          jsgraph()->Int32Constant(kSmiTagMask));
+}
+
+
+void SimplifiedLowering::LowerAllNodes() {
+  SimplifiedOperatorBuilder simplified(graph()->zone());
+  RepresentationChanger changer(jsgraph(), &simplified,
+                                graph()->zone()->isolate());
+  RepresentationSelector selector(jsgraph(), zone(), &changer);
+  selector.Run(this);
+}
+
+
+Node* SimplifiedLowering::Untag(Node* node) {
+  // TODO(titzer): factor this out to a TaggingScheme abstraction.
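+  // The combined shift (kSmiTagSize + kSmiShiftSize) is typically 1 on 32-bit
+  // targets and 32 on 64-bit targets.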
+  Node* shift_amount = jsgraph()->Int32Constant(kSmiTagSize + kSmiShiftSize);
+  return graph()->NewNode(machine()->WordSar(), node, shift_amount);
+}
+
+
+Node* SimplifiedLowering::SmiTag(Node* node) {
+  // TODO(titzer): factor this out to a TaggingScheme abstraction.
+  Node* shift_amount = jsgraph()->Int32Constant(kSmiTagSize + kSmiShiftSize);
+  return graph()->NewNode(machine()->WordShl(), node, shift_amount);
+}
+
+
+Node* SimplifiedLowering::OffsetMinusTagConstant(int32_t offset) {
+  return jsgraph()->Int32Constant(offset - kHeapObjectTag);
+}
+
+
+static WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
+                                                MachineType representation,
+                                                Type* type) {
+  // TODO(turbofan): skip write barriers for Smis, etc.
+  if (base_is_tagged == kTaggedBase &&
+      RepresentationOf(representation) == kRepTagged) {
+    // Write barriers are only for writes into heap objects (i.e. tagged base).
+    return kFullWriteBarrier;
+  }
+  return kNoWriteBarrier;
+}
+
+
+void SimplifiedLowering::DoLoadField(Node* node) {
+  const FieldAccess& access = FieldAccessOf(node->op());
+  node->set_op(machine()->Load(access.machine_type));
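+  // The machine-level offset is relative to the (possibly tagged) base
+  // pointer, so the heap object tag is subtracted from the field offset.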
+  Node* offset = jsgraph()->Int32Constant(access.offset - access.tag());
+  node->InsertInput(zone(), 1, offset);
+}
+
+
+void SimplifiedLowering::DoStoreField(Node* node) {
+  const FieldAccess& access = FieldAccessOf(node->op());
+  WriteBarrierKind kind = ComputeWriteBarrierKind(
+      access.base_is_tagged, access.machine_type, access.type);
+  node->set_op(
+      machine()->Store(StoreRepresentation(access.machine_type, kind)));
+  Node* offset = jsgraph()->Int32Constant(access.offset - access.tag());
+  node->InsertInput(zone(), 1, offset);
+}
+
+
+Node* SimplifiedLowering::ComputeIndex(const ElementAccess& access,
+                                       Node* index) {
+  int element_size = ElementSizeOf(access.machine_type);
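+  // The final index is a byte offset: index * element_size + header_size - tag.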
+  if (element_size != 1) {
+    index = graph()->NewNode(machine()->Int32Mul(),
+                             jsgraph()->Int32Constant(element_size), index);
+  }
+  int fixed_offset = access.header_size - access.tag();
+  if (fixed_offset == 0) return index;
+  return graph()->NewNode(machine()->Int32Add(), index,
+                          jsgraph()->Int32Constant(fixed_offset));
+}
+
+
+void SimplifiedLowering::DoLoadElement(Node* node) {
+  const ElementAccess& access = ElementAccessOf(node->op());
+  node->set_op(machine()->Load(access.machine_type));
+  node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
+  node->RemoveInput(2);
+}
+
+
+void SimplifiedLowering::DoStoreElement(Node* node) {
+  const ElementAccess& access = ElementAccessOf(node->op());
+  WriteBarrierKind kind = ComputeWriteBarrierKind(
+      access.base_is_tagged, access.machine_type, access.type);
+  node->set_op(
+      machine()->Store(StoreRepresentation(access.machine_type, kind)));
+  node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
+  node->RemoveInput(2);
+}
+
+
+void SimplifiedLowering::DoStringAdd(Node* node) {
+  Callable callable = CodeFactory::StringAdd(
+      zone()->isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+  CallDescriptor* desc =
+      Linkage::GetStubCallDescriptor(callable.descriptor(), 0, flags, zone());
+  node->set_op(common()->Call(desc));
+  node->InsertInput(zone(), 0, jsgraph()->HeapConstant(callable.code()));
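+  // Append the remaining call inputs: a context value (undefined here) and
+  // the effect and control dependencies (the graph start node).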
+  node->AppendInput(zone(), jsgraph()->UndefinedConstant());
+  node->AppendInput(zone(), graph()->start());
+  node->AppendInput(zone(), graph()->start());
+}
+
+
+Node* SimplifiedLowering::StringComparison(Node* node, bool requires_ordering) {
+  CEntryStub stub(zone()->isolate(), 1);
+  Runtime::FunctionId f =
+      requires_ordering ? Runtime::kStringCompare : Runtime::kStringEquals;
+  ExternalReference ref(f, zone()->isolate());
+  Operator::Properties props = node->op()->properties();
+  // TODO(mstarzinger): We should call StringCompareStub here instead, once an
+  // interface descriptor is available for it.
+  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(f, 2, props, zone());
+  return graph()->NewNode(common()->Call(desc),
+                          jsgraph()->HeapConstant(stub.GetCode()),
+                          NodeProperties::GetValueInput(node, 0),
+                          NodeProperties::GetValueInput(node, 1),
+                          jsgraph()->ExternalConstant(ref),
+                          jsgraph()->Int32Constant(2),
+                          jsgraph()->UndefinedConstant());
+}
+
+
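+// The string comparison call returns a Smi encoding LESS, EQUAL or GREATER;
+// comparing that result against EQUAL yields the requested boolean relation.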
+void SimplifiedLowering::DoStringEqual(Node* node) {
+  node->set_op(machine()->WordEqual());
+  node->ReplaceInput(0, StringComparison(node, false));
+  node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
+}
+
+
+void SimplifiedLowering::DoStringLessThan(Node* node) {
+  node->set_op(machine()->IntLessThan());
+  node->ReplaceInput(0, StringComparison(node, true));
+  node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
+}
+
+
+void SimplifiedLowering::DoStringLessThanOrEqual(Node* node) {
+  node->set_op(machine()->IntLessThanOrEqual());
+  node->ReplaceInput(0, StringComparison(node, true));
+  node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
+}
+
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/simplified-lowering.h b/src/compiler/simplified-lowering.h
new file mode 100644
index 0000000..2ba7e3b
--- /dev/null
+++ b/src/compiler/simplified-lowering.h
@@ -0,0 +1,57 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SIMPLIFIED_LOWERING_H_
+#define V8_COMPILER_SIMPLIFIED_LOWERING_H_
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class SimplifiedLowering {
+ public:
+  explicit SimplifiedLowering(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
+  virtual ~SimplifiedLowering() {}
+
+  void LowerAllNodes();
+
+  // TODO(titzer): These are exposed for direct testing. Use a friend class.
+  void DoLoadField(Node* node);
+  void DoStoreField(Node* node);
+  void DoLoadElement(Node* node);
+  void DoStoreElement(Node* node);
+  void DoStringAdd(Node* node);
+  void DoStringEqual(Node* node);
+  void DoStringLessThan(Node* node);
+  void DoStringLessThanOrEqual(Node* node);
+
+ private:
+  JSGraph* jsgraph_;
+
+  Node* SmiTag(Node* node);
+  Node* IsTagged(Node* node);
+  Node* Untag(Node* node);
+  Node* OffsetMinusTagConstant(int32_t offset);
+  Node* ComputeIndex(const ElementAccess& access, Node* index);
+  Node* StringComparison(Node* node, bool requires_ordering);
+
+  friend class RepresentationSelector;
+
+  Zone* zone() { return jsgraph_->zone(); }
+  JSGraph* jsgraph() { return jsgraph_; }
+  Graph* graph() { return jsgraph()->graph(); }
+  CommonOperatorBuilder* common() { return jsgraph()->common(); }
+  MachineOperatorBuilder* machine() { return jsgraph()->machine(); }
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_SIMPLIFIED_LOWERING_H_
diff --git a/src/compiler/simplified-operator-reducer-unittest.cc b/src/compiler/simplified-operator-reducer-unittest.cc
new file mode 100644
index 0000000..739264e
--- /dev/null
+++ b/src/compiler/simplified-operator-reducer-unittest.cc
@@ -0,0 +1,483 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-unittest.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/simplified-operator-reducer.h"
+#include "src/compiler/typer.h"
+#include "src/conversions.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class SimplifiedOperatorReducerTest : public GraphTest {
+ public:
+  explicit SimplifiedOperatorReducerTest(int num_parameters = 1)
+      : GraphTest(num_parameters), simplified_(zone()) {}
+  virtual ~SimplifiedOperatorReducerTest() {}
+
+ protected:
+  Reduction Reduce(Node* node) {
+    Typer typer(zone());
+    MachineOperatorBuilder machine;
+    JSOperatorBuilder javascript(zone());
+    JSGraph jsgraph(graph(), common(), &javascript, &typer, &machine);
+    SimplifiedOperatorReducer reducer(&jsgraph);
+    return reducer.Reduce(node);
+  }
+
+  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+ private:
+  SimplifiedOperatorBuilder simplified_;
+};
+
+
+template <typename T>
+class SimplifiedOperatorReducerTestWithParam
+    : public SimplifiedOperatorReducerTest,
+      public ::testing::WithParamInterface<T> {
+ public:
+  explicit SimplifiedOperatorReducerTestWithParam(int num_parameters = 1)
+      : SimplifiedOperatorReducerTest(num_parameters) {}
+  virtual ~SimplifiedOperatorReducerTestWithParam() {}
+};
+
+
+namespace {
+
+static const double kFloat64Values[] = {
+    -V8_INFINITY,  -6.52696e+290, -1.05768e+290, -5.34203e+268, -1.01997e+268,
+    -8.22758e+266, -1.58402e+261, -5.15246e+241, -5.92107e+226, -1.21477e+226,
+    -1.67913e+188, -1.6257e+184,  -2.60043e+170, -2.52941e+168, -3.06033e+116,
+    -4.56201e+52,  -3.56788e+50,  -9.9066e+38,   -3.07261e+31,  -2.1271e+09,
+    -1.91489e+09,  -1.73053e+09,  -9.30675e+08,  -26030,        -20453,
+    -15790,        -11699,        -111,          -97,           -78,
+    -63,           -58,           -1.53858e-06,  -2.98914e-12,  -1.14741e-39,
+    -8.20347e-57,  -1.48932e-59,  -3.17692e-66,  -8.93103e-81,  -3.91337e-83,
+    -6.0489e-92,   -8.83291e-113, -4.28266e-117, -1.92058e-178, -2.0567e-192,
+    -1.68167e-194, -1.51841e-214, -3.98738e-234, -7.31851e-242, -2.21875e-253,
+    -1.11612e-293, -0.0,          0.0,           2.22507e-308,  1.06526e-307,
+    4.16643e-227,  6.76624e-223,  2.0432e-197,   3.16254e-184,  1.37315e-173,
+    2.88603e-172,  1.54155e-99,   4.42923e-81,   1.40539e-73,   5.4462e-73,
+    1.24064e-58,   3.11167e-58,   2.75826e-39,   0.143815,      58,
+    67,            601,           7941,          11644,         13697,
+    25680,         29882,         1.32165e+08,   1.62439e+08,   4.16837e+08,
+    9.59097e+08,   1.32491e+09,   1.8728e+09,    1.0672e+17,    2.69606e+46,
+    1.98285e+79,   1.0098e+82,    7.93064e+88,   3.67444e+121,  9.36506e+123,
+    7.27954e+162,  3.05316e+168,  1.16171e+175,  1.64771e+189,  1.1622e+202,
+    2.00748e+239,  2.51778e+244,  3.90282e+306,  1.79769e+308,  V8_INFINITY};
+
+
+static const int32_t kInt32Values[] = {
+    -2147483647 - 1, -2104508227, -2103151830, -1435284490, -1378926425,
+    -1318814539,     -1289388009, -1287537572, -1279026536, -1241605942,
+    -1226046939,     -941837148,  -779818051,  -413830641,  -245798087,
+    -184657557,      -127145950,  -105483328,  -32325,      -26653,
+    -23858,          -23834,      -22363,      -19858,      -19044,
+    -18744,          -15528,      -5309,       -3372,       -2093,
+    -104,            -98,         -97,         -93,         -84,
+    -80,             -78,         -76,         -72,         -58,
+    -57,             -56,         -55,         -45,         -40,
+    -34,             -32,         -25,         -24,         -5,
+    -2,              0,           3,           10,          24,
+    34,              42,          46,          47,          48,
+    52,              56,          64,          65,          71,
+    76,              79,          81,          82,          97,
+    102,             103,         104,         106,         107,
+    109,             116,         122,         3653,        4485,
+    12405,           16504,       26262,       28704,       29755,
+    30554,           16476817,    605431957,   832401070,   873617242,
+    914205764,       1062628108,  1087581664,  1488498068,  1534668023,
+    1661587028,      1696896187,  1866841746,  2032089723,  2147483647};
+
+
+static const uint32_t kUint32Values[] = {
+    0x0,        0x5,        0x8,        0xc,        0xd,        0x26,
+    0x28,       0x29,       0x30,       0x34,       0x3e,       0x42,
+    0x50,       0x5b,       0x63,       0x71,       0x77,       0x7c,
+    0x83,       0x88,       0x96,       0x9c,       0xa3,       0xfa,
+    0x7a7,      0x165d,     0x234d,     0x3acb,     0x43a5,     0x4573,
+    0x5b4f,     0x5f14,     0x6996,     0x6c6e,     0x7289,     0x7b9a,
+    0x7bc9,     0x86bb,     0xa839,     0xaa41,     0xb03b,     0xc942,
+    0xce68,     0xcf4c,     0xd3ad,     0xdea3,     0xe90c,     0xed86,
+    0xfba5,     0x172dcc6,  0x114d8fc1, 0x182d6c9d, 0x1b1e3fad, 0x1db033bf,
+    0x1e1de755, 0x1f625c80, 0x28f6cf00, 0x2acb6a94, 0x2c20240e, 0x2f0fe54e,
+    0x31863a7c, 0x33325474, 0x3532fae3, 0x3bab82ea, 0x4c4b83a2, 0x4cd93d1e,
+    0x4f7331d4, 0x5491b09b, 0x57cc6ff9, 0x60d3b4dc, 0x653f5904, 0x690ae256,
+    0x69fe3276, 0x6bebf0ba, 0x6e2c69a3, 0x73b84ff7, 0x7b3a1924, 0x7ed032d9,
+    0x84dd734b, 0x8552ea53, 0x8680754f, 0x8e9660eb, 0x94fe2b9c, 0x972d30cf,
+    0x9b98c482, 0xb158667e, 0xb432932c, 0xb5b70989, 0xb669971a, 0xb7c359d1,
+    0xbeb15c0d, 0xc171c53d, 0xc743dd38, 0xc8e2af50, 0xc98e2df0, 0xd9d1cdf9,
+    0xdcc91049, 0xe46f396d, 0xee991950, 0xef64e521, 0xf7aeefc9, 0xffffffff};
+
+
+MATCHER(IsNaN, std::string(negation ? "isn't" : "is") + " NaN") {
+  return std::isnan(arg);
+}
+
+}  // namespace
+
+
+// -----------------------------------------------------------------------------
+// Unary operators
+
+
+namespace {
+
+struct UnaryOperator {
+  const Operator* (SimplifiedOperatorBuilder::*constructor)();
+  const char* constructor_name;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const UnaryOperator& unop) {
+  return os << unop.constructor_name;
+}
+
+
+static const UnaryOperator kUnaryOperators[] = {
+    {&SimplifiedOperatorBuilder::BooleanNot, "BooleanNot"},
+    {&SimplifiedOperatorBuilder::ChangeBitToBool, "ChangeBitToBool"},
+    {&SimplifiedOperatorBuilder::ChangeBoolToBit, "ChangeBoolToBit"},
+    {&SimplifiedOperatorBuilder::ChangeFloat64ToTagged,
+     "ChangeFloat64ToTagged"},
+    {&SimplifiedOperatorBuilder::ChangeInt32ToTagged, "ChangeInt32ToTagged"},
+    {&SimplifiedOperatorBuilder::ChangeTaggedToFloat64,
+     "ChangeTaggedToFloat64"},
+    {&SimplifiedOperatorBuilder::ChangeTaggedToInt32, "ChangeTaggedToInt32"},
+    {&SimplifiedOperatorBuilder::ChangeTaggedToUint32, "ChangeTaggedToUint32"},
+    {&SimplifiedOperatorBuilder::ChangeUint32ToTagged, "ChangeUint32ToTagged"}};
+
+}  // namespace
+
+
+typedef SimplifiedOperatorReducerTestWithParam<UnaryOperator>
+    SimplifiedUnaryOperatorTest;
+
+
+TEST_P(SimplifiedUnaryOperatorTest, Parameter) {
+  const UnaryOperator& unop = GetParam();
+  Reduction reduction = Reduce(
+      graph()->NewNode((simplified()->*unop.constructor)(), Parameter(0)));
+  EXPECT_FALSE(reduction.Changed());
+}
+
+
+INSTANTIATE_TEST_CASE_P(SimplifiedOperatorReducerTest,
+                        SimplifiedUnaryOperatorTest,
+                        ::testing::ValuesIn(kUnaryOperators));
+
+
+// -----------------------------------------------------------------------------
+// BooleanNot
+
+
+TEST_F(SimplifiedOperatorReducerTest, BooleanNotWithBooleanNot) {
+  Node* param0 = Parameter(0);
+  Reduction reduction = Reduce(
+      graph()->NewNode(simplified()->BooleanNot(),
+                       graph()->NewNode(simplified()->BooleanNot(), param0)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_EQ(param0, reduction.replacement());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, BooleanNotWithFalseConstant) {
+  Reduction reduction0 =
+      Reduce(graph()->NewNode(simplified()->BooleanNot(), FalseConstant()));
+  ASSERT_TRUE(reduction0.Changed());
+  EXPECT_THAT(reduction0.replacement(), IsTrueConstant());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, BooleanNotWithTrueConstant) {
+  Reduction reduction1 =
+      Reduce(graph()->NewNode(simplified()->BooleanNot(), TrueConstant()));
+  ASSERT_TRUE(reduction1.Changed());
+  EXPECT_THAT(reduction1.replacement(), IsFalseConstant());
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeBoolToBit
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBitToBoolWithChangeBoolToBit) {
+  Node* param0 = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      simplified()->ChangeBitToBool(),
+      graph()->NewNode(simplified()->ChangeBoolToBit(), param0)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_EQ(param0, reduction.replacement());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBitToBoolWithZeroConstant) {
+  Reduction reduction = Reduce(
+      graph()->NewNode(simplified()->ChangeBitToBool(), Int32Constant(0)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsFalseConstant());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBitToBoolWithOneConstant) {
+  Reduction reduction = Reduce(
+      graph()->NewNode(simplified()->ChangeBitToBool(), Int32Constant(1)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsTrueConstant());
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeBoolToBit
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBoolToBitWithFalseConstant) {
+  Reduction reduction = Reduce(
+      graph()->NewNode(simplified()->ChangeBoolToBit(), FalseConstant()));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBoolToBitWithTrueConstant) {
+  Reduction reduction =
+      Reduce(graph()->NewNode(simplified()->ChangeBoolToBit(), TrueConstant()));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsInt32Constant(1));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBoolToBitWithChangeBitToBool) {
+  Node* param0 = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      simplified()->ChangeBoolToBit(),
+      graph()->NewNode(simplified()->ChangeBitToBool(), param0)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_EQ(param0, reduction.replacement());
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeFloat64ToTagged
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeFloat64ToTaggedWithConstant) {
+  TRACED_FOREACH(double, n, kFloat64Values) {
+    Reduction reduction = Reduce(graph()->NewNode(
+        simplified()->ChangeFloat64ToTagged(), Float64Constant(n)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsNumberConstant(n));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeInt32ToTagged
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeInt32ToTaggedWithConstant) {
+  TRACED_FOREACH(int32_t, n, kInt32Values) {
+    Reduction reduction = Reduce(graph()->NewNode(
+        simplified()->ChangeInt32ToTagged(), Int32Constant(n)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsNumberConstant(FastI2D(n)));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeTaggedToFloat64
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+       ChangeTaggedToFloat64WithChangeFloat64ToTagged) {
+  Node* param0 = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      simplified()->ChangeTaggedToFloat64(),
+      graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_EQ(param0, reduction.replacement());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+       ChangeTaggedToFloat64WithChangeInt32ToTagged) {
+  Node* param0 = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      simplified()->ChangeTaggedToFloat64(),
+      graph()->NewNode(simplified()->ChangeInt32ToTagged(), param0)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsChangeInt32ToFloat64(param0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+       ChangeTaggedToFloat64WithChangeUint32ToTagged) {
+  Node* param0 = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      simplified()->ChangeTaggedToFloat64(),
+      graph()->NewNode(simplified()->ChangeUint32ToTagged(), param0)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsChangeUint32ToFloat64(param0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithConstant) {
+  TRACED_FOREACH(double, n, kFloat64Values) {
+    Reduction reduction = Reduce(graph()->NewNode(
+        simplified()->ChangeTaggedToFloat64(), NumberConstant(n)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsFloat64Constant(n));
+  }
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithNaNConstant1) {
+  Reduction reduction =
+      Reduce(graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
+                              NumberConstant(-base::OS::nan_value())));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsFloat64Constant(IsNaN()));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithNaNConstant2) {
+  Reduction reduction =
+      Reduce(graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
+                              NumberConstant(base::OS::nan_value())));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsFloat64Constant(IsNaN()));
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeTaggedToInt32
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+       ChangeTaggedToInt32WithChangeFloat64ToTagged) {
+  Node* param0 = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      simplified()->ChangeTaggedToInt32(),
+      graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToInt32(param0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+       ChangeTaggedToInt32WithChangeInt32ToTagged) {
+  Node* param0 = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      simplified()->ChangeTaggedToInt32(),
+      graph()->NewNode(simplified()->ChangeInt32ToTagged(), param0)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_EQ(param0, reduction.replacement());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithConstant) {
+  TRACED_FOREACH(double, n, kFloat64Values) {
+    Reduction reduction = Reduce(graph()->NewNode(
+        simplified()->ChangeTaggedToInt32(), NumberConstant(n)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsInt32Constant(DoubleToInt32(n)));
+  }
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithNaNConstant1) {
+  Reduction reduction =
+      Reduce(graph()->NewNode(simplified()->ChangeTaggedToInt32(),
+                              NumberConstant(-base::OS::nan_value())));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithNaNConstant2) {
+  Reduction reduction =
+      Reduce(graph()->NewNode(simplified()->ChangeTaggedToInt32(),
+                              NumberConstant(base::OS::nan_value())));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeTaggedToUint32
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+       ChangeTaggedToUint32WithChangeFloat64ToTagged) {
+  Node* param0 = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      simplified()->ChangeTaggedToUint32(),
+      graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToUint32(param0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+       ChangeTaggedToUint32WithChangeUint32ToTagged) {
+  Node* param0 = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      simplified()->ChangeTaggedToUint32(),
+      graph()->NewNode(simplified()->ChangeUint32ToTagged(), param0)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_EQ(param0, reduction.replacement());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithConstant) {
+  TRACED_FOREACH(double, n, kFloat64Values) {
+    Reduction reduction = Reduce(graph()->NewNode(
+        simplified()->ChangeTaggedToUint32(), NumberConstant(n)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(),
+                IsInt32Constant(bit_cast<int32_t>(DoubleToUint32(n))));
+  }
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithNaNConstant1) {
+  Reduction reduction =
+      Reduce(graph()->NewNode(simplified()->ChangeTaggedToUint32(),
+                              NumberConstant(-base::OS::nan_value())));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithNaNConstant2) {
+  Reduction reduction =
+      Reduce(graph()->NewNode(simplified()->ChangeTaggedToUint32(),
+                              NumberConstant(base::OS::nan_value())));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeUint32ToTagged
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeUint32ToTagged) {
+  TRACED_FOREACH(uint32_t, n, kUint32Values) {
+    Reduction reduction =
+        Reduce(graph()->NewNode(simplified()->ChangeUint32ToTagged(),
+                                Int32Constant(bit_cast<int32_t>(n))));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsNumberConstant(FastUI2D(n)));
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/simplified-operator-reducer.cc b/src/compiler/simplified-operator-reducer.cc
new file mode 100644
index 0000000..f6181ea
--- /dev/null
+++ b/src/compiler/simplified-operator-reducer.cc
@@ -0,0 +1,147 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/simplified-operator-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+SimplifiedOperatorReducer::~SimplifiedOperatorReducer() {}
+
+
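+// Performs constant folding and simple strength reduction on simplified
+// operators, e.g. BooleanNot(BooleanNot(x)) => x and
+// ChangeBitToBool(ChangeBoolToBit(x)) => x.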
+Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
+  switch (node->opcode()) {
+    case IrOpcode::kBooleanNot: {
+      HeapObjectMatcher<HeapObject> m(node->InputAt(0));
+      if (m.Is(Unique<HeapObject>::CreateImmovable(factory()->false_value()))) {
+        return Replace(jsgraph()->TrueConstant());
+      }
+      if (m.Is(Unique<HeapObject>::CreateImmovable(factory()->true_value()))) {
+        return Replace(jsgraph()->FalseConstant());
+      }
+      if (m.IsBooleanNot()) return Replace(m.node()->InputAt(0));
+      break;
+    }
+    case IrOpcode::kChangeBitToBool: {
+      Int32Matcher m(node->InputAt(0));
+      if (m.Is(0)) return Replace(jsgraph()->FalseConstant());
+      if (m.Is(1)) return Replace(jsgraph()->TrueConstant());
+      if (m.IsChangeBoolToBit()) return Replace(m.node()->InputAt(0));
+      break;
+    }
+    case IrOpcode::kChangeBoolToBit: {
+      HeapObjectMatcher<HeapObject> m(node->InputAt(0));
+      if (m.Is(Unique<HeapObject>::CreateImmovable(factory()->false_value()))) {
+        return ReplaceInt32(0);
+      }
+      if (m.Is(Unique<HeapObject>::CreateImmovable(factory()->true_value()))) {
+        return ReplaceInt32(1);
+      }
+      if (m.IsChangeBitToBool()) return Replace(m.node()->InputAt(0));
+      break;
+    }
+    case IrOpcode::kChangeFloat64ToTagged: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceNumber(m.Value());
+      break;
+    }
+    case IrOpcode::kChangeInt32ToTagged: {
+      Int32Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceNumber(m.Value());
+      break;
+    }
+    case IrOpcode::kChangeTaggedToFloat64: {
+      NumberMatcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(m.Value());
+      if (m.IsChangeFloat64ToTagged()) return Replace(m.node()->InputAt(0));
+      if (m.IsChangeInt32ToTagged()) {
+        return Change(node, machine()->ChangeInt32ToFloat64(),
+                      m.node()->InputAt(0));
+      }
+      if (m.IsChangeUint32ToTagged()) {
+        return Change(node, machine()->ChangeUint32ToFloat64(),
+                      m.node()->InputAt(0));
+      }
+      break;
+    }
+    case IrOpcode::kChangeTaggedToInt32: {
+      NumberMatcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
+      if (m.IsChangeFloat64ToTagged()) {
+        return Change(node, machine()->ChangeFloat64ToInt32(),
+                      m.node()->InputAt(0));
+      }
+      if (m.IsChangeInt32ToTagged()) return Replace(m.node()->InputAt(0));
+      break;
+    }
+    case IrOpcode::kChangeTaggedToUint32: {
+      NumberMatcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceUint32(DoubleToUint32(m.Value()));
+      if (m.IsChangeFloat64ToTagged()) {
+        return Change(node, machine()->ChangeFloat64ToUint32(),
+                      m.node()->InputAt(0));
+      }
+      if (m.IsChangeUint32ToTagged()) return Replace(m.node()->InputAt(0));
+      break;
+    }
+    case IrOpcode::kChangeUint32ToTagged: {
+      Uint32Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceNumber(FastUI2D(m.Value()));
+      break;
+    }
+    default:
+      break;
+  }
+  return NoChange();
+}
+
+
+Reduction SimplifiedOperatorReducer::Change(Node* node, const Operator* op,
+                                            Node* a) {
+  node->set_op(op);
+  node->ReplaceInput(0, a);
+  return Changed(node);
+}
+
+
+Reduction SimplifiedOperatorReducer::ReplaceFloat64(double value) {
+  return Replace(jsgraph()->Float64Constant(value));
+}
+
+
+Reduction SimplifiedOperatorReducer::ReplaceInt32(int32_t value) {
+  return Replace(jsgraph()->Int32Constant(value));
+}
+
+
+Reduction SimplifiedOperatorReducer::ReplaceNumber(double value) {
+  return Replace(jsgraph()->Constant(value));
+}
+
+
+Reduction SimplifiedOperatorReducer::ReplaceNumber(int32_t value) {
+  return Replace(jsgraph()->Constant(value));
+}
+
+
+Graph* SimplifiedOperatorReducer::graph() const { return jsgraph()->graph(); }
+
+
+Factory* SimplifiedOperatorReducer::factory() const {
+  return jsgraph()->isolate()->factory();
+}
+
+
+MachineOperatorBuilder* SimplifiedOperatorReducer::machine() const {
+  return jsgraph()->machine();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/simplified-operator-reducer.h b/src/compiler/simplified-operator-reducer.h
new file mode 100644
index 0000000..32f49ad
--- /dev/null
+++ b/src/compiler/simplified-operator-reducer.h
@@ -0,0 +1,53 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SIMPLIFIED_OPERATOR_REDUCER_H_
+#define V8_COMPILER_SIMPLIFIED_OPERATOR_REDUCER_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class Heap;
+
+namespace compiler {
+
+// Forward declarations.
+class JSGraph;
+class MachineOperatorBuilder;
+
+class SimplifiedOperatorReducer FINAL : public Reducer {
+ public:
+  explicit SimplifiedOperatorReducer(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
+  virtual ~SimplifiedOperatorReducer();
+
+  virtual Reduction Reduce(Node* node) OVERRIDE;
+
+ private:
+  Reduction Change(Node* node, const Operator* op, Node* a);
+  Reduction ReplaceFloat64(double value);
+  Reduction ReplaceInt32(int32_t value);
+  Reduction ReplaceUint32(uint32_t value) {
+    return ReplaceInt32(bit_cast<int32_t>(value));
+  }
+  Reduction ReplaceNumber(double value);
+  Reduction ReplaceNumber(int32_t value);
+
+  Graph* graph() const;
+  Factory* factory() const;
+  JSGraph* jsgraph() const { return jsgraph_; }
+  MachineOperatorBuilder* machine() const;
+
+  JSGraph* jsgraph_;
+
+  DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorReducer);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_SIMPLIFIED_OPERATOR_REDUCER_H_
diff --git a/src/compiler/simplified-operator-unittest.cc b/src/compiler/simplified-operator-unittest.cc
new file mode 100644
index 0000000..4014f24
--- /dev/null
+++ b/src/compiler/simplified-operator-unittest.cc
@@ -0,0 +1,222 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/simplified-operator.h"
+
+#include "src/compiler/operator-properties-inl.h"
+#include "src/test/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// TODO(bmeurer): Drop once we use std::ostream instead of our OStream.
+inline std::ostream& operator<<(std::ostream& os, const ElementAccess& access) {
+  OStringStream ost;
+  ost << access;
+  return os << ost.c_str();
+}
+
+
+// -----------------------------------------------------------------------------
+// Pure operators.
+
+
+namespace {
+
+struct PureOperator {
+  const Operator* (SimplifiedOperatorBuilder::*constructor)();
+  IrOpcode::Value opcode;
+  Operator::Properties properties;
+  int value_input_count;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const PureOperator& pop) {
+  return os << IrOpcode::Mnemonic(pop.opcode);
+}
+
+
+const PureOperator kPureOperators[] = {
+#define PURE(Name, properties, input_count)              \
+  {                                                      \
+    &SimplifiedOperatorBuilder::Name, IrOpcode::k##Name, \
+        Operator::kPure | properties, input_count        \
+  }
+    PURE(BooleanNot, Operator::kNoProperties, 1),
+    PURE(NumberEqual, Operator::kCommutative, 2),
+    PURE(NumberLessThan, Operator::kNoProperties, 2),
+    PURE(NumberLessThanOrEqual, Operator::kNoProperties, 2),
+    PURE(NumberAdd, Operator::kCommutative, 2),
+    PURE(NumberSubtract, Operator::kNoProperties, 2),
+    PURE(NumberMultiply, Operator::kCommutative, 2),
+    PURE(NumberDivide, Operator::kNoProperties, 2),
+    PURE(NumberModulus, Operator::kNoProperties, 2),
+    PURE(NumberToInt32, Operator::kNoProperties, 1),
+    PURE(NumberToUint32, Operator::kNoProperties, 1),
+    PURE(StringEqual, Operator::kCommutative, 2),
+    PURE(StringLessThan, Operator::kNoProperties, 2),
+    PURE(StringLessThanOrEqual, Operator::kNoProperties, 2),
+    PURE(StringAdd, Operator::kNoProperties, 2),
+    PURE(ChangeTaggedToInt32, Operator::kNoProperties, 1),
+    PURE(ChangeTaggedToUint32, Operator::kNoProperties, 1),
+    PURE(ChangeTaggedToFloat64, Operator::kNoProperties, 1),
+    PURE(ChangeInt32ToTagged, Operator::kNoProperties, 1),
+    PURE(ChangeUint32ToTagged, Operator::kNoProperties, 1),
+    PURE(ChangeFloat64ToTagged, Operator::kNoProperties, 1),
+    PURE(ChangeBoolToBit, Operator::kNoProperties, 1),
+    PURE(ChangeBitToBool, Operator::kNoProperties, 1)
+#undef PURE
+};
+
+}  // namespace
+
+
+class SimplifiedPureOperatorTest
+    : public TestWithZone,
+      public ::testing::WithParamInterface<PureOperator> {};
+
+
+TEST_P(SimplifiedPureOperatorTest, InstancesAreGloballyShared) {
+  const PureOperator& pop = GetParam();
+  SimplifiedOperatorBuilder simplified1(zone());
+  SimplifiedOperatorBuilder simplified2(zone());
+  EXPECT_EQ((simplified1.*pop.constructor)(), (simplified2.*pop.constructor)());
+}
+
+
+TEST_P(SimplifiedPureOperatorTest, NumberOfInputsAndOutputs) {
+  SimplifiedOperatorBuilder simplified(zone());
+  const PureOperator& pop = GetParam();
+  const Operator* op = (simplified.*pop.constructor)();
+
+  EXPECT_EQ(pop.value_input_count, OperatorProperties::GetValueInputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetEffectInputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetControlInputCount(op));
+  EXPECT_EQ(pop.value_input_count, OperatorProperties::GetTotalInputCount(op));
+
+  EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+}
+
+
+TEST_P(SimplifiedPureOperatorTest, OpcodeIsCorrect) {
+  SimplifiedOperatorBuilder simplified(zone());
+  const PureOperator& pop = GetParam();
+  const Operator* op = (simplified.*pop.constructor)();
+  EXPECT_EQ(pop.opcode, op->opcode());
+}
+
+
+TEST_P(SimplifiedPureOperatorTest, Properties) {
+  SimplifiedOperatorBuilder simplified(zone());
+  const PureOperator& pop = GetParam();
+  const Operator* op = (simplified.*pop.constructor)();
+  EXPECT_EQ(pop.properties, op->properties() & pop.properties);
+}
+
+INSTANTIATE_TEST_CASE_P(SimplifiedOperatorTest, SimplifiedPureOperatorTest,
+                        ::testing::ValuesIn(kPureOperators));
+
+
+// -----------------------------------------------------------------------------
+// Element access operators.
+
+namespace {
+
+const ElementAccess kElementAccesses[] = {
+    {kTaggedBase, FixedArray::kHeaderSize, Type::Any(), kMachAnyTagged},
+    {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
+     kMachInt8},
+    {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
+     kMachInt16},
+    {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
+     kMachInt32},
+    {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
+     kMachUint8},
+    {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
+     kMachUint16},
+    {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
+     kMachUint32},
+    {kUntaggedBase, 0, Type::Signed32(), kMachInt8},
+    {kUntaggedBase, 0, Type::Unsigned32(), kMachUint8},
+    {kUntaggedBase, 0, Type::Signed32(), kMachInt16},
+    {kUntaggedBase, 0, Type::Unsigned32(), kMachUint16},
+    {kUntaggedBase, 0, Type::Signed32(), kMachInt32},
+    {kUntaggedBase, 0, Type::Unsigned32(), kMachUint32},
+    {kUntaggedBase, 0, Type::Number(), kRepFloat32},
+    {kUntaggedBase, 0, Type::Number(), kRepFloat64},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
+     kMachInt8},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
+     kMachUint8},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
+     kMachInt16},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
+     kMachUint16},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
+     kMachInt32},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
+     kMachUint32},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
+     kRepFloat32},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
+     kRepFloat64}};
+
+}  // namespace
+
+
+class SimplifiedElementAccessOperatorTest
+    : public TestWithZone,
+      public ::testing::WithParamInterface<ElementAccess> {};
+
+
+TEST_P(SimplifiedElementAccessOperatorTest, LoadElement) {
+  SimplifiedOperatorBuilder simplified(zone());
+  const ElementAccess& access = GetParam();
+  const Operator* op = simplified.LoadElement(access);
+
+  EXPECT_EQ(IrOpcode::kLoadElement, op->opcode());
+  EXPECT_EQ(Operator::kNoThrow | Operator::kNoWrite, op->properties());
+  EXPECT_EQ(access, ElementAccessOf(op));
+
+  EXPECT_EQ(3, OperatorProperties::GetValueInputCount(op));
+  EXPECT_EQ(1, OperatorProperties::GetEffectInputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetControlInputCount(op));
+  EXPECT_EQ(4, OperatorProperties::GetTotalInputCount(op));
+
+  EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
+  EXPECT_EQ(1, OperatorProperties::GetEffectOutputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+}
+
+
+TEST_P(SimplifiedElementAccessOperatorTest, StoreElement) {
+  SimplifiedOperatorBuilder simplified(zone());
+  const ElementAccess& access = GetParam();
+  const Operator* op = simplified.StoreElement(access);
+
+  EXPECT_EQ(IrOpcode::kStoreElement, op->opcode());
+  EXPECT_EQ(Operator::kNoRead | Operator::kNoThrow, op->properties());
+  EXPECT_EQ(access, ElementAccessOf(op));
+
+  EXPECT_EQ(4, OperatorProperties::GetValueInputCount(op));
+  EXPECT_EQ(1, OperatorProperties::GetEffectInputCount(op));
+  EXPECT_EQ(1, OperatorProperties::GetControlInputCount(op));
+  EXPECT_EQ(6, OperatorProperties::GetTotalInputCount(op));
+
+  EXPECT_EQ(0, OperatorProperties::GetValueOutputCount(op));
+  EXPECT_EQ(1, OperatorProperties::GetEffectOutputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+}
+
+
+INSTANTIATE_TEST_CASE_P(SimplifiedOperatorTest,
+                        SimplifiedElementAccessOperatorTest,
+                        ::testing::ValuesIn(kElementAccesses));
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/simplified-operator.cc b/src/compiler/simplified-operator.cc
new file mode 100644
index 0000000..642ffc7
--- /dev/null
+++ b/src/compiler/simplified-operator.cc
@@ -0,0 +1,178 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/simplified-operator.h"
+
+#include "src/base/lazy-instance.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/types-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+OStream& operator<<(OStream& os, BaseTaggedness base_taggedness) {
+  switch (base_taggedness) {
+    case kUntaggedBase:
+      return os << "untagged base";
+    case kTaggedBase:
+      return os << "tagged base";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+bool operator==(ElementAccess const& lhs, ElementAccess const& rhs) {
+  return lhs.base_is_tagged == rhs.base_is_tagged &&
+         lhs.header_size == rhs.header_size && lhs.type == rhs.type &&
+         lhs.machine_type == rhs.machine_type;
+}
+
+
+bool operator!=(ElementAccess const& lhs, ElementAccess const& rhs) {
+  return !(lhs == rhs);
+}
+
+
+OStream& operator<<(OStream& os, ElementAccess const& access) {
+  os << "[" << access.base_is_tagged << ", " << access.header_size << ", ";
+  access.type->PrintTo(os);
+  os << ", " << access.machine_type << "]";
+  return os;
+}
+
+
+const FieldAccess& FieldAccessOf(const Operator* op) {
+  DCHECK_NOT_NULL(op);
+  DCHECK(op->opcode() == IrOpcode::kLoadField ||
+         op->opcode() == IrOpcode::kStoreField);
+  return OpParameter<FieldAccess>(op);
+}
+
+
+const ElementAccess& ElementAccessOf(const Operator* op) {
+  DCHECK_NOT_NULL(op);
+  DCHECK(op->opcode() == IrOpcode::kLoadElement ||
+         op->opcode() == IrOpcode::kStoreElement);
+  return OpParameter<ElementAccess>(op);
+}
+
+
+// Specialization for static parameters of type {FieldAccess}.
+template <>
+struct StaticParameterTraits<FieldAccess> {
+  static OStream& PrintTo(OStream& os, const FieldAccess& val) {
+    return os << val.offset;
+  }
+  static int HashCode(const FieldAccess& val) {
+    return (val.offset << 16) | (val.machine_type & 0xffff);
+  }
+  static bool Equals(const FieldAccess& lhs, const FieldAccess& rhs) {
+    return lhs.base_is_tagged == rhs.base_is_tagged &&
+           lhs.offset == rhs.offset && lhs.machine_type == rhs.machine_type &&
+           lhs.type->Is(rhs.type);
+  }
+};
+
+
+// Specialization for static parameters of type {ElementAccess}.
+template <>
+struct StaticParameterTraits<ElementAccess> {
+  static OStream& PrintTo(OStream& os, const ElementAccess& access) {
+    return os << access;
+  }
+  static int HashCode(const ElementAccess& access) {
+    return (access.header_size << 16) | (access.machine_type & 0xffff);
+  }
+  static bool Equals(const ElementAccess& lhs, const ElementAccess& rhs) {
+    return lhs.base_is_tagged == rhs.base_is_tagged &&
+           lhs.header_size == rhs.header_size &&
+           lhs.machine_type == rhs.machine_type && lhs.type->Is(rhs.type);
+  }
+};
+
+
+#define PURE_OP_LIST(V)                                \
+  V(BooleanNot, Operator::kNoProperties, 1)            \
+  V(BooleanToNumber, Operator::kNoProperties, 1)       \
+  V(NumberEqual, Operator::kCommutative, 2)            \
+  V(NumberLessThan, Operator::kNoProperties, 2)        \
+  V(NumberLessThanOrEqual, Operator::kNoProperties, 2) \
+  V(NumberAdd, Operator::kCommutative, 2)              \
+  V(NumberSubtract, Operator::kNoProperties, 2)        \
+  V(NumberMultiply, Operator::kCommutative, 2)         \
+  V(NumberDivide, Operator::kNoProperties, 2)          \
+  V(NumberModulus, Operator::kNoProperties, 2)         \
+  V(NumberToInt32, Operator::kNoProperties, 1)         \
+  V(NumberToUint32, Operator::kNoProperties, 1)        \
+  V(StringEqual, Operator::kCommutative, 2)            \
+  V(StringLessThan, Operator::kNoProperties, 2)        \
+  V(StringLessThanOrEqual, Operator::kNoProperties, 2) \
+  V(StringAdd, Operator::kNoProperties, 2)             \
+  V(ChangeTaggedToInt32, Operator::kNoProperties, 1)   \
+  V(ChangeTaggedToUint32, Operator::kNoProperties, 1)  \
+  V(ChangeTaggedToFloat64, Operator::kNoProperties, 1) \
+  V(ChangeInt32ToTagged, Operator::kNoProperties, 1)   \
+  V(ChangeUint32ToTagged, Operator::kNoProperties, 1)  \
+  V(ChangeFloat64ToTagged, Operator::kNoProperties, 1) \
+  V(ChangeBoolToBit, Operator::kNoProperties, 1)       \
+  V(ChangeBitToBool, Operator::kNoProperties, 1)
+
+
+#define ACCESS_OP_LIST(V)                                 \
+  V(LoadField, FieldAccess, Operator::kNoWrite, 1, 1)     \
+  V(StoreField, FieldAccess, Operator::kNoRead, 2, 0)     \
+  V(LoadElement, ElementAccess, Operator::kNoWrite, 3, 1) \
+  V(StoreElement, ElementAccess, Operator::kNoRead, 4, 0)
+
+
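+// Pure operators take no parameters, so a single instance of each is created
+// lazily and shared by all builders (see the InstancesAreGloballyShared test).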
+struct SimplifiedOperatorBuilderImpl FINAL {
+#define PURE(Name, properties, input_count)                               \
+  struct Name##Operator FINAL : public SimpleOperator {                   \
+    Name##Operator()                                                      \
+        : SimpleOperator(IrOpcode::k##Name, Operator::kPure | properties, \
+                         input_count, 1, #Name) {}                        \
+  };                                                                      \
+  Name##Operator k##Name;
+  PURE_OP_LIST(PURE)
+#undef PURE
+};
+
+
+static base::LazyInstance<SimplifiedOperatorBuilderImpl>::type kImpl =
+    LAZY_INSTANCE_INITIALIZER;
+
+
+SimplifiedOperatorBuilder::SimplifiedOperatorBuilder(Zone* zone)
+    : impl_(kImpl.Get()), zone_(zone) {}
+
+
+#define PURE(Name, properties, input_count) \
+  const Operator* SimplifiedOperatorBuilder::Name() { return &impl_.k##Name; }
+PURE_OP_LIST(PURE)
+#undef PURE
+
+
+const Operator* SimplifiedOperatorBuilder::ReferenceEqual(Type* type) {
+  // TODO(titzer): What about the type parameter?
+  return new (zone()) SimpleOperator(IrOpcode::kReferenceEqual,
+                                     Operator::kCommutative | Operator::kPure,
+                                     2, 1, "ReferenceEqual");
+}
+
+
+#define ACCESS(Name, Type, properties, input_count, output_count)           \
+  const Operator* SimplifiedOperatorBuilder::Name(const Type& access) {     \
+    return new (zone())                                                     \
+        Operator1<Type>(IrOpcode::k##Name, Operator::kNoThrow | properties, \
+                        input_count, output_count, #Name, access);          \
+  }
+ACCESS_OP_LIST(ACCESS)
+#undef ACCESS
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/simplified-operator.h b/src/compiler/simplified-operator.h
new file mode 100644
index 0000000..32f0e8b
--- /dev/null
+++ b/src/compiler/simplified-operator.h
@@ -0,0 +1,152 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SIMPLIFIED_OPERATOR_H_
+#define V8_COMPILER_SIMPLIFIED_OPERATOR_H_
+
+#include "src/compiler/machine-type.h"
+#include "src/handles.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+template <class>
+class TypeImpl;
+struct ZoneTypeConfig;
+typedef TypeImpl<ZoneTypeConfig> Type;
+class Zone;
+
+
+namespace compiler {
+
+// Forward declarations.
+class Operator;
+struct SimplifiedOperatorBuilderImpl;
+
+
+enum BaseTaggedness { kUntaggedBase, kTaggedBase };
+
+OStream& operator<<(OStream&, BaseTaggedness);
+
+// An access descriptor for loads/stores of fixed structures like field
+// accesses of heap objects. Accesses from either tagged or untagged base
+// pointers are supported; untagging is done automatically during lowering.
+struct FieldAccess {
+  BaseTaggedness base_is_tagged;  // specifies if the base pointer is tagged.
+  int offset;                     // offset of the field, without tag.
+  Handle<Name> name;              // debugging only.
+  Type* type;                     // type of the field.
+  MachineType machine_type;       // machine type of the field.
+
+  int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
+};
+
+
+// An access descriptor for loads/stores of indexed structures like characters
+// in strings or off-heap backing stores. Accesses from either tagged or
+// untagged base pointers are supported; untagging is done automatically during
+// lowering.
+struct ElementAccess {
+  BaseTaggedness base_is_tagged;  // specifies if the base pointer is tagged.
+  int header_size;                // size of the header, without tag.
+  Type* type;                     // type of the element.
+  MachineType machine_type;       // machine type of the element.
+
+  int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
+};
+
+bool operator==(ElementAccess const& lhs, ElementAccess const& rhs);
+bool operator!=(ElementAccess const& lhs, ElementAccess const& rhs);
+
+OStream& operator<<(OStream&, ElementAccess const&);
+
+
+// If the accessed object is not a heap object, add this to the header_size.
+static const int kNonHeapObjectHeaderSize = kHeapObjectTag;
+
+
+const FieldAccess& FieldAccessOf(const Operator* op) WARN_UNUSED_RESULT;
+const ElementAccess& ElementAccessOf(const Operator* op) WARN_UNUSED_RESULT;
+
+
+// Interface for building simplified operators, which represent the
+// medium-level operations of V8, including adding numbers, allocating objects,
+// indexing into objects and arrays, etc.
+// All operators are typed but many are representation independent.
+
+// Number values from JS can be in one of these representations:
+//   - Tagged: word-sized integer that is either
+//     - a signed small integer (31 or 32 bits plus a tag)
+//     - a tagged pointer to a HeapNumber object that has a float64 field
+//   - Int32: an untagged signed 32-bit integer
+//   - Uint32: an untagged unsigned 32-bit integer
+//   - Float64: an untagged float64
+
+// Additional representations for intermediate code or non-JS code:
+//   - Int64: an untagged signed 64-bit integer
+//   - Uint64: an untagged unsigned 64-bit integer
+//   - Float32: an untagged float32
+
+// Boolean values can be:
+//   - Bool: a tagged pointer to either the canonical JS #false or
+//           the canonical JS #true object
+//   - Bit: an untagged integer 0 or 1, but word-sized
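+//
+// A typical use when building graphs (sketch; names are illustrative):
+//   SimplifiedOperatorBuilder simplified(zone);
+//   Node* sum = graph->NewNode(simplified.NumberAdd(), left, right);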
+class SimplifiedOperatorBuilder FINAL {
+ public:
+  explicit SimplifiedOperatorBuilder(Zone* zone);
+
+  const Operator* BooleanNot();
+  const Operator* BooleanToNumber();
+
+  const Operator* NumberEqual();
+  const Operator* NumberLessThan();
+  const Operator* NumberLessThanOrEqual();
+  const Operator* NumberAdd();
+  const Operator* NumberSubtract();
+  const Operator* NumberMultiply();
+  const Operator* NumberDivide();
+  const Operator* NumberModulus();
+  const Operator* NumberToInt32();
+  const Operator* NumberToUint32();
+
+  const Operator* ReferenceEqual(Type* type);
+
+  const Operator* StringEqual();
+  const Operator* StringLessThan();
+  const Operator* StringLessThanOrEqual();
+  const Operator* StringAdd();
+
+  const Operator* ChangeTaggedToInt32();
+  const Operator* ChangeTaggedToUint32();
+  const Operator* ChangeTaggedToFloat64();
+  const Operator* ChangeInt32ToTagged();
+  const Operator* ChangeUint32ToTagged();
+  const Operator* ChangeFloat64ToTagged();
+  const Operator* ChangeBoolToBit();
+  const Operator* ChangeBitToBool();
+
+  const Operator* LoadField(const FieldAccess&);
+  const Operator* StoreField(const FieldAccess&);
+
+  // load-element [base + index], length
+  const Operator* LoadElement(ElementAccess const&);
+
+  // store-element [base + index], length, value
+  const Operator* StoreElement(ElementAccess const&);
+
+ private:
+  Zone* zone() const { return zone_; }
+
+  const SimplifiedOperatorBuilderImpl& impl_;
+  Zone* const zone_;
+
+  DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorBuilder);
+};
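+
+// Illustrative usage sketch (not part of the original header): operators from
+// this builder are attached to nodes via Graph::NewNode, in the same way the
+// unit tests in this change construct nodes. The variable names below are
+// placeholders.
+//
+//   SimplifiedOperatorBuilder simplified(graph->zone());
+//   Node* sum = graph->NewNode(simplified.NumberAdd(), left, right);
+//   Node* cond = graph->NewNode(simplified.ChangeBitToBool(), bit);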
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_SIMPLIFIED_OPERATOR_H_
diff --git a/src/compiler/source-position.cc b/src/compiler/source-position.cc
new file mode 100644
index 0000000..1178390
--- /dev/null
+++ b/src/compiler/source-position.cc
@@ -0,0 +1,55 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/source-position.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/node-aux-data-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class SourcePositionTable::Decorator : public GraphDecorator {
+ public:
+  explicit Decorator(SourcePositionTable* source_positions)
+      : source_positions_(source_positions) {}
+
+  virtual void Decorate(Node* node) {
+    DCHECK(!source_positions_->current_position_.IsInvalid());
+    source_positions_->table_.Set(node, source_positions_->current_position_);
+  }
+
+ private:
+  SourcePositionTable* source_positions_;
+};
+
+
+SourcePositionTable::SourcePositionTable(Graph* graph)
+    : graph_(graph),
+      decorator_(NULL),
+      current_position_(SourcePosition::Invalid()),
+      table_(graph->zone()) {}
+
+
+void SourcePositionTable::AddDecorator() {
+  DCHECK(decorator_ == NULL);
+  decorator_ = new (graph_->zone()) Decorator(this);
+  graph_->AddDecorator(decorator_);
+}
+
+
+void SourcePositionTable::RemoveDecorator() {
+  DCHECK(decorator_ != NULL);
+  graph_->RemoveDecorator(decorator_);
+  decorator_ = NULL;
+}
+
+
+SourcePosition SourcePositionTable::GetSourcePosition(Node* node) {
+  return table_.Get(node);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/source-position.h b/src/compiler/source-position.h
new file mode 100644
index 0000000..778f067
--- /dev/null
+++ b/src/compiler/source-position.h
@@ -0,0 +1,99 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SOURCE_POSITION_H_
+#define V8_COMPILER_SOURCE_POSITION_H_
+
+#include "src/assembler.h"
+#include "src/compiler/node-aux-data.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Encapsulates encoding and decoding of source positions from which Nodes
+// originated.
+class SourcePosition FINAL {
+ public:
+  explicit SourcePosition(int raw = kUnknownPosition) : raw_(raw) {}
+
+  static SourcePosition Unknown() { return SourcePosition(kUnknownPosition); }
+  bool IsUnknown() const { return raw() == kUnknownPosition; }
+
+  static SourcePosition Invalid() { return SourcePosition(kInvalidPosition); }
+  bool IsInvalid() const { return raw() == kInvalidPosition; }
+
+  int raw() const { return raw_; }
+
+ private:
+  static const int kInvalidPosition = -2;
+  static const int kUnknownPosition = RelocInfo::kNoPosition;
+  STATIC_ASSERT(kInvalidPosition != kUnknownPosition);
+  int raw_;
+};
+
+
+inline bool operator==(const SourcePosition& lhs, const SourcePosition& rhs) {
+  return lhs.raw() == rhs.raw();
+}
+
+inline bool operator!=(const SourcePosition& lhs, const SourcePosition& rhs) {
+  return !(lhs == rhs);
+}
+
+
+class SourcePositionTable FINAL {
+ public:
+  class Scope {
+   public:
+    Scope(SourcePositionTable* source_positions, SourcePosition position)
+        : source_positions_(source_positions),
+          prev_position_(source_positions->current_position_) {
+      Init(position);
+    }
+    Scope(SourcePositionTable* source_positions, Node* node)
+        : source_positions_(source_positions),
+          prev_position_(source_positions->current_position_) {
+      Init(source_positions_->GetSourcePosition(node));
+    }
+    ~Scope() { source_positions_->current_position_ = prev_position_; }
+
+   private:
+    void Init(SourcePosition position) {
+      if (!position.IsUnknown() || prev_position_.IsInvalid()) {
+        source_positions_->current_position_ = position;
+      }
+    }
+
+    SourcePositionTable* source_positions_;
+    SourcePosition prev_position_;
+    DISALLOW_COPY_AND_ASSIGN(Scope);
+  };
+
+  explicit SourcePositionTable(Graph* graph);
+  ~SourcePositionTable() {
+    if (decorator_ != NULL) RemoveDecorator();
+  }
+
+  void AddDecorator();
+  void RemoveDecorator();
+
+  SourcePosition GetSourcePosition(Node* node);
+
+ private:
+  class Decorator;
+
+  Graph* graph_;
+  Decorator* decorator_;
+  SourcePosition current_position_;
+  NodeAuxData<SourcePosition> table_;
+
+  DISALLOW_COPY_AND_ASSIGN(SourcePositionTable);
+};
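+
+// Illustrative usage sketch (not part of the original header): a graph builder
+// would typically add the decorator once and then scope node creation, e.g.
+//
+//   SourcePositionTable table(graph);
+//   table.AddDecorator();
+//   {
+//     SourcePositionTable::Scope scope(&table, SourcePosition(raw_position));
+//     // ... nodes created here are tagged with raw_position ...
+//   }
+//
+// `raw_position` is a placeholder for an actual script position.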
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_SOURCE_POSITION_H_
diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc
new file mode 100644
index 0000000..bfecdef
--- /dev/null
+++ b/src/compiler/typer.cc
@@ -0,0 +1,904 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/typer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Typer::Typer(Zone* zone) : zone_(zone) {
+  Type* number = Type::Number(zone);
+  Type* signed32 = Type::Signed32(zone);
+  Type* unsigned32 = Type::Unsigned32(zone);
+  Type* integral32 = Type::Integral32(zone);
+  Type* object = Type::Object(zone);
+  Type* undefined = Type::Undefined(zone);
+  number_fun0_ = Type::Function(number, zone);
+  number_fun1_ = Type::Function(number, number, zone);
+  number_fun2_ = Type::Function(number, number, number, zone);
+  imul_fun_ = Type::Function(signed32, integral32, integral32, zone);
+
+#define NATIVE_TYPE(sem, rep) \
+  Type::Intersect(Type::sem(zone), Type::rep(zone), zone)
+  // TODO(rossberg): Use range types for more precision, once we have them.
+  Type* int8 = NATIVE_TYPE(SignedSmall, UntaggedInt8);
+  Type* int16 = NATIVE_TYPE(SignedSmall, UntaggedInt16);
+  Type* int32 = NATIVE_TYPE(Signed32, UntaggedInt32);
+  Type* uint8 = NATIVE_TYPE(UnsignedSmall, UntaggedInt8);
+  Type* uint16 = NATIVE_TYPE(UnsignedSmall, UntaggedInt16);
+  Type* uint32 = NATIVE_TYPE(Unsigned32, UntaggedInt32);
+  Type* float32 = NATIVE_TYPE(Number, UntaggedFloat32);
+  Type* float64 = NATIVE_TYPE(Number, UntaggedFloat64);
+#undef NATIVE_TYPE
+  Type* buffer = Type::Buffer(zone);
+  Type* int8_array = Type::Array(int8, zone);
+  Type* int16_array = Type::Array(int16, zone);
+  Type* int32_array = Type::Array(int32, zone);
+  Type* uint8_array = Type::Array(uint8, zone);
+  Type* uint16_array = Type::Array(uint16, zone);
+  Type* uint32_array = Type::Array(uint32, zone);
+  Type* float32_array = Type::Array(float32, zone);
+  Type* float64_array = Type::Array(float64, zone);
+  Type* arg1 = Type::Union(unsigned32, object, zone);
+  Type* arg2 = Type::Union(unsigned32, undefined, zone);
+  Type* arg3 = arg2;
+  array_buffer_fun_ = Type::Function(buffer, unsigned32, zone);
+  int8_array_fun_ = Type::Function(int8_array, arg1, arg2, arg3, zone);
+  int16_array_fun_ = Type::Function(int16_array, arg1, arg2, arg3, zone);
+  int32_array_fun_ = Type::Function(int32_array, arg1, arg2, arg3, zone);
+  uint8_array_fun_ = Type::Function(uint8_array, arg1, arg2, arg3, zone);
+  uint16_array_fun_ = Type::Function(uint16_array, arg1, arg2, arg3, zone);
+  uint32_array_fun_ = Type::Function(uint32_array, arg1, arg2, arg3, zone);
+  float32_array_fun_ = Type::Function(float32_array, arg1, arg2, arg3, zone);
+  float64_array_fun_ = Type::Function(float64_array, arg1, arg2, arg3, zone);
+}
+
+
+class Typer::Visitor : public NullNodeVisitor {
+ public:
+  Visitor(Typer* typer, MaybeHandle<Context> context)
+      : typer_(typer), context_(context) {}
+
+  Bounds TypeNode(Node* node) {
+    switch (node->opcode()) {
+#define DECLARE_CASE(x) case IrOpcode::k##x: return Type##x(node);
+      DECLARE_CASE(Start)
+      VALUE_OP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+
+#define DECLARE_CASE(x) case IrOpcode::k##x:
+      DECLARE_CASE(End)
+      INNER_CONTROL_OP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+      break;
+    }
+    UNREACHABLE();
+    return Bounds();
+  }
+
+  Type* TypeConstant(Handle<Object> value);
+
+ protected:
+#define DECLARE_METHOD(x) inline Bounds Type##x(Node* node);
+  DECLARE_METHOD(Start)
+  VALUE_OP_LIST(DECLARE_METHOD)
+#undef DECLARE_METHOD
+
+  Bounds OperandType(Node* node, int i) {
+    return NodeProperties::GetBounds(NodeProperties::GetValueInput(node, i));
+  }
+
+  Type* ContextType(Node* node) {
+    Bounds result =
+        NodeProperties::GetBounds(NodeProperties::GetContextInput(node));
+    DCHECK(result.upper->Maybe(Type::Internal()));
+    // TODO(rossberg): More precisely, instead of the above assertion, we should
+    // back-propagate the constraint that it has to be a subtype of Internal.
+    return result.upper;
+  }
+
+  Zone* zone() { return typer_->zone(); }
+  Isolate* isolate() { return typer_->isolate(); }
+  MaybeHandle<Context> context() { return context_; }
+
+ private:
+  Typer* typer_;
+  MaybeHandle<Context> context_;
+};
+
+
+class Typer::RunVisitor : public Typer::Visitor {
+ public:
+  RunVisitor(Typer* typer, MaybeHandle<Context> context)
+      : Visitor(typer, context),
+        redo(NodeSet::key_compare(), NodeSet::allocator_type(typer->zone())) {}
+
+  GenericGraphVisit::Control Post(Node* node) {
+    if (OperatorProperties::HasValueOutput(node->op())) {
+      Bounds bounds = TypeNode(node);
+      NodeProperties::SetBounds(node, bounds);
+      // Remember incompletely typed nodes for least fixpoint iteration.
+      int arity = OperatorProperties::GetValueInputCount(node->op());
+      for (int i = 0; i < arity; ++i) {
+        // TODO(rossberg): change once IsTyped is available.
+        // if (!NodeProperties::IsTyped(NodeProperties::GetValueInput(node, i)))
+        if (OperandType(node, i).upper->Is(Type::None())) {
+          redo.insert(node);
+          break;
+        }
+      }
+    }
+    return GenericGraphVisit::CONTINUE;
+  }
+
+  NodeSet redo;
+};
+
+
+class Typer::NarrowVisitor : public Typer::Visitor {
+ public:
+  NarrowVisitor(Typer* typer, MaybeHandle<Context> context)
+      : Visitor(typer, context) {}
+
+  GenericGraphVisit::Control Pre(Node* node) {
+    if (OperatorProperties::HasValueOutput(node->op())) {
+      Bounds previous = NodeProperties::GetBounds(node);
+      Bounds bounds = TypeNode(node);
+      NodeProperties::SetBounds(node, Bounds::Both(bounds, previous, zone()));
+      DCHECK(bounds.Narrows(previous));
+      // Stop when nothing changed (but allow re-entry in case it does later).
+      return previous.Narrows(bounds)
+          ? GenericGraphVisit::DEFER : GenericGraphVisit::REENTER;
+    } else {
+      return GenericGraphVisit::SKIP;
+    }
+  }
+
+  GenericGraphVisit::Control Post(Node* node) {
+    return GenericGraphVisit::REENTER;
+  }
+};
+
+
+class Typer::WidenVisitor : public Typer::Visitor {
+ public:
+  WidenVisitor(Typer* typer, MaybeHandle<Context> context)
+      : Visitor(typer, context) {}
+
+  GenericGraphVisit::Control Pre(Node* node) {
+    if (OperatorProperties::HasValueOutput(node->op())) {
+      Bounds previous = NodeProperties::GetBounds(node);
+      Bounds bounds = TypeNode(node);
+      DCHECK(previous.lower->Is(bounds.lower));
+      DCHECK(previous.upper->Is(bounds.upper));
+      NodeProperties::SetBounds(node, bounds);  // TODO(rossberg): Either?
+      // Stop when nothing changed (but allow re-entry in case it does later).
+      return bounds.Narrows(previous)
+          ? GenericGraphVisit::DEFER : GenericGraphVisit::REENTER;
+    } else {
+      return GenericGraphVisit::SKIP;
+    }
+  }
+
+  GenericGraphVisit::Control Post(Node* node) {
+    return GenericGraphVisit::REENTER;
+  }
+};
+
+
+void Typer::Run(Graph* graph, MaybeHandle<Context> context) {
+  RunVisitor typing(this, context);
+  graph->VisitNodeInputsFromEnd(&typing);
+  // Find least fixpoint.
+  for (NodeSetIter i = typing.redo.begin(); i != typing.redo.end(); ++i) {
+    Widen(graph, *i, context);
+  }
+}
+
+
+void Typer::Narrow(Graph* graph, Node* start, MaybeHandle<Context> context) {
+  NarrowVisitor typing(this, context);
+  graph->VisitNodeUsesFrom(start, &typing);
+}
+
+
+void Typer::Widen(Graph* graph, Node* start, MaybeHandle<Context> context) {
+  WidenVisitor typing(this, context);
+  graph->VisitNodeUsesFrom(start, &typing);
+}
+
+
+void Typer::Init(Node* node) {
+  if (OperatorProperties::HasValueOutput(node->op())) {
+    Visitor typing(this, MaybeHandle<Context>());
+    Bounds bounds = typing.TypeNode(node);
+    NodeProperties::SetBounds(node, bounds);
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+
+
+// Control operators.
+
+Bounds Typer::Visitor::TypeStart(Node* node) {
+  return Bounds(Type::Internal(zone()));
+}
+
+
+// Common operators.
+
+Bounds Typer::Visitor::TypeParameter(Node* node) {
+  return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeInt32Constant(Node* node) {
+  // TODO(titzer): only call Type::Of() if the type is not already known.
+  return Bounds(Type::Of(OpParameter<int32_t>(node), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeInt64Constant(Node* node) {
+  // TODO(titzer): only call Type::Of() if the type is not already known.
+  return Bounds(
+      Type::Of(static_cast<double>(OpParameter<int64_t>(node)), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeFloat32Constant(Node* node) {
+  // TODO(titzer): only call Type::Of() if the type is not already known.
+  return Bounds(Type::Of(OpParameter<float>(node), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeFloat64Constant(Node* node) {
+  // TODO(titzer): only call Type::Of() if the type is not already known.
+  return Bounds(Type::Of(OpParameter<double>(node), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberConstant(Node* node) {
+  // TODO(titzer): only call Type::Of() if the type is not already known.
+  return Bounds(Type::Of(OpParameter<double>(node), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeHeapConstant(Node* node) {
+  return Bounds(TypeConstant(OpParameter<Unique<Object> >(node).handle()));
+}
+
+
+Bounds Typer::Visitor::TypeExternalConstant(Node* node) {
+  return Bounds(Type::Internal(zone()));
+}
+
+
+Bounds Typer::Visitor::TypePhi(Node* node) {
+  int arity = OperatorProperties::GetValueInputCount(node->op());
+  Bounds bounds = OperandType(node, 0);
+  for (int i = 1; i < arity; ++i) {
+    bounds = Bounds::Either(bounds, OperandType(node, i), zone());
+  }
+  return bounds;
+}
+
+
+Bounds Typer::Visitor::TypeEffectPhi(Node* node) {
+  UNREACHABLE();
+  return Bounds();
+}
+
+
+Bounds Typer::Visitor::TypeControlEffect(Node* node) {
+  UNREACHABLE();
+  return Bounds();
+}
+
+
+Bounds Typer::Visitor::TypeValueEffect(Node* node) {
+  UNREACHABLE();
+  return Bounds();
+}
+
+
+Bounds Typer::Visitor::TypeFinish(Node* node) {
+  return OperandType(node, 0);
+}
+
+
+Bounds Typer::Visitor::TypeFrameState(Node* node) {
+  // TODO(rossberg): Ideally FrameState wouldn't have a value output.
+  return Bounds(Type::Internal(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeStateValues(Node* node) {
+  return Bounds(Type::Internal(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeCall(Node* node) {
+  return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeProjection(Node* node) {
+  // TODO(titzer): use the output type of the input to determine the bounds.
+  return Bounds::Unbounded(zone());
+}
+
+
+// JS comparison operators.
+
+#define DEFINE_METHOD(x)                       \
+  Bounds Typer::Visitor::Type##x(Node* node) { \
+    return Bounds(Type::Boolean(zone()));      \
+  }
+JS_COMPARE_BINOP_LIST(DEFINE_METHOD)
+#undef DEFINE_METHOD
+
+
+// JS bitwise operators.
+
+Bounds Typer::Visitor::TypeJSBitwiseOr(Node* node) {
+  Bounds left = OperandType(node, 0);
+  Bounds right = OperandType(node, 1);
+  Type* upper = Type::Union(left.upper, right.upper, zone());
+  if (!upper->Is(Type::Signed32())) upper = Type::Signed32(zone());
+  Type* lower = Type::Intersect(Type::SignedSmall(zone()), upper, zone());
+  return Bounds(lower, upper);
+}
+
+
+Bounds Typer::Visitor::TypeJSBitwiseAnd(Node* node) {
+  Bounds left = OperandType(node, 0);
+  Bounds right = OperandType(node, 1);
+  Type* upper = Type::Union(left.upper, right.upper, zone());
+  if (!upper->Is(Type::Signed32())) upper = Type::Signed32(zone());
+  Type* lower = Type::Intersect(Type::SignedSmall(zone()), upper, zone());
+  return Bounds(lower, upper);
+}
+
+
+Bounds Typer::Visitor::TypeJSBitwiseXor(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Signed32(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSShiftLeft(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Signed32(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSShiftRight(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Signed32(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSShiftRightLogical(Node* node) {
+  return Bounds(Type::UnsignedSmall(zone()), Type::Unsigned32(zone()));
+}
+
+
+// JS arithmetic operators.
+
+Bounds Typer::Visitor::TypeJSAdd(Node* node) {
+  Bounds left = OperandType(node, 0);
+  Bounds right = OperandType(node, 1);
+  Type* lower =
+      left.lower->Is(Type::None()) || right.lower->Is(Type::None()) ?
+          Type::None(zone()) :
+      left.lower->Is(Type::Number()) && right.lower->Is(Type::Number()) ?
+          Type::SignedSmall(zone()) :
+      left.lower->Is(Type::String()) || right.lower->Is(Type::String()) ?
+          Type::String(zone()) : Type::None(zone());
+  Type* upper =
+      left.upper->Is(Type::None()) && right.upper->Is(Type::None()) ?
+          Type::None(zone()) :
+      left.upper->Is(Type::Number()) && right.upper->Is(Type::Number()) ?
+          Type::Number(zone()) :
+      left.upper->Is(Type::String()) || right.upper->Is(Type::String()) ?
+          Type::String(zone()) : Type::NumberOrString(zone());
+  return Bounds(lower, upper);
+}
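+
+// Worked example (not part of the original source): if both operands are known
+// numbers, e.g. the constants 1 and 2, the rules above give the bounds
+// [SignedSmall, Number]; if nothing is known about either operand, the best
+// the typer can say is [None, NumberOrString], since JS '+' produces either a
+// number or a string.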
+
+
+Bounds Typer::Visitor::TypeJSSubtract(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSMultiply(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSDivide(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSModulus(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+}
+
+
+// JS unary operators.
+
+Bounds Typer::Visitor::TypeJSUnaryNot(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSTypeOf(Node* node) {
+  return Bounds(Type::InternalizedString(zone()));
+}
+
+
+// JS conversion operators.
+
+Bounds Typer::Visitor::TypeJSToBoolean(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSToNumber(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSToString(Node* node) {
+  return Bounds(Type::None(zone()), Type::String(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSToName(Node* node) {
+  return Bounds(Type::None(zone()), Type::Name(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSToObject(Node* node) {
+  return Bounds(Type::None(zone()), Type::Receiver(zone()));
+}
+
+
+// JS object operators.
+
+Bounds Typer::Visitor::TypeJSCreate(Node* node) {
+  return Bounds(Type::None(zone()), Type::Object(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSLoadProperty(Node* node) {
+  Bounds object = OperandType(node, 0);
+  Bounds name = OperandType(node, 1);
+  Bounds result = Bounds::Unbounded(zone());
+  // TODO(rossberg): Use range types and sized array types to filter undefined.
+  if (object.lower->IsArray() && name.lower->Is(Type::Integral32())) {
+    result.lower = Type::Union(
+        object.lower->AsArray()->Element(), Type::Undefined(zone()), zone());
+  }
+  if (object.upper->IsArray() && name.upper->Is(Type::Integral32())) {
+    result.upper = Type::Union(
+        object.upper->AsArray()->Element(), Type::Undefined(zone()), zone());
+  }
+  return result;
+}
+
+
+Bounds Typer::Visitor::TypeJSLoadNamed(Node* node) {
+  return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeJSStoreProperty(Node* node) {
+  UNREACHABLE();
+  return Bounds();
+}
+
+
+Bounds Typer::Visitor::TypeJSStoreNamed(Node* node) {
+  UNREACHABLE();
+  return Bounds();
+}
+
+
+Bounds Typer::Visitor::TypeJSDeleteProperty(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSHasProperty(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSInstanceOf(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+// JS context operators.
+
+Bounds Typer::Visitor::TypeJSLoadContext(Node* node) {
+  Bounds outer = OperandType(node, 0);
+  DCHECK(outer.upper->Maybe(Type::Internal()));
+  // TODO(rossberg): More precisely, instead of the above assertion, we should
+  // back-propagate the constraint that it has to be a subtype of Internal.
+
+  ContextAccess access = OpParameter<ContextAccess>(node);
+  Type* context_type = outer.upper;
+  MaybeHandle<Context> context;
+  if (context_type->IsConstant()) {
+    context = Handle<Context>::cast(context_type->AsConstant()->Value());
+  }
+  // Walk context chain (as far as known), mirroring dynamic lookup.
+  // Since contexts are mutable, the information is only useful as a lower
+  // bound.
+  // TODO(rossberg): Could use scope info to fix upper bounds for constant
+  // bindings if we know that this code is never shared.
+  for (int i = access.depth(); i > 0; --i) {
+    if (context_type->IsContext()) {
+      context_type = context_type->AsContext()->Outer();
+      if (context_type->IsConstant()) {
+        context = Handle<Context>::cast(context_type->AsConstant()->Value());
+      }
+    } else if (!context.is_null()) {
+      context = handle(context.ToHandleChecked()->previous(), isolate());
+    }
+  }
+  if (context.is_null()) {
+    return Bounds::Unbounded(zone());
+  } else {
+    Handle<Object> value =
+        handle(context.ToHandleChecked()->get(access.index()), isolate());
+    Type* lower = TypeConstant(value);
+    return Bounds(lower, Type::Any(zone()));
+  }
+}
+
+
+Bounds Typer::Visitor::TypeJSStoreContext(Node* node) {
+  UNREACHABLE();
+  return Bounds();
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateFunctionContext(Node* node) {
+  Type* outer = ContextType(node);
+  return Bounds(Type::Context(outer, zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateCatchContext(Node* node) {
+  Type* outer = ContextType(node);
+  return Bounds(Type::Context(outer, zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateWithContext(Node* node) {
+  Type* outer = ContextType(node);
+  return Bounds(Type::Context(outer, zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateBlockContext(Node* node) {
+  Type* outer = ContextType(node);
+  return Bounds(Type::Context(outer, zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateModuleContext(Node* node) {
+  // TODO(rossberg): this is probably incorrect
+  Type* outer = ContextType(node);
+  return Bounds(Type::Context(outer, zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateGlobalContext(Node* node) {
+  Type* outer = ContextType(node);
+  return Bounds(Type::Context(outer, zone()));
+}
+
+
+// JS other operators.
+
+Bounds Typer::Visitor::TypeJSYield(Node* node) {
+  return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeJSCallConstruct(Node* node) {
+  return Bounds(Type::None(zone()), Type::Receiver(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCallFunction(Node* node) {
+  Bounds fun = OperandType(node, 0);
+  Type* lower = fun.lower->IsFunction()
+      ? fun.lower->AsFunction()->Result() : Type::None(zone());
+  Type* upper = fun.upper->IsFunction()
+      ? fun.upper->AsFunction()->Result() : Type::Any(zone());
+  return Bounds(lower, upper);
+}
+
+
+Bounds Typer::Visitor::TypeJSCallRuntime(Node* node) {
+  return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeJSDebugger(Node* node) {
+  return Bounds::Unbounded(zone());
+}
+
+
+// Simplified operators.
+
+Bounds Typer::Visitor::TypeBooleanNot(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeBooleanToNumber(Node* node) {
+  return Bounds(Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberEqual(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberLessThan(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberLessThanOrEqual(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberAdd(Node* node) {
+  return Bounds(Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberSubtract(Node* node) {
+  return Bounds(Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberMultiply(Node* node) {
+  return Bounds(Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberDivide(Node* node) {
+  return Bounds(Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberModulus(Node* node) {
+  return Bounds(Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberToInt32(Node* node) {
+  Bounds arg = OperandType(node, 0);
+  Type* s32 = Type::Signed32(zone());
+  Type* lower = arg.lower->Is(s32) ? arg.lower : s32;
+  Type* upper = arg.upper->Is(s32) ? arg.upper : s32;
+  return Bounds(lower, upper);
+}
+
+
+Bounds Typer::Visitor::TypeNumberToUint32(Node* node) {
+  Bounds arg = OperandType(node, 0);
+  Type* u32 = Type::Unsigned32(zone());
+  Type* lower = arg.lower->Is(u32) ? arg.lower : u32;
+  Type* upper = arg.upper->Is(u32) ? arg.upper : u32;
+  return Bounds(lower, upper);
+}
+
+
+Bounds Typer::Visitor::TypeReferenceEqual(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeStringEqual(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeStringLessThan(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeStringLessThanOrEqual(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeStringAdd(Node* node) {
+  return Bounds(Type::String(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeChangeTaggedToInt32(Node* node) {
+  // TODO(titzer): type is type of input, representation is Word32.
+  return Bounds(Type::Integral32());
+}
+
+
+Bounds Typer::Visitor::TypeChangeTaggedToUint32(Node* node) {
+  return Bounds(Type::Integral32());  // TODO(titzer): add appropriate rep
+}
+
+
+Bounds Typer::Visitor::TypeChangeTaggedToFloat64(Node* node) {
+  // TODO(titzer): type is type of input, representation is Float64.
+  return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeChangeInt32ToTagged(Node* node) {
+  // TODO(titzer): type is type of input, representation is Tagged.
+  return Bounds(Type::Integral32());
+}
+
+
+Bounds Typer::Visitor::TypeChangeUint32ToTagged(Node* node) {
+  // TODO(titzer): type is type of input, representation is Tagged.
+  return Bounds(Type::Unsigned32());
+}
+
+
+Bounds Typer::Visitor::TypeChangeFloat64ToTagged(Node* node) {
+  // TODO(titzer): type is type of input, representation is Tagged.
+  return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeChangeBoolToBit(Node* node) {
+  // TODO(titzer): type is type of input, representation is Bit.
+  return Bounds(Type::Boolean());
+}
+
+
+Bounds Typer::Visitor::TypeChangeBitToBool(Node* node) {
+  // TODO(titzer): type is type of input, representation is Tagged.
+  return Bounds(Type::Boolean());
+}
+
+
+Bounds Typer::Visitor::TypeLoadField(Node* node) {
+  return Bounds(FieldAccessOf(node->op()).type);
+}
+
+
+Bounds Typer::Visitor::TypeLoadElement(Node* node) {
+  return Bounds(ElementAccessOf(node->op()).type);
+}
+
+
+Bounds Typer::Visitor::TypeStoreField(Node* node) {
+  UNREACHABLE();
+  return Bounds();
+}
+
+
+Bounds Typer::Visitor::TypeStoreElement(Node* node) {
+  UNREACHABLE();
+  return Bounds();
+}
+
+
+// Machine operators.
+
+// TODO(rossberg): implement
+#define DEFINE_METHOD(x) \
+    Bounds Typer::Visitor::Type##x(Node* node) { return Bounds(Type::None()); }
+MACHINE_OP_LIST(DEFINE_METHOD)
+#undef DEFINE_METHOD
+
+
+// Heap constants.
+
+Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
+  if (value->IsJSFunction() && JSFunction::cast(*value)->IsBuiltin() &&
+      !context().is_null()) {
+    Handle<Context> native =
+        handle(context().ToHandleChecked()->native_context(), isolate());
+    if (*value == native->math_abs_fun()) {
+      return typer_->number_fun1_;  // TODO(rossberg): can't express overloading
+    } else if (*value == native->math_acos_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_asin_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_atan_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_atan2_fun()) {
+      return typer_->number_fun2_;
+    } else if (*value == native->math_ceil_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_cos_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_exp_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_floor_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_imul_fun()) {
+      return typer_->imul_fun_;
+    } else if (*value == native->math_log_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_pow_fun()) {
+      return typer_->number_fun2_;
+    } else if (*value == native->math_random_fun()) {
+      return typer_->number_fun0_;
+    } else if (*value == native->math_round_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_sin_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_sqrt_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_tan_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->array_buffer_fun()) {
+      return typer_->array_buffer_fun_;
+    } else if (*value == native->int8_array_fun()) {
+      return typer_->int8_array_fun_;
+    } else if (*value == native->int16_array_fun()) {
+      return typer_->int16_array_fun_;
+    } else if (*value == native->int32_array_fun()) {
+      return typer_->int32_array_fun_;
+    } else if (*value == native->uint8_array_fun()) {
+      return typer_->uint8_array_fun_;
+    } else if (*value == native->uint16_array_fun()) {
+      return typer_->uint16_array_fun_;
+    } else if (*value == native->uint32_array_fun()) {
+      return typer_->uint32_array_fun_;
+    } else if (*value == native->float32_array_fun()) {
+      return typer_->float32_array_fun_;
+    } else if (*value == native->float64_array_fun()) {
+      return typer_->float64_array_fun_;
+    }
+  }
+  return Type::Constant(value, zone());
+}
+
+
+namespace {
+
+class TyperDecorator : public GraphDecorator {
+ public:
+  explicit TyperDecorator(Typer* typer) : typer_(typer) {}
+  virtual void Decorate(Node* node) { typer_->Init(node); }
+
+ private:
+  Typer* typer_;
+};
+
+}  // namespace
+
+
+void Typer::DecorateGraph(Graph* graph) {
+  graph->AddDecorator(new (zone()) TyperDecorator(this));
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/typer.h b/src/compiler/typer.h
new file mode 100644
index 0000000..2957e4b
--- /dev/null
+++ b/src/compiler/typer.h
@@ -0,0 +1,57 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TYPER_H_
+#define V8_COMPILER_TYPER_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/graph.h"
+#include "src/compiler/opcodes.h"
+#include "src/types.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Typer {
+ public:
+  explicit Typer(Zone* zone);
+
+  void Init(Node* node);
+  void Run(Graph* graph, MaybeHandle<Context> context);
+  void Narrow(Graph* graph, Node* node, MaybeHandle<Context> context);
+  void Widen(Graph* graph, Node* node, MaybeHandle<Context> context);
+
+  void DecorateGraph(Graph* graph);
+
+  Zone* zone() { return zone_; }
+  Isolate* isolate() { return zone_->isolate(); }
+
+ private:
+  class Visitor;
+  class RunVisitor;
+  class NarrowVisitor;
+  class WidenVisitor;
+
+  Zone* zone_;
+  Type* number_fun0_;
+  Type* number_fun1_;
+  Type* number_fun2_;
+  Type* imul_fun_;
+  Type* array_buffer_fun_;
+  Type* int8_array_fun_;
+  Type* int16_array_fun_;
+  Type* int32_array_fun_;
+  Type* uint8_array_fun_;
+  Type* uint16_array_fun_;
+  Type* uint32_array_fun_;
+  Type* float32_array_fun_;
+  Type* float64_array_fun_;
+};
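+
+// Illustrative usage sketch (not part of the original header): a compilation
+// pipeline would typically either decorate the graph so nodes are typed
+// eagerly as they are created, or run a whole-graph pass:
+//
+//   Typer typer(zone);
+//   typer.DecorateGraph(graph);                // eager typing via decorator
+//   typer.Run(graph, MaybeHandle<Context>());  // full fixpoint over the graph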
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_TYPER_H_
diff --git a/src/compiler/value-numbering-reducer-unittest.cc b/src/compiler/value-numbering-reducer-unittest.cc
new file mode 100644
index 0000000..8db6458
--- /dev/null
+++ b/src/compiler/value-numbering-reducer-unittest.cc
@@ -0,0 +1,120 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/compiler/graph.h"
+#include "src/compiler/value-numbering-reducer.h"
+#include "src/test/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+const SimpleOperator kOp0(0, Operator::kNoProperties, 0, 1, "op0");
+const SimpleOperator kOp1(1, Operator::kNoProperties, 1, 1, "op1");
+
+}  // namespace
+
+
+class ValueNumberingReducerTest : public TestWithZone {
+ public:
+  ValueNumberingReducerTest() : graph_(zone()), reducer_(zone()) {}
+
+ protected:
+  Reduction Reduce(Node* node) { return reducer_.Reduce(node); }
+
+  Graph* graph() { return &graph_; }
+
+ private:
+  Graph graph_;
+  ValueNumberingReducer reducer_;
+};
+
+
+TEST_F(ValueNumberingReducerTest, AllInputsAreChecked) {
+  Node* na = graph()->NewNode(&kOp0);
+  Node* nb = graph()->NewNode(&kOp0);
+  Node* n1 = graph()->NewNode(&kOp0, na);
+  Node* n2 = graph()->NewNode(&kOp0, nb);
+  EXPECT_FALSE(Reduce(n1).Changed());
+  EXPECT_FALSE(Reduce(n2).Changed());
+}
+
+
+TEST_F(ValueNumberingReducerTest, DeadNodesAreNeverReturned) {
+  Node* n0 = graph()->NewNode(&kOp0);
+  Node* n1 = graph()->NewNode(&kOp1, n0);
+  EXPECT_FALSE(Reduce(n1).Changed());
+  n1->Kill();
+  EXPECT_FALSE(Reduce(graph()->NewNode(&kOp1, n0)).Changed());
+}
+
+
+TEST_F(ValueNumberingReducerTest, OperatorEqualityNotIdentity) {
+  static const size_t kMaxInputCount = 16;
+  Node* inputs[kMaxInputCount];
+  for (size_t i = 0; i < arraysize(inputs); ++i) {
+    Operator::Opcode opcode = static_cast<Operator::Opcode>(
+        std::numeric_limits<Operator::Opcode>::max() - i);
+    inputs[i] = graph()->NewNode(new (zone()) SimpleOperator(
+        opcode, Operator::kNoProperties, 0, 1, "Operator"));
+  }
+  TRACED_FORRANGE(size_t, input_count, 0, arraysize(inputs)) {
+    const SimpleOperator op1(static_cast<Operator::Opcode>(input_count),
+                             Operator::kNoProperties,
+                             static_cast<int>(input_count), 1, "op");
+    Node* n1 = graph()->NewNode(&op1, static_cast<int>(input_count), inputs);
+    Reduction r1 = Reduce(n1);
+    EXPECT_FALSE(r1.Changed());
+
+    const SimpleOperator op2(static_cast<Operator::Opcode>(input_count),
+                             Operator::kNoProperties,
+                             static_cast<int>(input_count), 1, "op");
+    Node* n2 = graph()->NewNode(&op2, static_cast<int>(input_count), inputs);
+    Reduction r2 = Reduce(n2);
+    EXPECT_TRUE(r2.Changed());
+    EXPECT_EQ(n1, r2.replacement());
+  }
+}
+
+
+TEST_F(ValueNumberingReducerTest, SubsequentReductionsYieldTheSameNode) {
+  static const size_t kMaxInputCount = 16;
+  Node* inputs[kMaxInputCount];
+  for (size_t i = 0; i < arraysize(inputs); ++i) {
+    Operator::Opcode opcode = static_cast<Operator::Opcode>(
+        std::numeric_limits<Operator::Opcode>::max() - i);
+    inputs[i] = graph()->NewNode(new (zone()) SimpleOperator(
+        opcode, Operator::kNoProperties, 0, 1, "Operator"));
+  }
+  TRACED_FORRANGE(size_t, input_count, 0, arraysize(inputs)) {
+    const SimpleOperator op1(1, Operator::kNoProperties,
+                             static_cast<int>(input_count), 1, "op1");
+    Node* n = graph()->NewNode(&op1, static_cast<int>(input_count), inputs);
+    Reduction r = Reduce(n);
+    EXPECT_FALSE(r.Changed());
+
+    r = Reduce(graph()->NewNode(&op1, static_cast<int>(input_count), inputs));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_EQ(n, r.replacement());
+
+    r = Reduce(graph()->NewNode(&op1, static_cast<int>(input_count), inputs));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_EQ(n, r.replacement());
+  }
+}
+
+
+TEST_F(ValueNumberingReducerTest, WontReplaceNodeWithItself) {
+  Node* n = graph()->NewNode(&kOp0);
+  EXPECT_FALSE(Reduce(n).Changed());
+  EXPECT_FALSE(Reduce(n).Changed());
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/value-numbering-reducer.cc b/src/compiler/value-numbering-reducer.cc
new file mode 100644
index 0000000..595a4f3
--- /dev/null
+++ b/src/compiler/value-numbering-reducer.cc
@@ -0,0 +1,74 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/value-numbering-reducer.h"
+
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+size_t HashCode(Node* node) { return node->op()->HashCode(); }
+
+
+bool Equals(Node* a, Node* b) {
+  DCHECK_NOT_NULL(a);
+  DCHECK_NOT_NULL(b);
+  DCHECK_NOT_NULL(a->op());
+  DCHECK_NOT_NULL(b->op());
+  if (!a->op()->Equals(b->op())) return false;
+  if (a->InputCount() != b->InputCount()) return false;
+  for (int j = 0; j < a->InputCount(); ++j) {
+    DCHECK_NOT_NULL(a->InputAt(j));
+    DCHECK_NOT_NULL(b->InputAt(j));
+    if (a->InputAt(j)->id() != b->InputAt(j)->id()) return false;
+  }
+  return true;
+}
+
+}  // namespace
+
+
+class ValueNumberingReducer::Entry FINAL : public ZoneObject {
+ public:
+  Entry(Node* node, Entry* next) : node_(node), next_(next) {}
+
+  Node* node() const { return node_; }
+  Entry* next() const { return next_; }
+
+ private:
+  Node* node_;
+  Entry* next_;
+};
+
+
+ValueNumberingReducer::ValueNumberingReducer(Zone* zone) : zone_(zone) {
+  for (size_t i = 0; i < arraysize(buckets_); ++i) {
+    buckets_[i] = NULL;
+  }
+}
+
+
+ValueNumberingReducer::~ValueNumberingReducer() {}
+
+
+Reduction ValueNumberingReducer::Reduce(Node* node) {
+  Entry** head = &buckets_[HashCode(node) % arraysize(buckets_)];
+  for (Entry* entry = *head; entry; entry = entry->next()) {
+    if (entry->node()->IsDead()) continue;
+    if (entry->node() == node) return NoChange();
+    if (Equals(node, entry->node())) {
+      return Replace(entry->node());
+    }
+  }
+  *head = new (zone()) Entry(node, *head);
+  return NoChange();
+}
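+
+// Illustrative example (not part of the original file): if a node
+// n1 = Int32Add(a, b) is already recorded in a bucket and a later node n2 with
+// an equal operator and the same inputs a and b is reduced, Equals() above
+// matches them and n2 is replaced by n1, so the graph keeps a single copy of
+// the computation. Int32Add is used here only as an example operator.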
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/value-numbering-reducer.h b/src/compiler/value-numbering-reducer.h
new file mode 100644
index 0000000..0d67e5d
--- /dev/null
+++ b/src/compiler/value-numbering-reducer.h
@@ -0,0 +1,36 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_VALUE_NUMBERING_REDUCER_H_
+#define V8_COMPILER_VALUE_NUMBERING_REDUCER_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class ValueNumberingReducer FINAL : public Reducer {
+ public:
+  explicit ValueNumberingReducer(Zone* zone);
+  ~ValueNumberingReducer();
+
+  virtual Reduction Reduce(Node* node) OVERRIDE;
+
+ private:
+  Zone* zone() const { return zone_; }
+
+  // TODO(turbofan): We currently use separate chaining with linked lists here,
+  // we may want to replace that with a more sophisticated data structure at
+  // some point in the future.
+  class Entry;
+  Entry* buckets_[117u];
+  Zone* zone_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_VALUE_NUMBERING_REDUCER_H_
diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc
new file mode 100644
index 0000000..23cec7a
--- /dev/null
+++ b/src/compiler/verifier.cc
@@ -0,0 +1,455 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/verifier.h"
+
+#include <deque>
+#include <queue>
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/schedule.h"
+#include "src/data-flow.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+static bool IsDefUseChainLinkPresent(Node* def, Node* use) {
+  Node::Uses uses = def->uses();
+  for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) {
+    if (*it == use) return true;
+  }
+  return false;
+}
+
+
+static bool IsUseDefChainLinkPresent(Node* def, Node* use) {
+  Node::Inputs inputs = use->inputs();
+  for (Node::Inputs::iterator it = inputs.begin(); it != inputs.end(); ++it) {
+    if (*it == def) return true;
+  }
+  return false;
+}
+
+
+class Verifier::Visitor : public NullNodeVisitor {
+ public:
+  explicit Visitor(Zone* zone)
+      : reached_from_start(NodeSet::key_compare(),
+                           NodeSet::allocator_type(zone)),
+        reached_from_end(NodeSet::key_compare(),
+                         NodeSet::allocator_type(zone)) {}
+
+  // Fulfills the PreNodeCallback interface.
+  GenericGraphVisit::Control Pre(Node* node);
+
+  bool from_start;
+  NodeSet reached_from_start;
+  NodeSet reached_from_end;
+};
+
+
+GenericGraphVisit::Control Verifier::Visitor::Pre(Node* node) {
+  int value_count = OperatorProperties::GetValueInputCount(node->op());
+  int context_count = OperatorProperties::GetContextInputCount(node->op());
+  int frame_state_count =
+      OperatorProperties::GetFrameStateInputCount(node->op());
+  int effect_count = OperatorProperties::GetEffectInputCount(node->op());
+  int control_count = OperatorProperties::GetControlInputCount(node->op());
+
+  // Verify number of inputs matches up.
+  int input_count = value_count + context_count + frame_state_count +
+                    effect_count + control_count;
+  CHECK_EQ(input_count, node->InputCount());
+
+  // Verify that frame state has been inserted for the nodes that need it.
+  if (OperatorProperties::HasFrameStateInput(node->op())) {
+    Node* frame_state = NodeProperties::GetFrameStateInput(node);
+    CHECK(frame_state->opcode() == IrOpcode::kFrameState ||
+          // kFrameState uses undefined as a sentinel.
+          (node->opcode() == IrOpcode::kFrameState &&
+           frame_state->opcode() == IrOpcode::kHeapConstant));
+    CHECK(IsDefUseChainLinkPresent(frame_state, node));
+    CHECK(IsUseDefChainLinkPresent(frame_state, node));
+  }
+
+  // Verify all value inputs actually produce a value.
+  for (int i = 0; i < value_count; ++i) {
+    Node* value = NodeProperties::GetValueInput(node, i);
+    CHECK(OperatorProperties::HasValueOutput(value->op()));
+    CHECK(IsDefUseChainLinkPresent(value, node));
+    CHECK(IsUseDefChainLinkPresent(value, node));
+  }
+
+  // Verify all context inputs are value nodes.
+  for (int i = 0; i < context_count; ++i) {
+    Node* context = NodeProperties::GetContextInput(node);
+    CHECK(OperatorProperties::HasValueOutput(context->op()));
+    CHECK(IsDefUseChainLinkPresent(context, node));
+    CHECK(IsUseDefChainLinkPresent(context, node));
+  }
+
+  // Verify all effect inputs actually have an effect.
+  for (int i = 0; i < effect_count; ++i) {
+    Node* effect = NodeProperties::GetEffectInput(node);
+    CHECK(OperatorProperties::HasEffectOutput(effect->op()));
+    CHECK(IsDefUseChainLinkPresent(effect, node));
+    CHECK(IsUseDefChainLinkPresent(effect, node));
+  }
+
+  // Verify all control inputs are control nodes.
+  for (int i = 0; i < control_count; ++i) {
+    Node* control = NodeProperties::GetControlInput(node, i);
+    CHECK(OperatorProperties::HasControlOutput(control->op()));
+    CHECK(IsDefUseChainLinkPresent(control, node));
+    CHECK(IsUseDefChainLinkPresent(control, node));
+  }
+
+  // Verify all successors are projections if multiple value outputs exist.
+  if (OperatorProperties::GetValueOutputCount(node->op()) > 1) {
+    Node::Uses uses = node->uses();
+    for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) {
+      CHECK(!NodeProperties::IsValueEdge(it.edge()) ||
+            (*it)->opcode() == IrOpcode::kProjection ||
+            (*it)->opcode() == IrOpcode::kParameter);
+    }
+  }
+
+  switch (node->opcode()) {
+    case IrOpcode::kStart:
+      // Start has no inputs.
+      CHECK_EQ(0, input_count);
+      break;
+    case IrOpcode::kEnd:
+      // End has no outputs.
+      CHECK(!OperatorProperties::HasValueOutput(node->op()));
+      CHECK(!OperatorProperties::HasEffectOutput(node->op()));
+      CHECK(!OperatorProperties::HasControlOutput(node->op()));
+      break;
+    case IrOpcode::kDead:
+      // Dead is never connected to the graph.
+      UNREACHABLE();
+    case IrOpcode::kBranch: {
+      // Branch uses are IfTrue and IfFalse.
+      Node::Uses uses = node->uses();
+      bool got_true = false, got_false = false;
+      for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) {
+        CHECK(((*it)->opcode() == IrOpcode::kIfTrue && !got_true) ||
+              ((*it)->opcode() == IrOpcode::kIfFalse && !got_false));
+        if ((*it)->opcode() == IrOpcode::kIfTrue) got_true = true;
+        if ((*it)->opcode() == IrOpcode::kIfFalse) got_false = true;
+      }
+      // TODO(rossberg): Currently fails for various tests.
+      // CHECK(got_true && got_false);
+      break;
+    }
+    case IrOpcode::kIfTrue:
+    case IrOpcode::kIfFalse:
+      CHECK_EQ(IrOpcode::kBranch,
+               NodeProperties::GetControlInput(node, 0)->opcode());
+      break;
+    case IrOpcode::kLoop:
+    case IrOpcode::kMerge:
+      break;
+    case IrOpcode::kReturn:
+      // TODO(rossberg): check successor is End
+      break;
+    case IrOpcode::kThrow:
+      // TODO(rossberg): what are the constraints on these?
+      break;
+    case IrOpcode::kParameter: {
+      // Parameters have the start node as inputs.
+      CHECK_EQ(1, input_count);
+      CHECK_EQ(IrOpcode::kStart,
+               NodeProperties::GetValueInput(node, 0)->opcode());
+      // Parameter has an input that produces enough values.
+      int index = OpParameter<int>(node);
+      Node* input = NodeProperties::GetValueInput(node, 0);
+      // Currently, parameter indices start at -1 instead of 0.
+      CHECK_GT(OperatorProperties::GetValueOutputCount(input->op()), index + 1);
+      break;
+    }
+    case IrOpcode::kInt32Constant:
+    case IrOpcode::kInt64Constant:
+    case IrOpcode::kFloat64Constant:
+    case IrOpcode::kExternalConstant:
+    case IrOpcode::kNumberConstant:
+    case IrOpcode::kHeapConstant:
+      // Constants have no inputs.
+      CHECK_EQ(0, input_count);
+      break;
+    case IrOpcode::kPhi: {
+      // Phi input count matches parent control node.
+      CHECK_EQ(1, control_count);
+      Node* control = NodeProperties::GetControlInput(node, 0);
+      CHECK_EQ(value_count,
+               OperatorProperties::GetControlInputCount(control->op()));
+      break;
+    }
+    case IrOpcode::kEffectPhi: {
+      // EffectPhi input count matches parent control node.
+      CHECK_EQ(1, control_count);
+      Node* control = NodeProperties::GetControlInput(node, 0);
+      CHECK_EQ(effect_count,
+               OperatorProperties::GetControlInputCount(control->op()));
+      break;
+    }
+    case IrOpcode::kFrameState:
+      // TODO(jarin): what are the constraints on these?
+      break;
+    case IrOpcode::kCall:
+      // TODO(rossberg): what are the constraints on these?
+      break;
+    case IrOpcode::kProjection: {
+      // Projection has an input that produces enough values.
+      size_t index = OpParameter<size_t>(node);
+      Node* input = NodeProperties::GetValueInput(node, 0);
+      CHECK_GT(OperatorProperties::GetValueOutputCount(input->op()),
+               static_cast<int>(index));
+      break;
+    }
+    default:
+      // TODO(rossberg): Check other node kinds.
+      break;
+  }
+
+  if (from_start) {
+    reached_from_start.insert(node);
+  } else {
+    reached_from_end.insert(node);
+  }
+
+  return GenericGraphVisit::CONTINUE;
+}
+
+
+void Verifier::Run(Graph* graph) {
+  Visitor visitor(graph->zone());
+
+  CHECK_NE(NULL, graph->start());
+  visitor.from_start = true;
+  graph->VisitNodeUsesFromStart(&visitor);
+  CHECK_NE(NULL, graph->end());
+  visitor.from_start = false;
+  graph->VisitNodeInputsFromEnd(&visitor);
+
+  // All control nodes reachable from end are reachable from start.
+  for (NodeSet::iterator it = visitor.reached_from_end.begin();
+       it != visitor.reached_from_end.end(); ++it) {
+    CHECK(!NodeProperties::IsControl(*it) ||
+          visitor.reached_from_start.count(*it));
+  }
+}
+
+
+static bool HasDominatingDef(Schedule* schedule, Node* node,
+                             BasicBlock* container, BasicBlock* use_block,
+                             int use_pos) {
+  BasicBlock* block = use_block;
+  while (true) {
+    while (use_pos >= 0) {
+      if (block->nodes_[use_pos] == node) return true;
+      use_pos--;
+    }
+    block = block->dominator_;
+    if (block == NULL) break;
+    use_pos = static_cast<int>(block->nodes_.size()) - 1;
+    if (node == block->control_input_) return true;
+  }
+  return false;
+}
+
+
+static void CheckInputsDominate(Schedule* schedule, BasicBlock* block,
+                                Node* node, int use_pos) {
+  for (int j = OperatorProperties::GetValueInputCount(node->op()) - 1; j >= 0;
+       j--) {
+    BasicBlock* use_block = block;
+    if (node->opcode() == IrOpcode::kPhi) {
+      use_block = use_block->PredecessorAt(j);
+      use_pos = static_cast<int>(use_block->nodes_.size()) - 1;
+    }
+    Node* input = node->InputAt(j);
+    if (!HasDominatingDef(schedule, node->InputAt(j), block, use_block,
+                          use_pos)) {
+      V8_Fatal(__FILE__, __LINE__,
+               "Node #%d:%s in B%d is not dominated by input@%d #%d:%s",
+               node->id(), node->op()->mnemonic(), block->id(), j, input->id(),
+               input->op()->mnemonic());
+    }
+  }
+}
+
+
+void ScheduleVerifier::Run(Schedule* schedule) {
+  const int count = schedule->BasicBlockCount();
+  Zone tmp_zone(schedule->zone()->isolate());
+  Zone* zone = &tmp_zone;
+  BasicBlock* start = schedule->start();
+  BasicBlockVector* rpo_order = schedule->rpo_order();
+
+  // Verify the RPO order contains only blocks from this schedule.
+  CHECK_GE(count, static_cast<int>(rpo_order->size()));
+  for (BasicBlockVector::iterator b = rpo_order->begin(); b != rpo_order->end();
+       ++b) {
+    CHECK_EQ((*b), schedule->GetBlockById((*b)->id()));
+  }
+
+  // Verify RPO numbers of blocks.
+  CHECK_EQ(start, rpo_order->at(0));  // Start should be first.
+  for (size_t b = 0; b < rpo_order->size(); b++) {
+    BasicBlock* block = rpo_order->at(b);
+    CHECK_EQ(static_cast<int>(b), block->rpo_number_);
+    BasicBlock* dom = block->dominator_;
+    if (b == 0) {
+      // All blocks except start should have a dominator.
+      CHECK_EQ(NULL, dom);
+    } else {
+      // Check that the immediate dominator appears somewhere before the block.
+      CHECK_NE(NULL, dom);
+      CHECK_LT(dom->rpo_number_, block->rpo_number_);
+    }
+  }
+
+  // Verify that all blocks reachable from start are in the RPO.
+  BoolVector marked(count, false, zone);
+  {
+    ZoneQueue<BasicBlock*> queue(zone);
+    queue.push(start);
+    marked[start->id()] = true;
+    while (!queue.empty()) {
+      BasicBlock* block = queue.front();
+      queue.pop();
+      for (int s = 0; s < block->SuccessorCount(); s++) {
+        BasicBlock* succ = block->SuccessorAt(s);
+        if (!marked[succ->id()]) {
+          marked[succ->id()] = true;
+          queue.push(succ);
+        }
+      }
+    }
+  }
+  // Verify marked blocks are in the RPO.
+  for (int i = 0; i < count; i++) {
+    BasicBlock* block = schedule->GetBlockById(i);
+    if (marked[i]) {
+      CHECK_GE(block->rpo_number_, 0);
+      CHECK_EQ(block, rpo_order->at(block->rpo_number_));
+    }
+  }
+  // Verify RPO blocks are marked.
+  for (size_t b = 0; b < rpo_order->size(); b++) {
+    CHECK(marked[rpo_order->at(b)->id()]);
+  }
+
+  {
+    // Verify the dominance relation.
+    ZoneList<BitVector*> dominators(count, zone);
+    dominators.Initialize(count, zone);
+    dominators.AddBlock(NULL, count, zone);
+
+    // Compute a set of all the nodes that dominate a given node by using
+    // a forward fixpoint. O(n^2).
+    ZoneQueue<BasicBlock*> queue(zone);
+    queue.push(start);
+    dominators[start->id()] = new (zone) BitVector(count, zone);
+    while (!queue.empty()) {
+      BasicBlock* block = queue.front();
+      queue.pop();
+      BitVector* block_doms = dominators[block->id()];
+      BasicBlock* idom = block->dominator_;
+      if (idom != NULL && !block_doms->Contains(idom->id())) {
+        V8_Fatal(__FILE__, __LINE__, "Block B%d is not dominated by B%d",
+                 block->id(), idom->id());
+      }
+      for (int s = 0; s < block->SuccessorCount(); s++) {
+        BasicBlock* succ = block->SuccessorAt(s);
+        BitVector* succ_doms = dominators[succ->id()];
+
+        if (succ_doms == NULL) {
+          // First time visiting the node. S.doms = B U B.doms
+          succ_doms = new (zone) BitVector(count, zone);
+          succ_doms->CopyFrom(*block_doms);
+          succ_doms->Add(block->id());
+          dominators[succ->id()] = succ_doms;
+          queue.push(succ);
+        } else {
+          // Nth time visiting the successor. S.doms = S.doms ^ (B U B.doms)
+          bool had = succ_doms->Contains(block->id());
+          if (had) succ_doms->Remove(block->id());
+          if (succ_doms->IntersectIsChanged(*block_doms)) queue.push(succ);
+          if (had) succ_doms->Add(block->id());
+        }
+      }
+    }
+
+    // Verify the immediateness of dominators.
+    for (BasicBlockVector::iterator b = rpo_order->begin();
+         b != rpo_order->end(); ++b) {
+      BasicBlock* block = *b;
+      BasicBlock* idom = block->dominator_;
+      if (idom == NULL) continue;
+      BitVector* block_doms = dominators[block->id()];
+
+      for (BitVector::Iterator it(block_doms); !it.Done(); it.Advance()) {
+        BasicBlock* dom = schedule->GetBlockById(it.Current());
+        if (dom != idom && !dominators[idom->id()]->Contains(dom->id())) {
+          V8_Fatal(__FILE__, __LINE__,
+                   "Block B%d is not immediately dominated by B%d", block->id(),
+                   idom->id());
+        }
+      }
+    }
+  }
+
+  // Verify phis are placed in the block of their control input.
+  for (BasicBlockVector::iterator b = rpo_order->begin(); b != rpo_order->end();
+       ++b) {
+    for (BasicBlock::const_iterator i = (*b)->begin(); i != (*b)->end(); ++i) {
+      Node* phi = *i;
+      if (phi->opcode() != IrOpcode::kPhi) continue;
+      // TODO(titzer): Nasty special case. Phis from RawMachineAssembler
+      // schedules don't have control inputs.
+      if (phi->InputCount() >
+          OperatorProperties::GetValueInputCount(phi->op())) {
+        Node* control = NodeProperties::GetControlInput(phi);
+        CHECK(control->opcode() == IrOpcode::kMerge ||
+              control->opcode() == IrOpcode::kLoop);
+        CHECK_EQ((*b), schedule->block(control));
+      }
+    }
+  }
+
+  // Verify that all uses are dominated by their definitions.
+  for (BasicBlockVector::iterator b = rpo_order->begin(); b != rpo_order->end();
+       ++b) {
+    BasicBlock* block = *b;
+
+    // Check inputs to control for this block.
+    Node* control = block->control_input_;
+    if (control != NULL) {
+      CHECK_EQ(block, schedule->block(control));
+      CheckInputsDominate(schedule, block, control,
+                          static_cast<int>(block->nodes_.size()) - 1);
+    }
+    // Check inputs for all nodes in the block.
+    for (size_t i = 0; i < block->nodes_.size(); i++) {
+      Node* node = block->nodes_[i];
+      CheckInputsDominate(schedule, block, node, static_cast<int>(i) - 1);
+    }
+  }
+}
+}
+}
+}  // namespace v8::internal::compiler
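
The dominance check above amounts to a forward dataflow fixpoint: for every
reachable block, the set of blocks that strictly dominate it is the
intersection, over all visited predecessors, of each predecessor's set plus
the predecessor itself, and the recorded immediate dominator is then checked
against that set. A minimal standalone sketch of the same computation follows;
it uses plain std:: containers and a 64-bit bitset in place of zone-allocated
BitVectors, so the function name, signature and the 64-block limit are
assumptions of this sketch rather than part of the patch.

#include <cstdint>
#include <queue>
#include <vector>

// dominators[b] holds a bitset of the blocks that strictly dominate block b.
// Mirrors the verifier's update S.doms = S.doms intersect (B U B.doms), with
// the first visit initializing S.doms to B U B.doms.
std::vector<uint64_t> ComputeStrictDominators(
    const std::vector<std::vector<int>>& successors, int start) {
  const int count = static_cast<int>(successors.size());
  std::vector<uint64_t> dominators(count, 0);  // Start's set stays empty.
  std::vector<bool> visited(count, false);
  std::queue<int> worklist;
  worklist.push(start);
  visited[start] = true;
  while (!worklist.empty()) {
    int block = worklist.front();
    worklist.pop();
    uint64_t block_doms = dominators[block] | (uint64_t{1} << block);
    for (int succ : successors[block]) {
      if (!visited[succ]) {
        dominators[succ] = block_doms;  // First visit: S.doms = B U B.doms.
        visited[succ] = true;
        worklist.push(succ);
      } else {
        uint64_t refined = dominators[succ] & block_doms;
        if (refined != dominators[succ]) {  // Re-queue only on change.
          dominators[succ] = refined;
          worklist.push(succ);
        }
      }
    }
  }
  return dominators;
}

The immediateness check then asserts that each block's recorded immediate
dominator is a member of this set and that every other member of the set also
dominates the immediate dominator.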
diff --git a/src/compiler/verifier.h b/src/compiler/verifier.h
new file mode 100644
index 0000000..b5c028e
--- /dev/null
+++ b/src/compiler/verifier.h
@@ -0,0 +1,37 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_VERIFIER_H_
+#define V8_COMPILER_VERIFIER_H_
+
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Graph;
+class Schedule;
+
+// Verifies properties of a graph, such as the well-formedness of inputs to
+// each node, etc.
+class Verifier {
+ public:
+  static void Run(Graph* graph);
+
+ private:
+  class Visitor;
+  DISALLOW_COPY_AND_ASSIGN(Verifier);
+};
+
+// Verifies properties of a schedule, such as dominance, phi placement, etc.
+class ScheduleVerifier {
+ public:
+  static void Run(Schedule* schedule);
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_VERIFIER_H_
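
Both verifiers are exposed as static entry points, so no instance is needed at
the call site. A rough usage sketch, assuming graph and schedule pointers
produced by the surrounding pipeline (not shown in this patch):

  // After building the graph and computing a schedule:
  Verifier::Run(graph);             // per-node well-formedness checks
  ScheduleVerifier::Run(schedule);  // dominance, RPO and phi-placement checks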
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
new file mode 100644
index 0000000..f71d3bf
--- /dev/null
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -0,0 +1,1024 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/scopes.h"
+#include "src/x64/assembler-x64.h"
+#include "src/x64/macro-assembler-x64.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+// TODO(turbofan): Cleanup these hacks.
+enum Immediate64Type { kImm64Value, kImm64Handle, kImm64Reference };
+
+
+struct Immediate64 {
+  uint64_t value;
+  Handle<Object> handle;
+  ExternalReference reference;
+  Immediate64Type type;
+};
+
+
+enum RegisterOrOperandType { kRegister, kDoubleRegister, kOperand };
+
+
+struct RegisterOrOperand {
+  RegisterOrOperand() : operand(no_reg, 0) {}
+  Register reg;
+  DoubleRegister double_reg;
+  Operand operand;
+  RegisterOrOperandType type;
+};
+
+
+// Adds X64 specific methods for decoding operands.
+class X64OperandConverter : public InstructionOperandConverter {
+ public:
+  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
+      : InstructionOperandConverter(gen, instr) {}
+
+  RegisterOrOperand InputRegisterOrOperand(int index) {
+    return ToRegisterOrOperand(instr_->InputAt(index));
+  }
+
+  Immediate InputImmediate(int index) {
+    return ToImmediate(instr_->InputAt(index));
+  }
+
+  RegisterOrOperand OutputRegisterOrOperand() {
+    return ToRegisterOrOperand(instr_->Output());
+  }
+
+  Immediate64 InputImmediate64(int index) {
+    return ToImmediate64(instr_->InputAt(index));
+  }
+
+  Immediate64 ToImmediate64(InstructionOperand* operand) {
+    Constant constant = ToConstant(operand);
+    Immediate64 immediate;
+    immediate.value = 0xbeefdeaddeefbeed;
+    immediate.type = kImm64Value;
+    switch (constant.type()) {
+      case Constant::kInt32:
+      case Constant::kInt64:
+        immediate.value = constant.ToInt64();
+        return immediate;
+      case Constant::kFloat64:
+        immediate.type = kImm64Handle;
+        immediate.handle =
+            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED);
+        return immediate;
+      case Constant::kExternalReference:
+        immediate.type = kImm64Reference;
+        immediate.reference = constant.ToExternalReference();
+        return immediate;
+      case Constant::kHeapObject:
+        immediate.type = kImm64Handle;
+        immediate.handle = constant.ToHeapObject();
+        return immediate;
+    }
+    UNREACHABLE();
+    return immediate;
+  }
+
+  Immediate ToImmediate(InstructionOperand* operand) {
+    Constant constant = ToConstant(operand);
+    switch (constant.type()) {
+      case Constant::kInt32:
+        return Immediate(constant.ToInt32());
+      case Constant::kInt64:
+      case Constant::kFloat64:
+      case Constant::kExternalReference:
+      case Constant::kHeapObject:
+        break;
+    }
+    UNREACHABLE();
+    return Immediate(-1);
+  }
+
+  Operand ToOperand(InstructionOperand* op, int extra = 0) {
+    RegisterOrOperand result = ToRegisterOrOperand(op, extra);
+    DCHECK_EQ(kOperand, result.type);
+    return result.operand;
+  }
+
+  RegisterOrOperand ToRegisterOrOperand(InstructionOperand* op, int extra = 0) {
+    RegisterOrOperand result;
+    if (op->IsRegister()) {
+      DCHECK(extra == 0);
+      result.type = kRegister;
+      result.reg = ToRegister(op);
+      return result;
+    } else if (op->IsDoubleRegister()) {
+      DCHECK(extra == 0);
+      result.type = kDoubleRegister;
+      result.double_reg = ToDoubleRegister(op);
+      return result;
+    }
+
+    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+
+    result.type = kOperand;
+    // The linkage computes where all spill slots are located.
+    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
+    result.operand =
+        Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
+    return result;
+  }
+
+  Operand MemoryOperand(int* first_input) {
+    const int offset = *first_input;
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_MR1I: {
+        *first_input += 2;
+        Register index = InputRegister(offset + 1);
+        return Operand(InputRegister(offset + 0), index, times_1,
+                       0);  // TODO(dcarney): K != 0
+      }
+      case kMode_MRI:
+        *first_input += 2;
+        return Operand(InputRegister(offset + 0), InputInt32(offset + 1));
+      default:
+        UNREACHABLE();
+        return Operand(no_reg, 0);
+    }
+  }
+
+  Operand MemoryOperand() {
+    int first_input = 0;
+    return MemoryOperand(&first_input);
+  }
+};
+
+
+static bool HasImmediateInput(Instruction* instr, int index) {
+  return instr->InputAt(index)->IsImmediate();
+}
+
+
+#define ASSEMBLE_BINOP(asm_instr)                            \
+  do {                                                       \
+    if (HasImmediateInput(instr, 1)) {                       \
+      RegisterOrOperand input = i.InputRegisterOrOperand(0); \
+      if (input.type == kRegister) {                         \
+        __ asm_instr(input.reg, i.InputImmediate(1));        \
+      } else {                                               \
+        __ asm_instr(input.operand, i.InputImmediate(1));    \
+      }                                                      \
+    } else {                                                 \
+      RegisterOrOperand input = i.InputRegisterOrOperand(1); \
+      if (input.type == kRegister) {                         \
+        __ asm_instr(i.InputRegister(0), input.reg);         \
+      } else {                                               \
+        __ asm_instr(i.InputRegister(0), input.operand);     \
+      }                                                      \
+    }                                                        \
+  } while (0)
+
+
+#define ASSEMBLE_SHIFT(asm_instr, width)                                 \
+  do {                                                                   \
+    if (HasImmediateInput(instr, 1)) {                                   \
+      __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
+    } else {                                                             \
+      __ asm_instr##_cl(i.OutputRegister());                             \
+    }                                                                    \
+  } while (0)
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+  X64OperandConverter i(this, instr);
+
+  switch (ArchOpcodeField::decode(instr->opcode())) {
+    case kArchCallCodeObject: {
+      EnsureSpaceForLazyDeopt();
+      if (HasImmediateInput(instr, 0)) {
+        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+        __ Call(code, RelocInfo::CODE_TARGET);
+      } else {
+        Register reg = i.InputRegister(0);
+        int entry = Code::kHeaderSize - kHeapObjectTag;
+        __ Call(Operand(reg, entry));
+      }
+      AddSafepointAndDeopt(instr);
+      break;
+    }
+    case kArchCallJSFunction: {
+      EnsureSpaceForLazyDeopt();
+      Register func = i.InputRegister(0);
+      if (FLAG_debug_code) {
+        // Check the function's context matches the context argument.
+        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
+        __ Assert(equal, kWrongFunctionContext);
+      }
+      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
+      AddSafepointAndDeopt(instr);
+      break;
+    }
+    case kArchJmp:
+      __ jmp(code_->GetLabel(i.InputBlock(0)));
+      break;
+    case kArchNop:
+      // Don't emit code for nops.
+      break;
+    case kArchRet:
+      AssembleReturn();
+      break;
+    case kArchTruncateDoubleToI:
+      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+      break;
+    case kX64Add32:
+      ASSEMBLE_BINOP(addl);
+      break;
+    case kX64Add:
+      ASSEMBLE_BINOP(addq);
+      break;
+    case kX64Sub32:
+      ASSEMBLE_BINOP(subl);
+      break;
+    case kX64Sub:
+      ASSEMBLE_BINOP(subq);
+      break;
+    case kX64And32:
+      ASSEMBLE_BINOP(andl);
+      break;
+    case kX64And:
+      ASSEMBLE_BINOP(andq);
+      break;
+    case kX64Cmp32:
+      ASSEMBLE_BINOP(cmpl);
+      break;
+    case kX64Cmp:
+      ASSEMBLE_BINOP(cmpq);
+      break;
+    case kX64Test32:
+      ASSEMBLE_BINOP(testl);
+      break;
+    case kX64Test:
+      ASSEMBLE_BINOP(testq);
+      break;
+    case kX64Imul32:
+      if (HasImmediateInput(instr, 1)) {
+        RegisterOrOperand input = i.InputRegisterOrOperand(0);
+        if (input.type == kRegister) {
+          __ imull(i.OutputRegister(), input.reg, i.InputImmediate(1));
+        } else {
+          __ movq(kScratchRegister, input.operand);
+          __ imull(i.OutputRegister(), kScratchRegister, i.InputImmediate(1));
+        }
+      } else {
+        RegisterOrOperand input = i.InputRegisterOrOperand(1);
+        if (input.type == kRegister) {
+          __ imull(i.OutputRegister(), input.reg);
+        } else {
+          __ imull(i.OutputRegister(), input.operand);
+        }
+      }
+      break;
+    case kX64Imul:
+      if (HasImmediateInput(instr, 1)) {
+        RegisterOrOperand input = i.InputRegisterOrOperand(0);
+        if (input.type == kRegister) {
+          __ imulq(i.OutputRegister(), input.reg, i.InputImmediate(1));
+        } else {
+          __ movq(kScratchRegister, input.operand);
+          __ imulq(i.OutputRegister(), kScratchRegister, i.InputImmediate(1));
+        }
+      } else {
+        RegisterOrOperand input = i.InputRegisterOrOperand(1);
+        if (input.type == kRegister) {
+          __ imulq(i.OutputRegister(), input.reg);
+        } else {
+          __ imulq(i.OutputRegister(), input.operand);
+        }
+      }
+      break;
+    case kX64Idiv32:
+      __ cdq();
+      __ idivl(i.InputRegister(1));
+      break;
+    case kX64Idiv:
+      __ cqo();
+      __ idivq(i.InputRegister(1));
+      break;
+    case kX64Udiv32:
+      __ xorl(rdx, rdx);
+      __ divl(i.InputRegister(1));
+      break;
+    case kX64Udiv:
+      __ xorq(rdx, rdx);
+      __ divq(i.InputRegister(1));
+      break;
+    case kX64Not: {
+      RegisterOrOperand output = i.OutputRegisterOrOperand();
+      if (output.type == kRegister) {
+        __ notq(output.reg);
+      } else {
+        __ notq(output.operand);
+      }
+      break;
+    }
+    case kX64Not32: {
+      RegisterOrOperand output = i.OutputRegisterOrOperand();
+      if (output.type == kRegister) {
+        __ notl(output.reg);
+      } else {
+        __ notl(output.operand);
+      }
+      break;
+    }
+    case kX64Neg: {
+      RegisterOrOperand output = i.OutputRegisterOrOperand();
+      if (output.type == kRegister) {
+        __ negq(output.reg);
+      } else {
+        __ negq(output.operand);
+      }
+      break;
+    }
+    case kX64Neg32: {
+      RegisterOrOperand output = i.OutputRegisterOrOperand();
+      if (output.type == kRegister) {
+        __ negl(output.reg);
+      } else {
+        __ negl(output.operand);
+      }
+      break;
+    }
+    case kX64Or32:
+      ASSEMBLE_BINOP(orl);
+      break;
+    case kX64Or:
+      ASSEMBLE_BINOP(orq);
+      break;
+    case kX64Xor32:
+      ASSEMBLE_BINOP(xorl);
+      break;
+    case kX64Xor:
+      ASSEMBLE_BINOP(xorq);
+      break;
+    case kX64Shl32:
+      ASSEMBLE_SHIFT(shll, 5);
+      break;
+    case kX64Shl:
+      ASSEMBLE_SHIFT(shlq, 6);
+      break;
+    case kX64Shr32:
+      ASSEMBLE_SHIFT(shrl, 5);
+      break;
+    case kX64Shr:
+      ASSEMBLE_SHIFT(shrq, 6);
+      break;
+    case kX64Sar32:
+      ASSEMBLE_SHIFT(sarl, 5);
+      break;
+    case kX64Sar:
+      ASSEMBLE_SHIFT(sarq, 6);
+      break;
+    case kX64Ror32:
+      ASSEMBLE_SHIFT(rorl, 5);
+      break;
+    case kX64Ror:
+      ASSEMBLE_SHIFT(rorq, 6);
+      break;
+    case kSSEFloat64Cmp: {
+      RegisterOrOperand input = i.InputRegisterOrOperand(1);
+      if (input.type == kDoubleRegister) {
+        __ ucomisd(i.InputDoubleRegister(0), input.double_reg);
+      } else {
+        __ ucomisd(i.InputDoubleRegister(0), input.operand);
+      }
+      break;
+    }
+    case kSSEFloat64Add:
+      __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Sub:
+      __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Mul:
+      __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Div:
+      __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Mod: {
+      __ subq(rsp, Immediate(kDoubleSize));
+      // Move values to st(0) and st(1).
+      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
+      __ fld_d(Operand(rsp, 0));
+      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
+      __ fld_d(Operand(rsp, 0));
+      // Loop while fprem isn't done.
+      Label mod_loop;
+      __ bind(&mod_loop);
+      // This instruction traps on all kinds of inputs, but we are assuming
+      // the floating point control word is set to ignore them all.
+      __ fprem();
+      // The following 2 instructions implicitly use rax.
+      __ fnstsw_ax();
+      if (CpuFeatures::IsSupported(SAHF) && masm()->IsEnabled(SAHF)) {
+        __ sahf();
+      } else {
+        __ shrl(rax, Immediate(8));
+        __ andl(rax, Immediate(0xFF));
+        __ pushq(rax);
+        __ popfq();
+      }
+      __ j(parity_even, &mod_loop);
+      // Move output to stack and clean up.
+      __ fstp(1);
+      __ fstp_d(Operand(rsp, 0));
+      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
+      __ addq(rsp, Immediate(kDoubleSize));
+      break;
+    }
+    case kSSEFloat64Sqrt: {
+      RegisterOrOperand input = i.InputRegisterOrOperand(0);
+      if (input.type == kDoubleRegister) {
+        __ sqrtsd(i.OutputDoubleRegister(), input.double_reg);
+      } else {
+        __ sqrtsd(i.OutputDoubleRegister(), input.operand);
+      }
+      break;
+    }
+    case kSSEFloat64ToInt32: {
+      RegisterOrOperand input = i.InputRegisterOrOperand(0);
+      if (input.type == kDoubleRegister) {
+        __ cvttsd2si(i.OutputRegister(), input.double_reg);
+      } else {
+        __ cvttsd2si(i.OutputRegister(), input.operand);
+      }
+      break;
+    }
+    case kSSEFloat64ToUint32: {
+      RegisterOrOperand input = i.InputRegisterOrOperand(0);
+      if (input.type == kDoubleRegister) {
+        __ cvttsd2siq(i.OutputRegister(), input.double_reg);
+      } else {
+        __ cvttsd2siq(i.OutputRegister(), input.operand);
+      }
+      __ andl(i.OutputRegister(), i.OutputRegister());  // clear upper bits.
+      // TODO(turbofan): generated code should not look at the upper 32 bits
+      // of the result, but those bits could escape to the outside world.
+      break;
+    }
+    case kSSEInt32ToFloat64: {
+      RegisterOrOperand input = i.InputRegisterOrOperand(0);
+      if (input.type == kRegister) {
+        __ cvtlsi2sd(i.OutputDoubleRegister(), input.reg);
+      } else {
+        __ cvtlsi2sd(i.OutputDoubleRegister(), input.operand);
+      }
+      break;
+    }
+    case kSSEUint32ToFloat64: {
+      // TODO(turbofan): X64 SSE cvtqsi2sd should support operands.
+      __ cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
+      break;
+    }
+    case kX64Movsxbl:
+      __ movsxbl(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kX64Movzxbl:
+      __ movzxbl(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kX64Movb: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      if (HasImmediateInput(instr, index)) {
+        __ movb(operand, Immediate(i.InputInt8(index)));
+      } else {
+        __ movb(operand, i.InputRegister(index));
+      }
+      break;
+    }
+    case kX64Movsxwl:
+      __ movsxwl(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kX64Movzxwl:
+      __ movzxwl(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kX64Movw: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      if (HasImmediateInput(instr, index)) {
+        __ movw(operand, Immediate(i.InputInt16(index)));
+      } else {
+        __ movw(operand, i.InputRegister(index));
+      }
+      break;
+    }
+    case kX64Movl:
+      if (instr->HasOutput()) {
+        if (instr->addressing_mode() == kMode_None) {
+          RegisterOrOperand input = i.InputRegisterOrOperand(0);
+          if (input.type == kRegister) {
+            __ movl(i.OutputRegister(), input.reg);
+          } else {
+            __ movl(i.OutputRegister(), input.operand);
+          }
+        } else {
+          __ movl(i.OutputRegister(), i.MemoryOperand());
+        }
+      } else {
+        int index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        if (HasImmediateInput(instr, index)) {
+          __ movl(operand, i.InputImmediate(index));
+        } else {
+          __ movl(operand, i.InputRegister(index));
+        }
+      }
+      break;
+    case kX64Movsxlq: {
+      RegisterOrOperand input = i.InputRegisterOrOperand(0);
+      if (input.type == kRegister) {
+        __ movsxlq(i.OutputRegister(), input.reg);
+      } else {
+        __ movsxlq(i.OutputRegister(), input.operand);
+      }
+      break;
+    }
+    case kX64Movq:
+      if (instr->HasOutput()) {
+        __ movq(i.OutputRegister(), i.MemoryOperand());
+      } else {
+        int index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        if (HasImmediateInput(instr, index)) {
+          __ movq(operand, i.InputImmediate(index));
+        } else {
+          __ movq(operand, i.InputRegister(index));
+        }
+      }
+      break;
+    case kX64Movss:
+      if (instr->HasOutput()) {
+        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
+        __ cvtss2sd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+      } else {
+        int index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        __ cvtsd2ss(xmm0, i.InputDoubleRegister(index));
+        __ movss(operand, xmm0);
+      }
+      break;
+    case kX64Movsd:
+      if (instr->HasOutput()) {
+        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
+      } else {
+        int index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        __ movsd(operand, i.InputDoubleRegister(index));
+      }
+      break;
+    case kX64Push:
+      if (HasImmediateInput(instr, 0)) {
+        __ pushq(i.InputImmediate(0));
+      } else {
+        RegisterOrOperand input = i.InputRegisterOrOperand(0);
+        if (input.type == kRegister) {
+          __ pushq(input.reg);
+        } else {
+          __ pushq(input.operand);
+        }
+      }
+      break;
+    case kX64StoreWriteBarrier: {
+      Register object = i.InputRegister(0);
+      Register index = i.InputRegister(1);
+      Register value = i.InputRegister(2);
+      __ movsxlq(index, index);
+      __ movq(Operand(object, index, times_1, 0), value);
+      __ leaq(index, Operand(object, index, times_1, 0));
+      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
+                                ? kSaveFPRegs
+                                : kDontSaveFPRegs;
+      __ RecordWrite(object, index, value, mode);
+      break;
+    }
+  }
+}
+
+
+// Assembles branches after this instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+                                       FlagsCondition condition) {
+  X64OperandConverter i(this, instr);
+  Label done;
+
+  // Emit a branch. The true and false targets are always the last two inputs
+  // to the instruction.
+  BasicBlock* tblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 2);
+  BasicBlock* fblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 1);
+  bool fallthru = IsNextInAssemblyOrder(fblock);
+  Label* tlabel = code()->GetLabel(tblock);
+  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
+  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
+  switch (condition) {
+    case kUnorderedEqual:
+      __ j(parity_even, flabel, flabel_distance);
+    // Fall through.
+    case kEqual:
+      __ j(equal, tlabel);
+      break;
+    case kUnorderedNotEqual:
+      __ j(parity_even, tlabel);
+    // Fall through.
+    case kNotEqual:
+      __ j(not_equal, tlabel);
+      break;
+    case kSignedLessThan:
+      __ j(less, tlabel);
+      break;
+    case kSignedGreaterThanOrEqual:
+      __ j(greater_equal, tlabel);
+      break;
+    case kSignedLessThanOrEqual:
+      __ j(less_equal, tlabel);
+      break;
+    case kSignedGreaterThan:
+      __ j(greater, tlabel);
+      break;
+    case kUnorderedLessThan:
+      __ j(parity_even, flabel, flabel_distance);
+    // Fall through.
+    case kUnsignedLessThan:
+      __ j(below, tlabel);
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ j(parity_even, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      __ j(above_equal, tlabel);
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ j(parity_even, flabel, flabel_distance);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      __ j(below_equal, tlabel);
+      break;
+    case kUnorderedGreaterThan:
+      __ j(parity_even, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      __ j(above, tlabel);
+      break;
+    case kOverflow:
+      __ j(overflow, tlabel);
+      break;
+    case kNotOverflow:
+      __ j(no_overflow, tlabel);
+      break;
+  }
+  if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
+  __ bind(&done);
+}
+
+
+// Assembles boolean materializations after this instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+                                        FlagsCondition condition) {
+  X64OperandConverter i(this, instr);
+  Label done;
+
+  // Materialize a full 64-bit 1 or 0 value. The result register is always the
+  // last output of the instruction.
+  Label check;
+  DCHECK_NE(0, instr->OutputCount());
+  Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
+  Condition cc = no_condition;
+  switch (condition) {
+    case kUnorderedEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ movl(reg, Immediate(0));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kEqual:
+      cc = equal;
+      break;
+    case kUnorderedNotEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ movl(reg, Immediate(1));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kNotEqual:
+      cc = not_equal;
+      break;
+    case kSignedLessThan:
+      cc = less;
+      break;
+    case kSignedGreaterThanOrEqual:
+      cc = greater_equal;
+      break;
+    case kSignedLessThanOrEqual:
+      cc = less_equal;
+      break;
+    case kSignedGreaterThan:
+      cc = greater;
+      break;
+    case kUnorderedLessThan:
+      __ j(parity_odd, &check, Label::kNear);
+      __ movl(reg, Immediate(0));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedLessThan:
+      cc = below;
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ movl(reg, Immediate(1));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      cc = above_equal;
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ movl(reg, Immediate(0));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      cc = below_equal;
+      break;
+    case kUnorderedGreaterThan:
+      __ j(parity_odd, &check, Label::kNear);
+      __ movl(reg, Immediate(1));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      cc = above;
+      break;
+    case kOverflow:
+      cc = overflow;
+      break;
+    case kNotOverflow:
+      cc = no_overflow;
+      break;
+  }
+  __ bind(&check);
+  __ setcc(cc, reg);
+  __ movzxbl(reg, reg);
+  __ bind(&done);
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+      isolate(), deoptimization_id, Deoptimizer::LAZY);
+  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
+void CodeGenerator::AssemblePrologue() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  int stack_slots = frame()->GetSpillSlotCount();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    __ pushq(rbp);
+    __ movq(rbp, rsp);
+    const RegList saves = descriptor->CalleeSavedRegisters();
+    if (saves != 0) {  // Save callee-saved registers.
+      int register_save_area_size = 0;
+      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+        if (!((1 << i) & saves)) continue;
+        __ pushq(Register::from_code(i));
+        register_save_area_size += kPointerSize;
+      }
+      frame()->SetRegisterSaveAreaSize(register_save_area_size);
+    }
+  } else if (descriptor->IsJSFunctionCall()) {
+    CompilationInfo* info = linkage()->info();
+    __ Prologue(info->IsCodePreAgingActive());
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+
+    // Sloppy mode functions and builtins need to replace the receiver with the
+    // global proxy when called as functions (without an explicit receiver
+    // object).
+    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+    if (info->strict_mode() == SLOPPY && !info->is_native()) {
+      Label ok;
+      StackArgumentsAccessor args(rbp, info->scope()->num_parameters());
+      __ movp(rcx, args.GetReceiverOperand());
+      __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+      __ j(not_equal, &ok, Label::kNear);
+      __ movp(rcx, GlobalObjectOperand());
+      __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
+      __ movp(args.GetReceiverOperand(), rcx);
+      __ bind(&ok);
+    }
+
+  } else {
+    __ StubPrologue();
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+  }
+  if (stack_slots > 0) {
+    __ subq(rsp, Immediate(stack_slots * kPointerSize));
+  }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    if (frame()->GetRegisterSaveAreaSize() > 0) {
+      // Remove this frame's spill slots first.
+      int stack_slots = frame()->GetSpillSlotCount();
+      if (stack_slots > 0) {
+        __ addq(rsp, Immediate(stack_slots * kPointerSize));
+      }
+      const RegList saves = descriptor->CalleeSavedRegisters();
+      // Restore registers.
+      if (saves != 0) {
+        for (int i = 0; i < Register::kNumRegisters; i++) {
+          if (!((1 << i) & saves)) continue;
+          __ popq(Register::from_code(i));
+        }
+      }
+      __ popq(rbp);  // Pop caller's frame pointer.
+      __ ret(0);
+    } else {
+      // No saved registers.
+      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
+      __ popq(rbp);       // Pop caller's frame pointer.
+      __ ret(0);
+    }
+  } else {
+    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
+    __ popq(rbp);       // Pop caller's frame pointer.
+    int pop_count = descriptor->IsJSFunctionCall()
+                        ? static_cast<int>(descriptor->JSParameterCount())
+                        : 0;
+    __ ret(pop_count * kPointerSize);
+  }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  X64OperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      __ movq(g.ToRegister(destination), src);
+    } else {
+      __ movq(g.ToOperand(destination), src);
+    }
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Operand src = g.ToOperand(source);
+    if (destination->IsRegister()) {
+      Register dst = g.ToRegister(destination);
+      __ movq(dst, src);
+    } else {
+      // Spill on demand to use a temporary register for memory-to-memory
+      // moves.
+      Register tmp = kScratchRegister;
+      Operand dst = g.ToOperand(destination);
+      __ movq(tmp, src);
+      __ movq(dst, tmp);
+    }
+  } else if (source->IsConstant()) {
+    ConstantOperand* constant_source = ConstantOperand::cast(source);
+    if (destination->IsRegister() || destination->IsStackSlot()) {
+      Register dst = destination->IsRegister() ? g.ToRegister(destination)
+                                               : kScratchRegister;
+      Immediate64 imm = g.ToImmediate64(constant_source);
+      switch (imm.type) {
+        case kImm64Value:
+          __ Set(dst, imm.value);
+          break;
+        case kImm64Reference:
+          __ Move(dst, imm.reference);
+          break;
+        case kImm64Handle:
+          __ Move(dst, imm.handle);
+          break;
+      }
+      if (destination->IsStackSlot()) {
+        __ movq(g.ToOperand(destination), kScratchRegister);
+      }
+    } else {
+      __ movq(kScratchRegister,
+              bit_cast<uint64_t, double>(g.ToDouble(constant_source)));
+      if (destination->IsDoubleRegister()) {
+        __ movq(g.ToDoubleRegister(destination), kScratchRegister);
+      } else {
+        DCHECK(destination->IsDoubleStackSlot());
+        __ movq(g.ToOperand(destination), kScratchRegister);
+      }
+    }
+  } else if (source->IsDoubleRegister()) {
+    XMMRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      XMMRegister dst = g.ToDoubleRegister(destination);
+      __ movsd(dst, src);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      Operand dst = g.ToOperand(destination);
+      __ movsd(dst, src);
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+    Operand src = g.ToOperand(source);
+    if (destination->IsDoubleRegister()) {
+      XMMRegister dst = g.ToDoubleRegister(destination);
+      __ movsd(dst, src);
+    } else {
+      // We rely on having xmm0 available as a fixed scratch register.
+      Operand dst = g.ToOperand(destination);
+      __ movsd(xmm0, src);
+      __ movsd(dst, xmm0);
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  X64OperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister() && destination->IsRegister()) {
+    // Register-register.
+    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
+  } else if (source->IsRegister() && destination->IsStackSlot()) {
+    Register src = g.ToRegister(source);
+    Operand dst = g.ToOperand(destination);
+    __ xchgq(src, dst);
+  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
+             (source->IsDoubleStackSlot() &&
+              destination->IsDoubleStackSlot())) {
+    // Memory-memory.
+    Register tmp = kScratchRegister;
+    Operand src = g.ToOperand(source);
+    Operand dst = g.ToOperand(destination);
+    __ movq(tmp, dst);
+    __ xchgq(tmp, src);
+    __ movq(dst, tmp);
+  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+    // XMM register-register swap. We rely on having xmm0
+    // available as a fixed scratch register.
+    XMMRegister src = g.ToDoubleRegister(source);
+    XMMRegister dst = g.ToDoubleRegister(destination);
+    __ movsd(xmm0, src);
+    __ movsd(src, dst);
+    __ movsd(dst, xmm0);
+  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
+    // XMM register-memory swap.  We rely on having xmm0
+    // available as a fixed scratch register.
+    XMMRegister src = g.ToDoubleRegister(source);
+    Operand dst = g.ToOperand(destination);
+    __ movsd(xmm0, src);
+    __ movsd(src, dst);
+    __ movsd(dst, xmm0);
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
+
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+  int space_needed = Deoptimizer::patch_size();
+  if (!linkage()->info()->IsStub()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      __ Nop(padding_size);
+    }
+  }
+  MarkLazyDeoptSite();
+}
+
+#undef __
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
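
EnsureSpaceForLazyDeopt above pads with nops whenever fewer than
Deoptimizer::patch_size() bytes have been emitted since the previous
lazy-deopt site, so that patching a lazy bailout never overwrites the
instructions that follow it. The padding arithmetic in isolation, as a minimal
sketch (the free-function form and the name are illustrative, not part of the
code above):

  // Returns how many nop bytes must be emitted before the next lazy-deopt
  // site; zero when the gap since the previous site is already large enough.
  static int LazyDeoptPaddingBytes(int current_pc, int last_lazy_deopt_pc,
                                   int patch_size) {
    int shortfall = last_lazy_deopt_pc + patch_size - current_pc;
    return shortfall > 0 ? shortfall : 0;
  }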
diff --git a/src/compiler/x64/instruction-codes-x64.h b/src/compiler/x64/instruction-codes-x64.h
new file mode 100644
index 0000000..dfad203
--- /dev/null
+++ b/src/compiler/x64/instruction-codes-x64.h
@@ -0,0 +1,101 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
+#define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// X64-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+  V(X64Add)                        \
+  V(X64Add32)                      \
+  V(X64And)                        \
+  V(X64And32)                      \
+  V(X64Cmp)                        \
+  V(X64Cmp32)                      \
+  V(X64Test)                       \
+  V(X64Test32)                     \
+  V(X64Or)                         \
+  V(X64Or32)                       \
+  V(X64Xor)                        \
+  V(X64Xor32)                      \
+  V(X64Sub)                        \
+  V(X64Sub32)                      \
+  V(X64Imul)                       \
+  V(X64Imul32)                     \
+  V(X64Idiv)                       \
+  V(X64Idiv32)                     \
+  V(X64Udiv)                       \
+  V(X64Udiv32)                     \
+  V(X64Not)                        \
+  V(X64Not32)                      \
+  V(X64Neg)                        \
+  V(X64Neg32)                      \
+  V(X64Shl)                        \
+  V(X64Shl32)                      \
+  V(X64Shr)                        \
+  V(X64Shr32)                      \
+  V(X64Sar)                        \
+  V(X64Sar32)                      \
+  V(X64Ror)                        \
+  V(X64Ror32)                      \
+  V(SSEFloat64Cmp)                 \
+  V(SSEFloat64Add)                 \
+  V(SSEFloat64Sub)                 \
+  V(SSEFloat64Mul)                 \
+  V(SSEFloat64Div)                 \
+  V(SSEFloat64Mod)                 \
+  V(SSEFloat64Sqrt)                \
+  V(SSEFloat64ToInt32)             \
+  V(SSEFloat64ToUint32)            \
+  V(SSEInt32ToFloat64)             \
+  V(SSEUint32ToFloat64)            \
+  V(X64Movsxbl)                    \
+  V(X64Movzxbl)                    \
+  V(X64Movb)                       \
+  V(X64Movsxwl)                    \
+  V(X64Movzxwl)                    \
+  V(X64Movw)                       \
+  V(X64Movl)                       \
+  V(X64Movsxlq)                    \
+  V(X64Movq)                       \
+  V(X64Movsd)                      \
+  V(X64Movss)                      \
+  V(X64Push)                       \
+  V(X64StoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MR = [register]
+// MI = [immediate]
+// MRN = [register + register * N in {1, 2, 4, 8}]
+// MRI = [register + immediate]
+// MRNI = [register + register * N in {1, 2, 4, 8} + immediate]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+  V(MR)   /* [%r1] */                  \
+  V(MRI)  /* [%r1 + K] */              \
+  V(MR1I) /* [%r1 + %r2 + K] */        \
+  V(MR2I) /* [%r1 + %r2*2 + K] */      \
+  V(MR4I) /* [%r1 + %r2*4 + K] */      \
+  V(MR8I) /* [%r1 + %r2*8 + K] */
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
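
The V(...) lists above follow the usual X-macro pattern: they are presumably
expanded elsewhere, together with the architecture-independent opcodes, into
the ArchOpcode and AddressingMode enums and their printable names. A minimal
self-contained illustration of that expansion technique, using Sample names
rather than the actual V8 definitions:

  #define SAMPLE_OPCODE_LIST(V) \
    V(Add)                      \
    V(Sub)

  // Expand once to declare enum values...
  #define DECLARE_SAMPLE_OPCODE(Name) kSample##Name,
  enum SampleOpcode { SAMPLE_OPCODE_LIST(DECLARE_SAMPLE_OPCODE) kSampleLast };
  #undef DECLARE_SAMPLE_OPCODE

  // ...and again to build a parallel table of names for tracing.
  #define SAMPLE_OPCODE_NAME(Name) #Name,
  static const char* const kSampleOpcodeNames[] = {
      SAMPLE_OPCODE_LIST(SAMPLE_OPCODE_NAME)};
  #undef SAMPLE_OPCODE_NAME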
diff --git a/src/compiler/x64/instruction-selector-x64-unittest.cc b/src/compiler/x64/instruction-selector-x64-unittest.cc
new file mode 100644
index 0000000..22f0bce
--- /dev/null
+++ b/src/compiler/x64/instruction-selector-x64-unittest.cc
@@ -0,0 +1,111 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// -----------------------------------------------------------------------------
+// Conversions.
+
+
+TEST_F(InstructionSelectorTest, ChangeInt32ToInt64WithParameter) {
+  StreamBuilder m(this, kMachInt64, kMachInt32);
+  m.Return(m.ChangeInt32ToInt64(m.Parameter(0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Movsxlq, s[0]->arch_opcode());
+}
+
+
+TEST_F(InstructionSelectorTest, ChangeUint32ToUint64WithParameter) {
+  StreamBuilder m(this, kMachUint64, kMachUint32);
+  m.Return(m.ChangeUint32ToUint64(m.Parameter(0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
+}
+
+
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
+  StreamBuilder m(this, kMachInt32, kMachInt64);
+  m.Return(m.TruncateInt64ToInt32(m.Parameter(0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
+}
+
+
+// -----------------------------------------------------------------------------
+// Loads and stores
+
+namespace {
+
+struct MemoryAccess {
+  MachineType type;
+  ArchOpcode load_opcode;
+  ArchOpcode store_opcode;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
+  OStringStream ost;
+  ost << memacc.type;
+  return os << ost.c_str();
+}
+
+
+static const MemoryAccess kMemoryAccesses[] = {
+    {kMachInt8, kX64Movsxbl, kX64Movb},
+    {kMachUint8, kX64Movzxbl, kX64Movb},
+    {kMachInt16, kX64Movsxwl, kX64Movw},
+    {kMachUint16, kX64Movzxwl, kX64Movw},
+    {kMachInt32, kX64Movl, kX64Movl},
+    {kMachUint32, kX64Movl, kX64Movl},
+    {kMachInt64, kX64Movq, kX64Movq},
+    {kMachUint64, kX64Movq, kX64Movq},
+    {kMachFloat32, kX64Movss, kX64Movss},
+    {kMachFloat64, kX64Movsd, kX64Movsd}};
+
+}  // namespace
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccess>
+    InstructionSelectorMemoryAccessTest;
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+  m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
+  m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(0U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorMemoryAccessTest,
+                        ::testing::ValuesIn(kMemoryAccesses));
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
new file mode 100644
index 0000000..5fe7bad
--- /dev/null
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -0,0 +1,723 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Adds X64-specific methods for generating operands.
+class X64OperandGenerator FINAL : public OperandGenerator {
+ public:
+  explicit X64OperandGenerator(InstructionSelector* selector)
+      : OperandGenerator(selector) {}
+
+  InstructionOperand* TempRegister(Register reg) {
+    return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+                                           Register::ToAllocationIndex(reg));
+  }
+
+  InstructionOperand* UseByteRegister(Node* node) {
+    // TODO(dcarney): relax constraint.
+    return UseFixed(node, rdx);
+  }
+
+  InstructionOperand* UseImmediate64(Node* node) { return UseImmediate(node); }
+
+  bool CanBeImmediate(Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kInt32Constant:
+        return true;
+      default:
+        return false;
+    }
+  }
+
+  bool CanBeImmediate64(Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kInt32Constant:
+        return true;
+      case IrOpcode::kNumberConstant:
+        return true;
+      case IrOpcode::kHeapConstant: {
+        // Constants in new space cannot be used as immediates in V8 because
+        // the GC does not scan code objects when collecting the new generation.
+        Unique<HeapObject> value = OpParameter<Unique<HeapObject> >(node);
+        return !isolate()->heap()->InNewSpace(*value.handle());
+      }
+      default:
+        return false;
+    }
+  }
+};
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
+  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+  X64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+
+  ArchOpcode opcode;
+  // TODO(titzer): signed/unsigned small loads
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kX64Movss;
+      break;
+    case kRepFloat64:
+      opcode = kX64Movsd;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = typ == kTypeInt32 ? kX64Movsxbl : kX64Movzxbl;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeInt32 ? kX64Movsxwl : kX64Movzxwl;
+      break;
+    case kRepWord32:
+      opcode = kX64Movl;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord64:
+      opcode = kX64Movq;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  if (g.CanBeImmediate(base)) {
+    // load [#base + %index]
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(index), g.UseImmediate(base));
+  } else if (g.CanBeImmediate(index)) {  // load [%base + #index]
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+  } else {  // load [%base + %index + K]
+    Emit(opcode | AddressingModeField::encode(kMode_MR1I),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+  }
+  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+  X64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+  MachineType rep = RepresentationOf(store_rep.machine_type());
+  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+    DCHECK(rep == kRepTagged);
+    // TODO(dcarney): refactor RecordWrite function to take temp registers
+    //                and pass them here instead of using fixed regs
+    // TODO(dcarney): handle immediate indices.
+    InstructionOperand* temps[] = {g.TempRegister(rcx), g.TempRegister(rdx)};
+    Emit(kX64StoreWriteBarrier, NULL, g.UseFixed(base, rbx),
+         g.UseFixed(index, rcx), g.UseFixed(value, rdx), arraysize(temps),
+         temps);
+    return;
+  }
+  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+  InstructionOperand* val;
+  if (g.CanBeImmediate(value)) {
+    val = g.UseImmediate(value);
+  } else if (rep == kRepWord8 || rep == kRepBit) {
+    val = g.UseByteRegister(value);
+  } else {
+    val = g.UseRegister(value);
+  }
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kX64Movss;
+      break;
+    case kRepFloat64:
+      opcode = kX64Movsd;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = kX64Movb;
+      break;
+    case kRepWord16:
+      opcode = kX64Movw;
+      break;
+    case kRepWord32:
+      opcode = kX64Movl;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord64:
+      opcode = kX64Movq;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  if (g.CanBeImmediate(base)) {
+    // store [#base + %index], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+         g.UseRegister(index), g.UseImmediate(base), val);
+  } else if (g.CanBeImmediate(index)) {  // store [%base + #index], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+         g.UseRegister(base), g.UseImmediate(index), val);
+  } else {  // store [%base + %index], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MR1I), NULL,
+         g.UseRegister(base), g.UseRegister(index), val);
+  }
+  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode, FlagsContinuation* cont) {
+  X64OperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  InstructionOperand* inputs[4];
+  size_t input_count = 0;
+  InstructionOperand* outputs[2];
+  size_t output_count = 0;
+
+  // TODO(turbofan): match complex addressing modes.
+  // TODO(turbofan): if commutative, pick the non-live-in operand as the left as
+  // this might be the last use and therefore its register can be reused.
+  if (g.CanBeImmediate(m.right().node())) {
+    inputs[input_count++] = g.Use(m.left().node());
+    inputs[input_count++] = g.UseImmediate(m.right().node());
+  } else {
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.Use(m.right().node());
+  }
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  }
+
+  outputs[output_count++] = g.DefineSameAsFirst(node);
+  if (cont->IsSet()) {
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0, input_count);
+  DCHECK_NE(0, output_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+
+  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+                                      outputs, input_count, inputs);
+  if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode) {
+  FlagsContinuation cont;
+  VisitBinop(selector, node, opcode, &cont);
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+  VisitBinop(this, node, kX64And32);
+}
+
+
+void InstructionSelector::VisitWord64And(Node* node) {
+  VisitBinop(this, node, kX64And);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+  VisitBinop(this, node, kX64Or32);
+}
+
+
+void InstructionSelector::VisitWord64Or(Node* node) {
+  VisitBinop(this, node, kX64Or);
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+  X64OperandGenerator g(this);
+  Uint32BinopMatcher m(node);
+  if (m.right().Is(-1)) {
+    Emit(kX64Not32, g.DefineSameAsFirst(node), g.Use(m.left().node()));
+  } else {
+    VisitBinop(this, node, kX64Xor32);
+  }
+}
+
+
+void InstructionSelector::VisitWord64Xor(Node* node) {
+  X64OperandGenerator g(this);
+  Uint64BinopMatcher m(node);
+  if (m.right().Is(-1)) {
+    Emit(kX64Not, g.DefineSameAsFirst(node), g.Use(m.left().node()));
+  } else {
+    VisitBinop(this, node, kX64Xor);
+  }
+}
+
+
+// Shared routine for multiple 32-bit shift operations.
+// TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
+static void VisitWord32Shift(InstructionSelector* selector, Node* node,
+                             ArchOpcode opcode) {
+  X64OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // TODO(turbofan): assembler only supports some addressing modes for shifts.
+  if (g.CanBeImmediate(right)) {
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.UseImmediate(right));
+  } else {
+    Int32BinopMatcher m(node);
+    if (m.right().IsWord32And()) {
+      Int32BinopMatcher mright(right);
+      if (mright.right().Is(0x1F)) {
+        right = mright.left().node();
+      }
+    }
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.UseFixed(right, rcx));
+  }
+}
+
+
+// Shared routine for multiple 64-bit shift operations.
+// TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
+static void VisitWord64Shift(InstructionSelector* selector, Node* node,
+                             ArchOpcode opcode) {
+  X64OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // TODO(turbofan): assembler only supports some addressing modes for shifts.
+  if (g.CanBeImmediate(right)) {
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.UseImmediate(right));
+  } else {
+    Int64BinopMatcher m(node);
+    if (m.right().IsWord64And()) {
+      Int64BinopMatcher mright(right);
+      if (mright.right().Is(0x3F)) {
+        right = mright.left().node();
+      }
+    }
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.UseFixed(right, rcx));
+  }
+}
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+  VisitWord32Shift(this, node, kX64Shl32);
+}
+
+
+void InstructionSelector::VisitWord64Shl(Node* node) {
+  VisitWord64Shift(this, node, kX64Shl);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+  VisitWord32Shift(this, node, kX64Shr32);
+}
+
+
+void InstructionSelector::VisitWord64Shr(Node* node) {
+  VisitWord64Shift(this, node, kX64Shr);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+  VisitWord32Shift(this, node, kX64Sar32);
+}
+
+
+void InstructionSelector::VisitWord64Sar(Node* node) {
+  VisitWord64Shift(this, node, kX64Sar);
+}
+
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+  VisitWord32Shift(this, node, kX64Ror32);
+}
+
+
+void InstructionSelector::VisitWord64Ror(Node* node) {
+  VisitWord64Shift(this, node, kX64Ror);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+  VisitBinop(this, node, kX64Add32);
+}
+
+
+void InstructionSelector::VisitInt64Add(Node* node) {
+  VisitBinop(this, node, kX64Add);
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+  X64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().Is(0)) {
+    Emit(kX64Neg32, g.DefineSameAsFirst(node), g.Use(m.right().node()));
+  } else {
+    VisitBinop(this, node, kX64Sub32);
+  }
+}
+
+
+void InstructionSelector::VisitInt64Sub(Node* node) {
+  X64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if (m.left().Is(0)) {
+    Emit(kX64Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
+  } else {
+    VisitBinop(this, node, kX64Sub);
+  }
+}
+
+
+static void VisitMul(InstructionSelector* selector, Node* node,
+                     ArchOpcode opcode) {
+  X64OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  if (g.CanBeImmediate(right)) {
+    selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
+                   g.UseImmediate(right));
+  } else if (g.CanBeImmediate(left)) {
+    selector->Emit(opcode, g.DefineAsRegister(node), g.Use(right),
+                   g.UseImmediate(left));
+  } else {
+    // TODO(turbofan): select better left operand.
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.Use(right));
+  }
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+  VisitMul(this, node, kX64Imul32);
+}
+
+
+void InstructionSelector::VisitInt64Mul(Node* node) {
+  VisitMul(this, node, kX64Imul);
+}
+
+
+static void VisitDiv(InstructionSelector* selector, Node* node,
+                     ArchOpcode opcode) {
+  X64OperandGenerator g(selector);
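+  // x64 division reads its dividend from rdx:rax and writes the quotient to
+  // rax, clobbering rdx, so rax is fixed for input/output and rdx is a temp.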
+  InstructionOperand* temps[] = {g.TempRegister(rdx)};
+  selector->Emit(
+      opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
+      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+  VisitDiv(this, node, kX64Idiv32);
+}
+
+
+void InstructionSelector::VisitInt64Div(Node* node) {
+  VisitDiv(this, node, kX64Idiv);
+}
+
+
+void InstructionSelector::VisitInt32UDiv(Node* node) {
+  VisitDiv(this, node, kX64Udiv32);
+}
+
+
+void InstructionSelector::VisitInt64UDiv(Node* node) {
+  VisitDiv(this, node, kX64Udiv);
+}
+
+
+static void VisitMod(InstructionSelector* selector, Node* node,
+                     ArchOpcode opcode) {
+  X64OperandGenerator g(selector);
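+  // x64 division leaves the remainder in rdx and the (unused) quotient in
+  // rax, so the result is fixed to rdx and rax is clobbered as a temp.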
+  InstructionOperand* temps[] = {g.TempRegister(rax), g.TempRegister(rdx)};
+  selector->Emit(
+      opcode, g.DefineAsFixed(node, rdx), g.UseFixed(node->InputAt(0), rax),
+      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+  VisitMod(this, node, kX64Idiv32);
+}
+
+
+void InstructionSelector::VisitInt64Mod(Node* node) {
+  VisitMod(this, node, kX64Idiv);
+}
+
+
+void InstructionSelector::VisitInt32UMod(Node* node) {
+  VisitMod(this, node, kX64Udiv32);
+}
+
+
+void InstructionSelector::VisitInt64UMod(Node* node) {
+  VisitMod(this, node, kX64Udiv);
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+  X64OperandGenerator g(this);
+  // TODO(turbofan): X64 SSE cvtqsi2sd should support memory operands.
+  Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+  X64OperandGenerator g(this);
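+  // A 32-bit move implicitly zero-extends to 64 bits on x64, so a plain movl
+  // suffices for the uint32 -> uint64 change.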
+  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+  X64OperandGenerator g(this);
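+  // Truncation only needs the low 32 bits, which a 32-bit move preserves.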
+  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+  X64OperandGenerator g(this);
+  InstructionOperand* temps[] = {g.TempRegister(rax)};
+  Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
+       temps);
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  VisitBinop(this, node, kX64Add32, cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  VisitBinop(this, node, kX64Sub32, cont);
+}
+
+
+// Shared routine for multiple compare operations.
+static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+                         InstructionOperand* left, InstructionOperand* right,
+                         FlagsContinuation* cont) {
+  X64OperandGenerator g(selector);
+  opcode = cont->Encode(opcode);
+  if (cont->IsBranch()) {
+    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
+                   g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    DCHECK(cont->IsSet());
+    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  }
+}
+
+
+// Shared routine for multiple word compare operations.
+static void VisitWordCompare(InstructionSelector* selector, Node* node,
+                             InstructionCode opcode, FlagsContinuation* cont,
+                             bool commutative) {
+  X64OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // Match immediates on left or right side of comparison.
+  if (g.CanBeImmediate(right)) {
+    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
+  } else if (g.CanBeImmediate(left)) {
+    if (!commutative) cont->Commute();
+    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
+  } else {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
+  }
+}
+
+
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+  switch (node->opcode()) {
+    case IrOpcode::kInt32Sub:
+      return VisitWordCompare(this, node, kX64Cmp32, cont, false);
+    case IrOpcode::kWord32And:
+      return VisitWordCompare(this, node, kX64Test32, cont, true);
+    default:
+      break;
+  }
+
+  X64OperandGenerator g(this);
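+  // Otherwise test the value against the all-ones mask, which sets the flags
+  // as if the value were compared against zero.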
+  VisitCompare(this, kX64Test32, g.Use(node), g.TempImmediate(-1), cont);
+}
+
+
+void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
+  switch (node->opcode()) {
+    case IrOpcode::kInt64Sub:
+      return VisitWordCompare(this, node, kX64Cmp, cont, false);
+    case IrOpcode::kWord64And:
+      return VisitWordCompare(this, node, kX64Test, cont, true);
+    default:
+      break;
+  }
+
+  X64OperandGenerator g(this);
+  VisitCompare(this, kX64Test, g.Use(node), g.TempImmediate(-1), cont);
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  VisitWordCompare(this, node, kX64Cmp32, cont, false);
+}
+
+
+void InstructionSelector::VisitWord64Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  VisitWordCompare(this, node, kX64Cmp, cont, false);
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+                                              FlagsContinuation* cont) {
+  X64OperandGenerator g(this);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  VisitCompare(this, kSSEFloat64Cmp, g.UseRegister(left), g.Use(right), cont);
+}
+
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+                                    BasicBlock* deoptimization) {
+  X64OperandGenerator g(this);
+  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+
+  FrameStateDescriptor* frame_state_descriptor = NULL;
+  if (descriptor->NeedsFrameState()) {
+    frame_state_descriptor = GetFrameStateDescriptor(
+        call->InputAt(static_cast<int>(descriptor->InputCount())));
+  }
+
+  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+  // Compute InstructionOperands for inputs and outputs.
+  InitializeCallBuffer(call, &buffer, true, true);
+
+  // TODO(dcarney): stack alignment for C calls.
+  // TODO(dcarney): shadow space on Windows for C calls.
+  // Push any stack arguments.
+  for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
+       input != buffer.pushed_nodes.rend(); input++) {
+    // TODO(titzer): handle pushing double parameters.
+    Emit(kX64Push, NULL,
+         g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
+  }
+
+  // Select the appropriate opcode based on the call type.
+  InstructionCode opcode;
+  switch (descriptor->kind()) {
+    case CallDescriptor::kCallCodeObject: {
+      opcode = kArchCallCodeObject;
+      break;
+    }
+    case CallDescriptor::kCallJSFunction:
+      opcode = kArchCallJSFunction;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  opcode |= MiscField::encode(descriptor->flags());
+
+  // Emit the call instruction.
+  Instruction* call_instr =
+      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+           buffer.instruction_args.size(), &buffer.instruction_args.front());
+
+  call_instr->MarkAsCall();
+  if (deoptimization != NULL) {
+    DCHECK(continuation != NULL);
+    call_instr->MarkAsControl();
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/x64/linkage-x64.cc b/src/compiler/x64/linkage-x64.cc
new file mode 100644
index 0000000..8175bc6
--- /dev/null
+++ b/src/compiler/x64/linkage-x64.cc
@@ -0,0 +1,80 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#ifdef _WIN64
+const bool kWin64 = true;
+#else
+const bool kWin64 = false;
+#endif
+
+struct X64LinkageHelperTraits {
+  static Register ReturnValueReg() { return rax; }
+  static Register ReturnValue2Reg() { return rdx; }
+  static Register JSCallFunctionReg() { return rdi; }
+  static Register ContextReg() { return rsi; }
+  static Register RuntimeCallFunctionReg() { return rbx; }
+  static Register RuntimeCallArgCountReg() { return rax; }
+  static RegList CCalleeSaveRegisters() {
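+    // rdi and rsi are callee-saved in the Windows x64 ABI but caller-saved
+    // in the System V AMD64 ABI.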
+    if (kWin64) {
+      return rbx.bit() | rdi.bit() | rsi.bit() | r12.bit() | r13.bit() |
+             r14.bit() | r15.bit();
+    } else {
+      return rbx.bit() | r12.bit() | r13.bit() | r14.bit() | r15.bit();
+    }
+  }
+  static Register CRegisterParameter(int i) {
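+    // Integer parameter registers: rcx, rdx, r8, r9 on Windows x64;
+    // rdi, rsi, rdx, rcx, r8, r9 in the System V AMD64 ABI.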
+    if (kWin64) {
+      static Register register_parameters[] = {rcx, rdx, r8, r9};
+      return register_parameters[i];
+    } else {
+      static Register register_parameters[] = {rdi, rsi, rdx, rcx, r8, r9};
+      return register_parameters[i];
+    }
+  }
+  static int CRegisterParametersLength() { return kWin64 ? 4 : 6; }
+};
+
+typedef LinkageHelper<X64LinkageHelperTraits> LH;
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+  return LH::GetJSCallDescriptor(zone, parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+    Runtime::FunctionId function, int parameter_count,
+    Operator::Properties properties, Zone* zone) {
+  return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
+                                      properties);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    CallDescriptor::Flags flags, Zone* zone) {
+  return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
+                                   flags);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+                                                  MachineSignature* sig) {
+  return LH::GetSimplifiedCDescriptor(zone, sig);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8