Revert "Revert "Upgrade to 5.0.71.48"" DO NOT MERGE

This reverts commit f2e3994fa5148cc3d9946666f0b0596290192b0e
and properly updates the x64 makefile so that the x64 build
does not break.

FPIIM-449

Change-Id: Ib83e35bfbae6af627451c926a9650ec57c045605
(cherry picked from commit 109988c7ccb6f3fd1a58574fa3dfb88beaef6632)
diff --git a/src/compiler/access-builder.cc b/src/compiler/access-builder.cc
index ebd2789..722bbf0 100644
--- a/src/compiler/access-builder.cc
+++ b/src/compiler/access-builder.cc
@@ -6,9 +6,9 @@
 
 #include "src/contexts.h"
 #include "src/frames.h"
+#include "src/handles-inl.h"
 #include "src/heap/heap.h"
 #include "src/type-cache.h"
-#include "src/types-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -268,20 +268,16 @@
 
 // static
 FieldAccess AccessBuilder::ForArgumentsLength() {
-  int offset =
-      JSObject::kHeaderSize + Heap::kArgumentsLengthIndex * kPointerSize;
-  FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
-                        MachineType::AnyTagged()};
+  FieldAccess access = {kTaggedBase, JSArgumentsObject::kLengthOffset,
+                        Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForArgumentsCallee() {
-  int offset =
-      JSObject::kHeaderSize + Heap::kArgumentsCalleeIndex * kPointerSize;
-  FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
-                        MachineType::AnyTagged()};
+  FieldAccess access = {kTaggedBase, JSSloppyArgumentsObject::kCalleeOffset,
+                        Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
   return access;
 }
 
diff --git a/src/compiler/access-info.cc b/src/compiler/access-info.cc
index 612170e..4a2a857 100644
--- a/src/compiler/access-info.cc
+++ b/src/compiler/access-info.cc
@@ -8,9 +8,9 @@
 #include "src/compilation-dependencies.h"
 #include "src/compiler/access-info.h"
 #include "src/field-index-inl.h"
+#include "src/field-type.h"
 #include "src/objects-inl.h"  // TODO(mstarzinger): Temporary cycle breaker!
 #include "src/type-cache.h"
-#include "src/types-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -232,6 +232,9 @@
   // Compute the receiver type.
   Handle<Map> receiver_map = map;
 
+  // Property lookups require the name to be internalized.
+  name = isolate()->factory()->InternalizeName(name);
+
   // We support fast inline cases for certain JSObject getters.
   if (access_mode == AccessMode::kLoad &&
       LookupSpecialFieldAccessor(map, name, access_info)) {
@@ -242,7 +245,7 @@
   do {
     // Lookup the named property on the {map}.
     Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
-    int const number = descriptors->SearchWithCache(*name, *map);
+    int const number = descriptors->SearchWithCache(isolate(), *name, *map);
     if (number != DescriptorArray::kNotFound) {
       PropertyDetails const details = descriptors->GetDetails(number);
       if (access_mode == AccessMode::kStore) {
@@ -277,8 +280,7 @@
           // Extract the field type from the property details (make sure its
           // representation is TaggedPointer to reflect the heap object case).
           field_type = Type::Intersect(
-              Type::Convert<HeapType>(
-                  handle(descriptors->GetFieldType(number), isolate()), zone()),
+              descriptors->GetFieldType(number)->Convert(zone()),
               Type::TaggedPointer(), zone());
           if (field_type->Is(Type::None())) {
             // Store is not safe if the field type was cleared.
@@ -454,10 +456,7 @@
       // Extract the field type from the property details (make sure its
       // representation is TaggedPointer to reflect the heap object case).
       field_type = Type::Intersect(
-          Type::Convert<HeapType>(
-              handle(
-                  transition_map->instance_descriptors()->GetFieldType(number),
-                  isolate()),
+          transition_map->instance_descriptors()->GetFieldType(number)->Convert(
               zone()),
           Type::TaggedPointer(), zone());
       if (field_type->Is(Type::None())) {
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
index 9b074b0..bdf4c47 100644
--- a/src/compiler/arm/code-generator-arm.cc
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -206,6 +206,19 @@
       : OutOfLineCode(gen),
         object_(object),
         index_(index),
+        index_immediate_(0),
+        value_(value),
+        scratch0_(scratch0),
+        scratch1_(scratch1),
+        mode_(mode) {}
+
+  OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t index,
+                       Register value, Register scratch0, Register scratch1,
+                       RecordWriteMode mode)
+      : OutOfLineCode(gen),
+        object_(object),
+        index_(no_reg),
+        index_immediate_(index),
         value_(value),
         scratch0_(scratch0),
         scratch1_(scratch1),
@@ -215,24 +228,36 @@
     if (mode_ > RecordWriteMode::kValueIsPointer) {
       __ JumpIfSmi(value_, exit());
     }
-    if (mode_ > RecordWriteMode::kValueIsMap) {
-      __ CheckPageFlag(value_, scratch0_,
-                       MemoryChunk::kPointersToHereAreInterestingMask, eq,
-                       exit());
-    }
+    __ CheckPageFlag(value_, scratch0_,
+                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
+                     exit());
+    RememberedSetAction const remembered_set_action =
+        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+                                             : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
-    // TODO(turbofan): Once we get frame elision working, we need to save
-    // and restore lr properly here if the frame was elided.
+    if (!frame()->needs_frame()) {
+      // We need to save and restore lr if the frame was elided.
+      __ Push(lr);
+    }
     RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
-                         EMIT_REMEMBERED_SET, save_fp_mode);
-    __ add(scratch1_, object_, index_);
+                         remembered_set_action, save_fp_mode);
+    if (index_.is(no_reg)) {
+      __ add(scratch1_, object_, Operand(index_immediate_));
+    } else {
+      DCHECK_EQ(0, index_immediate_);
+      __ add(scratch1_, object_, Operand(index_));
+    }
     __ CallStub(&stub);
+    if (!frame()->needs_frame()) {
+      __ Pop(lr);
+    }
   }
 
  private:
   Register const object_;
   Register const index_;
+  int32_t const index_immediate_;  // Valid if index_.is(no_reg).
   Register const value_;
   Register const scratch0_;
   Register const scratch1_;
@@ -449,11 +474,6 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
-    case kArchLazyBailout: {
-      EnsureSpaceForLazyDeopt();
-      RecordCallPosition(instr);
-      break;
-    }
     case kArchPrepareCallCFunction: {
       int const num_parameters = MiscField::decode(instr->opcode());
       __ PrepareCallCFunction(num_parameters, kScratchReg);
@@ -514,6 +534,13 @@
       __ mov(i.OutputRegister(), fp);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
+    case kArchParentFramePointer:
+      if (frame_access_state()->frame()->needs_frame()) {
+        __ ldr(i.OutputRegister(), MemOperand(fp, 0));
+      } else {
+        __ mov(i.OutputRegister(), fp);
+      }
+      break;
     case kArchTruncateDoubleToI:
       __ TruncateDoubleToI(i.OutputRegister(), i.InputFloat64Register(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -522,19 +549,43 @@
       RecordWriteMode mode =
           static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
       Register object = i.InputRegister(0);
-      Register index = i.InputRegister(1);
       Register value = i.InputRegister(2);
       Register scratch0 = i.TempRegister(0);
       Register scratch1 = i.TempRegister(1);
-      auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
-                                                   scratch0, scratch1, mode);
-      __ str(value, MemOperand(object, index));
+      OutOfLineRecordWrite* ool;
+
+      AddressingMode addressing_mode =
+          AddressingModeField::decode(instr->opcode());
+      if (addressing_mode == kMode_Offset_RI) {
+        int32_t index = i.InputInt32(1);
+        ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
+                                                scratch0, scratch1, mode);
+        __ str(value, MemOperand(object, index));
+      } else {
+        DCHECK_EQ(kMode_Offset_RR, addressing_mode);
+        Register index(i.InputRegister(1));
+        ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
+                                                scratch0, scratch1, mode);
+        __ str(value, MemOperand(object, index));
+      }
       __ CheckPageFlag(object, scratch0,
                        MemoryChunk::kPointersFromHereAreInterestingMask, ne,
                        ool->entry());
       __ bind(ool->exit());
       break;
     }
+    case kArchStackSlot: {
+      FrameOffset offset =
+          frame_access_state()->GetFrameOffset(i.InputInt32(0));
+      Register base;
+      if (offset.from_stack_pointer()) {
+        base = sp;
+      } else {
+        base = fp;
+      }
+      __ add(i.OutputRegister(0), base, Operand(offset.offset()));
+      break;
+    }
     case kArmAdd:
       __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
              i.OutputSBit());
@@ -622,6 +673,13 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
+    case kArmSbfx: {
+      CpuFeatureScope scope(masm(), ARMv7);
+      __ sbfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+              i.InputInt8(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
     case kArmSxtb:
       __ sxtb(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -658,6 +716,12 @@
                i.InputInt32(2));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
+    case kArmRbit: {
+      CpuFeatureScope scope(masm(), ARMv7);
+      __ rbit(i.OutputRegister(), i.InputRegister(0));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
     case kArmClz:
       __ clz(i.OutputRegister(), i.InputRegister(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -831,6 +895,20 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
+    case kArmVcvtF32S32: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vmov(scratch, i.InputRegister(0));
+      __ vcvt_f32_s32(i.OutputFloat32Register(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVcvtF32U32: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vmov(scratch, i.InputRegister(0));
+      __ vcvt_f32_u32(i.OutputFloat32Register(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
     case kArmVcvtF64S32: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
       __ vmov(scratch, i.InputRegister(0));
@@ -845,6 +923,20 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
+    case kArmVcvtS32F32: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vcvt_s32_f32(scratch, i.InputFloat32Register(0));
+      __ vmov(i.OutputRegister(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVcvtU32F32: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vcvt_u32_f32(scratch, i.InputFloat32Register(0));
+      __ vmov(i.OutputRegister(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
     case kArmVcvtS32F64: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
       __ vcvt_s32_f64(scratch, i.InputFloat64Register(0));
@@ -1098,8 +1190,6 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    // TODO(titzer): cannot address target function == local #-1
-    __ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
     stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   }
 
diff --git a/src/compiler/arm/instruction-codes-arm.h b/src/compiler/arm/instruction-codes-arm.h
index 401100b..50fa555 100644
--- a/src/compiler/arm/instruction-codes-arm.h
+++ b/src/compiler/arm/instruction-codes-arm.h
@@ -36,6 +36,7 @@
   V(ArmMvn)                        \
   V(ArmBfc)                        \
   V(ArmUbfx)                       \
+  V(ArmSbfx)                       \
   V(ArmSxtb)                       \
   V(ArmSxth)                       \
   V(ArmSxtab)                      \
@@ -43,6 +44,7 @@
   V(ArmUxtb)                       \
   V(ArmUxth)                       \
   V(ArmUxtab)                      \
+  V(ArmRbit)                       \
   V(ArmUxtah)                      \
   V(ArmVcmpF32)                    \
   V(ArmVaddF32)                    \
@@ -76,8 +78,12 @@
   V(ArmVrintnF64)                  \
   V(ArmVcvtF32F64)                 \
   V(ArmVcvtF64F32)                 \
+  V(ArmVcvtF32S32)                 \
+  V(ArmVcvtF32U32)                 \
   V(ArmVcvtF64S32)                 \
   V(ArmVcvtF64U32)                 \
+  V(ArmVcvtS32F32)                 \
+  V(ArmVcvtU32F32)                 \
   V(ArmVcvtS32F64)                 \
   V(ArmVcvtU32F64)                 \
   V(ArmVmovLowU32F64)              \
@@ -100,7 +106,6 @@
   V(ArmPush)                       \
   V(ArmPoke)
 
-
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
 // are encoded into the InstructionCode of the instruction and tell the
diff --git a/src/compiler/arm/instruction-scheduler-arm.cc b/src/compiler/arm/instruction-scheduler-arm.cc
index f36802c..d950e8c 100644
--- a/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/src/compiler/arm/instruction-scheduler-arm.cc
@@ -38,6 +38,7 @@
     case kArmMvn:
     case kArmBfc:
     case kArmUbfx:
+    case kArmSbfx:
     case kArmSxtb:
     case kArmSxth:
     case kArmSxtab:
@@ -46,6 +47,7 @@
     case kArmUxth:
     case kArmUxtab:
     case kArmUxtah:
+    case kArmRbit:
     case kArmVcmpF32:
     case kArmVaddF32:
     case kArmVsubF32:
@@ -78,8 +80,12 @@
     case kArmVrintnF64:
     case kArmVcvtF32F64:
     case kArmVcvtF64F32:
+    case kArmVcvtF32S32:
+    case kArmVcvtF32U32:
     case kArmVcvtF64S32:
     case kArmVcvtF64U32:
+    case kArmVcvtS32F32:
+    case kArmVcvtU32F32:
     case kArmVcvtS32F64:
     case kArmVcvtU32F64:
     case kArmVmovLowU32F64:
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index f3deae7..14b30b1 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -327,8 +327,9 @@
     case MachineRepresentation::kWord32:
       opcode = kArmLdr;
       break;
-    case MachineRepresentation::kNone:  // Fall through.
-    case MachineRepresentation::kWord64:
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
   }
@@ -355,10 +356,19 @@
 
   if (write_barrier_kind != kNoWriteBarrier) {
     DCHECK_EQ(MachineRepresentation::kTagged, rep);
+    AddressingMode addressing_mode;
     InstructionOperand inputs[3];
     size_t input_count = 0;
     inputs[input_count++] = g.UseUniqueRegister(base);
-    inputs[input_count++] = g.UseUniqueRegister(index);
+    // OutOfLineRecordWrite uses the index in an 'add' instruction as well as
+    // for the store itself, so we must check compatibility with both.
+    if (g.CanBeImmediate(index, kArmAdd) && g.CanBeImmediate(index, kArmStr)) {
+      inputs[input_count++] = g.UseImmediate(index);
+      addressing_mode = kMode_Offset_RI;
+    } else {
+      inputs[input_count++] = g.UseUniqueRegister(index);
+      addressing_mode = kMode_Offset_RR;
+    }
     inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
                                 ? g.UseRegister(value)
                                 : g.UseUniqueRegister(value);
@@ -380,6 +390,7 @@
     InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
     size_t const temp_count = arraysize(temps);
     InstructionCode code = kArchStoreWithWriteBarrier;
+    code |= AddressingModeField::encode(addressing_mode);
     code |= MiscField::encode(static_cast<int>(record_write_mode));
     Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
   } else {
@@ -402,8 +413,9 @@
       case MachineRepresentation::kWord32:
         opcode = kArmStr;
         break;
-      case MachineRepresentation::kNone:  // Fall through.
-      case MachineRepresentation::kWord64:
+      case MachineRepresentation::kWord64:   // Fall through.
+      case MachineRepresentation::kSimd128:  // Fall through.
+      case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
     }
@@ -442,9 +454,10 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedLoadFloat64;
       break;
-    case MachineRepresentation::kBit:     // Fall through.
-    case MachineRepresentation::kTagged:  // Fall through.
-    case MachineRepresentation::kWord64:  // Fall through.
+    case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kTagged:   // Fall through.
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -483,9 +496,10 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedStoreFloat64;
       break;
-    case MachineRepresentation::kBit:     // Fall through.
-    case MachineRepresentation::kTagged:  // Fall through.
-    case MachineRepresentation::kWord64:  // Fall through.
+    case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kTagged:   // Fall through.
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -551,43 +565,67 @@
   if (m.right().HasValue()) {
     uint32_t const value = m.right().Value();
     uint32_t width = base::bits::CountPopulation32(value);
-    uint32_t msb = base::bits::CountLeadingZeros32(value);
-    // Try to interpret this AND as UBFX.
-    if (IsSupported(ARMv7) && width != 0 && msb + width == 32) {
-      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(value));
-      if (m.left().IsWord32Shr()) {
-        Int32BinopMatcher mleft(m.left().node());
-        if (mleft.right().IsInRange(0, 31)) {
-          // UBFX cannot extract bits past the register size, however since
-          // shifting the original value would have introduced some zeros we can
-          // still use UBFX with a smaller mask and the remaining bits will be
-          // zeros.
-          uint32_t const lsb = mleft.right().Value();
-          return EmitUbfx(this, node, mleft.left().node(), lsb,
-                          std::min(width, 32 - lsb));
+    uint32_t leading_zeros = base::bits::CountLeadingZeros32(value);
+
+    // Try to merge SHR operations on the left hand input into this AND.
+    if (m.left().IsWord32Shr()) {
+      Int32BinopMatcher mshr(m.left().node());
+      if (mshr.right().HasValue()) {
+        uint32_t const shift = mshr.right().Value();
+
+        if (((shift == 8) || (shift == 16) || (shift == 24)) &&
+            ((value == 0xff) || (value == 0xffff))) {
+          // Merge SHR into AND by emitting a UXTB or UXTH instruction with a
+          // bytewise rotation.
+          Emit((value == 0xff) ? kArmUxtb : kArmUxth,
+               g.DefineAsRegister(m.node()), g.UseRegister(mshr.left().node()),
+               g.TempImmediate(mshr.right().Value()));
+          return;
+        } else if (IsSupported(ARMv7) && (width != 0) &&
+                   ((leading_zeros + width) == 32)) {
+          // Merge Shr into And by emitting a UBFX instruction.
+          DCHECK_EQ(0u, base::bits::CountTrailingZeros32(value));
+          if ((1 <= shift) && (shift <= 31)) {
+            // UBFX cannot extract bits past the register size, however since
+            // shifting the original value would have introduced some zeros we
+            // can still use UBFX with a smaller mask and the remaining bits
+            // will be zeros.
+            EmitUbfx(this, node, mshr.left().node(), shift,
+                     std::min(width, 32 - shift));
+            return;
+          }
         }
       }
-      return EmitUbfx(this, node, m.left().node(), 0, width);
+    } else if (value == 0xffff) {
+      // Emit UXTH for this AND. We don't bother testing for UXTB, as it's no
+      // better than AND 0xff for this operation.
+      Emit(kArmUxth, g.DefineAsRegister(m.node()),
+           g.UseRegister(m.left().node()), g.TempImmediate(0));
+      return;
     }
-    // Try to interpret this AND as BIC.
     if (g.CanBeImmediate(~value)) {
+      // Emit BIC for this AND by inverting the immediate value first.
       Emit(kArmBic | AddressingModeField::encode(kMode_Operand2_I),
            g.DefineAsRegister(node), g.UseRegister(m.left().node()),
            g.TempImmediate(~value));
       return;
     }
-    // Try to interpret this AND as UXTH.
-    if (value == 0xffff) {
-      Emit(kArmUxth, g.DefineAsRegister(m.node()),
-           g.UseRegister(m.left().node()), g.TempImmediate(0));
-      return;
-    }
-    // Try to interpret this AND as BFC.
-    if (IsSupported(ARMv7)) {
+    if (!g.CanBeImmediate(value) && IsSupported(ARMv7)) {
+      // If value has 9 to 23 contiguous set bits, and has the lsb set, we can
+      // replace this AND with UBFX. Other contiguous bit patterns have already
+      // been handled by BIC or will be handled by AND.
+      if ((width != 0) && ((leading_zeros + width) == 32) &&
+          (9 <= leading_zeros) && (leading_zeros <= 23)) {
+        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(value));
+        EmitUbfx(this, node, m.left().node(), 0, width);
+        return;
+      }
+
       width = 32 - width;
-      msb = base::bits::CountLeadingZeros32(~value);
+      leading_zeros = base::bits::CountLeadingZeros32(~value);
       uint32_t lsb = base::bits::CountTrailingZeros32(~value);
-      if (msb + width + lsb == 32) {
+      if ((leading_zeros + width + lsb) == 32) {
+        // This AND can be replaced with BFC.
         Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
              g.TempImmediate(lsb), g.TempImmediate(width));
         return;
@@ -699,14 +737,23 @@
   Int32BinopMatcher m(node);
   if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
     Int32BinopMatcher mleft(m.left().node());
-    if (mleft.right().Is(16) && m.right().Is(16)) {
-      Emit(kArmSxth, g.DefineAsRegister(node),
-           g.UseRegister(mleft.left().node()), g.TempImmediate(0));
-      return;
-    } else if (mleft.right().Is(24) && m.right().Is(24)) {
-      Emit(kArmSxtb, g.DefineAsRegister(node),
-           g.UseRegister(mleft.left().node()), g.TempImmediate(0));
-      return;
+    if (m.right().HasValue() && mleft.right().HasValue()) {
+      uint32_t sar = m.right().Value();
+      uint32_t shl = mleft.right().Value();
+      if ((sar == shl) && (sar == 16)) {
+        Emit(kArmSxth, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+        return;
+      } else if ((sar == shl) && (sar == 24)) {
+        Emit(kArmSxtb, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+        return;
+      } else if (IsSupported(ARMv7) && (sar >= shl)) {
+        Emit(kArmSbfx, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()), g.TempImmediate(sar - shl),
+             g.TempImmediate(32 - sar));
+        return;
+      }
     }
   }
   VisitShift(this, node, TryMatchASR);
@@ -726,6 +773,12 @@
 void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
 
 
+void InstructionSelector::VisitWord32ReverseBits(Node* node) {
+  DCHECK(IsSupported(ARMv7));
+  VisitRR(this, kArmRbit, node);
+}
+
+
 void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
 
 
@@ -921,6 +974,16 @@
 }
 
 
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+  VisitRR(this, kArmVcvtF32S32, node);
+}
+
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+  VisitRR(this, kArmVcvtF32U32, node);
+}
+
+
 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
   VisitRR(this, kArmVcvtF64S32, node);
 }
@@ -931,6 +994,16 @@
 }
 
 
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+  VisitRR(this, kArmVcvtS32F32, node);
+}
+
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+  VisitRR(this, kArmVcvtU32F32, node);
+}
+
+
 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
   VisitRR(this, kArmVcvtS32F64, node);
 }
@@ -1591,6 +1664,9 @@
   MachineOperatorBuilder::Flags flags =
       MachineOperatorBuilder::kInt32DivIsSafe |
       MachineOperatorBuilder::kUint32DivIsSafe;
+  if (CpuFeatures::IsSupported(ARMv7)) {
+    flags |= MachineOperatorBuilder::kWord32ReverseBits;
+  }
   if (CpuFeatures::IsSupported(ARMv8)) {
     flags |= MachineOperatorBuilder::kFloat32RoundDown |
              MachineOperatorBuilder::kFloat64RoundDown |
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index d356195..e45c677 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -270,7 +270,7 @@
 
 class OutOfLineRecordWrite final : public OutOfLineCode {
  public:
-  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
+  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand index,
                        Register value, Register scratch0, Register scratch1,
                        RecordWriteMode mode)
       : OutOfLineCode(gen),
@@ -285,24 +285,30 @@
     if (mode_ > RecordWriteMode::kValueIsPointer) {
       __ JumpIfSmi(value_, exit());
     }
-    if (mode_ > RecordWriteMode::kValueIsMap) {
-      __ CheckPageFlagClear(value_, scratch0_,
-                            MemoryChunk::kPointersToHereAreInterestingMask,
-                            exit());
-    }
+    __ CheckPageFlagClear(value_, scratch0_,
+                          MemoryChunk::kPointersToHereAreInterestingMask,
+                          exit());
+    RememberedSetAction const remembered_set_action =
+        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+                                             : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
-    // TODO(turbofan): Once we get frame elision working, we need to save
-    // and restore lr properly here if the frame was elided.
+    if (!frame()->needs_frame()) {
+      // We need to save and restore lr if the frame was elided.
+      __ Push(lr);
+    }
     RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
-                         EMIT_REMEMBERED_SET, save_fp_mode);
+                         remembered_set_action, save_fp_mode);
     __ Add(scratch1_, object_, index_);
     __ CallStub(&stub);
+    if (!frame()->needs_frame()) {
+      __ Pop(lr);
+    }
   }
 
  private:
   Register const object_;
-  Register const index_;
+  Operand const index_;
   Register const value_;
   Register const scratch0_;
   Register const scratch1_;
@@ -488,7 +494,8 @@
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   Arm64OperandConverter i(this, instr);
   InstructionCode opcode = instr->opcode();
-  switch (ArchOpcodeField::decode(opcode)) {
+  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+  switch (arch_opcode) {
     case kArchCallCodeObject: {
       EnsureSpaceForLazyDeopt();
       if (instr->InputAt(0)->IsImmediate()) {
@@ -499,6 +506,14 @@
         __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
         __ Call(target);
       }
+      // TODO(titzer): this is ugly. JSSP should be a caller-save register
+      // in this case, but it is not possible to express in the register
+      // allocator.
+      CallDescriptor::Flags flags =
+          static_cast<CallDescriptor::Flags>(MiscField::decode(opcode));
+      if (flags & CallDescriptor::kRestoreJSSP) {
+        __ mov(jssp, csp);
+      }
       frame_access_state()->ClearSPDelta();
       RecordCallPosition(instr);
       break;
@@ -530,6 +545,14 @@
       }
       __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Call(x10);
+      // TODO(titzer): this is ugly. JSSP should be a caller-save register
+      // in this case, but it is not possible to express in the register
+      // allocator.
+      CallDescriptor::Flags flags =
+          static_cast<CallDescriptor::Flags>(MiscField::decode(opcode));
+      if (flags & CallDescriptor::kRestoreJSSP) {
+        __ mov(jssp, csp);
+      }
       frame_access_state()->ClearSPDelta();
       RecordCallPosition(instr);
       break;
@@ -551,11 +574,6 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
-    case kArchLazyBailout: {
-      EnsureSpaceForLazyDeopt();
-      RecordCallPosition(instr);
-      break;
-    }
     case kArchPrepareCallCFunction:
       // We don't need kArchPrepareCallCFunction on arm64 as the instruction
       // selector already perform a Claim to reserve space on the stack and
@@ -609,14 +627,29 @@
     case kArchFramePointer:
       __ mov(i.OutputRegister(), fp);
       break;
+    case kArchParentFramePointer:
+      if (frame_access_state()->frame()->needs_frame()) {
+        __ ldr(i.OutputRegister(), MemOperand(fp, 0));
+      } else {
+        __ mov(i.OutputRegister(), fp);
+      }
+      break;
     case kArchTruncateDoubleToI:
       __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
       break;
     case kArchStoreWithWriteBarrier: {
       RecordWriteMode mode =
           static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+      AddressingMode addressing_mode =
+          AddressingModeField::decode(instr->opcode());
       Register object = i.InputRegister(0);
-      Register index = i.InputRegister(1);
+      Operand index(0);
+      if (addressing_mode == kMode_MRI) {
+        index = Operand(i.InputInt64(1));
+      } else {
+        DCHECK_EQ(addressing_mode, kMode_MRR);
+        index = Operand(i.InputRegister(1));
+      }
       Register value = i.InputRegister(2);
       Register scratch0 = i.TempRegister(0);
       Register scratch1 = i.TempRegister(1);
@@ -629,6 +662,18 @@
       __ Bind(ool->exit());
       break;
     }
+    case kArchStackSlot: {
+      FrameOffset offset =
+          frame_access_state()->GetFrameOffset(i.InputInt32(0));
+      Register base;
+      if (offset.from_stack_pointer()) {
+        base = __ StackPointer();
+      } else {
+        base = fp;
+      }
+      __ Add(i.OutputRegister(0), base, Operand(offset.offset()));
+      break;
+    }
     case kArm64Float32RoundDown:
       __ Frintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
@@ -885,18 +930,41 @@
     case kArm64CompareAndBranch32:
       // Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
       break;
-    case kArm64ClaimForCallArguments: {
-      __ Claim(i.InputInt32(0));
-      frame_access_state()->IncreaseSPDelta(i.InputInt32(0));
+    case kArm64ClaimCSP: {
+      int count = i.InputInt32(0);
+      Register prev = __ StackPointer();
+      __ SetStackPointer(csp);
+      __ Claim(count);
+      __ SetStackPointer(prev);
+      frame_access_state()->IncreaseSPDelta(count);
       break;
     }
-    case kArm64Poke: {
+    case kArm64ClaimJSSP: {
+      int count = i.InputInt32(0);
+      if (csp.Is(__ StackPointer())) {
+        // No JSP is set up. Compute it from the CSP.
+        int even = RoundUp(count, 2);
+        __ Sub(jssp, csp, count * kPointerSize);
+        __ Sub(csp, csp, even * kPointerSize);  // Must always be aligned.
+        frame_access_state()->IncreaseSPDelta(even);
+      } else {
+        // JSSP is the current stack pointer, just use regular Claim().
+        __ Claim(count);
+        frame_access_state()->IncreaseSPDelta(count);
+      }
+      break;
+    }
+    case kArm64PokeCSP:  // fall through
+    case kArm64PokeJSSP: {
+      Register prev = __ StackPointer();
+      __ SetStackPointer(arch_opcode == kArm64PokeCSP ? csp : jssp);
       Operand operand(i.InputInt32(1) * kPointerSize);
       if (instr->InputAt(0)->IsDoubleRegister()) {
         __ Poke(i.InputFloat64Register(0), operand);
       } else {
         __ Poke(i.InputRegister(0), operand);
       }
+      __ SetStackPointer(prev);
       break;
     }
     case kArm64PokePair: {
@@ -916,6 +984,12 @@
     case kArm64Clz32:
       __ Clz(i.OutputRegister32(), i.InputRegister32(0));
       break;
+    case kArm64Rbit:
+      __ Rbit(i.OutputRegister64(), i.InputRegister64(0));
+      break;
+    case kArm64Rbit32:
+      __ Rbit(i.OutputRegister32(), i.InputRegister32(0));
+      break;
     case kArm64Cmp:
       __ Cmp(i.InputOrZeroRegister64(0), i.InputOperand(1));
       break;
@@ -1042,9 +1116,15 @@
     case kArm64Float64ToFloat32:
       __ Fcvt(i.OutputDoubleRegister().S(), i.InputDoubleRegister(0));
       break;
+    case kArm64Float32ToInt32:
+      __ Fcvtzs(i.OutputRegister32(), i.InputFloat32Register(0));
+      break;
     case kArm64Float64ToInt32:
       __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
       break;
+    case kArm64Float32ToUint32:
+      __ Fcvtzu(i.OutputRegister32(), i.InputFloat32Register(0));
+      break;
     case kArm64Float64ToUint32:
       __ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
       break;
@@ -1093,6 +1173,9 @@
         __ Cset(i.OutputRegister(1), ne);
       }
       break;
+    case kArm64Int32ToFloat32:
+      __ Scvtf(i.OutputFloat32Register(), i.InputRegister32(0));
+      break;
     case kArm64Int32ToFloat64:
       __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
       break;
@@ -1102,6 +1185,9 @@
     case kArm64Int64ToFloat64:
       __ Scvtf(i.OutputDoubleRegister(), i.InputRegister64(0));
       break;
+    case kArm64Uint32ToFloat32:
+      __ Ucvtf(i.OutputFloat32Register(), i.InputRegister32(0));
+      break;
     case kArm64Uint32ToFloat64:
       __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
       break;
@@ -1376,8 +1462,6 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    // TODO(titzer): cannot address target function == local #-1
-    __ ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
     stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   }
 
@@ -1445,13 +1529,14 @@
       __ Bind(&return_label_);
       if (descriptor->UseNativeStack()) {
         __ Mov(csp, fp);
+        pop_count += (pop_count & 1);  // align
       } else {
         __ Mov(jssp, fp);
       }
       __ Pop(fp, lr);
     }
   } else if (descriptor->UseNativeStack()) {
-    pop_count += (pop_count & 1);
+    pop_count += (pop_count & 1);  // align
   }
   __ Drop(pop_count);
   __ Ret();
diff --git a/src/compiler/arm64/instruction-codes-arm64.h b/src/compiler/arm64/instruction-codes-arm64.h
index ef33348..f03c2fb 100644
--- a/src/compiler/arm64/instruction-codes-arm64.h
+++ b/src/compiler/arm64/instruction-codes-arm64.h
@@ -73,11 +73,15 @@
   V(Arm64Ubfx32)                   \
   V(Arm64Ubfiz32)                  \
   V(Arm64Bfi)                      \
+  V(Arm64Rbit)                     \
+  V(Arm64Rbit32)                   \
   V(Arm64TestAndBranch32)          \
   V(Arm64TestAndBranch)            \
   V(Arm64CompareAndBranch32)       \
-  V(Arm64ClaimForCallArguments)    \
-  V(Arm64Poke)                     \
+  V(Arm64ClaimCSP)                 \
+  V(Arm64ClaimJSSP)                \
+  V(Arm64PokeCSP)                  \
+  V(Arm64PokeJSSP)                 \
   V(Arm64PokePair)                 \
   V(Arm64Float32Cmp)               \
   V(Arm64Float32Add)               \
@@ -110,15 +114,19 @@
   V(Arm64Float64RoundTiesEven)     \
   V(Arm64Float32ToFloat64)         \
   V(Arm64Float64ToFloat32)         \
+  V(Arm64Float32ToInt32)           \
   V(Arm64Float64ToInt32)           \
+  V(Arm64Float32ToUint32)          \
   V(Arm64Float64ToUint32)          \
   V(Arm64Float32ToInt64)           \
   V(Arm64Float64ToInt64)           \
   V(Arm64Float32ToUint64)          \
   V(Arm64Float64ToUint64)          \
+  V(Arm64Int32ToFloat32)           \
   V(Arm64Int32ToFloat64)           \
   V(Arm64Int64ToFloat32)           \
   V(Arm64Int64ToFloat64)           \
+  V(Arm64Uint32ToFloat32)          \
   V(Arm64Uint32ToFloat64)          \
   V(Arm64Uint64ToFloat32)          \
   V(Arm64Uint64ToFloat64)          \
@@ -143,7 +151,6 @@
   V(Arm64Ldr)                      \
   V(Arm64Str)
 
-
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
 // are encoded into the InstructionCode of the instruction and tell the
@@ -169,6 +176,8 @@
   V(Operand2_R_SXTB)  /* %r0 SXTB (signed extend byte) */       \
   V(Operand2_R_SXTH)  /* %r0 SXTH (signed extend halfword) */
 
+enum ResetJSSPAfterCall { kNoResetJSSP, kResetJSSP };
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/arm64/instruction-scheduler-arm64.cc b/src/compiler/arm64/instruction-scheduler-arm64.cc
index eb358dd..ca37299 100644
--- a/src/compiler/arm64/instruction-scheduler-arm64.cc
+++ b/src/compiler/arm64/instruction-scheduler-arm64.cc
@@ -75,6 +75,8 @@
     case kArm64Ubfx32:
     case kArm64Ubfiz32:
     case kArm64Bfi:
+    case kArm64Rbit:
+    case kArm64Rbit32:
     case kArm64Float32Cmp:
     case kArm64Float32Add:
     case kArm64Float32Sub:
@@ -106,15 +108,19 @@
     case kArm64Float32RoundUp:
     case kArm64Float32ToFloat64:
     case kArm64Float64ToFloat32:
+    case kArm64Float32ToInt32:
     case kArm64Float64ToInt32:
+    case kArm64Float32ToUint32:
     case kArm64Float64ToUint32:
     case kArm64Float32ToInt64:
     case kArm64Float64ToInt64:
     case kArm64Float32ToUint64:
     case kArm64Float64ToUint64:
+    case kArm64Int32ToFloat32:
     case kArm64Int32ToFloat64:
     case kArm64Int64ToFloat32:
     case kArm64Int64ToFloat64:
+    case kArm64Uint32ToFloat32:
     case kArm64Uint32ToFloat64:
     case kArm64Uint64ToFloat32:
     case kArm64Uint64ToFloat64:
@@ -141,8 +147,10 @@
     case kArm64Ldr:
       return kIsLoadOperation;
 
-    case kArm64ClaimForCallArguments:
-    case kArm64Poke:
+    case kArm64ClaimCSP:
+    case kArm64ClaimJSSP:
+    case kArm64PokeCSP:
+    case kArm64PokeJSSP:
     case kArm64PokePair:
     case kArm64StrS:
     case kArm64StrD:
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index 1ec5ab4..26a2896 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -371,6 +371,7 @@
       opcode = kArm64Ldr;
       immediate_mode = kLoadStoreImm64;
       break;
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -398,10 +399,20 @@
   // TODO(arm64): I guess this could be done in a better way.
   if (write_barrier_kind != kNoWriteBarrier) {
     DCHECK_EQ(MachineRepresentation::kTagged, rep);
+    AddressingMode addressing_mode;
     InstructionOperand inputs[3];
     size_t input_count = 0;
     inputs[input_count++] = g.UseUniqueRegister(base);
-    inputs[input_count++] = g.UseUniqueRegister(index);
+    // OutOfLineRecordWrite uses the index in an arithmetic instruction, so we
+    // must check kArithmeticImm as well as kLoadStoreImm64.
+    if (g.CanBeImmediate(index, kArithmeticImm) &&
+        g.CanBeImmediate(index, kLoadStoreImm64)) {
+      inputs[input_count++] = g.UseImmediate(index);
+      addressing_mode = kMode_MRI;
+    } else {
+      inputs[input_count++] = g.UseUniqueRegister(index);
+      addressing_mode = kMode_MRR;
+    }
     inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
                                 ? g.UseRegister(value)
                                 : g.UseUniqueRegister(value);
@@ -423,6 +434,7 @@
     InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
     size_t const temp_count = arraysize(temps);
     InstructionCode code = kArchStoreWithWriteBarrier;
+    code |= AddressingModeField::encode(addressing_mode);
     code |= MiscField::encode(static_cast<int>(record_write_mode));
     Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
   } else {
@@ -455,6 +467,7 @@
         opcode = kArm64Str;
         immediate_mode = kLoadStoreImm64;
         break;
+      case MachineRepresentation::kSimd128:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -496,8 +509,9 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedLoadFloat64;
       break;
-    case MachineRepresentation::kBit:     // Fall through.
-    case MachineRepresentation::kTagged:  // Fall through.
+    case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kTagged:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -534,8 +548,9 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedStoreFloat64;
       break;
-    case MachineRepresentation::kBit:     // Fall through.
-    case MachineRepresentation::kTagged:  // Fall through.
+    case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kTagged:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -963,6 +978,16 @@
 void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
 
 
+void InstructionSelector::VisitWord32ReverseBits(Node* node) {
+  VisitRR(this, kArm64Rbit32, node);
+}
+
+
+void InstructionSelector::VisitWord64ReverseBits(Node* node) {
+  VisitRR(this, kArm64Rbit, node);
+}
+
+
 void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
 
 
@@ -1219,6 +1244,16 @@
 }
 
 
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+  VisitRR(this, kArm64Int32ToFloat32, node);
+}
+
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+  VisitRR(this, kArm64Uint32ToFloat32, node);
+}
+
+
 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
   VisitRR(this, kArm64Int32ToFloat64, node);
 }
@@ -1229,11 +1264,21 @@
 }
 
 
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+  VisitRR(this, kArm64Float32ToInt32, node);
+}
+
+
 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
   VisitRR(this, kArm64Float64ToInt32, node);
 }
 
 
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+  VisitRR(this, kArm64Float32ToUint32, node);
+}
+
+
 void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
   VisitRR(this, kArm64Float64ToUint32, node);
 }
@@ -1583,30 +1628,27 @@
     Node* node) {
   Arm64OperandGenerator g(this);
 
-  // Push the arguments to the stack.
-  int aligned_push_count = static_cast<int>(arguments->size());
+  bool to_native_stack = descriptor->UseNativeStack();
 
-  bool pushed_count_uneven = aligned_push_count & 1;
-  int claim_count = aligned_push_count;
-  if (pushed_count_uneven && descriptor->UseNativeStack()) {
-    // We can only claim for an even number of call arguments when we use the
-    // native stack.
-    claim_count++;
+  int claim_count = static_cast<int>(arguments->size());
+  int slot = claim_count - 1;
+  if (to_native_stack) {
+    // Native stack must always be aligned to 16 (2 words).
+    claim_count = RoundUp(claim_count, 2);
   }
-  // TODO(dcarney): claim and poke probably take small immediates,
-  //                loop here or whatever.
+  // TODO(titzer): claim and poke probably take small immediates.
   // Bump the stack pointer(s).
-  if (aligned_push_count > 0) {
-    // TODO(dcarney): it would be better to bump the csp here only
+  if (claim_count > 0) {
+    // TODO(titzer): it would be better to bump the csp here only
     //                and emit paired stores with increment for non c frames.
-    Emit(kArm64ClaimForCallArguments, g.NoOutput(),
-         g.TempImmediate(claim_count));
+    ArchOpcode claim = to_native_stack ? kArm64ClaimCSP : kArm64ClaimJSSP;
+    Emit(claim, g.NoOutput(), g.TempImmediate(claim_count));
   }
 
-  // Move arguments to the stack.
-  int slot = aligned_push_count - 1;
+  // Poke the arguments into the stack.
+  ArchOpcode poke = to_native_stack ? kArm64PokeCSP : kArm64PokeJSSP;
   while (slot >= 0) {
-    Emit(kArm64Poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node()),
+    Emit(poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node()),
          g.TempImmediate(slot));
     slot--;
     // TODO(ahaas): Poke arguments in pairs if two subsequent arguments have the
@@ -2191,7 +2233,9 @@
          MachineOperatorBuilder::kFloat64RoundTiesEven |
          MachineOperatorBuilder::kWord32ShiftIsSafe |
          MachineOperatorBuilder::kInt32DivIsSafe |
-         MachineOperatorBuilder::kUint32DivIsSafe;
+         MachineOperatorBuilder::kUint32DivIsSafe |
+         MachineOperatorBuilder::kWord32ReverseBits |
+         MachineOperatorBuilder::kWord64ReverseBits;
 }
 
 }  // namespace compiler
diff --git a/src/compiler/ast-graph-builder.cc b/src/compiler/ast-graph-builder.cc
index c70dfbf..abcf828 100644
--- a/src/compiler/ast-graph-builder.cc
+++ b/src/compiler/ast-graph-builder.cc
@@ -206,7 +206,6 @@
   int stack_height_;
 };
 
-
 // Helper class for a try-finally control scope. It can record intercepted
 // control-flow commands that cause entry into a finally-block, and re-apply
 // them after again leaving that block. Special tokens are used to identify
@@ -214,7 +213,10 @@
 class AstGraphBuilder::ControlScope::DeferredCommands : public ZoneObject {
  public:
   explicit DeferredCommands(AstGraphBuilder* owner)
-      : owner_(owner), deferred_(owner->local_zone()) {}
+      : owner_(owner),
+        deferred_(owner->local_zone()),
+        return_token_(nullptr),
+        throw_token_(nullptr) {}
 
   // One recorded control-flow command.
   struct Entry {
@@ -226,7 +228,24 @@
   // Records a control-flow command while entering the finally-block. This also
   // generates a new dispatch token that identifies one particular path.
   Node* RecordCommand(Command cmd, Statement* stmt, Node* value) {
-    Node* token = NewPathTokenForDeferredCommand();
+    Node* token = nullptr;
+    switch (cmd) {
+      case CMD_BREAK:
+      case CMD_CONTINUE:
+        token = NewPathToken(dispenser_.GetBreakContinueToken());
+        break;
+      case CMD_THROW:
+        if (throw_token_) return throw_token_;
+        token = NewPathToken(TokenDispenserForFinally::kThrowToken);
+        throw_token_ = token;
+        break;
+      case CMD_RETURN:
+        if (return_token_) return return_token_;
+        token = NewPathToken(TokenDispenserForFinally::kReturnToken);
+        return_token_ = token;
+        break;
+    }
+    DCHECK_NOT_NULL(token);
     deferred_.push_back({cmd, stmt, token});
     return token;
   }
@@ -255,11 +274,11 @@
   }
 
  protected:
-  Node* NewPathTokenForDeferredCommand() {
-    return owner_->jsgraph()->Constant(static_cast<int>(deferred_.size()));
+  Node* NewPathToken(int token_id) {
+    return owner_->jsgraph()->Constant(token_id);
   }
   Node* NewPathTokenForImplicitFallThrough() {
-    return owner_->jsgraph()->Constant(-1);
+    return NewPathToken(TokenDispenserForFinally::kFallThroughToken);
   }
   Node* NewPathDispatchCondition(Node* t1, Node* t2) {
     // TODO(mstarzinger): This should be machine()->WordEqual(), but our Phi
@@ -268,8 +287,11 @@
   }
 
  private:
+  TokenDispenserForFinally dispenser_;
   AstGraphBuilder* owner_;
   ZoneVector<Entry> deferred_;
+  Node* return_token_;
+  Node* throw_token_;
 };
 
 
@@ -409,10 +431,13 @@
       DCHECK_EQ(IrOpcode::kDead,
                 NodeProperties::GetFrameStateInput(node, 0)->opcode());
 
+      bool node_has_exception = NodeProperties::IsExceptionalCall(node);
+
       Node* frame_state_after =
           id_after == BailoutId::None()
               ? builder_->jsgraph()->EmptyFrameState()
-              : builder_->environment()->Checkpoint(id_after, combine);
+              : builder_->environment()->Checkpoint(id_after, combine,
+                                                    node_has_exception);
 
       NodeProperties::ReplaceFrameStateInput(node, 0, frame_state_after);
     }
@@ -455,8 +480,7 @@
                          local_zone),
       frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
           FrameStateType::kJavaScriptFunction, info->num_parameters() + 1,
-          info->scope()->num_stack_slots(), info->shared_info(),
-          CALL_MAINTAINS_NATIVE_CONTEXT)) {
+          info->scope()->num_stack_slots(), info->shared_info())) {
   InitializeAstVisitor(info->isolate());
 }
 
@@ -589,7 +613,7 @@
 
   // Emit tracing call if requested to do so.
   if (FLAG_trace) {
-    NewNode(javascript()->CallRuntime(Runtime::kTraceEnter, 0));
+    NewNode(javascript()->CallRuntime(Runtime::kTraceEnter));
   }
 
   // Visit illegal re-declaration and bail out if it exists.
@@ -610,13 +634,6 @@
   // Visit statements in the function body.
   VisitStatements(info()->literal()->body());
 
-  // Emit tracing call if requested to do so.
-  if (FLAG_trace) {
-    // TODO(mstarzinger): Only traces implicit return.
-    Node* return_value = jsgraph()->UndefinedConstant();
-    NewNode(javascript()->CallRuntime(Runtime::kTraceExit, 1), return_value);
-  }
-
   // Return 'undefined' in case we can fall off the end.
   BuildReturn(jsgraph()->UndefinedConstant());
 }
@@ -854,9 +871,9 @@
       env_values, static_cast<size_t>(count));
 }
 
-
-Node* AstGraphBuilder::Environment::Checkpoint(
-    BailoutId ast_id, OutputFrameStateCombine combine) {
+Node* AstGraphBuilder::Environment::Checkpoint(BailoutId ast_id,
+                                               OutputFrameStateCombine combine,
+                                               bool owner_has_exception) {
   if (!builder()->info()->is_deoptimization_enabled()) {
     return builder()->jsgraph()->EmptyFrameState();
   }
@@ -876,7 +893,15 @@
 
   DCHECK(IsLivenessBlockConsistent());
   if (liveness_block() != nullptr) {
-    liveness_block()->Checkpoint(result);
+    // If the owning node has an exception, register the checkpoint to the
+    // predecessor so that the checkpoint is used for both the normal and the
+    // exceptional paths. Yes, this is a terrible hack and we might want
+    // to use an explicit frame state for the exceptional path.
+    if (owner_has_exception) {
+      liveness_block()->GetPredecessor()->Checkpoint(result);
+    } else {
+      liveness_block()->Checkpoint(result);
+    }
   }
   return result;
 }
@@ -1331,7 +1356,8 @@
 
     // Prepare for-in cache.
     Node* prepare = NewNode(javascript()->ForInPrepare(), object);
-    PrepareFrameState(prepare, stmt->EnumId(), OutputFrameStateCombine::Push());
+    PrepareFrameState(prepare, stmt->PrepareId(),
+                      OutputFrameStateCombine::Push(3));
     Node* cache_type = NewNode(common()->Projection(0), prepare);
     Node* cache_array = NewNode(common()->Projection(1), prepare);
     Node* cache_length = NewNode(common()->Projection(2), prepare);
@@ -1422,14 +1448,6 @@
   }
   try_control.EndTry();
 
-  // Insert lazy bailout point.
-  // TODO(mstarzinger): We are only using a 'call' to get a lazy bailout
-  // point. Ideally, we whould not re-enter optimized code when deoptimized
-  // lazily. Tracked by issue v8:4195.
-  NewNode(common()->LazyBailout(),
-          jsgraph()->ZeroConstant(),                      // dummy target.
-          environment()->Checkpoint(stmt->HandlerId()));  // frame state.
-
   // Clear message object as we enter the catch block.
   Node* the_hole = jsgraph()->TheHoleConstant();
   NewNode(javascript()->StoreMessage(), the_hole);
@@ -1474,14 +1492,6 @@
   }
   try_control.EndTry(commands->GetFallThroughToken(), fallthrough_result);
 
-  // Insert lazy bailout point.
-  // TODO(mstarzinger): We are only using a 'call' to get a lazy bailout
-  // point. Ideally, we whould not re-enter optimized code when deoptimized
-  // lazily. Tracked by issue v8:4195.
-  NewNode(common()->LazyBailout(),
-          jsgraph()->ZeroConstant(),                      // dummy target.
-          environment()->Checkpoint(stmt->HandlerId()));  // frame state.
-
   // The result value semantics depend on how the block was entered:
   //  - ReturnStatement: It represents the return value being returned.
   //  - ThrowStatement: It represents the exception being thrown.
@@ -1493,7 +1503,7 @@
   // The result value, dispatch token and message is expected on the operand
   // stack (this is in sync with FullCodeGenerator::EnterFinallyBlock).
   Node* message = NewNode(javascript()->LoadMessage());
-  environment()->Push(token);  // TODO(mstarzinger): Cook token!
+  environment()->Push(token);
   environment()->Push(result);
   environment()->Push(message);
 
@@ -1509,20 +1519,17 @@
   // stack (this is in sync with FullCodeGenerator::ExitFinallyBlock).
   message = environment()->Pop();
   result = environment()->Pop();
-  token = environment()->Pop();  // TODO(mstarzinger): Uncook token!
+  token = environment()->Pop();
   NewNode(javascript()->StoreMessage(), message);
 
   // Dynamic dispatch after the finally-block.
   commands->ApplyDeferredCommands(token, result);
-
-  // TODO(mstarzinger): Remove bailout once everything works.
-  if (!FLAG_turbo_try_finally) SetStackOverflow();
 }
 
 
 void AstGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
   Node* node =
-      NewNode(javascript()->CallRuntime(Runtime::kHandleDebuggerStatement, 0));
+      NewNode(javascript()->CallRuntime(Runtime::kHandleDebuggerStatement));
   PrepareFrameState(node, stmt->DebugBreakId());
   environment()->MarkAllLocalsLive();
 }
@@ -1557,33 +1564,27 @@
 
 
 void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
-  Node* class_name = expr->raw_name() ? jsgraph()->Constant(expr->name())
-                                      : jsgraph()->UndefinedConstant();
-
-  // The class name is expected on the operand stack.
-  environment()->Push(class_name);
   VisitForValueOrTheHole(expr->extends());
   VisitForValue(expr->constructor());
 
   // Create node to instantiate a new class.
   Node* constructor = environment()->Pop();
   Node* extends = environment()->Pop();
-  Node* name = environment()->Pop();
   Node* start = jsgraph()->Constant(expr->start_position());
   Node* end = jsgraph()->Constant(expr->end_position());
-  const Operator* opc = javascript()->CallRuntime(Runtime::kDefineClass, 5);
-  Node* literal = NewNode(opc, name, extends, constructor, start, end);
+  const Operator* opc = javascript()->CallRuntime(Runtime::kDefineClass);
+  Node* literal = NewNode(opc, extends, constructor, start, end);
   PrepareFrameState(literal, expr->CreateLiteralId(),
                     OutputFrameStateCombine::Push());
-
-  // The prototype is ensured to exist by Runtime_DefineClass. No access check
-  // is needed here since the constructor is created by the class literal.
-  Node* prototype =
-      BuildLoadObjectField(literal, JSFunction::kPrototypeOrInitialMapOffset);
-
-  // The class literal and the prototype are both expected on the operand stack
-  // during evaluation of the method values.
   environment()->Push(literal);
+
+  // Load the "prototype" from the constructor.
+  FrameStateBeforeAndAfter states(this, expr->CreateLiteralId());
+  Handle<Name> name = isolate()->factory()->prototype_string();
+  VectorSlotPair pair = CreateVectorSlotPair(expr->PrototypeSlot());
+  Node* prototype = BuildNamedLoad(literal, name, pair);
+  states.AddToNode(prototype, expr->PrototypeId(),
+                   OutputFrameStateCombine::Push());
   environment()->Push(prototype);
 
   // Create nodes to store method values into the literal.
@@ -1618,9 +1619,12 @@
       case ObjectLiteral::Property::PROTOTYPE:
         UNREACHABLE();
       case ObjectLiteral::Property::COMPUTED: {
+        Node* attr = jsgraph()->Constant(DONT_ENUM);
+        Node* set_function_name =
+            jsgraph()->Constant(property->NeedsSetFunctionName());
         const Operator* op =
-            javascript()->CallRuntime(Runtime::kDefineClassMethod, 3);
-        NewNode(op, receiver, key, value);
+            javascript()->CallRuntime(Runtime::kDefineDataPropertyInLiteral);
+        NewNode(op, receiver, key, value, attr, set_function_name);
         break;
       }
       case ObjectLiteral::Property::GETTER: {
@@ -1645,7 +1649,7 @@
   prototype = environment()->Pop();
   literal = environment()->Pop();
   const Operator* op =
-      javascript()->CallRuntime(Runtime::kFinalizeClassDefinition, 2);
+      javascript()->CallRuntime(Runtime::kFinalizeClassDefinition);
   literal = NewNode(op, literal, prototype);
 
   // Assign to class variable.
@@ -1774,8 +1778,7 @@
         Node* receiver = environment()->Pop();
         if (property->emit_store()) {
           Node* language = jsgraph()->Constant(SLOPPY);
-          const Operator* op =
-              javascript()->CallRuntime(Runtime::kSetProperty, 4);
+          const Operator* op = javascript()->CallRuntime(Runtime::kSetProperty);
           Node* set_property = NewNode(op, receiver, key, value, language);
           // SetProperty should not lazy deopt on an object literal.
           PrepareFrameState(set_property, BailoutId::None());
@@ -1790,7 +1793,7 @@
         Node* receiver = environment()->Pop();
         DCHECK(property->emit_store());
         const Operator* op =
-            javascript()->CallRuntime(Runtime::kInternalSetPrototype, 2);
+            javascript()->CallRuntime(Runtime::kInternalSetPrototype);
         Node* set_prototype = NewNode(op, receiver, value);
         // SetPrototype should not lazy deopt on an object literal.
         PrepareFrameState(set_prototype,
@@ -1823,7 +1826,7 @@
     Node* name = environment()->Pop();
     Node* attr = jsgraph()->Constant(NONE);
     const Operator* op =
-        javascript()->CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+        javascript()->CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
     Node* call = NewNode(op, literal, name, getter, setter, attr);
     // This should not lazy deopt on a new literal.
     PrepareFrameState(call, BailoutId::None());
@@ -1847,7 +1850,7 @@
       Node* value = environment()->Pop();
       Node* receiver = environment()->Pop();
       const Operator* op =
-          javascript()->CallRuntime(Runtime::kInternalSetPrototype, 2);
+          javascript()->CallRuntime(Runtime::kInternalSetPrototype);
       Node* call = NewNode(op, receiver, value);
       PrepareFrameState(call, expr->GetIdForPropertySet(property_index));
       continue;
@@ -1868,10 +1871,11 @@
       case ObjectLiteral::Property::COMPUTED:
       case ObjectLiteral::Property::MATERIALIZED_LITERAL: {
         Node* attr = jsgraph()->Constant(NONE);
+        Node* set_function_name =
+            jsgraph()->Constant(property->NeedsSetFunctionName());
         const Operator* op =
-            javascript()->CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
-        Node* call = NewNode(op, receiver, key, value, attr);
-        PrepareFrameState(call, BailoutId::None());
+            javascript()->CallRuntime(Runtime::kDefineDataPropertyInLiteral);
+        NewNode(op, receiver, key, value, attr, set_function_name);
         break;
       }
       case ObjectLiteral::Property::PROTOTYPE:
@@ -1899,8 +1903,7 @@
   // Transform literals that contain functions to fast properties.
   literal = environment()->Top();  // Reload from operand stack.
   if (expr->has_function()) {
-    const Operator* op =
-        javascript()->CallRuntime(Runtime::kToFastProperties, 1);
+    const Operator* op = javascript()->CallRuntime(Runtime::kToFastProperties);
     NewNode(op, literal);
   }
 
@@ -1939,7 +1942,7 @@
   int array_index = 0;
   for (; array_index < expr->values()->length(); array_index++) {
     Expression* subexpr = expr->values()->at(array_index);
-    if (subexpr->IsSpread()) break;
+    DCHECK(!subexpr->IsSpread());
     if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
 
     VisitForValue(subexpr);
@@ -1962,30 +1965,17 @@
   // number of elements an iterable produces is unknown ahead of time.
   for (; array_index < expr->values()->length(); array_index++) {
     Expression* subexpr = expr->values()->at(array_index);
-    Node* result;
+    DCHECK(!subexpr->IsSpread());
 
-    if (subexpr->IsSpread()) {
-      VisitForValue(subexpr->AsSpread()->expression());
-      FrameStateBeforeAndAfter states(this,
-                                      subexpr->AsSpread()->expression()->id());
-      Node* iterable = environment()->Pop();
-      Node* array = environment()->Pop();
-      Node* function = BuildLoadNativeContextField(
-          Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX);
-      result = NewNode(javascript()->CallFunction(3, language_mode()), function,
-                       array, iterable);
-      states.AddToNode(result, expr->GetIdForElement(array_index));
-    } else {
-      VisitForValue(subexpr);
+    VisitForValue(subexpr);
+    {
       Node* value = environment()->Pop();
       Node* array = environment()->Pop();
-      const Operator* op =
-          javascript()->CallRuntime(Runtime::kAppendElement, 2);
-      result = NewNode(op, array, value);
+      const Operator* op = javascript()->CallRuntime(Runtime::kAppendElement);
+      Node* result = NewNode(op, array, value);
       PrepareFrameState(result, expr->GetIdForElement(array_index));
+      environment()->Push(result);
     }
-
-    environment()->Push(result);
   }
 
   ast_context()->ProduceValue(environment()->Pop());
@@ -2343,8 +2333,8 @@
       DCHECK(variable->location() == VariableLocation::LOOKUP);
       Node* name = jsgraph()->Constant(variable->name());
       const Operator* op =
-          javascript()->CallRuntime(Runtime::kLoadLookupSlot, 2);
-      Node* pair = NewNode(op, current_context(), name);
+          javascript()->CallRuntime(Runtime::kLoadLookupSlotForCall);
+      Node* pair = NewNode(op, name);
       callee_value = NewNode(common()->Projection(0), pair);
       receiver_value = NewNode(common()->Projection(1), pair);
       PrepareFrameState(pair, expr->LookupId(),
@@ -2439,8 +2429,8 @@
         Variable* variable = callee->AsVariableProxy()->var();
         Node* name = jsgraph()->Constant(variable->name());
         const Operator* op =
-            javascript()->CallRuntime(Runtime::kLoadLookupSlot, 2);
-        Node* pair = NewNode(op, current_context(), name);
+            javascript()->CallRuntime(Runtime::kLoadLookupSlotForCall);
+        Node* pair = NewNode(op, name);
         callee_value = NewNode(common()->Projection(0), pair);
         receiver_value = NewNode(common()->Projection(1), pair);
         PrepareFrameState(pair, expr->LookupId(),
@@ -2480,7 +2470,7 @@
     Node* language = jsgraph()->Constant(language_mode());
     Node* position = jsgraph()->Constant(current_scope()->start_position());
     const Operator* op =
-        javascript()->CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+        javascript()->CallRuntime(Runtime::kResolvePossiblyDirectEval);
     Node* new_callee =
         NewNode(op, callee, source, function, language, position);
     PrepareFrameState(new_callee, expr->EvalId(),
@@ -2493,7 +2483,7 @@
   // Create node to perform the function call.
   VectorSlotPair feedback = CreateVectorSlotPair(expr->CallFeedbackICSlot());
   const Operator* call = javascript()->CallFunction(
-      args->length() + 2, language_mode(), feedback, receiver_hint);
+      args->length() + 2, feedback, receiver_hint, expr->tail_call_mode());
   FrameStateBeforeAndAfter states(this, expr->CallId());
   Node* value = ProcessArguments(call, args->length() + 2);
   environment()->Push(value->InputAt(0));  // The callee passed to the call.
@@ -2571,8 +2561,7 @@
   VisitForValues(args);
 
   // Create node to perform the JS runtime call.
-  const Operator* call =
-      javascript()->CallFunction(args->length() + 2, language_mode());
+  const Operator* call = javascript()->CallFunction(args->length() + 2);
   FrameStateBeforeAndAfter states(this, expr->CallId());
   Node* value = ProcessArguments(call, args->length() + 2);
   states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
@@ -2591,6 +2580,7 @@
 
   // TODO(mstarzinger): This bailout is a gigantic hack, the owner is ashamed.
   if (function->function_id == Runtime::kInlineGeneratorNext ||
+      function->function_id == Runtime::kInlineGeneratorReturn ||
       function->function_id == Runtime::kInlineGeneratorThrow) {
     ast_context()->ProduceValue(jsgraph()->TheHoleConstant());
     return SetStackOverflow();
@@ -2740,7 +2730,7 @@
     // TODO(bmeurer): Cleanup this feedback/bailout mess!
     FrameStateBeforeAndAfter states(this, BailoutId::None());
     value = BuildBinaryOp(old_value, jsgraph()->OneConstant(),
-                          expr->binary_op(), TypeFeedbackId::None());
+                          expr->binary_op(), expr->CountBinOpFeedbackId());
     // This should never deoptimize outside strong mode because otherwise we
     // have converted to number before.
     states.AddToNode(value, is_strong(language_mode()) ? expr->ToNumberId()
@@ -2848,16 +2838,16 @@
       op = javascript()->StrictNotEqual();
       break;
     case Token::LT:
-      op = javascript()->LessThan(language_mode());
+      op = javascript()->LessThan();
       break;
     case Token::GT:
-      op = javascript()->GreaterThan(language_mode());
+      op = javascript()->GreaterThan();
       break;
     case Token::LTE:
-      op = javascript()->LessThanOrEqual(language_mode());
+      op = javascript()->LessThanOrEqual();
       break;
     case Token::GTE:
-      op = javascript()->GreaterThanOrEqual(language_mode());
+      op = javascript()->GreaterThanOrEqual();
       break;
     case Token::INSTANCEOF:
       op = javascript()->InstanceOf();
@@ -2930,7 +2920,7 @@
                       DeclareGlobalsLanguageMode::encode(language_mode());
   Node* flags = jsgraph()->Constant(encoded_flags);
   Node* pairs = jsgraph()->Constant(data);
-  const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals, 2);
+  const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals);
   Node* call = NewNode(op, pairs, flags);
   PrepareFrameState(call, BailoutId::Declarations());
   globals()->clear();
@@ -3072,8 +3062,7 @@
 }
 
 
-void AstGraphBuilder::VisitRewritableAssignmentExpression(
-    RewritableAssignmentExpression* node) {
+void AstGraphBuilder::VisitRewritableExpression(RewritableExpression* node) {
   Visit(node->expression());
 }
 
@@ -3209,11 +3198,11 @@
   if (arguments == nullptr) return nullptr;
 
   // Allocate and initialize a new arguments object.
-  CreateArgumentsParameters::Type type =
+  CreateArgumentsType type =
       is_strict(language_mode()) || !info()->has_simple_parameters()
-          ? CreateArgumentsParameters::kUnmappedArguments
-          : CreateArgumentsParameters::kMappedArguments;
-  const Operator* op = javascript()->CreateArguments(type, 0);
+          ? CreateArgumentsType::kUnmappedArguments
+          : CreateArgumentsType::kMappedArguments;
+  const Operator* op = javascript()->CreateArguments(type);
   Node* object = NewNode(op, GetFunctionClosure());
   PrepareFrameState(object, BailoutId::None());
 
@@ -3231,8 +3220,8 @@
   if (rest == nullptr) return nullptr;
 
   // Allocate and initialize a new arguments object.
-  CreateArgumentsParameters::Type type = CreateArgumentsParameters::kRestArray;
-  const Operator* op = javascript()->CreateArguments(type, index);
+  CreateArgumentsType type = CreateArgumentsType::kRestParameter;
+  const Operator* op = javascript()->CreateArguments(type);
   Node* object = NewNode(op, GetFunctionClosure());
   PrepareFrameState(object, BailoutId::None());
 
@@ -3405,8 +3394,7 @@
                                      feedback, combine, typeof_mode)) {
         return node;
       }
-      const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
-      Node* value = NewNode(op, BuildLoadFeedbackVector(), current_context());
+      Node* value = BuildDynamicLoad(name, typeof_mode);
       states.AddToNode(value, bailout_id, combine);
       return value;
     }
@@ -3440,8 +3428,8 @@
       // Dynamic lookup of context variable (anywhere in the chain).
       Node* name = jsgraph()->Constant(variable->name());
       const Operator* op =
-          javascript()->CallRuntime(Runtime::kDeleteLookupSlot, 2);
-      Node* result = NewNode(op, current_context(), name);
+          javascript()->CallRuntime(Runtime::kDeleteLookupSlot);
+      Node* result = NewNode(op, name);
       PrepareFrameState(result, bailout_id, combine);
       return result;
     }
@@ -3563,13 +3551,10 @@
     }
     case VariableLocation::LOOKUP: {
       // Dynamic lookup of context variable (anywhere in the chain).
-      Node* name = jsgraph()->Constant(variable->name());
-      Node* language = jsgraph()->Constant(language_mode());
+      Handle<Name> name = variable->name();
       // TODO(mstarzinger): Use Runtime::kInitializeLegacyConstLookupSlot for
       // initializations of const declarations.
-      const Operator* op =
-          javascript()->CallRuntime(Runtime::kStoreLookupSlot, 4);
-      Node* store = NewNode(op, value, current_context(), name, language);
+      Node* store = BuildDynamicStore(name, value);
       PrepareFrameState(store, bailout_id, combine);
       return store;
     }
@@ -3581,16 +3566,16 @@
 
 Node* AstGraphBuilder::BuildKeyedLoad(Node* object, Node* key,
                                       const VectorSlotPair& feedback) {
-  const Operator* op = javascript()->LoadProperty(language_mode(), feedback);
-  Node* node = NewNode(op, object, key, BuildLoadFeedbackVector());
+  const Operator* op = javascript()->LoadProperty(feedback);
+  Node* node = NewNode(op, object, key, GetFunctionClosure());
   return node;
 }
 
 
 Node* AstGraphBuilder::BuildNamedLoad(Node* object, Handle<Name> name,
                                       const VectorSlotPair& feedback) {
-  const Operator* op = javascript()->LoadNamed(language_mode(), name, feedback);
-  Node* node = NewNode(op, object, BuildLoadFeedbackVector());
+  const Operator* op = javascript()->LoadNamed(name, feedback);
+  Node* node = NewNode(op, object, GetFunctionClosure());
   return node;
 }
 
@@ -3598,7 +3583,7 @@
 Node* AstGraphBuilder::BuildKeyedStore(Node* object, Node* key, Node* value,
                                        const VectorSlotPair& feedback) {
   const Operator* op = javascript()->StoreProperty(language_mode(), feedback);
-  Node* node = NewNode(op, object, key, value, BuildLoadFeedbackVector());
+  Node* node = NewNode(op, object, key, value, GetFunctionClosure());
   return node;
 }
 
@@ -3608,7 +3593,7 @@
                                        const VectorSlotPair& feedback) {
   const Operator* op =
       javascript()->StoreNamed(language_mode(), name, feedback);
-  Node* node = NewNode(op, object, value, BuildLoadFeedbackVector());
+  Node* node = NewNode(op, object, value, GetFunctionClosure());
   return node;
 }
 
@@ -3617,9 +3602,8 @@
                                            Handle<Name> name,
                                            const VectorSlotPair& feedback) {
   Node* name_node = jsgraph()->Constant(name);
-  Node* language = jsgraph()->Constant(language_mode());
-  const Operator* op = javascript()->CallRuntime(Runtime::kLoadFromSuper, 4);
-  Node* node = NewNode(op, receiver, home_object, name_node, language);
+  const Operator* op = javascript()->CallRuntime(Runtime::kLoadFromSuper);
+  Node* node = NewNode(op, receiver, home_object, name_node);
   return node;
 }
 
@@ -3627,10 +3611,8 @@
 Node* AstGraphBuilder::BuildKeyedSuperLoad(Node* receiver, Node* home_object,
                                            Node* key,
                                            const VectorSlotPair& feedback) {
-  Node* language = jsgraph()->Constant(language_mode());
-  const Operator* op =
-      javascript()->CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
-  Node* node = NewNode(op, receiver, home_object, key, language);
+  const Operator* op = javascript()->CallRuntime(Runtime::kLoadKeyedFromSuper);
+  Node* node = NewNode(op, receiver, home_object, key);
   return node;
 }
 
@@ -3662,7 +3644,7 @@
                                        const VectorSlotPair& feedback,
                                        TypeofMode typeof_mode) {
   const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
-  Node* node = NewNode(op, BuildLoadFeedbackVector());
+  Node* node = NewNode(op, GetFunctionClosure());
   return node;
 }
 
@@ -3671,22 +3653,30 @@
                                         const VectorSlotPair& feedback) {
   const Operator* op =
       javascript()->StoreGlobal(language_mode(), name, feedback);
-  Node* node = NewNode(op, value, BuildLoadFeedbackVector());
+  Node* node = NewNode(op, value, GetFunctionClosure());
   return node;
 }
 
 
-Node* AstGraphBuilder::BuildLoadObjectField(Node* object, int offset) {
-  return NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()), object,
-                 jsgraph()->IntPtrConstant(offset - kHeapObjectTag));
+Node* AstGraphBuilder::BuildDynamicLoad(Handle<Name> name,
+                                        TypeofMode typeof_mode) {
+  Node* name_node = jsgraph()->Constant(name);
+  const Operator* op =
+      javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
+                                    ? Runtime::kLoadLookupSlot
+                                    : Runtime::kLoadLookupSlotInsideTypeof);
+  Node* node = NewNode(op, name_node);
+  return node;
 }
 
 
-Node* AstGraphBuilder::BuildLoadImmutableObjectField(Node* object, int offset) {
-  return graph()->NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()),
-                          object,
-                          jsgraph()->IntPtrConstant(offset - kHeapObjectTag),
-                          graph()->start(), graph()->start());
+Node* AstGraphBuilder::BuildDynamicStore(Handle<Name> name, Node* value) {
+  Node* name_node = jsgraph()->Constant(name);
+  const Operator* op = javascript()->CallRuntime(
+      is_strict(language_mode()) ? Runtime::kStoreLookupSlot_Strict
+                                 : Runtime::kStoreLookupSlot_Sloppy);
+  Node* node = NewNode(op, name_node, value);
+  return node;
 }
 
 
@@ -3703,19 +3693,6 @@
 }
 
 
-Node* AstGraphBuilder::BuildLoadFeedbackVector() {
-  if (!feedback_vector_.is_set()) {
-    Node* closure = GetFunctionClosure();
-    Node* shared = BuildLoadImmutableObjectField(
-        closure, JSFunction::kSharedFunctionInfoOffset);
-    Node* vector = BuildLoadImmutableObjectField(
-        shared, SharedFunctionInfo::kFeedbackVectorOffset);
-    feedback_vector_.set(vector);
-  }
-  return feedback_vector_.get();
-}
-
-
 Node* AstGraphBuilder::BuildToBoolean(Node* input, TypeFeedbackId feedback_id) {
   if (Node* node = TryFastToBoolean(input)) return node;
   ToBooleanHints hints;
@@ -3758,7 +3735,7 @@
 
 
 Node* AstGraphBuilder::BuildThrowError(Node* exception, BailoutId bailout_id) {
-  const Operator* op = javascript()->CallRuntime(Runtime::kThrow, 1);
+  const Operator* op = javascript()->CallRuntime(Runtime::kThrow);
   Node* call = NewNode(op, exception);
   PrepareFrameState(call, bailout_id);
   Node* control = NewNode(common()->Throw(), call);
@@ -3770,8 +3747,7 @@
 Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable,
                                                 BailoutId bailout_id) {
   Node* variable_name = jsgraph()->Constant(variable->name());
-  const Operator* op =
-      javascript()->CallRuntime(Runtime::kThrowReferenceError, 1);
+  const Operator* op = javascript()->CallRuntime(Runtime::kThrowReferenceError);
   Node* call = NewNode(op, variable_name);
   PrepareFrameState(call, bailout_id);
   Node* control = NewNode(common()->Throw(), call);
@@ -3782,7 +3758,7 @@
 
 Node* AstGraphBuilder::BuildThrowConstAssignError(BailoutId bailout_id) {
   const Operator* op =
-      javascript()->CallRuntime(Runtime::kThrowConstAssignError, 0);
+      javascript()->CallRuntime(Runtime::kThrowConstAssignError);
   Node* call = NewNode(op);
   PrepareFrameState(call, bailout_id);
   Node* control = NewNode(common()->Throw(), call);
@@ -3793,7 +3769,7 @@
 
 Node* AstGraphBuilder::BuildThrowStaticPrototypeError(BailoutId bailout_id) {
   const Operator* op =
-      javascript()->CallRuntime(Runtime::kThrowStaticPrototypeError, 0);
+      javascript()->CallRuntime(Runtime::kThrowStaticPrototypeError);
   Node* call = NewNode(op);
   PrepareFrameState(call, bailout_id);
   Node* control = NewNode(common()->Throw(), call);
@@ -3804,7 +3780,7 @@
 
 Node* AstGraphBuilder::BuildThrowUnsupportedSuperError(BailoutId bailout_id) {
   const Operator* op =
-      javascript()->CallRuntime(Runtime::kThrowUnsupportedSuperError, 0);
+      javascript()->CallRuntime(Runtime::kThrowUnsupportedSuperError);
   Node* call = NewNode(op);
   PrepareFrameState(call, bailout_id);
   Node* control = NewNode(common()->Throw(), call);
@@ -3814,6 +3790,11 @@
 
 
 Node* AstGraphBuilder::BuildReturn(Node* return_value) {
+  // Emit tracing call if requested to do so.
+  if (FLAG_trace) {
+    return_value =
+        NewNode(javascript()->CallRuntime(Runtime::kTraceExit), return_value);
+  }
   Node* control = NewNode(common()->Return(), return_value);
   UpdateControlDependencyToLeaveFunction(control);
   return control;
@@ -3821,7 +3802,7 @@
 
 
 Node* AstGraphBuilder::BuildThrow(Node* exception_value) {
-  NewNode(javascript()->CallRuntime(Runtime::kReThrow, 1), exception_value);
+  NewNode(javascript()->CallRuntime(Runtime::kReThrow), exception_value);
   Node* control = NewNode(common()->Throw(), exception_value);
   UpdateControlDependencyToLeaveFunction(control);
   return control;
@@ -3838,37 +3819,37 @@
   }
   switch (op) {
     case Token::BIT_OR:
-      js_op = javascript()->BitwiseOr(language_mode(), hints);
+      js_op = javascript()->BitwiseOr(hints);
       break;
     case Token::BIT_AND:
-      js_op = javascript()->BitwiseAnd(language_mode(), hints);
+      js_op = javascript()->BitwiseAnd(hints);
       break;
     case Token::BIT_XOR:
-      js_op = javascript()->BitwiseXor(language_mode(), hints);
+      js_op = javascript()->BitwiseXor(hints);
       break;
     case Token::SHL:
-      js_op = javascript()->ShiftLeft(language_mode(), hints);
+      js_op = javascript()->ShiftLeft(hints);
       break;
     case Token::SAR:
-      js_op = javascript()->ShiftRight(language_mode(), hints);
+      js_op = javascript()->ShiftRight(hints);
       break;
     case Token::SHR:
-      js_op = javascript()->ShiftRightLogical(language_mode(), hints);
+      js_op = javascript()->ShiftRightLogical(hints);
       break;
     case Token::ADD:
-      js_op = javascript()->Add(language_mode(), hints);
+      js_op = javascript()->Add(hints);
       break;
     case Token::SUB:
-      js_op = javascript()->Subtract(language_mode(), hints);
+      js_op = javascript()->Subtract(hints);
       break;
     case Token::MUL:
-      js_op = javascript()->Multiply(language_mode(), hints);
+      js_op = javascript()->Multiply(hints);
       break;
     case Token::DIV:
-      js_op = javascript()->Divide(language_mode(), hints);
+      js_op = javascript()->Divide(hints);
       break;
     case Token::MOD:
-      js_op = javascript()->Modulus(language_mode(), hints);
+      js_op = javascript()->Modulus(hints);
       break;
     default:
       UNREACHABLE();
@@ -3916,17 +3897,21 @@
       fast_block.BreakUnless(check, BranchHint::kTrue);
     }
 
-    // Fast case, because variable is not shadowed. Perform global slot load.
-    Node* fast = BuildGlobalLoad(name, feedback, typeof_mode);
-    states.AddToNode(fast, bailout_id, combine);
-    environment()->Push(fast);
+    // Fast case, because variable is not shadowed.
+    if (Node* constant = TryLoadGlobalConstant(name)) {
+      environment()->Push(constant);
+    } else {
+      // Perform global slot load.
+      Node* fast = BuildGlobalLoad(name, feedback, typeof_mode);
+      states.AddToNode(fast, bailout_id, combine);
+      environment()->Push(fast);
+    }
     slow_block.Break();
     environment()->Pop();
     fast_block.EndBlock();
 
     // Slow case, because variable potentially shadowed. Perform dynamic lookup.
-    const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
-    Node* slow = NewNode(op, BuildLoadFeedbackVector(), current_context());
+    Node* slow = BuildDynamicLoad(name, typeof_mode);
     states.AddToNode(slow, bailout_id, combine);
     environment()->Push(slow);
     slow_block.EndBlock();
@@ -3969,8 +3954,7 @@
     fast_block.EndBlock();
 
     // Slow case, because variable potentially shadowed. Perform dynamic lookup.
-    const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
-    Node* slow = NewNode(op, BuildLoadFeedbackVector(), current_context());
+    Node* slow = BuildDynamicLoad(name, typeof_mode);
     states.AddToNode(slow, bailout_id, combine);
     environment()->Push(slow);
     slow_block.EndBlock();
@@ -4047,8 +4031,10 @@
 
     DCHECK_EQ(IrOpcode::kDead,
               NodeProperties::GetFrameStateInput(node, 0)->opcode());
+    bool node_has_exception = NodeProperties::IsExceptionalCall(node);
     NodeProperties::ReplaceFrameStateInput(
-        node, 0, environment()->Checkpoint(ast_id, combine));
+        node, 0,
+        environment()->Checkpoint(ast_id, combine, node_has_exception));
   }
 }
 
diff --git a/src/compiler/ast-graph-builder.h b/src/compiler/ast-graph-builder.h
index 3b6302d..6cff237 100644
--- a/src/compiler/ast-graph-builder.h
+++ b/src/compiler/ast-graph-builder.h
@@ -314,14 +314,13 @@
   Node* BuildGlobalStore(Handle<Name> name, Node* value,
                          const VectorSlotPair& feedback);
 
+  // Builders for dynamic variable loads and stores.
+  Node* BuildDynamicLoad(Handle<Name> name, TypeofMode typeof_mode);
+  Node* BuildDynamicStore(Handle<Name> name, Node* value);
+
   // Builders for accessing the function context.
   Node* BuildLoadGlobalObject();
   Node* BuildLoadNativeContextField(int index);
-  Node* BuildLoadFeedbackVector();
-
-  // Builder for accessing a (potentially immutable) object field.
-  Node* BuildLoadObjectField(Node* object, int offset);
-  Node* BuildLoadImmutableObjectField(Node* object, int offset);
 
   // Builders for automatic type conversion.
   Node* BuildToBoolean(Node* input, TypeFeedbackId feedback_id);
@@ -519,7 +518,8 @@
   // Preserve a checkpoint of the environment for the IR graph. Any
   // further mutation of the environment will not affect checkpoints.
   Node* Checkpoint(BailoutId ast_id, OutputFrameStateCombine combine =
-                                         OutputFrameStateCombine::Ignore());
+                                         OutputFrameStateCombine::Ignore(),
+                   bool node_has_exception = false);
 
   // Control dependency tracked by this environment.
   Node* GetControlDependency() { return control_dependency_; }
diff --git a/src/compiler/ast-loop-assignment-analyzer.cc b/src/compiler/ast-loop-assignment-analyzer.cc
index 2074c94..ac96399 100644
--- a/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/src/compiler/ast-loop-assignment-analyzer.cc
@@ -198,7 +198,7 @@
 }
 
 
-void ALAA::VisitSpread(Spread* e) { Visit(e->expression()); }
+void ALAA::VisitSpread(Spread* e) { UNREACHABLE(); }
 
 
 void ALAA::VisitEmptyParentheses(EmptyParentheses* e) { UNREACHABLE(); }
@@ -266,7 +266,6 @@
   Visit(loop->assign_iterator());
   Enter(loop);
   Visit(loop->assign_each());
-  Visit(loop->each());
   Visit(loop->subject());
   Visit(loop->body());
   Exit(loop);
@@ -288,8 +287,7 @@
 }
 
 
-void ALAA::VisitRewritableAssignmentExpression(
-    RewritableAssignmentExpression* expr) {
+void ALAA::VisitRewritableExpression(RewritableExpression* expr) {
   Visit(expr->expression());
 }
 
diff --git a/src/compiler/bytecode-branch-analysis.cc b/src/compiler/bytecode-branch-analysis.cc
index 27699a1..4e96a53 100644
--- a/src/compiler/bytecode-branch-analysis.cc
+++ b/src/compiler/bytecode-branch-analysis.cc
@@ -11,115 +11,33 @@
 namespace internal {
 namespace compiler {
 
-// The class contains all of the sites that contain
-// branches to a particular target (bytecode offset).
-class BytecodeBranchInfo final : public ZoneObject {
- public:
-  explicit BytecodeBranchInfo(Zone* zone)
-      : back_edge_offsets_(zone), fore_edge_offsets_(zone) {}
-
-  void AddBranch(int source_offset, int target_offset);
-
-  // The offsets of bytecodes that refer to this bytecode as
-  // a back-edge predecessor.
-  const ZoneVector<int>* back_edge_offsets() { return &back_edge_offsets_; }
-
-  // The offsets of bytecodes that refer to this bytecode as
-  // a forwards-edge predecessor.
-  const ZoneVector<int>* fore_edge_offsets() { return &fore_edge_offsets_; }
-
- private:
-  ZoneVector<int> back_edge_offsets_;
-  ZoneVector<int> fore_edge_offsets_;
-
-  DISALLOW_COPY_AND_ASSIGN(BytecodeBranchInfo);
-};
-
-
-void BytecodeBranchInfo::AddBranch(int source_offset, int target_offset) {
-  if (source_offset < target_offset) {
-    fore_edge_offsets_.push_back(source_offset);
-  } else {
-    back_edge_offsets_.push_back(source_offset);
-  }
-}
-
-
 BytecodeBranchAnalysis::BytecodeBranchAnalysis(
     Handle<BytecodeArray> bytecode_array, Zone* zone)
-    : branch_infos_(zone),
-      bytecode_array_(bytecode_array),
-      reachable_(bytecode_array->length(), zone),
+    : bytecode_array_(bytecode_array),
+      is_backward_target_(bytecode_array->length(), zone),
+      is_forward_target_(bytecode_array->length(), zone),
       zone_(zone) {}
 
-
 void BytecodeBranchAnalysis::Analyze() {
   interpreter::BytecodeArrayIterator iterator(bytecode_array());
-  bool reachable = true;
   while (!iterator.done()) {
     interpreter::Bytecode bytecode = iterator.current_bytecode();
     int current_offset = iterator.current_offset();
-    // All bytecode basic blocks are generated to be forward reachable
-    // and may also be backward reachable. Hence if there's a forward
-    // branch targeting here the code becomes reachable.
-    reachable = reachable || forward_branches_target(current_offset);
-    if (reachable) {
-      reachable_.Add(current_offset);
-      if (interpreter::Bytecodes::IsConditionalJump(bytecode)) {
-        // Only the branch is recorded, the forward path falls through
-        // and is handled as normal bytecode data flow.
-        AddBranch(current_offset, iterator.GetJumpTargetOffset());
-      } else if (interpreter::Bytecodes::IsJump(bytecode)) {
-        // Unless the branch targets the next bytecode it's not
-        // reachable. If it targets the next bytecode the check at the
-        // start of the loop will set the reachable flag.
-        AddBranch(current_offset, iterator.GetJumpTargetOffset());
-        reachable = false;
-      } else if (interpreter::Bytecodes::IsJumpOrReturn(bytecode)) {
-        DCHECK_EQ(bytecode, interpreter::Bytecode::kReturn);
-        reachable = false;
-      }
+    if (interpreter::Bytecodes::IsJump(bytecode)) {
+      AddBranch(current_offset, iterator.GetJumpTargetOffset());
     }
     iterator.Advance();
   }
 }
 
-
-const ZoneVector<int>* BytecodeBranchAnalysis::BackwardBranchesTargetting(
-    int offset) const {
-  auto iterator = branch_infos_.find(offset);
-  if (branch_infos_.end() != iterator) {
-    return iterator->second->back_edge_offsets();
-  } else {
-    return nullptr;
-  }
-}
-
-
-const ZoneVector<int>* BytecodeBranchAnalysis::ForwardBranchesTargetting(
-    int offset) const {
-  auto iterator = branch_infos_.find(offset);
-  if (branch_infos_.end() != iterator) {
-    return iterator->second->fore_edge_offsets();
-  } else {
-    return nullptr;
-  }
-}
-
-
 void BytecodeBranchAnalysis::AddBranch(int source_offset, int target_offset) {
-  BytecodeBranchInfo* branch_info = nullptr;
-  auto iterator = branch_infos_.find(target_offset);
-  if (branch_infos_.end() == iterator) {
-    branch_info = new (zone()) BytecodeBranchInfo(zone());
-    branch_infos_.insert(std::make_pair(target_offset, branch_info));
+  if (source_offset < target_offset) {
+    is_forward_target_.Add(target_offset);
   } else {
-    branch_info = iterator->second;
+    is_backward_target_.Add(target_offset);
   }
-  branch_info->AddBranch(source_offset, target_offset);
 }
 
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/bytecode-branch-analysis.h b/src/compiler/bytecode-branch-analysis.h
index 0ef33b6..7d32da8 100644
--- a/src/compiler/bytecode-branch-analysis.h
+++ b/src/compiler/bytecode-branch-analysis.h
@@ -7,7 +7,6 @@
 
 #include "src/bit-vector.h"
 #include "src/handles.h"
-#include "src/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -16,15 +15,13 @@
 
 namespace compiler {
 
-class BytecodeBranchInfo;
-
-// A class for identifying the branch targets and their branch sites
-// within a bytecode array and also identifying which bytecodes are
-// reachable. This information can be used to construct the local
-// control flow logic for high-level IR graphs built from bytecode.
+// A class for identifying branch targets within a bytecode array.
+// This information can be used to construct the local control flow
+// logic for high-level IR graphs built from bytecode.
 //
-// NB This class relies on the only backwards branches in bytecode
-// being jumps back to loop headers.
+// N.B. If this class is used to determine loop headers, then such a
+// usage relies on the only backwards branches in bytecode being jumps
+// back to loop headers.
 class BytecodeBranchAnalysis BASE_EMBEDDED {
  public:
   BytecodeBranchAnalysis(Handle<BytecodeArray> bytecode_array, Zone* zone);
@@ -34,27 +31,16 @@
   // until this has been called.
   void Analyze();
 
-  // Offsets of bytecodes having a backward branch to the bytecode at |offset|.
-  const ZoneVector<int>* BackwardBranchesTargetting(int offset) const;
-
-  // Offsets of bytecodes having a forward branch to the bytecode at |offset|.
-  const ZoneVector<int>* ForwardBranchesTargetting(int offset) const;
-
-  // Returns true if the bytecode at |offset| is reachable.
-  bool is_reachable(int offset) const { return reachable_.Contains(offset); }
-
   // Returns true if there are any forward branches to the bytecode at
   // |offset|.
   bool forward_branches_target(int offset) const {
-    const ZoneVector<int>* sites = ForwardBranchesTargetting(offset);
-    return sites != nullptr && sites->size() > 0;
+    return is_forward_target_.Contains(offset);
   }
 
   // Returns true if there are any backward branches to the bytecode
   // at |offset|.
   bool backward_branches_target(int offset) const {
-    const ZoneVector<int>* sites = BackwardBranchesTargetting(offset);
-    return sites != nullptr && sites->size() > 0;
+    return is_backward_target_.Contains(offset);
   }
 
  private:
@@ -63,9 +49,9 @@
   Zone* zone() const { return zone_; }
   Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
 
-  ZoneMap<int, BytecodeBranchInfo*> branch_infos_;
   Handle<BytecodeArray> bytecode_array_;
-  BitVector reachable_;
+  BitVector is_backward_target_;
+  BitVector is_forward_target_;
   Zone* zone_;
 
   DISALLOW_COPY_AND_ASSIGN(BytecodeBranchAnalysis);
diff --git a/src/compiler/bytecode-graph-builder.cc b/src/compiler/bytecode-graph-builder.cc
index cf0b6ab..e28c19d 100644
--- a/src/compiler/bytecode-graph-builder.cc
+++ b/src/compiler/bytecode-graph-builder.cc
@@ -13,25 +13,108 @@
 namespace internal {
 namespace compiler {
 
+// The abstract execution environment simulates the content of the interpreter
+// register file. The environment performs SSA-renaming of all tracked nodes at
+// split and merge points in the control flow.
+class BytecodeGraphBuilder::Environment : public ZoneObject {
+ public:
+  Environment(BytecodeGraphBuilder* builder, int register_count,
+              int parameter_count, Node* control_dependency, Node* context);
+
+  int parameter_count() const { return parameter_count_; }
+  int register_count() const { return register_count_; }
+
+  Node* LookupAccumulator() const;
+  Node* LookupRegister(interpreter::Register the_register) const;
+
+  void BindAccumulator(Node* node, FrameStateBeforeAndAfter* states = nullptr);
+  void BindRegister(interpreter::Register the_register, Node* node,
+                    FrameStateBeforeAndAfter* states = nullptr);
+  void BindRegistersToProjections(interpreter::Register first_reg, Node* node,
+                                  FrameStateBeforeAndAfter* states = nullptr);
+  void RecordAfterState(Node* node, FrameStateBeforeAndAfter* states);
+
+  // Effect dependency tracked by this environment.
+  Node* GetEffectDependency() { return effect_dependency_; }
+  void UpdateEffectDependency(Node* dependency) {
+    effect_dependency_ = dependency;
+  }
+
+  // Preserve a checkpoint of the environment for the IR graph. Any
+  // further mutation of the environment will not affect checkpoints.
+  Node* Checkpoint(BailoutId bytecode_offset, OutputFrameStateCombine combine);
+
+  // Returns true if the state values are up to date with the current
+  // environment.
+  bool StateValuesAreUpToDate(int output_poke_offset, int output_poke_count);
+
+  // Control dependency tracked by this environment.
+  Node* GetControlDependency() const { return control_dependency_; }
+  void UpdateControlDependency(Node* dependency) {
+    control_dependency_ = dependency;
+  }
+
+  Node* Context() const { return context_; }
+  void SetContext(Node* new_context) { context_ = new_context; }
+
+  Environment* CopyForConditional() const;
+  Environment* CopyForLoop();
+  void Merge(Environment* other);
+
+ private:
+  explicit Environment(const Environment* copy);
+  void PrepareForLoop();
+  bool StateValuesAreUpToDate(Node** state_values, int offset, int count,
+                              int output_poke_start, int output_poke_end);
+  bool StateValuesRequireUpdate(Node** state_values, int offset, int count);
+  void UpdateStateValues(Node** state_values, int offset, int count);
+
+  int RegisterToValuesIndex(interpreter::Register the_register) const;
+
+  Zone* zone() const { return builder_->local_zone(); }
+  Graph* graph() const { return builder_->graph(); }
+  CommonOperatorBuilder* common() const { return builder_->common(); }
+  BytecodeGraphBuilder* builder() const { return builder_; }
+  const NodeVector* values() const { return &values_; }
+  NodeVector* values() { return &values_; }
+  int register_base() const { return register_base_; }
+  int accumulator_base() const { return accumulator_base_; }
+
+  BytecodeGraphBuilder* builder_;
+  int register_count_;
+  int parameter_count_;
+  Node* context_;
+  Node* control_dependency_;
+  Node* effect_dependency_;
+  NodeVector values_;
+  Node* parameters_state_values_;
+  Node* registers_state_values_;
+  Node* accumulator_state_values_;
+  int register_base_;
+  int accumulator_base_;
+};
+
 // Helper for generating frame states for before and after a bytecode.
 class BytecodeGraphBuilder::FrameStateBeforeAndAfter {
  public:
-  FrameStateBeforeAndAfter(BytecodeGraphBuilder* builder,
-                           const interpreter::BytecodeArrayIterator& iterator)
+  explicit FrameStateBeforeAndAfter(BytecodeGraphBuilder* builder)
       : builder_(builder),
         id_after_(BailoutId::None()),
         added_to_node_(false),
+        frame_states_unused_(false),
         output_poke_offset_(0),
         output_poke_count_(0) {
-    BailoutId id_before(iterator.current_offset());
+    BailoutId id_before(builder->bytecode_iterator().current_offset());
     frame_state_before_ = builder_->environment()->Checkpoint(
         id_before, OutputFrameStateCombine::Ignore());
-    id_after_ = BailoutId(id_before.ToInt() + iterator.current_bytecode_size());
+    id_after_ = BailoutId(id_before.ToInt() +
+                          builder->bytecode_iterator().current_bytecode_size());
   }
 
   ~FrameStateBeforeAndAfter() {
     DCHECK(added_to_node_);
-    DCHECK(builder_->environment()->StateValuesAreUpToDate(output_poke_offset_,
+    DCHECK(frame_states_unused_ ||
+           builder_->environment()->StateValuesAreUpToDate(output_poke_offset_,
                                                            output_poke_count_));
   }
 
@@ -62,6 +145,7 @@
       output_poke_offset_ = static_cast<int>(combine.GetOffsetToPokeAt());
       output_poke_count_ = node->op()->ValueOutputCount();
     }
+    frame_states_unused_ = count == 0;
     added_to_node_ = true;
   }
 
@@ -70,6 +154,7 @@
   BailoutId id_after_;
 
   bool added_to_node_;
+  bool frame_states_unused_;
   int output_poke_offset_;
   int output_poke_count_;
 };
@@ -155,8 +240,8 @@
 
 Node* BytecodeGraphBuilder::Environment::LookupRegister(
     interpreter::Register the_register) const {
-  if (the_register.is_function_context()) {
-    return builder()->GetFunctionContext();
+  if (the_register.is_current_context()) {
+    return Context();
   } else if (the_register.is_function_closure()) {
     return builder()->GetFunctionClosure();
   } else if (the_register.is_new_target()) {
@@ -168,16 +253,6 @@
 }
 
 
-void BytecodeGraphBuilder::Environment::ExchangeRegisters(
-    interpreter::Register reg0, interpreter::Register reg1) {
-  int reg0_index = RegisterToValuesIndex(reg0);
-  int reg1_index = RegisterToValuesIndex(reg1);
-  Node* saved_reg0_value = values()->at(reg0_index);
-  values()->at(reg0_index) = values()->at(reg1_index);
-  values()->at(reg1_index) = saved_reg0_value;
-}
-
-
 void BytecodeGraphBuilder::Environment::BindAccumulator(
     Node* node, FrameStateBeforeAndAfter* states) {
   if (states) {
@@ -220,16 +295,6 @@
 }
 
 
-bool BytecodeGraphBuilder::Environment::IsMarkedAsUnreachable() const {
-  return GetControlDependency()->opcode() == IrOpcode::kDead;
-}
-
-
-void BytecodeGraphBuilder::Environment::MarkAsUnreachable() {
-  UpdateControlDependency(builder()->jsgraph()->Dead());
-}
-
-
 BytecodeGraphBuilder::Environment*
 BytecodeGraphBuilder::Environment::CopyForLoop() {
   PrepareForLoop();
@@ -245,11 +310,6 @@
 
 void BytecodeGraphBuilder::Environment::Merge(
     BytecodeGraphBuilder::Environment* other) {
-  // Nothing to do if the other environment is dead.
-  if (other->IsMarkedAsUnreachable()) {
-    return;
-  }
-
   // Create a merge of the control dependencies of both environments and update
   // the current environment's control dependency accordingly.
   Node* control = builder()->MergeControl(GetControlDependency(),
@@ -295,7 +355,7 @@
 
 bool BytecodeGraphBuilder::Environment::StateValuesRequireUpdate(
     Node** state_values, int offset, int count) {
-  if (!builder()->info()->is_deoptimization_enabled()) {
+  if (!builder()->deoptimization_enabled_) {
     return false;
   }
   if (*state_values == nullptr) {
@@ -325,7 +385,7 @@
 
 Node* BytecodeGraphBuilder::Environment::Checkpoint(
     BailoutId bailout_id, OutputFrameStateCombine combine) {
-  if (!builder()->info()->is_deoptimization_enabled()) {
+  if (!builder()->deoptimization_enabled_) {
     return builder()->jsgraph()->EmptyFrameState();
   }
 
@@ -363,6 +423,7 @@
 
 bool BytecodeGraphBuilder::Environment::StateValuesAreUpToDate(
     int output_poke_offset, int output_poke_count) {
+  if (!builder()->deoptimization_enabled_) return true;
   // Poke offset is relative to the top of the stack (i.e., the accumulator).
   int output_poke_start = accumulator_base() - output_poke_offset;
   int output_poke_end = output_poke_start + output_poke_count;
@@ -375,26 +436,27 @@
                                 1, output_poke_start, output_poke_end);
 }
 
-
 BytecodeGraphBuilder::BytecodeGraphBuilder(Zone* local_zone,
-                                           CompilationInfo* compilation_info,
+                                           CompilationInfo* info,
                                            JSGraph* jsgraph)
     : local_zone_(local_zone),
-      info_(compilation_info),
       jsgraph_(jsgraph),
-      bytecode_array_(handle(info()->shared_info()->bytecode_array())),
+      bytecode_array_(handle(info->shared_info()->bytecode_array())),
+      exception_handler_table_(
+          handle(HandlerTable::cast(bytecode_array()->handler_table()))),
+      feedback_vector_(info->feedback_vector()),
       frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
           FrameStateType::kInterpretedFunction,
           bytecode_array()->parameter_count(),
-          bytecode_array()->register_count(), info()->shared_info(),
-          CALL_MAINTAINS_NATIVE_CONTEXT)),
+          bytecode_array()->register_count(), info->shared_info())),
+      deoptimization_enabled_(info->is_deoptimization_enabled()),
       merge_environments_(local_zone),
-      loop_header_environments_(local_zone),
+      exception_handlers_(local_zone),
+      current_exception_handler_(0),
       input_buffer_size_(0),
       input_buffer_(nullptr),
       exit_controls_(local_zone) {}
 
-
 Node* BytecodeGraphBuilder::GetNewTarget() {
   if (!new_target_.is_set()) {
     int params = bytecode_array()->parameter_count();
@@ -430,21 +492,6 @@
 }
 
 
-Node* BytecodeGraphBuilder::BuildLoadObjectField(Node* object, int offset) {
-  return NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()), object,
-                 jsgraph()->IntPtrConstant(offset - kHeapObjectTag));
-}
-
-
-Node* BytecodeGraphBuilder::BuildLoadImmutableObjectField(Node* object,
-                                                          int offset) {
-  return graph()->NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()),
-                          object,
-                          jsgraph()->IntPtrConstant(offset - kHeapObjectTag),
-                          graph()->start(), graph()->start());
-}
-
-
 Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
   const Operator* op =
       javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true);
@@ -453,30 +500,15 @@
 }
 
 
-Node* BytecodeGraphBuilder::BuildLoadFeedbackVector() {
-  if (!feedback_vector_.is_set()) {
-    Node* closure = GetFunctionClosure();
-    Node* shared = BuildLoadImmutableObjectField(
-        closure, JSFunction::kSharedFunctionInfoOffset);
-    Node* vector = BuildLoadImmutableObjectField(
-        shared, SharedFunctionInfo::kFeedbackVectorOffset);
-    feedback_vector_.set(vector);
-  }
-  return feedback_vector_.get();
-}
-
-
 VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) {
-  Handle<TypeFeedbackVector> feedback_vector = info()->feedback_vector();
   FeedbackVectorSlot slot;
   if (slot_id >= TypeFeedbackVector::kReservedIndexCount) {
-    slot = feedback_vector->ToSlot(slot_id);
+    slot = feedback_vector()->ToSlot(slot_id);
   }
-  return VectorSlotPair(feedback_vector, slot);
+  return VectorSlotPair(feedback_vector(), slot);
 }
 
-
-bool BytecodeGraphBuilder::CreateGraph(bool stack_check) {
+bool BytecodeGraphBuilder::CreateGraph() {
   // Set up the basic structure of the graph. Outputs for {Start} are
   // the formal parameters (including the receiver) plus context and
   // closure.
@@ -492,7 +524,7 @@
                   GetFunctionContext());
   set_environment(&env);
 
-  CreateGraphBody(stack_check);
+  VisitBytecodes();
 
   // Finish the basic structure of the graph.
   DCHECK_NE(0u, exit_controls_.size());
@@ -504,20 +536,6 @@
   return true;
 }
 
-
-void BytecodeGraphBuilder::CreateGraphBody(bool stack_check) {
-  // TODO(oth): Review ast-graph-builder equivalent, i.e. arguments
-  // object setup, this function variable if used, tracing hooks.
-
-  if (stack_check) {
-    Node* node = NewNode(javascript()->StackCheck());
-    PrepareEntryFrameState(node);
-  }
-
-  VisitBytecodes();
-}
-
-
 void BytecodeGraphBuilder::VisitBytecodes() {
   BytecodeBranchAnalysis analysis(bytecode_array(), local_zone());
   analysis.Analyze();
@@ -526,14 +544,15 @@
   set_bytecode_iterator(&iterator);
   while (!iterator.done()) {
     int current_offset = iterator.current_offset();
-    if (analysis.is_reachable(current_offset)) {
-      MergeEnvironmentsOfForwardBranches(current_offset);
-      BuildLoopHeaderForBackwardBranches(current_offset);
+    EnterAndExitExceptionHandlers(current_offset);
+    SwitchToMergeEnvironment(current_offset);
+    if (environment() != nullptr) {
+      BuildLoopHeaderEnvironment(current_offset);
 
       switch (iterator.current_bytecode()) {
 #define BYTECODE_CASE(name, ...)       \
   case interpreter::Bytecode::k##name: \
-    Visit##name(iterator);             \
+    Visit##name();                     \
     break;
         BYTECODE_LIST(BYTECODE_CASE)
 #undef BYTECODE_CODE
@@ -543,635 +562,417 @@
   }
   set_branch_analysis(nullptr);
   set_bytecode_iterator(nullptr);
+  DCHECK(exception_handlers_.empty());
 }
 
-
-void BytecodeGraphBuilder::VisitLdaZero(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLdaZero() {
   Node* node = jsgraph()->ZeroConstant();
   environment()->BindAccumulator(node);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaSmi8(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  Node* node = jsgraph()->Constant(iterator.GetImmediateOperand(0));
+void BytecodeGraphBuilder::VisitLdaSmi8() {
+  Node* node = jsgraph()->Constant(bytecode_iterator().GetImmediateOperand(0));
   environment()->BindAccumulator(node);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaConstantWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  Node* node = jsgraph()->Constant(iterator.GetConstantForIndexOperand(0));
+void BytecodeGraphBuilder::VisitLdaConstantWide() {
+  Node* node =
+      jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
   environment()->BindAccumulator(node);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaConstant(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  Node* node = jsgraph()->Constant(iterator.GetConstantForIndexOperand(0));
+void BytecodeGraphBuilder::VisitLdaConstant() {
+  Node* node =
+      jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
   environment()->BindAccumulator(node);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaUndefined(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLdaUndefined() {
   Node* node = jsgraph()->UndefinedConstant();
   environment()->BindAccumulator(node);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaNull(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLdaNull() {
   Node* node = jsgraph()->NullConstant();
   environment()->BindAccumulator(node);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaTheHole(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLdaTheHole() {
   Node* node = jsgraph()->TheHoleConstant();
   environment()->BindAccumulator(node);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaTrue(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLdaTrue() {
   Node* node = jsgraph()->TrueConstant();
   environment()->BindAccumulator(node);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaFalse(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLdaFalse() {
   Node* node = jsgraph()->FalseConstant();
   environment()->BindAccumulator(node);
 }
 
-
-void BytecodeGraphBuilder::VisitLdar(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  Node* value = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+void BytecodeGraphBuilder::VisitLdar() {
+  Node* value =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   environment()->BindAccumulator(value);
 }
 
-
-void BytecodeGraphBuilder::VisitStar(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitStar() {
   Node* value = environment()->LookupAccumulator();
-  environment()->BindRegister(iterator.GetRegisterOperand(0), value);
+  environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), value);
 }
 
-
-void BytecodeGraphBuilder::VisitMov(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  Node* value = environment()->LookupRegister(iterator.GetRegisterOperand(0));
-  environment()->BindRegister(iterator.GetRegisterOperand(1), value);
+void BytecodeGraphBuilder::VisitMov() {
+  Node* value =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), value);
 }
 
-
-void BytecodeGraphBuilder::VisitExchange(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  environment()->ExchangeRegisters(iterator.GetRegisterOperand(0),
-                                   iterator.GetRegisterOperand(1));
-}
-
-
-void BytecodeGraphBuilder::VisitExchangeWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  environment()->ExchangeRegisters(iterator.GetRegisterOperand(0),
-                                   iterator.GetRegisterOperand(1));
-}
-
+void BytecodeGraphBuilder::VisitMovWide() { VisitMov(); }
 
 void BytecodeGraphBuilder::BuildLoadGlobal(
-    const interpreter::BytecodeArrayIterator& iterator,
     TypeofMode typeof_mode) {
-  FrameStateBeforeAndAfter states(this, iterator);
+  FrameStateBeforeAndAfter states(this);
   Handle<Name> name =
-      Handle<Name>::cast(iterator.GetConstantForIndexOperand(0));
-  VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(1));
+      Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+  VectorSlotPair feedback =
+      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
 
   const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
-  Node* node = NewNode(op, BuildLoadFeedbackVector());
+  Node* node = NewNode(op, GetFunctionClosure());
   environment()->BindAccumulator(node, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaGlobalSloppy(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
+void BytecodeGraphBuilder::VisitLdaGlobal() {
+  BuildLoadGlobal(TypeofMode::NOT_INSIDE_TYPEOF);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaGlobalStrict(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
+void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
+  BuildLoadGlobal(TypeofMode::INSIDE_TYPEOF);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofSloppy(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
+void BytecodeGraphBuilder::VisitLdaGlobalWide() {
+  BuildLoadGlobal(TypeofMode::NOT_INSIDE_TYPEOF);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofStrict(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
+void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofWide() {
+  BuildLoadGlobal(TypeofMode::INSIDE_TYPEOF);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaGlobalSloppyWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
-}
-
-
-void BytecodeGraphBuilder::VisitLdaGlobalStrictWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
-}
-
-
-void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofSloppyWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
-}
-
-
-void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofStrictWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
-}
-
-
-void BytecodeGraphBuilder::BuildStoreGlobal(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildStoreGlobal(LanguageMode language_mode) {
+  FrameStateBeforeAndAfter states(this);
   Handle<Name> name =
-      Handle<Name>::cast(iterator.GetConstantForIndexOperand(0));
-  VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(1));
+      Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+  VectorSlotPair feedback =
+      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
   Node* value = environment()->LookupAccumulator();
 
-  const Operator* op =
-      javascript()->StoreGlobal(language_mode(), name, feedback);
-  Node* node = NewNode(op, value, BuildLoadFeedbackVector());
+  const Operator* op = javascript()->StoreGlobal(language_mode, name, feedback);
+  Node* node = NewNode(op, value, GetFunctionClosure());
   environment()->RecordAfterState(node, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitStaGlobalSloppy(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildStoreGlobal(iterator);
+void BytecodeGraphBuilder::VisitStaGlobalSloppy() {
+  BuildStoreGlobal(LanguageMode::SLOPPY);
 }
 
-
-void BytecodeGraphBuilder::VisitStaGlobalStrict(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildStoreGlobal(iterator);
+void BytecodeGraphBuilder::VisitStaGlobalStrict() {
+  BuildStoreGlobal(LanguageMode::STRICT);
 }
 
-void BytecodeGraphBuilder::VisitStaGlobalSloppyWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildStoreGlobal(iterator);
+void BytecodeGraphBuilder::VisitStaGlobalSloppyWide() {
+  BuildStoreGlobal(LanguageMode::SLOPPY);
 }
 
-
-void BytecodeGraphBuilder::VisitStaGlobalStrictWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildStoreGlobal(iterator);
+void BytecodeGraphBuilder::VisitStaGlobalStrictWide() {
+  BuildStoreGlobal(LanguageMode::STRICT);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaContextSlot(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLdaContextSlot() {
   // TODO(mythria): LoadContextSlots are unrolled by the required depth when
   // generating bytecode. Hence the value of depth is always 0. Update this
   // code when the implementation changes.
   // TODO(mythria): The immutable flag is also set to false. This information
   // is not available in the bytecode array. Update this code when the
   // implementation changes.
-  const Operator* op =
-      javascript()->LoadContext(0, iterator.GetIndexOperand(1), false);
-  Node* context = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+  const Operator* op = javascript()->LoadContext(
+      0, bytecode_iterator().GetIndexOperand(1), false);
+  Node* context =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* node = NewNode(op, context);
   environment()->BindAccumulator(node);
 }
 
+void BytecodeGraphBuilder::VisitLdaContextSlotWide() { VisitLdaContextSlot(); }
 
-void BytecodeGraphBuilder::VisitLdaContextSlotWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  VisitLdaContextSlot(iterator);
-}
-
-
-void BytecodeGraphBuilder::VisitStaContextSlot(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitStaContextSlot() {
   // TODO(mythria): LoadContextSlots are unrolled by the required depth when
   // generating bytecode. Hence the value of depth is always 0. Update this
   // code when the implementation changes.
   const Operator* op =
-      javascript()->StoreContext(0, iterator.GetIndexOperand(1));
-  Node* context = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+      javascript()->StoreContext(0, bytecode_iterator().GetIndexOperand(1));
+  Node* context =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* value = environment()->LookupAccumulator();
   NewNode(op, context, value);
 }
 
+void BytecodeGraphBuilder::VisitStaContextSlotWide() { VisitStaContextSlot(); }
 
-void BytecodeGraphBuilder::VisitStaContextSlotWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  VisitStaContextSlot(iterator);
-}
-
-
-void BytecodeGraphBuilder::BuildLdaLookupSlot(
-    TypeofMode typeof_mode,
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  Handle<String> name =
-      Handle<String>::cast(iterator.GetConstantForIndexOperand(0));
-  const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
-  Node* value =
-      NewNode(op, BuildLoadFeedbackVector(), environment()->Context());
+void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) {
+  FrameStateBeforeAndAfter states(this);
+  Node* name =
+      jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
+  const Operator* op =
+      javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
+                                    ? Runtime::kLoadLookupSlot
+                                    : Runtime::kLoadLookupSlotInsideTypeof);
+  Node* value = NewNode(op, name);
   environment()->BindAccumulator(value, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaLookupSlot(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildLdaLookupSlot(TypeofMode::NOT_INSIDE_TYPEOF, iterator);
+void BytecodeGraphBuilder::VisitLdaLookupSlot() {
+  BuildLdaLookupSlot(TypeofMode::NOT_INSIDE_TYPEOF);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaLookupSlotInsideTypeof(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildLdaLookupSlot(TypeofMode::INSIDE_TYPEOF, iterator);
+void BytecodeGraphBuilder::VisitLdaLookupSlotInsideTypeof() {
+  BuildLdaLookupSlot(TypeofMode::INSIDE_TYPEOF);
 }
 
-
-void BytecodeGraphBuilder::BuildStaLookupSlot(
-    LanguageMode language_mode,
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildStaLookupSlot(LanguageMode language_mode) {
+  FrameStateBeforeAndAfter states(this);
   Node* value = environment()->LookupAccumulator();
-  Node* name = jsgraph()->Constant(iterator.GetConstantForIndexOperand(0));
-  Node* language = jsgraph()->Constant(language_mode);
-  const Operator* op = javascript()->CallRuntime(Runtime::kStoreLookupSlot, 4);
-  Node* store = NewNode(op, value, environment()->Context(), name, language);
+  Node* name =
+      jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
+  const Operator* op = javascript()->CallRuntime(
+      is_strict(language_mode) ? Runtime::kStoreLookupSlot_Strict
+                               : Runtime::kStoreLookupSlot_Sloppy);
+  Node* store = NewNode(op, name, value);
   environment()->BindAccumulator(store, &states);
 }
 
+void BytecodeGraphBuilder::VisitLdaLookupSlotWide() { VisitLdaLookupSlot(); }
 
-void BytecodeGraphBuilder::VisitLdaLookupSlotWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  VisitLdaLookupSlot(iterator);
+void BytecodeGraphBuilder::VisitLdaLookupSlotInsideTypeofWide() {
+  VisitLdaLookupSlotInsideTypeof();
 }
 
-
-void BytecodeGraphBuilder::VisitLdaLookupSlotInsideTypeofWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  VisitLdaLookupSlotInsideTypeof(iterator);
+void BytecodeGraphBuilder::VisitStaLookupSlotSloppy() {
+  BuildStaLookupSlot(LanguageMode::SLOPPY);
 }
 
-
-void BytecodeGraphBuilder::VisitStaLookupSlotSloppy(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildStaLookupSlot(LanguageMode::SLOPPY, iterator);
+void BytecodeGraphBuilder::VisitStaLookupSlotStrict() {
+  BuildStaLookupSlot(LanguageMode::STRICT);
 }
 
-
-void BytecodeGraphBuilder::VisitStaLookupSlotStrict(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildStaLookupSlot(LanguageMode::STRICT, iterator);
+void BytecodeGraphBuilder::VisitStaLookupSlotSloppyWide() {
+  VisitStaLookupSlotSloppy();
 }
 
-
-void BytecodeGraphBuilder::VisitStaLookupSlotSloppyWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  VisitStaLookupSlotSloppy(iterator);
+void BytecodeGraphBuilder::VisitStaLookupSlotStrictWide() {
+  VisitStaLookupSlotStrict();
 }
 
-
-void BytecodeGraphBuilder::VisitStaLookupSlotStrictWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  VisitStaLookupSlotStrict(iterator);
-}
-
-
-void BytecodeGraphBuilder::BuildNamedLoad(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+void BytecodeGraphBuilder::BuildNamedLoad() {
+  FrameStateBeforeAndAfter states(this);
+  Node* object =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Handle<Name> name =
-      Handle<Name>::cast(iterator.GetConstantForIndexOperand(1));
-  VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(2));
+      Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(1));
+  VectorSlotPair feedback =
+      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
 
-  const Operator* op = javascript()->LoadNamed(language_mode(), name, feedback);
-  Node* node = NewNode(op, object, BuildLoadFeedbackVector());
+  const Operator* op = javascript()->LoadNamed(name, feedback);
+  Node* node = NewNode(op, object, GetFunctionClosure());
   environment()->BindAccumulator(node, &states);
 }
 
+void BytecodeGraphBuilder::VisitLoadIC() { BuildNamedLoad(); }
 
-void BytecodeGraphBuilder::VisitLoadICSloppy(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildNamedLoad(iterator);
-}
+void BytecodeGraphBuilder::VisitLoadICWide() { BuildNamedLoad(); }
 
-
-void BytecodeGraphBuilder::VisitLoadICStrict(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildNamedLoad(iterator);
-}
-
-
-void BytecodeGraphBuilder::VisitLoadICSloppyWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildNamedLoad(iterator);
-}
-
-
-void BytecodeGraphBuilder::VisitLoadICStrictWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildNamedLoad(iterator);
-}
-
-
-void BytecodeGraphBuilder::BuildKeyedLoad(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildKeyedLoad() {
+  FrameStateBeforeAndAfter states(this);
   Node* key = environment()->LookupAccumulator();
-  Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
-  VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(1));
+  Node* object =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  VectorSlotPair feedback =
+      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
 
-  const Operator* op = javascript()->LoadProperty(language_mode(), feedback);
-  Node* node = NewNode(op, object, key, BuildLoadFeedbackVector());
+  const Operator* op = javascript()->LoadProperty(feedback);
+  Node* node = NewNode(op, object, key, GetFunctionClosure());
   environment()->BindAccumulator(node, &states);
 }
 
+void BytecodeGraphBuilder::VisitKeyedLoadIC() { BuildKeyedLoad(); }
 
-void BytecodeGraphBuilder::VisitKeyedLoadICSloppy(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildKeyedLoad(iterator);
-}
+void BytecodeGraphBuilder::VisitKeyedLoadICWide() { BuildKeyedLoad(); }
 
-
-void BytecodeGraphBuilder::VisitKeyedLoadICStrict(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildKeyedLoad(iterator);
-}
-
-
-void BytecodeGraphBuilder::VisitKeyedLoadICSloppyWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildKeyedLoad(iterator);
-}
-
-
-void BytecodeGraphBuilder::VisitKeyedLoadICStrictWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildKeyedLoad(iterator);
-}
-
-
-void BytecodeGraphBuilder::BuildNamedStore(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode) {
+  FrameStateBeforeAndAfter states(this);
   Node* value = environment()->LookupAccumulator();
-  Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+  Node* object =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Handle<Name> name =
-      Handle<Name>::cast(iterator.GetConstantForIndexOperand(1));
-  VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(2));
+      Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(1));
+  VectorSlotPair feedback =
+      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
 
-  const Operator* op =
-      javascript()->StoreNamed(language_mode(), name, feedback);
-  Node* node = NewNode(op, object, value, BuildLoadFeedbackVector());
+  const Operator* op = javascript()->StoreNamed(language_mode, name, feedback);
+  Node* node = NewNode(op, object, value, GetFunctionClosure());
   environment()->RecordAfterState(node, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitStoreICSloppy(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildNamedStore(iterator);
+void BytecodeGraphBuilder::VisitStoreICSloppy() {
+  BuildNamedStore(LanguageMode::SLOPPY);
 }
 
-
-void BytecodeGraphBuilder::VisitStoreICStrict(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildNamedStore(iterator);
+void BytecodeGraphBuilder::VisitStoreICStrict() {
+  BuildNamedStore(LanguageMode::STRICT);
 }
 
-
-void BytecodeGraphBuilder::VisitStoreICSloppyWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildNamedStore(iterator);
+void BytecodeGraphBuilder::VisitStoreICSloppyWide() {
+  BuildNamedStore(LanguageMode::SLOPPY);
 }
 
-
-void BytecodeGraphBuilder::VisitStoreICStrictWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildNamedStore(iterator);
+void BytecodeGraphBuilder::VisitStoreICStrictWide() {
+  BuildNamedStore(LanguageMode::STRICT);
 }
 
-
-void BytecodeGraphBuilder::BuildKeyedStore(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildKeyedStore(LanguageMode language_mode) {
+  FrameStateBeforeAndAfter states(this);
   Node* value = environment()->LookupAccumulator();
-  Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
-  Node* key = environment()->LookupRegister(iterator.GetRegisterOperand(1));
-  VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(2));
+  Node* object =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  Node* key =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
+  VectorSlotPair feedback =
+      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
 
-  const Operator* op = javascript()->StoreProperty(language_mode(), feedback);
-  Node* node = NewNode(op, object, key, value, BuildLoadFeedbackVector());
+  const Operator* op = javascript()->StoreProperty(language_mode, feedback);
+  Node* node = NewNode(op, object, key, value, GetFunctionClosure());
   environment()->RecordAfterState(node, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitKeyedStoreICSloppy(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildKeyedStore(iterator);
+void BytecodeGraphBuilder::VisitKeyedStoreICSloppy() {
+  BuildKeyedStore(LanguageMode::SLOPPY);
 }
 
-
-void BytecodeGraphBuilder::VisitKeyedStoreICStrict(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildKeyedStore(iterator);
+void BytecodeGraphBuilder::VisitKeyedStoreICStrict() {
+  BuildKeyedStore(LanguageMode::STRICT);
 }
 
-
-void BytecodeGraphBuilder::VisitKeyedStoreICSloppyWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildKeyedStore(iterator);
+void BytecodeGraphBuilder::VisitKeyedStoreICSloppyWide() {
+  BuildKeyedStore(LanguageMode::SLOPPY);
 }
 
-
-void BytecodeGraphBuilder::VisitKeyedStoreICStrictWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildKeyedStore(iterator);
+void BytecodeGraphBuilder::VisitKeyedStoreICStrictWide() {
+  BuildKeyedStore(LanguageMode::STRICT);
 }
 
+void BytecodeGraphBuilder::VisitPushContext() {
+  Node* new_context = environment()->LookupAccumulator();
+  environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0),
+                              environment()->Context());
+  environment()->SetContext(new_context);
+}
 
-void BytecodeGraphBuilder::VisitPushContext(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  Node* context = environment()->LookupAccumulator();
-  environment()->BindRegister(iterator.GetRegisterOperand(0), context);
+void BytecodeGraphBuilder::VisitPopContext() {
+  Node* context =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   environment()->SetContext(context);
 }
 
-
-void BytecodeGraphBuilder::VisitPopContext(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  Node* context = environment()->LookupRegister(iterator.GetRegisterOperand(0));
-  environment()->SetContext(context);
-}
-
-
-void BytecodeGraphBuilder::VisitCreateClosure(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  Handle<SharedFunctionInfo> shared_info =
-      Handle<SharedFunctionInfo>::cast(iterator.GetConstantForIndexOperand(0));
+void BytecodeGraphBuilder::VisitCreateClosure() {
+  Handle<SharedFunctionInfo> shared_info = Handle<SharedFunctionInfo>::cast(
+      bytecode_iterator().GetConstantForIndexOperand(0));
   PretenureFlag tenured =
-      iterator.GetImmediateOperand(1) ? TENURED : NOT_TENURED;
+      bytecode_iterator().GetImmediateOperand(1) ? TENURED : NOT_TENURED;
   const Operator* op = javascript()->CreateClosure(shared_info, tenured);
   Node* closure = NewNode(op);
   environment()->BindAccumulator(closure);
 }
 
+void BytecodeGraphBuilder::VisitCreateClosureWide() { VisitCreateClosure(); }
 
-void BytecodeGraphBuilder::VisitCreateClosureWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  VisitCreateClosure(iterator);
-}
-
-
-void BytecodeGraphBuilder::BuildCreateArguments(
-    CreateArgumentsParameters::Type type,
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  const Operator* op = javascript()->CreateArguments(type, 0);
+void BytecodeGraphBuilder::BuildCreateArguments(CreateArgumentsType type) {
+  FrameStateBeforeAndAfter states(this);
+  const Operator* op = javascript()->CreateArguments(type);
   Node* object = NewNode(op, GetFunctionClosure());
   environment()->BindAccumulator(object, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitCreateMappedArguments(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCreateArguments(CreateArgumentsParameters::kMappedArguments, iterator);
+void BytecodeGraphBuilder::VisitCreateMappedArguments() {
+  BuildCreateArguments(CreateArgumentsType::kMappedArguments);
 }
 
-
-void BytecodeGraphBuilder::VisitCreateUnmappedArguments(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCreateArguments(CreateArgumentsParameters::kUnmappedArguments, iterator);
+void BytecodeGraphBuilder::VisitCreateUnmappedArguments() {
+  BuildCreateArguments(CreateArgumentsType::kUnmappedArguments);
 }
 
+void BytecodeGraphBuilder::VisitCreateRestParameter() {
+  BuildCreateArguments(CreateArgumentsType::kRestParameter);
+}
 
-void BytecodeGraphBuilder::BuildCreateLiteral(
-    const Operator* op, const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildCreateLiteral(const Operator* op) {
+  FrameStateBeforeAndAfter states(this);
   Node* literal = NewNode(op, GetFunctionClosure());
   environment()->BindAccumulator(literal, &states);
 }
 
-
-void BytecodeGraphBuilder::BuildCreateRegExpLiteral(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::BuildCreateRegExpLiteral() {
   Handle<String> constant_pattern =
-      Handle<String>::cast(iterator.GetConstantForIndexOperand(0));
-  int literal_index = iterator.GetIndexOperand(1);
-  int literal_flags = iterator.GetImmediateOperand(2);
+      Handle<String>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+  int literal_index = bytecode_iterator().GetIndexOperand(1);
+  int literal_flags = bytecode_iterator().GetImmediateOperand(2);
   const Operator* op = javascript()->CreateLiteralRegExp(
       constant_pattern, literal_flags, literal_index);
-  BuildCreateLiteral(op, iterator);
+  BuildCreateLiteral(op);
 }
 
-
-void BytecodeGraphBuilder::VisitCreateRegExpLiteral(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCreateRegExpLiteral(iterator);
+void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
+  BuildCreateRegExpLiteral();
 }
 
-
-void BytecodeGraphBuilder::VisitCreateRegExpLiteralWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCreateRegExpLiteral(iterator);
+void BytecodeGraphBuilder::VisitCreateRegExpLiteralWide() {
+  BuildCreateRegExpLiteral();
 }
 
-
-void BytecodeGraphBuilder::BuildCreateArrayLiteral(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  Handle<FixedArray> constant_elements =
-      Handle<FixedArray>::cast(iterator.GetConstantForIndexOperand(0));
-  int literal_index = iterator.GetIndexOperand(1);
-  int literal_flags = iterator.GetImmediateOperand(2);
+void BytecodeGraphBuilder::BuildCreateArrayLiteral() {
+  Handle<FixedArray> constant_elements = Handle<FixedArray>::cast(
+      bytecode_iterator().GetConstantForIndexOperand(0));
+  int literal_index = bytecode_iterator().GetIndexOperand(1);
+  int literal_flags = bytecode_iterator().GetImmediateOperand(2);
   const Operator* op = javascript()->CreateLiteralArray(
       constant_elements, literal_flags, literal_index);
-  BuildCreateLiteral(op, iterator);
+  BuildCreateLiteral(op);
 }
 
-
-void BytecodeGraphBuilder::VisitCreateArrayLiteral(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCreateArrayLiteral(iterator);
+void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
+  BuildCreateArrayLiteral();
 }
 
-
-void BytecodeGraphBuilder::VisitCreateArrayLiteralWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCreateArrayLiteral(iterator);
+void BytecodeGraphBuilder::VisitCreateArrayLiteralWide() {
+  BuildCreateArrayLiteral();
 }
 
-
-void BytecodeGraphBuilder::BuildCreateObjectLiteral(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  Handle<FixedArray> constant_properties =
-      Handle<FixedArray>::cast(iterator.GetConstantForIndexOperand(0));
-  int literal_index = iterator.GetIndexOperand(1);
-  int literal_flags = iterator.GetImmediateOperand(2);
+void BytecodeGraphBuilder::BuildCreateObjectLiteral() {
+  Handle<FixedArray> constant_properties = Handle<FixedArray>::cast(
+      bytecode_iterator().GetConstantForIndexOperand(0));
+  int literal_index = bytecode_iterator().GetIndexOperand(1);
+  int literal_flags = bytecode_iterator().GetImmediateOperand(2);
   const Operator* op = javascript()->CreateLiteralObject(
       constant_properties, literal_flags, literal_index);
-  BuildCreateLiteral(op, iterator);
+  BuildCreateLiteral(op);
 }
 
-
-void BytecodeGraphBuilder::VisitCreateObjectLiteral(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCreateObjectLiteral(iterator);
+void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
+  BuildCreateObjectLiteral();
 }
 
-
-void BytecodeGraphBuilder::VisitCreateObjectLiteralWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCreateObjectLiteral(iterator);
+void BytecodeGraphBuilder::VisitCreateObjectLiteralWide() {
+  BuildCreateObjectLiteral();
 }
 
 
@@ -1179,7 +980,7 @@
                                                  Node* callee,
                                                  interpreter::Register receiver,
                                                  size_t arity) {
-  Node** all = info()->zone()->NewArray<Node*>(static_cast<int>(arity));
+  Node** all = local_zone()->NewArray<Node*>(static_cast<int>(arity));
   all[0] = callee;
   all[1] = environment()->LookupRegister(receiver);
   int receiver_index = receiver.index();
@@ -1191,57 +992,58 @@
   return value;
 }
 
-
-void BytecodeGraphBuilder::BuildCall(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildCall(TailCallMode tail_call_mode) {
+  FrameStateBeforeAndAfter states(this);
   // TODO(rmcilroy): Set receiver_hint correctly based on whether the receiver
   // register has been loaded with null / undefined explicitly or we are sure it
   // is not null / undefined.
   ConvertReceiverMode receiver_hint = ConvertReceiverMode::kAny;
-  Node* callee = environment()->LookupRegister(iterator.GetRegisterOperand(0));
-  interpreter::Register receiver = iterator.GetRegisterOperand(1);
-  size_t arg_count = iterator.GetCountOperand(2);
-  VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(3));
+  Node* callee =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
+  size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
+  VectorSlotPair feedback =
+      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(3));
 
   const Operator* call = javascript()->CallFunction(
-      arg_count + 2, language_mode(), feedback, receiver_hint);
-  Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 2);
+      arg_count + 1, feedback, receiver_hint, tail_call_mode);
+  Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
   environment()->BindAccumulator(value, &states);
 }
 
+void BytecodeGraphBuilder::VisitCall() { BuildCall(TailCallMode::kDisallow); }
 
-void BytecodeGraphBuilder::VisitCall(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCall(iterator);
+void BytecodeGraphBuilder::VisitCallWide() {
+  BuildCall(TailCallMode::kDisallow);
 }
 
+void BytecodeGraphBuilder::VisitTailCall() { BuildCall(TailCallMode::kAllow); }
 
-void BytecodeGraphBuilder::VisitCallWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCall(iterator);
+void BytecodeGraphBuilder::VisitTailCallWide() {
+  BuildCall(TailCallMode::kAllow);
 }
 
-
-void BytecodeGraphBuilder::VisitCallJSRuntime(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  Node* callee = BuildLoadNativeContextField(iterator.GetIndexOperand(0));
-  interpreter::Register receiver = iterator.GetRegisterOperand(1);
-  size_t arg_count = iterator.GetCountOperand(2);
+void BytecodeGraphBuilder::BuildCallJSRuntime() {
+  FrameStateBeforeAndAfter states(this);
+  Node* callee =
+      BuildLoadNativeContextField(bytecode_iterator().GetIndexOperand(0));
+  interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
+  size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
 
   // Create node to perform the JS runtime call.
-  const Operator* call =
-      javascript()->CallFunction(arg_count + 2, language_mode());
-  Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 2);
+  const Operator* call = javascript()->CallFunction(arg_count + 1);
+  Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
   environment()->BindAccumulator(value, &states);
 }
 
+void BytecodeGraphBuilder::VisitCallJSRuntime() { BuildCallJSRuntime(); }
+
+void BytecodeGraphBuilder::VisitCallJSRuntimeWide() { BuildCallJSRuntime(); }
 
 Node* BytecodeGraphBuilder::ProcessCallRuntimeArguments(
     const Operator* call_runtime_op, interpreter::Register first_arg,
     size_t arity) {
-  Node** all = info()->zone()->NewArray<Node*>(arity);
+  Node** all = local_zone()->NewArray<Node*>(arity);
   int first_arg_index = first_arg.index();
   for (int i = 0; i < static_cast<int>(arity); ++i) {
     all[i] = environment()->LookupRegister(
@@ -1251,14 +1053,12 @@
   return value;
 }
 
-
-void BytecodeGraphBuilder::VisitCallRuntime(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildCallRuntime() {
+  FrameStateBeforeAndAfter states(this);
   Runtime::FunctionId functionId =
-      static_cast<Runtime::FunctionId>(iterator.GetIndexOperand(0));
-  interpreter::Register first_arg = iterator.GetRegisterOperand(1);
-  size_t arg_count = iterator.GetCountOperand(2);
+      static_cast<Runtime::FunctionId>(bytecode_iterator().GetIndexOperand(0));
+  interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
+  size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
 
   // Create node to perform the runtime call.
   const Operator* call = javascript()->CallRuntime(functionId, arg_count);
@@ -1266,15 +1066,18 @@
   environment()->BindAccumulator(value, &states);
 }
 
+void BytecodeGraphBuilder::VisitCallRuntime() { BuildCallRuntime(); }
 
-void BytecodeGraphBuilder::VisitCallRuntimeForPair(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::VisitCallRuntimeWide() { BuildCallRuntime(); }
+
+void BytecodeGraphBuilder::BuildCallRuntimeForPair() {
+  FrameStateBeforeAndAfter states(this);
   Runtime::FunctionId functionId =
-      static_cast<Runtime::FunctionId>(iterator.GetIndexOperand(0));
-  interpreter::Register first_arg = iterator.GetRegisterOperand(1);
-  size_t arg_count = iterator.GetCountOperand(2);
-  interpreter::Register first_return = iterator.GetRegisterOperand(3);
+      static_cast<Runtime::FunctionId>(bytecode_iterator().GetIndexOperand(0));
+  interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
+  size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
+  interpreter::Register first_return =
+      bytecode_iterator().GetRegisterOperand(3);
 
   // Create node to perform the runtime call.
   const Operator* call = javascript()->CallRuntime(functionId, arg_count);
@@ -1282,164 +1085,151 @@
   environment()->BindRegistersToProjections(first_return, return_pair, &states);
 }
 
+void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
+  BuildCallRuntimeForPair();
+}
+
+void BytecodeGraphBuilder::VisitCallRuntimeForPairWide() {
+  BuildCallRuntimeForPair();
+}
 
 Node* BytecodeGraphBuilder::ProcessCallNewArguments(
-    const Operator* call_new_op, interpreter::Register callee,
+    const Operator* call_new_op, Node* callee, Node* new_target,
     interpreter::Register first_arg, size_t arity) {
-  Node** all = info()->zone()->NewArray<Node*>(arity);
-  all[0] = environment()->LookupRegister(callee);
+  Node** all = local_zone()->NewArray<Node*>(arity);
+  all[0] = new_target;
   int first_arg_index = first_arg.index();
   for (int i = 1; i < static_cast<int>(arity) - 1; ++i) {
     all[i] = environment()->LookupRegister(
         interpreter::Register(first_arg_index + i - 1));
   }
-  // Original constructor is the same as the callee.
-  all[arity - 1] = environment()->LookupRegister(callee);
+  all[arity - 1] = callee;
   Node* value = MakeNode(call_new_op, static_cast<int>(arity), all, false);
   return value;
 }
 
+void BytecodeGraphBuilder::BuildCallConstruct() {
+  FrameStateBeforeAndAfter states(this);
+  interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
+  interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
+  size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
 
-void BytecodeGraphBuilder::VisitNew(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  interpreter::Register callee = iterator.GetRegisterOperand(0);
-  interpreter::Register first_arg = iterator.GetRegisterOperand(1);
-  size_t arg_count = iterator.GetCountOperand(2);
-
+  Node* new_target = environment()->LookupAccumulator();
+  Node* callee = environment()->LookupRegister(callee_reg);
   // TODO(turbofan): Pass the feedback here.
   const Operator* call = javascript()->CallConstruct(
       static_cast<int>(arg_count) + 2, VectorSlotPair());
-  Node* value = ProcessCallNewArguments(call, callee, first_arg, arg_count + 2);
+  Node* value = ProcessCallNewArguments(call, callee, new_target, first_arg,
+                                        arg_count + 2);
   environment()->BindAccumulator(value, &states);
 }
 
+void BytecodeGraphBuilder::VisitNew() { BuildCallConstruct(); }
 
-void BytecodeGraphBuilder::VisitThrow(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::VisitNewWide() { BuildCallConstruct(); }
+
+void BytecodeGraphBuilder::BuildThrow() {
+  FrameStateBeforeAndAfter states(this);
   Node* value = environment()->LookupAccumulator();
-  // TODO(mythria): Change to Runtime::kThrow when we have deoptimization
-  // information support in the interpreter.
-  NewNode(javascript()->CallRuntime(Runtime::kReThrow, 1), value);
-  Node* control = NewNode(common()->Throw(), value);
-  environment()->RecordAfterState(control, &states);
-  UpdateControlDependencyToLeaveFunction(control);
+  Node* call = NewNode(javascript()->CallRuntime(Runtime::kThrow), value);
+  environment()->BindAccumulator(call, &states);
 }
 
+void BytecodeGraphBuilder::VisitThrow() {
+  BuildThrow();
+  Node* call = environment()->LookupAccumulator();
+  Node* control = NewNode(common()->Throw(), call);
+  MergeControlToLeaveFunction(control);
+}
 
-void BytecodeGraphBuilder::BuildBinaryOp(
-    const Operator* js_op, const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  Node* left = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+void BytecodeGraphBuilder::VisitReThrow() {
+  Node* value = environment()->LookupAccumulator();
+  Node* call = NewNode(javascript()->CallRuntime(Runtime::kReThrow), value);
+  Node* control = NewNode(common()->Throw(), call);
+  MergeControlToLeaveFunction(control);
+}
+
+void BytecodeGraphBuilder::BuildBinaryOp(const Operator* js_op) {
+  FrameStateBeforeAndAfter states(this);
+  Node* left =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* right = environment()->LookupAccumulator();
   Node* node = NewNode(js_op, left, right);
   environment()->BindAccumulator(node, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitAdd(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitAdd() {
   BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->Add(language_mode(), hints), iterator);
+  BuildBinaryOp(javascript()->Add(hints));
 }
 
-
-void BytecodeGraphBuilder::VisitSub(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitSub() {
   BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->Subtract(language_mode(), hints), iterator);
+  BuildBinaryOp(javascript()->Subtract(hints));
 }
 
-
-void BytecodeGraphBuilder::VisitMul(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitMul() {
   BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->Multiply(language_mode(), hints), iterator);
+  BuildBinaryOp(javascript()->Multiply(hints));
 }
 
-
-void BytecodeGraphBuilder::VisitDiv(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitDiv() {
   BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->Divide(language_mode(), hints), iterator);
+  BuildBinaryOp(javascript()->Divide(hints));
 }
 
-
-void BytecodeGraphBuilder::VisitMod(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitMod() {
   BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->Modulus(language_mode(), hints), iterator);
+  BuildBinaryOp(javascript()->Modulus(hints));
 }
 
-
-void BytecodeGraphBuilder::VisitBitwiseOr(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitBitwiseOr() {
   BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->BitwiseOr(language_mode(), hints), iterator);
+  BuildBinaryOp(javascript()->BitwiseOr(hints));
 }
 
-
-void BytecodeGraphBuilder::VisitBitwiseXor(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitBitwiseXor() {
   BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->BitwiseXor(language_mode(), hints), iterator);
+  BuildBinaryOp(javascript()->BitwiseXor(hints));
 }
 
-
-void BytecodeGraphBuilder::VisitBitwiseAnd(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitBitwiseAnd() {
   BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->BitwiseAnd(language_mode(), hints), iterator);
+  BuildBinaryOp(javascript()->BitwiseAnd(hints));
 }
 
-
-void BytecodeGraphBuilder::VisitShiftLeft(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitShiftLeft() {
   BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->ShiftLeft(language_mode(), hints), iterator);
+  BuildBinaryOp(javascript()->ShiftLeft(hints));
 }
 
-
-void BytecodeGraphBuilder::VisitShiftRight(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitShiftRight() {
   BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->ShiftRight(language_mode(), hints), iterator);
+  BuildBinaryOp(javascript()->ShiftRight(hints));
 }
 
-
-void BytecodeGraphBuilder::VisitShiftRightLogical(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitShiftRightLogical() {
   BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->ShiftRightLogical(language_mode(), hints),
-                iterator);
+  BuildBinaryOp(javascript()->ShiftRightLogical(hints));
 }
 
-
-void BytecodeGraphBuilder::VisitInc(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  const Operator* js_op =
-      javascript()->Add(language_mode(), BinaryOperationHints::Any());
+void BytecodeGraphBuilder::VisitInc() {
+  FrameStateBeforeAndAfter states(this);
+  const Operator* js_op = javascript()->Add(BinaryOperationHints::Any());
   Node* node = NewNode(js_op, environment()->LookupAccumulator(),
                        jsgraph()->OneConstant());
   environment()->BindAccumulator(node, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitDec(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  const Operator* js_op =
-      javascript()->Subtract(language_mode(), BinaryOperationHints::Any());
+void BytecodeGraphBuilder::VisitDec() {
+  FrameStateBeforeAndAfter states(this);
+  const Operator* js_op = javascript()->Subtract(BinaryOperationHints::Any());
   Node* node = NewNode(js_op, environment()->LookupAccumulator(),
                        jsgraph()->OneConstant());
   environment()->BindAccumulator(node, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitLogicalNot(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLogicalNot() {
   Node* value = NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
                         environment()->LookupAccumulator());
   Node* node = NewNode(common()->Select(MachineRepresentation::kTagged), value,
@@ -1447,408 +1237,307 @@
   environment()->BindAccumulator(node);
 }
 
-
-void BytecodeGraphBuilder::VisitTypeOf(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitTypeOf() {
   Node* node =
       NewNode(javascript()->TypeOf(), environment()->LookupAccumulator());
   environment()->BindAccumulator(node);
 }
 
-
-void BytecodeGraphBuilder::BuildDelete(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildDelete(LanguageMode language_mode) {
+  FrameStateBeforeAndAfter states(this);
   Node* key = environment()->LookupAccumulator();
-  Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+  Node* object =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* node =
-      NewNode(javascript()->DeleteProperty(language_mode()), object, key);
+      NewNode(javascript()->DeleteProperty(language_mode), object, key);
   environment()->BindAccumulator(node, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitDeletePropertyStrict(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildDelete(iterator);
+void BytecodeGraphBuilder::VisitDeletePropertyStrict() {
+  BuildDelete(LanguageMode::STRICT);
 }
 
-
-void BytecodeGraphBuilder::VisitDeletePropertySloppy(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildDelete(iterator);
+void BytecodeGraphBuilder::VisitDeletePropertySloppy() {
+  BuildDelete(LanguageMode::SLOPPY);
 }
 
-
-void BytecodeGraphBuilder::VisitDeleteLookupSlot(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  Node* name = environment()->LookupAccumulator();
-  const Operator* op = javascript()->CallRuntime(Runtime::kDeleteLookupSlot, 2);
-  Node* result = NewNode(op, environment()->Context(), name);
-  environment()->BindAccumulator(result, &states);
-}
-
-
-void BytecodeGraphBuilder::BuildCompareOp(
-    const Operator* js_op, const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  Node* left = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+void BytecodeGraphBuilder::BuildCompareOp(const Operator* js_op) {
+  FrameStateBeforeAndAfter states(this);
+  Node* left =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* right = environment()->LookupAccumulator();
   Node* node = NewNode(js_op, left, right);
   environment()->BindAccumulator(node, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitTestEqual(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCompareOp(javascript()->Equal(), iterator);
+void BytecodeGraphBuilder::VisitTestEqual() {
+  BuildCompareOp(javascript()->Equal());
 }
 
-
-void BytecodeGraphBuilder::VisitTestNotEqual(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCompareOp(javascript()->NotEqual(), iterator);
+void BytecodeGraphBuilder::VisitTestNotEqual() {
+  BuildCompareOp(javascript()->NotEqual());
 }
 
-
-void BytecodeGraphBuilder::VisitTestEqualStrict(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCompareOp(javascript()->StrictEqual(), iterator);
+void BytecodeGraphBuilder::VisitTestEqualStrict() {
+  BuildCompareOp(javascript()->StrictEqual());
 }
 
-
-void BytecodeGraphBuilder::VisitTestNotEqualStrict(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCompareOp(javascript()->StrictNotEqual(), iterator);
+void BytecodeGraphBuilder::VisitTestNotEqualStrict() {
+  BuildCompareOp(javascript()->StrictNotEqual());
 }
 
-
-void BytecodeGraphBuilder::VisitTestLessThan(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCompareOp(javascript()->LessThan(language_mode()), iterator);
+void BytecodeGraphBuilder::VisitTestLessThan() {
+  BuildCompareOp(javascript()->LessThan());
 }
 
-
-void BytecodeGraphBuilder::VisitTestGreaterThan(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCompareOp(javascript()->GreaterThan(language_mode()), iterator);
+void BytecodeGraphBuilder::VisitTestGreaterThan() {
+  BuildCompareOp(javascript()->GreaterThan());
 }
 
-
-void BytecodeGraphBuilder::VisitTestLessThanOrEqual(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCompareOp(javascript()->LessThanOrEqual(language_mode()), iterator);
+void BytecodeGraphBuilder::VisitTestLessThanOrEqual() {
+  BuildCompareOp(javascript()->LessThanOrEqual());
 }
 
-
-void BytecodeGraphBuilder::VisitTestGreaterThanOrEqual(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCompareOp(javascript()->GreaterThanOrEqual(language_mode()), iterator);
+void BytecodeGraphBuilder::VisitTestGreaterThanOrEqual() {
+  BuildCompareOp(javascript()->GreaterThanOrEqual());
 }
 
-
-void BytecodeGraphBuilder::VisitTestIn(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCompareOp(javascript()->HasProperty(), iterator);
+void BytecodeGraphBuilder::VisitTestIn() {
+  BuildCompareOp(javascript()->HasProperty());
 }
 
-
-void BytecodeGraphBuilder::VisitTestInstanceOf(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCompareOp(javascript()->InstanceOf(), iterator);
+void BytecodeGraphBuilder::VisitTestInstanceOf() {
+  BuildCompareOp(javascript()->InstanceOf());
 }
 
-
-void BytecodeGraphBuilder::BuildCastOperator(
-    const Operator* js_op, const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildCastOperator(const Operator* js_op) {
+  FrameStateBeforeAndAfter states(this);
   Node* node = NewNode(js_op, environment()->LookupAccumulator());
   environment()->BindAccumulator(node, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitToName(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCastOperator(javascript()->ToName(), iterator);
+void BytecodeGraphBuilder::VisitToName() {
+  BuildCastOperator(javascript()->ToName());
 }
 
-
-void BytecodeGraphBuilder::VisitToObject(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCastOperator(javascript()->ToObject(), iterator);
+void BytecodeGraphBuilder::VisitToObject() {
+  BuildCastOperator(javascript()->ToObject());
 }
 
-
-void BytecodeGraphBuilder::VisitToNumber(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCastOperator(javascript()->ToNumber(), iterator);
+void BytecodeGraphBuilder::VisitToNumber() {
+  BuildCastOperator(javascript()->ToNumber());
 }
 
+void BytecodeGraphBuilder::VisitJump() { BuildJump(); }
 
-void BytecodeGraphBuilder::VisitJump(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildJump();
-}
+void BytecodeGraphBuilder::VisitJumpConstant() { BuildJump(); }
 
+void BytecodeGraphBuilder::VisitJumpConstantWide() { BuildJump(); }
 
-void BytecodeGraphBuilder::VisitJumpConstant(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildJump();
-}
-
-
-void BytecodeGraphBuilder::VisitJumpConstantWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildJump();
-}
-
-
-void BytecodeGraphBuilder::VisitJumpIfTrue(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfTrue() {
   BuildJumpIfEqual(jsgraph()->TrueConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfTrueConstant(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfTrueConstant() {
   BuildJumpIfEqual(jsgraph()->TrueConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfTrueConstantWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfTrueConstantWide() {
   BuildJumpIfEqual(jsgraph()->TrueConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfFalse(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfFalse() {
   BuildJumpIfEqual(jsgraph()->FalseConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfFalseConstant(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfFalseConstant() {
   BuildJumpIfEqual(jsgraph()->FalseConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfFalseConstantWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfFalseConstantWide() {
   BuildJumpIfEqual(jsgraph()->FalseConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfToBooleanTrue(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfToBooleanTrue() {
   BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstant(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstant() {
   BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstantWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstantWide() {
   BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfToBooleanFalse(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfToBooleanFalse() {
   BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstant(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstant() {
   BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstantWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstantWide() {
   BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
 }
 
+void BytecodeGraphBuilder::VisitJumpIfNotHole() { BuildJumpIfNotHole(); }
 
-void BytecodeGraphBuilder::VisitJumpIfNull(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfNotHoleConstant() {
+  BuildJumpIfNotHole();
+}
+
+void BytecodeGraphBuilder::VisitJumpIfNotHoleConstantWide() {
+  BuildJumpIfNotHole();
+}
+
+void BytecodeGraphBuilder::VisitJumpIfNull() {
   BuildJumpIfEqual(jsgraph()->NullConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfNullConstant(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfNullConstant() {
   BuildJumpIfEqual(jsgraph()->NullConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfNullConstantWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfNullConstantWide() {
   BuildJumpIfEqual(jsgraph()->NullConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfUndefined(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfUndefined() {
   BuildJumpIfEqual(jsgraph()->UndefinedConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfUndefinedConstant(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfUndefinedConstant() {
   BuildJumpIfEqual(jsgraph()->UndefinedConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfUndefinedConstantWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfUndefinedConstantWide() {
   BuildJumpIfEqual(jsgraph()->UndefinedConstant());
 }
 
+void BytecodeGraphBuilder::VisitStackCheck() {
+  FrameStateBeforeAndAfter states(this);
+  Node* node = NewNode(javascript()->StackCheck());
+  environment()->RecordAfterState(node, &states);
+}
 
-void BytecodeGraphBuilder::VisitReturn(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitReturn() {
   Node* control =
       NewNode(common()->Return(), environment()->LookupAccumulator());
-  UpdateControlDependencyToLeaveFunction(control);
-  set_environment(nullptr);
+  MergeControlToLeaveFunction(control);
 }
 
-
-void BytecodeGraphBuilder::VisitForInPrepare(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  Node* prepare = nullptr;
-  {
-    FrameStateBeforeAndAfter states(this, iterator);
-    Node* receiver = environment()->LookupAccumulator();
-    prepare = NewNode(javascript()->ForInPrepare(), receiver);
-    environment()->RecordAfterState(prepare, &states);
-  }
-  // Project cache_type, cache_array, cache_length into register
-  // operands 1, 2, 3.
-  for (int i = 0; i < 3; i++) {
-    environment()->BindRegister(iterator.GetRegisterOperand(i),
-                                NewNode(common()->Projection(i), prepare));
-  }
+void BytecodeGraphBuilder::VisitDebugger() {
+  FrameStateBeforeAndAfter states(this);
+  Node* call =
+      NewNode(javascript()->CallRuntime(Runtime::kHandleDebuggerStatement));
+  environment()->BindAccumulator(call, &states);
 }
 
+// We cannot create a graph from the debugger copy of the bytecode array.
+#define DEBUG_BREAK(Name, ...) \
+  void BytecodeGraphBuilder::Visit##Name() { UNREACHABLE(); }
+DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
+#undef DEBUG_BREAK
 
-void BytecodeGraphBuilder::VisitForInDone(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  Node* index = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+void BytecodeGraphBuilder::BuildForInPrepare() {
+  FrameStateBeforeAndAfter states(this);
+  Node* receiver = environment()->LookupAccumulator();
+  Node* prepare = NewNode(javascript()->ForInPrepare(), receiver);
+  environment()->BindRegistersToProjections(
+      bytecode_iterator().GetRegisterOperand(0), prepare, &states);
+}
+
+void BytecodeGraphBuilder::VisitForInPrepare() { BuildForInPrepare(); }
+
+void BytecodeGraphBuilder::VisitForInPrepareWide() { BuildForInPrepare(); }
+
+void BytecodeGraphBuilder::VisitForInDone() {
+  FrameStateBeforeAndAfter states(this);
+  Node* index =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* cache_length =
-      environment()->LookupRegister(iterator.GetRegisterOperand(1));
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
   Node* exit_cond = NewNode(javascript()->ForInDone(), index, cache_length);
   environment()->BindAccumulator(exit_cond, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitForInNext(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildForInNext() {
+  FrameStateBeforeAndAfter states(this);
   Node* receiver =
-      environment()->LookupRegister(iterator.GetRegisterOperand(0));
-  Node* cache_type =
-      environment()->LookupRegister(iterator.GetRegisterOperand(1));
-  Node* cache_array =
-      environment()->LookupRegister(iterator.GetRegisterOperand(2));
-  Node* index = environment()->LookupRegister(iterator.GetRegisterOperand(3));
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  Node* index =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
+  int catch_reg_pair_index = bytecode_iterator().GetRegisterOperand(2).index();
+  Node* cache_type = environment()->LookupRegister(
+      interpreter::Register(catch_reg_pair_index));
+  Node* cache_array = environment()->LookupRegister(
+      interpreter::Register(catch_reg_pair_index + 1));
+
   Node* value = NewNode(javascript()->ForInNext(), receiver, cache_array,
                         cache_type, index);
   environment()->BindAccumulator(value, &states);
 }
 
+void BytecodeGraphBuilder::VisitForInNext() { BuildForInNext(); }
 
-void BytecodeGraphBuilder::VisitForInStep(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  Node* index = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+void BytecodeGraphBuilder::VisitForInNextWide() { BuildForInNext(); }
+
+void BytecodeGraphBuilder::VisitForInStep() {
+  FrameStateBeforeAndAfter states(this);
+  Node* index =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   index = NewNode(javascript()->ForInStep(), index);
   environment()->BindAccumulator(index, &states);
 }
 
-
-void BytecodeGraphBuilder::MergeEnvironmentsOfBackwardBranches(
-    int source_offset, int target_offset) {
-  DCHECK_GE(source_offset, target_offset);
-  const ZoneVector<int>* branch_sites =
-      branch_analysis()->BackwardBranchesTargetting(target_offset);
-  if (branch_sites->back() == source_offset) {
-    // The set of back branches is complete, merge them.
-    DCHECK_GE(branch_sites->at(0), target_offset);
-    Environment* merged = merge_environments_[branch_sites->at(0)];
-    for (size_t i = 1; i < branch_sites->size(); i++) {
-      DCHECK_GE(branch_sites->at(i), target_offset);
-      merged->Merge(merge_environments_[branch_sites->at(i)]);
+void BytecodeGraphBuilder::SwitchToMergeEnvironment(int current_offset) {
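+  // If an environment was recorded for a merge at this offset, fold the
+  // current environment (if any) into it and continue from the merged one.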
+  if (merge_environments_[current_offset] != nullptr) {
+    if (environment() != nullptr) {
+      merge_environments_[current_offset]->Merge(environment());
     }
-    // And now merge with loop header environment created when loop
-    // header was visited.
-    loop_header_environments_[target_offset]->Merge(merged);
+    set_environment(merge_environments_[current_offset]);
   }
 }
 
-
-void BytecodeGraphBuilder::MergeEnvironmentsOfForwardBranches(
-    int source_offset) {
-  if (branch_analysis()->forward_branches_target(source_offset)) {
-    // Merge environments of branches that reach this bytecode.
-    auto branch_sites =
-        branch_analysis()->ForwardBranchesTargetting(source_offset);
-    DCHECK_LT(branch_sites->at(0), source_offset);
-    Environment* merged = merge_environments_[branch_sites->at(0)];
-    for (size_t i = 1; i < branch_sites->size(); i++) {
-      DCHECK_LT(branch_sites->at(i), source_offset);
-      merged->Merge(merge_environments_[branch_sites->at(i)]);
-    }
-    if (environment()) {
-      merged->Merge(environment());
-    }
-    set_environment(merged);
-  }
-}
-
-
-void BytecodeGraphBuilder::BuildLoopHeaderForBackwardBranches(
-    int source_offset) {
-  if (branch_analysis()->backward_branches_target(source_offset)) {
+void BytecodeGraphBuilder::BuildLoopHeaderEnvironment(int current_offset) {
+  if (branch_analysis()->backward_branches_target(current_offset)) {
     // Add loop header and store a copy so we can connect merged back
     // edge inputs to the loop header.
-    loop_header_environments_[source_offset] = environment()->CopyForLoop();
+    merge_environments_[current_offset] = environment()->CopyForLoop();
   }
 }
 
-
-void BytecodeGraphBuilder::BuildJump(int source_offset, int target_offset) {
-  DCHECK_NULL(merge_environments_[source_offset]);
-  merge_environments_[source_offset] = environment();
-  if (source_offset >= target_offset) {
-    MergeEnvironmentsOfBackwardBranches(source_offset, target_offset);
+void BytecodeGraphBuilder::MergeIntoSuccessorEnvironment(int target_offset) {
+  if (merge_environments_[target_offset] == nullptr) {
+    // Append merge nodes to the environment. We may merge here with another
+    // environment, so add a placeholder for merge nodes. Redundant merges may
+    // be added, but they will be eliminated in a later pass.
+    // TODO(mstarzinger): Be smarter about this!
+    NewMerge();
+    merge_environments_[target_offset] = environment();
+  } else {
+    merge_environments_[target_offset]->Merge(environment());
   }
   set_environment(nullptr);
 }
 
+void BytecodeGraphBuilder::MergeControlToLeaveFunction(Node* exit) {
+  exit_controls_.push_back(exit);
+  set_environment(nullptr);
+}
 
 void BytecodeGraphBuilder::BuildJump() {
-  int source_offset = bytecode_iterator()->current_offset();
-  int target_offset = bytecode_iterator()->GetJumpTargetOffset();
-  BuildJump(source_offset, target_offset);
+  MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
 }
 
 
 void BytecodeGraphBuilder::BuildConditionalJump(Node* condition) {
-  int source_offset = bytecode_iterator()->current_offset();
   NewBranch(condition);
   Environment* if_false_environment = environment()->CopyForConditional();
   NewIfTrue();
-  BuildJump(source_offset, bytecode_iterator()->GetJumpTargetOffset());
+  MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
   set_environment(if_false_environment);
   NewIfFalse();
 }
@@ -1870,6 +1559,15 @@
   BuildConditionalJump(condition);
 }
 
+void BytecodeGraphBuilder::BuildJumpIfNotHole() {
+  Node* accumulator = environment()->LookupAccumulator();
+  Node* condition = NewNode(javascript()->StrictEqual(), accumulator,
+                            jsgraph()->TheHoleConstant());
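+  // Invert the hole check via a select so that the jump is taken when the
+  // accumulator is not the hole.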
+  Node* node =
+      NewNode(common()->Select(MachineRepresentation::kTagged), condition,
+              jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
+  BuildConditionalJump(node);
+}
 
 Node** BytecodeGraphBuilder::EnsureInputBufferSize(int size) {
   if (size > input_buffer_size_) {
@@ -1880,17 +1578,32 @@
   return input_buffer_;
 }
 
+void BytecodeGraphBuilder::EnterAndExitExceptionHandlers(int current_offset) {
+  Handle<HandlerTable> table = exception_handler_table();
+  int num_entries = table->NumberOfRangeEntries();
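+  // The loops below rely on handler-table range entries being ordered by
+  // start offset, so handlers are entered and exited in stack order as the
+  // current offset advances.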
 
-void BytecodeGraphBuilder::PrepareEntryFrameState(Node* node) {
-  DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
-  DCHECK_EQ(IrOpcode::kDead,
-            NodeProperties::GetFrameStateInput(node, 0)->opcode());
-  NodeProperties::ReplaceFrameStateInput(
-      node, 0, environment()->Checkpoint(BailoutId(0),
-                                         OutputFrameStateCombine::Ignore()));
+  // Potentially exit exception handlers.
+  while (!exception_handlers_.empty()) {
+    int current_end = exception_handlers_.top().end_offset_;
+    if (current_offset < current_end) break;  // Still covered by range.
+    exception_handlers_.pop();
+  }
+
+  // Potentially enter exception handlers.
+  while (current_exception_handler_ < num_entries) {
+    int next_start = table->GetRangeStart(current_exception_handler_);
+    if (current_offset < next_start) break;  // Not yet covered by range.
+    int next_end = table->GetRangeEnd(current_exception_handler_);
+    int next_handler = table->GetRangeHandler(current_exception_handler_);
+    int context_register = table->GetRangeData(current_exception_handler_);
+    CatchPrediction pred =
+        table->GetRangePrediction(current_exception_handler_);
+    exception_handlers_.push(
+        {next_start, next_end, next_handler, context_register, pred});
+    current_exception_handler_++;
+  }
 }
 
-
 Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
                                      Node** value_inputs, bool incomplete) {
   DCHECK_EQ(op->ValueInputCount(), value_input_count);
@@ -1907,6 +1620,7 @@
   if (!has_context && frame_state_count == 0 && !has_control && !has_effect) {
     result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
   } else {
+    bool inside_handler = !exception_handlers_.empty();
     int input_count_with_deps = value_input_count;
     if (has_context) ++input_count_with_deps;
     input_count_with_deps += frame_state_count;
@@ -1931,21 +1645,40 @@
       *current_input++ = environment()->GetControlDependency();
     }
     result = graph()->NewNode(op, input_count_with_deps, buffer, incomplete);
-    if (!environment()->IsMarkedAsUnreachable()) {
-      // Update the current control dependency for control-producing nodes.
-      if (NodeProperties::IsControl(result)) {
-        environment()->UpdateControlDependency(result);
-      }
-      // Update the current effect dependency for effect-producing nodes.
-      if (result->op()->EffectOutputCount() > 0) {
-        environment()->UpdateEffectDependency(result);
-      }
-      // Add implicit success continuation for throwing nodes.
-      if (!result->op()->HasProperty(Operator::kNoThrow)) {
-        const Operator* if_success = common()->IfSuccess();
-        Node* on_success = graph()->NewNode(if_success, result);
-        environment_->UpdateControlDependency(on_success);
-      }
+    // Update the current control dependency for control-producing nodes.
+    if (NodeProperties::IsControl(result)) {
+      environment()->UpdateControlDependency(result);
+    }
+    // Update the current effect dependency for effect-producing nodes.
+    if (result->op()->EffectOutputCount() > 0) {
+      environment()->UpdateEffectDependency(result);
+    }
+    // Add implicit exception continuation for throwing nodes.
+    if (!result->op()->HasProperty(Operator::kNoThrow) && inside_handler) {
+      int handler_offset = exception_handlers_.top().handler_offset_;
+      int context_index = exception_handlers_.top().context_register_;
+      CatchPrediction prediction = exception_handlers_.top().pred_;
+      interpreter::Register context_register(context_index);
+      IfExceptionHint hint = prediction == CatchPrediction::CAUGHT
+                                 ? IfExceptionHint::kLocallyCaught
+                                 : IfExceptionHint::kLocallyUncaught;
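+      // Keep a copy of the environment for the success path; the current
+      // environment is rewired below for the exceptional path into the handler.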
+      Environment* success_env = environment()->CopyForConditional();
+      const Operator* op = common()->IfException(hint);
+      Node* effect = environment()->GetEffectDependency();
+      Node* on_exception = graph()->NewNode(op, effect, result);
+      Node* context = environment()->LookupRegister(context_register);
+      environment()->UpdateControlDependency(on_exception);
+      environment()->UpdateEffectDependency(on_exception);
+      environment()->BindAccumulator(on_exception);
+      environment()->SetContext(context);
+      MergeIntoSuccessorEnvironment(handler_offset);
+      set_environment(success_env);
+    }
+    // Add implicit success continuation for throwing nodes.
+    if (!result->op()->HasProperty(Operator::kNoThrow)) {
+      const Operator* if_success = common()->IfSuccess();
+      Node* on_success = graph()->NewNode(if_success, result);
+      environment()->UpdateControlDependency(on_success);
     }
   }
 
@@ -2028,13 +1761,6 @@
   return value;
 }
 
-
-void BytecodeGraphBuilder::UpdateControlDependencyToLeaveFunction(Node* exit) {
-  if (environment()->IsMarkedAsUnreachable()) return;
-  environment()->MarkAsUnreachable();
-  exit_controls_.push_back(exit);
-}
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/bytecode-graph-builder.h b/src/compiler/bytecode-graph-builder.h
index 94a278c..2fa5967 100644
--- a/src/compiler/bytecode-graph-builder.h
+++ b/src/compiler/bytecode-graph-builder.h
@@ -23,19 +23,14 @@
                        JSGraph* jsgraph);
 
   // Creates a graph by visiting bytecodes.
-  bool CreateGraph(bool stack_check = true);
-
-  Graph* graph() const { return jsgraph_->graph(); }
+  bool CreateGraph();
 
  private:
   class Environment;
   class FrameStateBeforeAndAfter;
 
-  void CreateGraphBody(bool stack_check);
   void VisitBytecodes();
 
-  Node* LoadAccumulator(Node* value);
-
   // Get or create the node that represents the outer function closure.
   Node* GetFunctionClosure();
 
@@ -45,13 +40,6 @@
   // Get or create the node that represents the incoming new target value.
   Node* GetNewTarget();
 
-  // Builder for accessing a (potentially immutable) object field.
-  Node* BuildLoadObjectField(Node* object, int offset);
-  Node* BuildLoadImmutableObjectField(Node* object, int offset);
-
-  // Builder for accessing type feedback vector.
-  Node* BuildLoadFeedbackVector();
-
   // Builder for loading a native context field.
   Node* BuildLoadNativeContextField(int index);
 
@@ -111,91 +99,102 @@
   Node* MakeNode(const Operator* op, int value_input_count, Node** value_inputs,
                  bool incomplete);
 
-  // Helper to indicate a node exits the function body.
-  void UpdateControlDependencyToLeaveFunction(Node* exit);
-
   Node** EnsureInputBufferSize(int size);
 
   Node* ProcessCallArguments(const Operator* call_op, Node* callee,
                              interpreter::Register receiver, size_t arity);
-  Node* ProcessCallNewArguments(const Operator* call_new_op,
-                                interpreter::Register callee,
+  Node* ProcessCallNewArguments(const Operator* call_new_op, Node* callee,
+                                Node* new_target,
                                 interpreter::Register first_arg, size_t arity);
   Node* ProcessCallRuntimeArguments(const Operator* call_runtime_op,
                                     interpreter::Register first_arg,
                                     size_t arity);
 
-  void BuildCreateLiteral(const Operator* op,
-                          const interpreter::BytecodeArrayIterator& iterator);
-  void BuildCreateRegExpLiteral(
-      const interpreter::BytecodeArrayIterator& iterator);
-  void BuildCreateArrayLiteral(
-      const interpreter::BytecodeArrayIterator& iterator);
-  void BuildCreateObjectLiteral(
-      const interpreter::BytecodeArrayIterator& iterator);
-  void BuildCreateArguments(CreateArgumentsParameters::Type type,
-                            const interpreter::BytecodeArrayIterator& iterator);
-  void BuildLoadGlobal(const interpreter::BytecodeArrayIterator& iterator,
-                       TypeofMode typeof_mode);
-  void BuildStoreGlobal(const interpreter::BytecodeArrayIterator& iterator);
-  void BuildNamedLoad(const interpreter::BytecodeArrayIterator& iterator);
-  void BuildKeyedLoad(const interpreter::BytecodeArrayIterator& iterator);
-  void BuildNamedStore(const interpreter::BytecodeArrayIterator& iterator);
-  void BuildKeyedStore(const interpreter::BytecodeArrayIterator& iterator);
-  void BuildLdaLookupSlot(TypeofMode typeof_mode,
-                          const interpreter::BytecodeArrayIterator& iterator);
-  void BuildStaLookupSlot(LanguageMode language_mode,
-                          const interpreter::BytecodeArrayIterator& iterator);
-  void BuildCall(const interpreter::BytecodeArrayIterator& iterator);
-  void BuildBinaryOp(const Operator* op,
-                     const interpreter::BytecodeArrayIterator& iterator);
-  void BuildCompareOp(const Operator* op,
-                      const interpreter::BytecodeArrayIterator& iterator);
-  void BuildDelete(const interpreter::BytecodeArrayIterator& iterator);
-  void BuildCastOperator(const Operator* js_op,
-                         const interpreter::BytecodeArrayIterator& iterator);
+  void BuildCreateLiteral(const Operator* op);
+  void BuildCreateRegExpLiteral();
+  void BuildCreateArrayLiteral();
+  void BuildCreateObjectLiteral();
+  void BuildCreateArguments(CreateArgumentsType type);
+  void BuildLoadGlobal(TypeofMode typeof_mode);
+  void BuildStoreGlobal(LanguageMode language_mode);
+  void BuildNamedLoad();
+  void BuildKeyedLoad();
+  void BuildNamedStore(LanguageMode language_mode);
+  void BuildKeyedStore(LanguageMode language_mode);
+  void BuildLdaLookupSlot(TypeofMode typeof_mode);
+  void BuildStaLookupSlot(LanguageMode language_mode);
+  void BuildCall(TailCallMode tail_call_mode);
+  void BuildCallJSRuntime();
+  void BuildCallRuntime();
+  void BuildCallRuntimeForPair();
+  void BuildCallConstruct();
+  void BuildThrow();
+  void BuildBinaryOp(const Operator* op);
+  void BuildCompareOp(const Operator* op);
+  void BuildDelete(LanguageMode language_mode);
+  void BuildCastOperator(const Operator* op);
+  void BuildForInPrepare();
+  void BuildForInNext();
 
   // Control flow plumbing.
-  void BuildJump(int source_offset, int target_offset);
   void BuildJump();
   void BuildConditionalJump(Node* condition);
   void BuildJumpIfEqual(Node* comperand);
   void BuildJumpIfToBooleanEqual(Node* boolean_comperand);
+  void BuildJumpIfNotHole();
 
-  // Constructing merge and loop headers.
-  void MergeEnvironmentsOfBackwardBranches(int source_offset,
-                                           int target_offset);
-  void MergeEnvironmentsOfForwardBranches(int source_offset);
-  void BuildLoopHeaderForBackwardBranches(int source_offset);
+  // Simulates control flow by forward-propagating environments.
+  void MergeIntoSuccessorEnvironment(int target_offset);
+  void BuildLoopHeaderEnvironment(int current_offset);
+  void SwitchToMergeEnvironment(int current_offset);
 
-  // Attaches a frame state to |node| for the entry to the function.
-  void PrepareEntryFrameState(Node* node);
+  // Simulates control flow that exits the function body.
+  void MergeControlToLeaveFunction(Node* exit);
+
+  // Simulates entry and exit of exception handlers.
+  void EnterAndExitExceptionHandlers(int current_offset);
 
   // Growth increment for the temporary buffer used to construct input lists to
   // new nodes.
   static const int kInputBufferSizeIncrement = 64;
 
+  // The catch prediction from the handler table is reused.
+  typedef HandlerTable::CatchPrediction CatchPrediction;
+
+  // An abstract representation for an exception handler that is being
+  // entered and exited while the graph builder is iterating over the
+  // underlying bytecode. The exception handlers within the bytecode are
+  // well scoped, hence will form a stack during iteration.
+  struct ExceptionHandler {
+    int start_offset_;      // Start offset of the handled area in the bytecode.
+    int end_offset_;        // End offset of the handled area in the bytecode.
+    int handler_offset_;    // Handler entry offset within the bytecode.
+    int context_register_;  // Index of register holding handler context.
+    CatchPrediction pred_;  // Prediction of whether handler is catching.
+  };
+
   // Field accessors
+  Graph* graph() const { return jsgraph_->graph(); }
   CommonOperatorBuilder* common() const { return jsgraph_->common(); }
   Zone* graph_zone() const { return graph()->zone(); }
-  CompilationInfo* info() const { return info_; }
   JSGraph* jsgraph() const { return jsgraph_; }
   JSOperatorBuilder* javascript() const { return jsgraph_->javascript(); }
   Zone* local_zone() const { return local_zone_; }
   const Handle<BytecodeArray>& bytecode_array() const {
     return bytecode_array_;
   }
+  const Handle<HandlerTable>& exception_handler_table() const {
+    return exception_handler_table_;
+  }
+  const Handle<TypeFeedbackVector>& feedback_vector() const {
+    return feedback_vector_;
+  }
   const FrameStateFunctionInfo* frame_state_function_info() const {
     return frame_state_function_info_;
   }
 
-  LanguageMode language_mode() const {
-    // TODO(mythria): Don't rely on parse information to get language mode.
-    return info()->language_mode();
-  }
-
-  const interpreter::BytecodeArrayIterator* bytecode_iterator() const {
-    return bytecode_iterator_;
+  const interpreter::BytecodeArrayIterator& bytecode_iterator() const {
+    return *bytecode_iterator_;
   }
 
   void set_bytecode_iterator(
@@ -211,28 +210,32 @@
     branch_analysis_ = branch_analysis;
   }
 
-#define DECLARE_VISIT_BYTECODE(name, ...) \
-  void Visit##name(const interpreter::BytecodeArrayIterator& iterator);
+#define DECLARE_VISIT_BYTECODE(name, ...) void Visit##name();
   BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
 #undef DECLARE_VISIT_BYTECODE
 
   Zone* local_zone_;
-  CompilationInfo* info_;
   JSGraph* jsgraph_;
   Handle<BytecodeArray> bytecode_array_;
+  Handle<HandlerTable> exception_handler_table_;
+  Handle<TypeFeedbackVector> feedback_vector_;
   const FrameStateFunctionInfo* frame_state_function_info_;
   const interpreter::BytecodeArrayIterator* bytecode_iterator_;
   const BytecodeBranchAnalysis* branch_analysis_;
   Environment* environment_;
 
+  // Indicates whether deoptimization support is enabled for this compilation
+  // and whether valid frame states need to be attached to deoptimizing nodes.
+  bool deoptimization_enabled_;
 
-  // Merge environments are snapshots of the environment at a particular
-  // bytecode offset to be merged into a later environment.
+  // Merge environments are snapshots of the environment at points where the
+  // control flow merges. This models a forward data flow propagation of all
+  // values from all predecessors of the merge in question.
   ZoneMap<int, Environment*> merge_environments_;
 
-  // Loop header environments are environments created for bytecodes
-  // where it is known there are back branches, ie a loop header.
-  ZoneMap<int, Environment*> loop_header_environments_;
+  // Exception handlers currently entered by the iteration.
+  ZoneStack<ExceptionHandler> exception_handlers_;
+  int current_exception_handler_;
 
   // Temporary storage for building node input lists.
   int input_buffer_size_;
@@ -243,100 +246,12 @@
   SetOncePointer<Node> function_closure_;
   SetOncePointer<Node> new_target_;
 
-  // Optimization to cache loaded feedback vector.
-  SetOncePointer<Node> feedback_vector_;
-
   // Control nodes that exit the function body.
   ZoneVector<Node*> exit_controls_;
 
   DISALLOW_COPY_AND_ASSIGN(BytecodeGraphBuilder);
 };
 
-
-class BytecodeGraphBuilder::Environment : public ZoneObject {
- public:
-  Environment(BytecodeGraphBuilder* builder, int register_count,
-              int parameter_count, Node* control_dependency, Node* context);
-
-  int parameter_count() const { return parameter_count_; }
-  int register_count() const { return register_count_; }
-
-  Node* LookupAccumulator() const;
-  Node* LookupRegister(interpreter::Register the_register) const;
-
-  void ExchangeRegisters(interpreter::Register reg0,
-                         interpreter::Register reg1);
-
-  void BindAccumulator(Node* node, FrameStateBeforeAndAfter* states = nullptr);
-  void BindRegister(interpreter::Register the_register, Node* node,
-                    FrameStateBeforeAndAfter* states = nullptr);
-  void BindRegistersToProjections(interpreter::Register first_reg, Node* node,
-                                  FrameStateBeforeAndAfter* states = nullptr);
-  void RecordAfterState(Node* node, FrameStateBeforeAndAfter* states);
-
-  bool IsMarkedAsUnreachable() const;
-  void MarkAsUnreachable();
-
-  // Effect dependency tracked by this environment.
-  Node* GetEffectDependency() { return effect_dependency_; }
-  void UpdateEffectDependency(Node* dependency) {
-    effect_dependency_ = dependency;
-  }
-
-  // Preserve a checkpoint of the environment for the IR graph. Any
-  // further mutation of the environment will not affect checkpoints.
-  Node* Checkpoint(BailoutId bytecode_offset, OutputFrameStateCombine combine);
-
-  // Returns true if the state values are up to date with the current
-  // environment.
-  bool StateValuesAreUpToDate(int output_poke_offset, int output_poke_count);
-
-  // Control dependency tracked by this environment.
-  Node* GetControlDependency() const { return control_dependency_; }
-  void UpdateControlDependency(Node* dependency) {
-    control_dependency_ = dependency;
-  }
-
-  Node* Context() const { return context_; }
-  void SetContext(Node* new_context) { context_ = new_context; }
-
-  Environment* CopyForConditional() const;
-  Environment* CopyForLoop();
-  void Merge(Environment* other);
-
- private:
-  explicit Environment(const Environment* copy);
-  void PrepareForLoop();
-  bool StateValuesAreUpToDate(Node** state_values, int offset, int count,
-                              int output_poke_start, int output_poke_end);
-  bool StateValuesRequireUpdate(Node** state_values, int offset, int count);
-  void UpdateStateValues(Node** state_values, int offset, int count);
-
-  int RegisterToValuesIndex(interpreter::Register the_register) const;
-
-  Zone* zone() const { return builder_->local_zone(); }
-  Graph* graph() const { return builder_->graph(); }
-  CommonOperatorBuilder* common() const { return builder_->common(); }
-  BytecodeGraphBuilder* builder() const { return builder_; }
-  const NodeVector* values() const { return &values_; }
-  NodeVector* values() { return &values_; }
-  int register_base() const { return register_base_; }
-  int accumulator_base() const { return accumulator_base_; }
-
-  BytecodeGraphBuilder* builder_;
-  int register_count_;
-  int parameter_count_;
-  Node* context_;
-  Node* control_dependency_;
-  Node* effect_dependency_;
-  NodeVector values_;
-  Node* parameters_state_values_;
-  Node* registers_state_values_;
-  Node* accumulator_state_values_;
-  int register_base_;
-  int accumulator_base_;
-};
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/c-linkage.cc b/src/compiler/c-linkage.cc
index 44e0bf1..783d9d6 100644
--- a/src/compiler/c-linkage.cc
+++ b/src/compiler/c-linkage.cc
@@ -90,6 +90,7 @@
 // ===========================================================================
 // == mips ===================================================================
 // ===========================================================================
+#define STACK_SHADOW_WORDS 4
 #define PARAM_REGISTERS a0, a1, a2, a3
 #define CALLEE_SAVE_REGISTERS                                                  \
   s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | \
@@ -133,23 +134,22 @@
 
 // General code uses the above configuration data.
 CallDescriptor* Linkage::GetSimplifiedCDescriptor(
-    Zone* zone, const MachineSignature* msig) {
+    Zone* zone, const MachineSignature* msig, bool set_initialize_root_flag) {
   LocationSignature::Builder locations(zone, msig->return_count(),
                                        msig->parameter_count());
-#if 0  // TODO(titzer): instruction selector tests break here.
   // Check the types of the signature.
   // Currently no floating point parameters or returns are allowed because
   // on x87 and ia32, the FP top of stack is involved.
-
   for (size_t i = 0; i < msig->return_count(); i++) {
-    MachineType type = RepresentationOf(msig->GetReturn(i));
-    CHECK(type != kRepFloat32 && type != kRepFloat64);
+    MachineRepresentation rep = msig->GetReturn(i).representation();
+    CHECK_NE(MachineRepresentation::kFloat32, rep);
+    CHECK_NE(MachineRepresentation::kFloat64, rep);
   }
   for (size_t i = 0; i < msig->parameter_count(); i++) {
-    MachineType type = RepresentationOf(msig->GetParam(i));
-    CHECK(type != kRepFloat32 && type != kRepFloat64);
+    MachineRepresentation rep = msig->GetParam(i).representation();
+    CHECK_NE(MachineRepresentation::kFloat32, rep);
+    CHECK_NE(MachineRepresentation::kFloat64, rep);
   }
-#endif
 
 #ifdef UNSUPPORTED_C_LINKAGE
   // This method should not be called on unknown architectures.
@@ -220,7 +220,9 @@
       Operator::kNoProperties,       // properties
       kCalleeSaveRegisters,          // callee-saved registers
       kCalleeSaveFPRegisters,        // callee-saved fp regs
-      CallDescriptor::kNoFlags,      // flags
+      set_initialize_root_flag ?     // flags
+          CallDescriptor::kInitializeRootRegister
+                               : CallDescriptor::kNoFlags,
       "c-call");
 }
 
diff --git a/src/compiler/change-lowering.cc b/src/compiler/change-lowering.cc
index f791db1..e217f37 100644
--- a/src/compiler/change-lowering.cc
+++ b/src/compiler/change-lowering.cc
@@ -49,6 +49,12 @@
       return StoreElement(node);
     case IrOpcode::kAllocate:
       return Allocate(node);
+    case IrOpcode::kObjectIsReceiver:
+      return ObjectIsReceiver(node);
+    case IrOpcode::kObjectIsSmi:
+      return ObjectIsSmi(node);
+    case IrOpcode::kObjectIsNumber:
+      return ObjectIsNumber(node);
     default:
       return NoChange();
   }
@@ -582,6 +588,76 @@
   return Changed(node);
 }
 
+Node* ChangeLowering::IsSmi(Node* value) {
+  return graph()->NewNode(
+      machine()->WordEqual(),
+      graph()->NewNode(machine()->WordAnd(), value,
+                       jsgraph()->IntPtrConstant(kSmiTagMask)),
+      jsgraph()->IntPtrConstant(kSmiTag));
+}
+
+Node* ChangeLowering::LoadHeapObjectMap(Node* object, Node* control) {
+  return graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), object,
+      jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
+      graph()->start(), control);
+}
+
+Node* ChangeLowering::LoadMapInstanceType(Node* map) {
+  return graph()->NewNode(
+      machine()->Load(MachineType::Uint8()), map,
+      jsgraph()->IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag),
+      graph()->start(), graph()->start());
+}
+
+Reduction ChangeLowering::ObjectIsNumber(Node* node) {
+  Node* input = NodeProperties::GetValueInput(node, 0);
+  // TODO(bmeurer): Optimize somewhat based on input type.
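+  // A Smi is always a number; for heap objects, check whether the map is the
+  // heap number map.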
+  Node* check = IsSmi(input);
+  Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* vtrue = jsgraph()->Int32Constant(1);
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* vfalse = graph()->NewNode(
+      machine()->WordEqual(), LoadHeapObjectMap(input, if_false),
+      jsgraph()->HeapConstant(isolate()->factory()->heap_number_map()));
+  Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  node->ReplaceInput(0, vtrue);
+  node->AppendInput(graph()->zone(), vfalse);
+  node->AppendInput(graph()->zone(), control);
+  NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
+  return Changed(node);
+}
+
+Reduction ChangeLowering::ObjectIsReceiver(Node* node) {
+  Node* input = NodeProperties::GetValueInput(node, 0);
+  // TODO(bmeurer): Optimize somewhat based on input type.
+  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
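+  // A Smi is never a receiver. For heap objects, compare the instance type
+  // against FIRST_JS_RECEIVER_TYPE; the STATIC_ASSERT above guarantees that
+  // receiver types occupy the upper end of the instance type range.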
+  Node* check = IsSmi(input);
+  Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* vtrue = jsgraph()->Int32Constant(0);
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* vfalse =
+      graph()->NewNode(machine()->Uint32LessThanOrEqual(),
+                       jsgraph()->Uint32Constant(FIRST_JS_RECEIVER_TYPE),
+                       LoadMapInstanceType(LoadHeapObjectMap(input, if_false)));
+  Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  node->ReplaceInput(0, vtrue);
+  node->AppendInput(graph()->zone(), vfalse);
+  node->AppendInput(graph()->zone(), control);
+  NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
+  return Changed(node);
+}
+
+Reduction ChangeLowering::ObjectIsSmi(Node* node) {
+  node->ReplaceInput(0,
+                     graph()->NewNode(machine()->WordAnd(), node->InputAt(0),
+                                      jsgraph()->IntPtrConstant(kSmiTagMask)));
+  node->AppendInput(graph()->zone(), jsgraph()->IntPtrConstant(kSmiTag));
+  NodeProperties::ChangeOp(node, machine()->WordEqual());
+  return Changed(node);
+}
 
 Isolate* ChangeLowering::isolate() const { return jsgraph()->isolate(); }
 
diff --git a/src/compiler/change-lowering.h b/src/compiler/change-lowering.h
index 6d60776..defadd9 100644
--- a/src/compiler/change-lowering.h
+++ b/src/compiler/change-lowering.h
@@ -56,6 +56,14 @@
   Reduction StoreElement(Node* node);
   Reduction Allocate(Node* node);
 
+  Node* IsSmi(Node* value);
+  Node* LoadHeapObjectMap(Node* object, Node* control);
+  Node* LoadMapInstanceType(Node* map);
+
+  Reduction ObjectIsNumber(Node* node);
+  Reduction ObjectIsReceiver(Node* node);
+  Reduction ObjectIsSmi(Node* node);
+
   Node* ComputeIndex(const ElementAccess& access, Node* const key);
   Graph* graph() const;
   Isolate* isolate() const;
diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc
index 313567e..712cfe0 100644
--- a/src/compiler/code-generator.cc
+++ b/src/compiler/code-generator.cc
@@ -78,10 +78,12 @@
   if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
     ProfileEntryHookStub::MaybeCallEntryHook(masm());
   }
-
   // Architecture-specific, linkage-specific prologue.
   info->set_prologue_offset(masm()->pc_offset());
   AssemblePrologue();
+  if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) {
+    masm()->InitializeRootRegister();
+  }
 
   // Define deoptimization literals for all inlined functions.
   DCHECK_EQ(0u, deoptimization_literals_.size());
@@ -175,12 +177,12 @@
     }
   }
 
-  safepoints()->Emit(masm(), frame()->GetSpillSlotCount());
+  safepoints()->Emit(masm(), frame()->GetTotalFrameSlotCount());
 
   Handle<Code> result =
       v8::internal::CodeGenerator::MakeCodeEpilogue(masm(), info);
   result->set_is_turbofanned(true);
-  result->set_stack_slots(frame()->GetSpillSlotCount());
+  result->set_stack_slots(frame()->GetTotalFrameSlotCount());
   result->set_safepoint_table_offset(safepoints()->GetCodeOffset());
 
   // Emit exception handler table.
@@ -234,9 +236,12 @@
     if (operand.IsStackSlot()) {
       int index = LocationOperand::cast(operand).index();
       DCHECK(index >= 0);
-      // Safepoint table indices are 0-based from the beginning of the spill
-      // slot area, adjust appropriately.
-      index -= stackSlotToSpillSlotDelta;
+      // We might index values in the fixed part of the frame (i.e. the
+      // closure pointer or the context pointer); these are not spill slots
+      // and therefore don't work with the SafepointTable currently, but
+      // we also don't need to worry about them, since the GC has special
+      // knowledge about those fields anyway.
+      if (index < stackSlotToSpillSlotDelta) continue;
       safepoint.DefinePointerSlot(index, zone());
     } else if (operand.IsRegister() && (kind & Safepoint::kWithRegisters)) {
       Register reg = LocationOperand::cast(operand).GetRegister();
@@ -583,7 +588,7 @@
     case FrameStateType::kInterpretedFunction:
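+      // Note: the locals count is incremented by one here; this presumably
+      // accounts for the interpreter's accumulator in the translated frame.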
       translation->BeginInterpretedFrame(
           descriptor->bailout_id(), shared_info_id,
-          static_cast<unsigned int>(descriptor->locals_count()));
+          static_cast<unsigned int>(descriptor->locals_count() + 1));
       break;
     case FrameStateType::kArgumentsAdaptor:
       translation->BeginArgumentsAdaptorFrame(
diff --git a/src/compiler/code-stub-assembler.cc b/src/compiler/code-stub-assembler.cc
index b2a05b6..45f47d3 100644
--- a/src/compiler/code-stub-assembler.cc
+++ b/src/compiler/code-stub-assembler.cc
@@ -24,28 +24,33 @@
 namespace internal {
 namespace compiler {
 
-
 CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
                                      const CallInterfaceDescriptor& descriptor,
-                                     Code::Kind kind, const char* name)
+                                     Code::Flags flags, const char* name,
+                                     size_t result_size)
     : raw_assembler_(new RawMachineAssembler(
           isolate, new (zone) Graph(zone),
-          Linkage::GetStubCallDescriptor(isolate, zone, descriptor, 0,
-                                         CallDescriptor::kNoFlags))),
-      kind_(kind),
+          Linkage::GetStubCallDescriptor(
+              isolate, zone, descriptor, descriptor.GetStackParameterCount(),
+              CallDescriptor::kNoFlags, Operator::kNoProperties,
+              MachineType::AnyTagged(), result_size))),
+      flags_(flags),
       name_(name),
-      code_generated_(false) {}
-
+      code_generated_(false),
+      variables_(zone) {}
 
 CodeStubAssembler::~CodeStubAssembler() {}
 
+void CodeStubAssembler::CallPrologue() {}
+
+void CodeStubAssembler::CallEpilogue() {}
 
 Handle<Code> CodeStubAssembler::GenerateCode() {
   DCHECK(!code_generated_);
 
   Schedule* schedule = raw_assembler_->Export();
   Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
-      isolate(), raw_assembler_->call_descriptor(), graph(), schedule, kind_,
+      isolate(), raw_assembler_->call_descriptor(), graph(), schedule, flags_,
       name_);
 
   code_generated_ = true;
@@ -77,6 +82,9 @@
   return raw_assembler_->BooleanConstant(value);
 }
 
+Node* CodeStubAssembler::ExternalConstant(ExternalReference address) {
+  return raw_assembler_->ExternalConstant(address);
+}
 
 Node* CodeStubAssembler::Parameter(int value) {
   return raw_assembler_->Parameter(value);
@@ -87,6 +95,21 @@
   return raw_assembler_->Return(value);
 }
 
+void CodeStubAssembler::Bind(CodeStubAssembler::Label* label) {
+  return label->Bind();
+}
+
+Node* CodeStubAssembler::LoadFramePointer() {
+  return raw_assembler_->LoadFramePointer();
+}
+
+Node* CodeStubAssembler::LoadParentFramePointer() {
+  return raw_assembler_->LoadParentFramePointer();
+}
+
+Node* CodeStubAssembler::LoadStackPointer() {
+  return raw_assembler_->LoadStackPointer();
+}
 
 Node* CodeStubAssembler::SmiShiftBitsConstant() {
   return Int32Constant(kSmiShiftSize + kSmiTagSize);
@@ -102,31 +125,117 @@
   return raw_assembler_->WordSar(value, SmiShiftBitsConstant());
 }
 
+#define DEFINE_CODE_STUB_ASSEMBER_BINARY_OP(name)   \
+  Node* CodeStubAssembler::name(Node* a, Node* b) { \
+    return raw_assembler_->name(a, b);              \
+  }
+CODE_STUB_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_STUB_ASSEMBER_BINARY_OP)
+#undef DEFINE_CODE_STUB_ASSEMBER_BINARY_OP
 
-Node* CodeStubAssembler::IntPtrAdd(Node* a, Node* b) {
-  return raw_assembler_->IntPtrAdd(a, b);
+Node* CodeStubAssembler::ChangeInt32ToInt64(Node* value) {
+  return raw_assembler_->ChangeInt32ToInt64(value);
 }
 
-
-Node* CodeStubAssembler::IntPtrSub(Node* a, Node* b) {
-  return raw_assembler_->IntPtrSub(a, b);
-}
-
-
 Node* CodeStubAssembler::WordShl(Node* value, int shift) {
   return raw_assembler_->WordShl(value, Int32Constant(shift));
 }
 
+Node* CodeStubAssembler::WordIsSmi(Node* a) {
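+  // A value is a Smi iff its tag bits (kSmiTagMask) are all zero.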
+  return WordEqual(raw_assembler_->WordAnd(a, Int32Constant(kSmiTagMask)),
+                   Int32Constant(0));
+}
+
+Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset) {
+  return raw_assembler_->Load(MachineType::AnyTagged(), buffer,
+                              IntPtrConstant(offset));
+}
 
 Node* CodeStubAssembler::LoadObjectField(Node* object, int offset) {
   return raw_assembler_->Load(MachineType::AnyTagged(), object,
                               IntPtrConstant(offset - kHeapObjectTag));
 }
 
+Node* CodeStubAssembler::LoadFixedArrayElementSmiIndex(Node* object,
+                                                       Node* smi_index,
+                                                       int additional_offset) {
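+  // Compute the byte offset from the Smi index: with no Smi shift the tag is
+  // folded into the scale factor, otherwise the index is untagged first and
+  // scaled by the pointer size.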
+  Node* header_size = raw_assembler_->Int32Constant(
+      additional_offset + FixedArray::kHeaderSize - kHeapObjectTag);
+  Node* scaled_index =
+      (kSmiShiftSize == 0)
+          ? raw_assembler_->Word32Shl(
+                smi_index, Int32Constant(kPointerSizeLog2 - kSmiTagSize))
+          : raw_assembler_->Word32Shl(SmiUntag(smi_index),
+                                      Int32Constant(kPointerSizeLog2));
+  Node* offset = raw_assembler_->Int32Add(scaled_index, header_size);
+  return raw_assembler_->Load(MachineType::AnyTagged(), object, offset);
+}
+
+Node* CodeStubAssembler::LoadFixedArrayElementConstantIndex(Node* object,
+                                                            int index) {
+  Node* offset = raw_assembler_->Int32Constant(
+      FixedArray::kHeaderSize - kHeapObjectTag + index * kPointerSize);
+  return raw_assembler_->Load(MachineType::AnyTagged(), object, offset);
+}
+
+Node* CodeStubAssembler::LoadRoot(Heap::RootListIndex root_index) {
+  if (isolate()->heap()->RootCanBeTreatedAsConstant(root_index)) {
+    Handle<Object> root = isolate()->heap()->root_handle(root_index);
+    if (root->IsSmi()) {
+      return Int32Constant(Handle<Smi>::cast(root)->value());
+    } else {
+      return HeapConstant(Handle<HeapObject>::cast(root));
+    }
+  }
+
+  compiler::Node* roots_array_start =
+      ExternalConstant(ExternalReference::roots_array_start(isolate()));
+  USE(roots_array_start);
+
+  // TODO(danno): Implement the root-access case where the root is not constant
+  // and must be loaded from the root array.
+  UNIMPLEMENTED();
+  return nullptr;
+}
+
+Node* CodeStubAssembler::Load(MachineType rep, Node* base) {
+  return raw_assembler_->Load(rep, base);
+}
+
+Node* CodeStubAssembler::Load(MachineType rep, Node* base, Node* index) {
+  return raw_assembler_->Load(rep, base, index);
+}
+
+Node* CodeStubAssembler::Store(MachineRepresentation rep, Node* base,
+                               Node* value) {
+  return raw_assembler_->Store(rep, base, value, kFullWriteBarrier);
+}
+
+Node* CodeStubAssembler::Store(MachineRepresentation rep, Node* base,
+                               Node* index, Node* value) {
+  return raw_assembler_->Store(rep, base, index, value, kFullWriteBarrier);
+}
+
+Node* CodeStubAssembler::StoreNoWriteBarrier(MachineRepresentation rep,
+                                             Node* base, Node* value) {
+  return raw_assembler_->Store(rep, base, value, kNoWriteBarrier);
+}
+
+Node* CodeStubAssembler::StoreNoWriteBarrier(MachineRepresentation rep,
+                                             Node* base, Node* index,
+                                             Node* value) {
+  return raw_assembler_->Store(rep, base, index, value, kNoWriteBarrier);
+}
+
+Node* CodeStubAssembler::Projection(int index, Node* value) {
+  return raw_assembler_->Projection(index, value);
+}
 
 Node* CodeStubAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
                                Node** args) {
-  return raw_assembler_->CallN(descriptor, code_target, args);
+  CallPrologue();
+  Node* return_value = raw_assembler_->CallN(descriptor, code_target, args);
+  CallEpilogue();
+  return return_value;
 }
 
 
@@ -135,41 +244,371 @@
   return raw_assembler_->TailCallN(descriptor, code_target, args);
 }
 
+Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
+                                     Node* context) {
+  CallPrologue();
+  Node* return_value = raw_assembler_->CallRuntime0(function_id, context);
+  CallEpilogue();
+  return return_value;
+}
 
 Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
                                      Node* context, Node* arg1) {
-  return raw_assembler_->CallRuntime1(function_id, arg1, context);
+  CallPrologue();
+  Node* return_value = raw_assembler_->CallRuntime1(function_id, arg1, context);
+  CallEpilogue();
+  return return_value;
 }
 
-
 Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
                                      Node* context, Node* arg1, Node* arg2) {
-  return raw_assembler_->CallRuntime2(function_id, arg1, arg2, context);
+  CallPrologue();
+  Node* return_value =
+      raw_assembler_->CallRuntime2(function_id, arg1, arg2, context);
+  CallEpilogue();
+  return return_value;
 }
 
+Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
+                                     Node* context, Node* arg1, Node* arg2,
+                                     Node* arg3) {
+  CallPrologue();
+  Node* return_value =
+      raw_assembler_->CallRuntime3(function_id, arg1, arg2, arg3, context);
+  CallEpilogue();
+  return return_value;
+}
+
+Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
+                                     Node* context, Node* arg1, Node* arg2,
+                                     Node* arg3, Node* arg4) {
+  CallPrologue();
+  Node* return_value = raw_assembler_->CallRuntime4(function_id, arg1, arg2,
+                                                    arg3, arg4, context);
+  CallEpilogue();
+  return return_value;
+}
 
 Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
                                          Node* context, Node* arg1) {
   return raw_assembler_->TailCallRuntime1(function_id, arg1, context);
 }
 
-
 Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
                                          Node* context, Node* arg1,
                                          Node* arg2) {
   return raw_assembler_->TailCallRuntime2(function_id, arg1, arg2, context);
 }
 
+Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+                                         Node* context, Node* arg1, Node* arg2,
+                                         Node* arg3) {
+  return raw_assembler_->TailCallRuntime3(function_id, arg1, arg2, arg3,
+                                          context);
+}
+
+Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+                                         Node* context, Node* arg1, Node* arg2,
+                                         Node* arg3, Node* arg4) {
+  return raw_assembler_->TailCallRuntime4(function_id, arg1, arg2, arg3, arg4,
+                                          context);
+}
+
+Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+                                  Node* target, Node* context, Node* arg1,
+                                  size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  Node** args = zone()->NewArray<Node*>(2);
+  args[0] = arg1;
+  args[1] = context;
+
+  return CallN(call_descriptor, target, args);
+}
+
+Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+                                  Node* target, Node* context, Node* arg1,
+                                  Node* arg2, size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  Node** args = zone()->NewArray<Node*>(3);
+  args[0] = arg1;
+  args[1] = arg2;
+  args[2] = context;
+
+  return CallN(call_descriptor, target, args);
+}
+
+Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+                                  Node* target, Node* context, Node* arg1,
+                                  Node* arg2, Node* arg3, size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  Node** args = zone()->NewArray<Node*>(4);
+  args[0] = arg1;
+  args[1] = arg2;
+  args[2] = arg3;
+  args[3] = context;
+
+  return CallN(call_descriptor, target, args);
+}
+
+Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+                                  Node* target, Node* context, Node* arg1,
+                                  Node* arg2, Node* arg3, Node* arg4,
+                                  size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  Node** args = zone()->NewArray<Node*>(5);
+  args[0] = arg1;
+  args[1] = arg2;
+  args[2] = arg3;
+  args[3] = arg4;
+  args[4] = context;
+
+  return CallN(call_descriptor, target, args);
+}
+
+Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+                                  Node* target, Node* context, Node* arg1,
+                                  Node* arg2, Node* arg3, Node* arg4,
+                                  Node* arg5, size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  Node** args = zone()->NewArray<Node*>(6);
+  args[0] = arg1;
+  args[1] = arg2;
+  args[2] = arg3;
+  args[3] = arg4;
+  args[4] = arg5;
+  args[5] = context;
+
+  return CallN(call_descriptor, target, args);
+}
+
+Node* CodeStubAssembler::TailCallStub(CodeStub& stub, Node** args) {
+  Node* code_target = HeapConstant(stub.GetCode());
+  CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), stub.GetCallInterfaceDescriptor(),
+      stub.GetStackParameterCount(), CallDescriptor::kSupportsTailCalls);
+  return raw_assembler_->TailCallN(descriptor, code_target, args);
+}
+
+Node* CodeStubAssembler::TailCall(
+    const CallInterfaceDescriptor& interface_descriptor, Node* code_target,
+    Node** args, size_t result_size) {
+  CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), interface_descriptor,
+      interface_descriptor.GetStackParameterCount(),
+      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+  return raw_assembler_->TailCallN(descriptor, code_target, args);
+}
+
+void CodeStubAssembler::Goto(CodeStubAssembler::Label* label) {
+  label->MergeVariables();
+  raw_assembler_->Goto(label->label_);
+}
+
+void CodeStubAssembler::Branch(Node* condition,
+                               CodeStubAssembler::Label* true_label,
+                               CodeStubAssembler::Label* false_label) {
+  true_label->MergeVariables();
+  false_label->MergeVariables();
+  return raw_assembler_->Branch(condition, true_label->label_,
+                                false_label->label_);
+}
+
+void CodeStubAssembler::Switch(Node* index, Label* default_label,
+                               int32_t* case_values, Label** case_labels,
+                               size_t case_count) {
+  RawMachineLabel** labels =
+      new (zone()->New(sizeof(RawMachineLabel*) * case_count))
+          RawMachineLabel*[case_count];
+  for (size_t i = 0; i < case_count; ++i) {
+    labels[i] = case_labels[i]->label_;
+    case_labels[i]->MergeVariables();
+    default_label->MergeVariables();
+  }
+  return raw_assembler_->Switch(index, default_label->label_, case_values,
+                                labels, case_count);
+}
 
 // RawMachineAssembler delegate helpers:
 Isolate* CodeStubAssembler::isolate() { return raw_assembler_->isolate(); }
 
-
 Graph* CodeStubAssembler::graph() { return raw_assembler_->graph(); }
 
-
 Zone* CodeStubAssembler::zone() { return raw_assembler_->zone(); }
 
+// The core implementation of Variable is stored through an indirection so
+// that it can outlive the often block-scoped Variable declarations. This is
+// needed to ensure that variable binding and merging through phis can
+// properly be verified.
+class CodeStubAssembler::Variable::Impl : public ZoneObject {
+ public:
+  explicit Impl(MachineRepresentation rep) : value_(nullptr), rep_(rep) {}
+  Node* value_;
+  MachineRepresentation rep_;
+};
+
+CodeStubAssembler::Variable::Variable(CodeStubAssembler* assembler,
+                                      MachineRepresentation rep)
+    : impl_(new (assembler->zone()) Impl(rep)) {
+  assembler->variables_.push_back(impl_);
+}
+
+void CodeStubAssembler::Variable::Bind(Node* value) { impl_->value_ = value; }
+
+Node* CodeStubAssembler::Variable::value() const {
+  DCHECK_NOT_NULL(impl_->value_);
+  return impl_->value_;
+}
+
+MachineRepresentation CodeStubAssembler::Variable::rep() const {
+  return impl_->rep_;
+}
+
+bool CodeStubAssembler::Variable::IsBound() const {
+  return impl_->value_ != nullptr;
+}
+
+CodeStubAssembler::Label::Label(CodeStubAssembler* assembler)
+    : bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
+  void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
+  label_ = new (buffer) RawMachineLabel();
+}
+
+CodeStubAssembler::Label::Label(CodeStubAssembler* assembler,
+                                int merged_value_count,
+                                CodeStubAssembler::Variable** merged_variables)
+    : bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
+  void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
+  label_ = new (buffer) RawMachineLabel();
+  for (int i = 0; i < merged_value_count; ++i) {
+    variable_phis_[merged_variables[i]->impl_] = nullptr;
+  }
+}
+
+CodeStubAssembler::Label::Label(CodeStubAssembler* assembler,
+                                CodeStubAssembler::Variable* merged_variable)
+    : CodeStubAssembler::Label(assembler, 1, &merged_variable) {}
+
+void CodeStubAssembler::Label::MergeVariables() {
+  ++merge_count_;
+  for (auto var : assembler_->variables_) {
+    size_t count = 0;
+    Node* node = var->value_;
+    if (node != nullptr) {
+      auto i = variable_merges_.find(var);
+      if (i != variable_merges_.end()) {
+        i->second.push_back(node);
+        count = i->second.size();
+      } else {
+        count = 1;
+        variable_merges_[var] = std::vector<Node*>(1, node);
+      }
+    }
+    // If the following assert fires, then you've jumped to a label along a
+    // path on which a variable that expects to merge its value into a phi has
+    // not been bound.
+    DCHECK(variable_phis_.find(var) == variable_phis_.end() ||
+           count == merge_count_);
+    USE(count);
+
+    // If the label is already bound, we already know the set of variables to
+    // merge and phi nodes have already been created.
+    if (bound_) {
+      auto phi = variable_phis_.find(var);
+      if (phi != variable_phis_.end()) {
+        DCHECK_NOT_NULL(phi->second);
+        assembler_->raw_assembler_->AppendPhiInput(phi->second, node);
+      } else {
+        auto i = variable_merges_.find(var);
+        USE(i);
+        // If the following assert fires, then you've declared a variable that
+        // has the same bound value along all paths up until the point you bound
+        // this label, but then later merged a path with a new value for the
+        // variable after the label bind (it's not possible to add phis to the
+        // bound label after the fact, just make sure to list the variable in
+        // the label's constructor's list of merged variables).
+        DCHECK(find_if(i->second.begin(), i->second.end(),
+                       [node](Node* e) -> bool { return node != e; }) ==
+               i->second.end());
+      }
+    }
+  }
+}
+
+void CodeStubAssembler::Label::Bind() {
+  DCHECK(!bound_);
+  assembler_->raw_assembler_->Bind(label_);
+
+  // Make sure that all variables that have changed along any path up to this
+  // point are marked as merge variables.
+  for (auto var : assembler_->variables_) {
+    Node* shared_value = nullptr;
+    auto i = variable_merges_.find(var);
+    if (i != variable_merges_.end()) {
+      for (auto value : i->second) {
+        DCHECK(value != nullptr);
+        if (value != shared_value) {
+          if (shared_value == nullptr) {
+            shared_value = value;
+          } else {
+            variable_phis_[var] = nullptr;
+          }
+        }
+      }
+    }
+  }
+
+  for (auto var : variable_phis_) {
+    CodeStubAssembler::Variable::Impl* var_impl = var.first;
+    auto i = variable_merges_.find(var_impl);
+    // If the following assert fires, then a variable that has been marked as
+    // being merged at the label--either by explicitly marking it so in the
+    // label constructor or by having seen different bound values at branches
+    // into the label--doesn't have a bound value along all of the paths that
+    // have been merged into the label up to this point.
+    DCHECK(i != variable_merges_.end() && i->second.size() == merge_count_);
+    Node* phi = assembler_->raw_assembler_->Phi(
+        var.first->rep_, static_cast<int>(merge_count_), &(i->second[0]));
+    variable_phis_[var_impl] = phi;
+  }
+
+  // Bind each variable to its merge phi, to the value shared along all paths,
+  // or to null.
+  for (auto var : assembler_->variables_) {
+    auto i = variable_phis_.find(var);
+    if (i != variable_phis_.end()) {
+      var->value_ = i->second;
+    } else {
+      auto j = variable_merges_.find(var);
+      if (j != variable_merges_.end() && j->second.size() == merge_count_) {
+        var->value_ = j->second.back();
+      } else {
+        var->value_ = nullptr;
+      }
+    }
+  }
+
+  bound_ = true;
+}
 
 }  // namespace compiler
 }  // namespace internal
diff --git a/src/compiler/code-stub-assembler.h b/src/compiler/code-stub-assembler.h
index 3c4ae05..2ab1376 100644
--- a/src/compiler/code-stub-assembler.h
+++ b/src/compiler/code-stub-assembler.h
@@ -5,11 +5,16 @@
 #ifndef V8_COMPILER_CODE_STUB_ASSEMBLER_H_
 #define V8_COMPILER_CODE_STUB_ASSEMBLER_H_
 
+#include <map>
+
 // Clients of this interface shouldn't depend on lots of compiler internals.
 // Do not include anything from src/compiler here!
 #include "src/allocation.h"
 #include "src/builtins.h"
+#include "src/heap/heap.h"
+#include "src/machine-type.h"
 #include "src/runtime/runtime.h"
+#include "src/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -25,48 +30,196 @@
 class Node;
 class Operator;
 class RawMachineAssembler;
+class RawMachineLabel;
 class Schedule;
 
+#define CODE_STUB_ASSEMBLER_BINARY_OP_LIST(V) \
+  V(IntPtrAdd)                                \
+  V(IntPtrSub)                                \
+  V(Int32Add)                                 \
+  V(Int32Sub)                                 \
+  V(Int32Mul)                                 \
+  V(Int32GreaterThanOrEqual)                  \
+  V(WordEqual)                                \
+  V(WordNotEqual)                             \
+  V(WordOr)                                   \
+  V(WordAnd)                                  \
+  V(WordXor)                                  \
+  V(WordShl)                                  \
+  V(WordShr)                                  \
+  V(WordSar)                                  \
+  V(WordRor)                                  \
+  V(Word32Equal)                              \
+  V(Word32NotEqual)                           \
+  V(Word32Or)                                 \
+  V(Word32And)                                \
+  V(Word32Xor)                                \
+  V(Word32Shl)                                \
+  V(Word32Shr)                                \
+  V(Word32Sar)                                \
+  V(Word32Ror)                                \
+  V(Word64Equal)                              \
+  V(Word64NotEqual)                           \
+  V(Word64Or)                                 \
+  V(Word64And)                                \
+  V(Word64Xor)                                \
+  V(Word64Shr)                                \
+  V(Word64Sar)                                \
+  V(Word64Ror)                                \
+  V(UintPtrGreaterThanOrEqual)
+
 class CodeStubAssembler {
  public:
+  // |result_size| specifies the number of results returned by the stub.
+  // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
   CodeStubAssembler(Isolate* isolate, Zone* zone,
-                    const CallInterfaceDescriptor& descriptor, Code::Kind kind,
-                    const char* name);
+                    const CallInterfaceDescriptor& descriptor,
+                    Code::Flags flags, const char* name,
+                    size_t result_size = 1);
   virtual ~CodeStubAssembler();
 
   Handle<Code> GenerateCode();
 
+  class Label;
+  class Variable {
+   public:
+    explicit Variable(CodeStubAssembler* assembler, MachineRepresentation rep);
+    void Bind(Node* value);
+    Node* value() const;
+    MachineRepresentation rep() const;
+    bool IsBound() const;
+
+   private:
+    friend class CodeStubAssembler;
+    class Impl;
+    Impl* impl_;
+  };
+
+  // ===========================================================================
+  // Base Assembler
+  // ===========================================================================
+
   // Constants.
   Node* Int32Constant(int value);
   Node* IntPtrConstant(intptr_t value);
   Node* NumberConstant(double value);
   Node* HeapConstant(Handle<HeapObject> object);
   Node* BooleanConstant(bool value);
+  Node* ExternalConstant(ExternalReference address);
 
   Node* Parameter(int value);
   void Return(Node* value);
 
-  // Tag and untag Smi values.
-  Node* SmiTag(Node* value);
-  Node* SmiUntag(Node* value);
+  void Bind(Label* label);
+  void Goto(Label* label);
+  void Branch(Node* condition, Label* true_label, Label* false_label);
 
-  // Basic arithmetic operations.
-  Node* IntPtrAdd(Node* a, Node* b);
-  Node* IntPtrSub(Node* a, Node* b);
+  void Switch(Node* index, Label* default_label, int32_t* case_values,
+              Label** case_labels, size_t case_count);
+
+  // Access to the frame pointer
+  Node* LoadFramePointer();
+  Node* LoadParentFramePointer();
+
+  // Access to the stack pointer
+  Node* LoadStackPointer();
+
+  // Load raw memory location.
+  Node* Load(MachineType rep, Node* base);
+  Node* Load(MachineType rep, Node* base, Node* index);
+
+  // Store value to raw memory location.
+  Node* Store(MachineRepresentation rep, Node* base, Node* value);
+  Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value);
+  Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value);
+  Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* index,
+                            Node* value);
+
+// Basic arithmetic operations.
+#define DECLARE_CODE_STUB_ASSEMBLER_BINARY_OP(name) Node* name(Node* a, Node* b);
+  CODE_STUB_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_STUB_ASSEMBLER_BINARY_OP)
+#undef DECLARE_CODE_STUB_ASSEMBLER_BINARY_OP
+
   Node* WordShl(Node* value, int shift);
 
-  // Load a field from an object on the heap.
-  Node* LoadObjectField(Node* object, int offset);
+  // Conversions
+  Node* ChangeInt32ToInt64(Node* value);
 
-  // Call runtime function.
+  // Projections
+  Node* Projection(int index, Node* value);
+
+  // Calls
+  Node* CallRuntime(Runtime::FunctionId function_id, Node* context);
   Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1);
   Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
                     Node* arg2);
+  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+                    Node* arg2, Node* arg3);
+  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+                    Node* arg2, Node* arg3, Node* arg4);
+  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+                    Node* arg2, Node* arg3, Node* arg4, Node* arg5);
 
   Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
                         Node* arg1);
   Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
                         Node* arg1, Node* arg2);
+  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+                        Node* arg1, Node* arg2, Node* arg3);
+  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+                        Node* arg1, Node* arg2, Node* arg3, Node* arg4);
+
+  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node* context, Node* arg1, size_t result_size = 1);
+  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node* context, Node* arg1, Node* arg2, size_t result_size = 1);
+  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node* context, Node* arg1, Node* arg2, Node* arg3,
+                 size_t result_size = 1);
+  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+                 size_t result_size = 1);
+  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+                 Node* arg5, size_t result_size = 1);
+
+  Node* TailCallStub(CodeStub& stub, Node** args);
+  Node* TailCall(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node** args, size_t result_size = 1);
+
+  // ===========================================================================
+  // Macros
+  // ===========================================================================
+
+  // Tag and untag Smi values.
+  Node* SmiTag(Node* value);
+  Node* SmiUntag(Node* value);
+
+  // Load a value from the root array.
+  Node* LoadRoot(Heap::RootListIndex root_index);
+
+  // Check a value for smi-ness
+  Node* WordIsSmi(Node* a);
+
+  // Load an object pointer from a buffer that isn't in the heap.
+  Node* LoadBufferObject(Node* buffer, int offset);
+  // Load a field from an object on the heap.
+  Node* LoadObjectField(Node* object, int offset);
+
+  // Load an array element from a FixedArray.
+  Node* LoadFixedArrayElementSmiIndex(Node* object, Node* smi_index,
+                                      int additional_offset = 0);
+  Node* LoadFixedArrayElementConstantIndex(Node* object, int index);
+
+ protected:
+  // Protected helpers which delegate to RawMachineAssembler.
+  Graph* graph();
+  Isolate* isolate();
+  Zone* zone();
+
+  // Enables subclasses to perform operations before and after a call.
+  virtual void CallPrologue();
+  virtual void CallEpilogue();
 
  private:
   friend class CodeStubAssemblerTester;
@@ -76,19 +229,42 @@
 
   Node* SmiShiftBitsConstant();
 
-  // Private helpers which delegate to RawMachineAssembler.
-  Graph* graph();
-  Isolate* isolate();
-  Zone* zone();
-
   base::SmartPointer<RawMachineAssembler> raw_assembler_;
-  Code::Kind kind_;
+  Code::Flags flags_;
   const char* name_;
   bool code_generated_;
+  ZoneVector<Variable::Impl*> variables_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeStubAssembler);
 };
 
+class CodeStubAssembler::Label {
+ public:
+  explicit Label(CodeStubAssembler* assembler);
+  Label(CodeStubAssembler* assembler, int merged_variable_count,
+        CodeStubAssembler::Variable** merged_variables);
+  Label(CodeStubAssembler* assembler,
+        CodeStubAssembler::Variable* merged_variable);
+  ~Label() {}
+
+ private:
+  friend class CodeStubAssembler;
+
+  void Bind();
+  void MergeVariables();
+
+  bool bound_;
+  size_t merge_count_;
+  CodeStubAssembler* assembler_;
+  RawMachineLabel* label_;
+  // Map of variables that need to be merged to their phi nodes (or placeholders
+  // for those phis).
+  std::map<Variable::Impl*, Node*> variable_phis_;
+  // Map of variables to the list of value nodes that have been added from each
+  // merge path in their order of merging.
+  std::map<Variable::Impl*, std::vector<Node*>> variable_merges_;
+};
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
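
For readers unfamiliar with the Variable/Label machinery declared above, here is an
illustrative sketch (not part of the patch) of how a stub body can bind a variable
along two control-flow paths and have the label merge the bindings into a phi. The
function name GenerateAbsStub, the choice of MachineRepresentation::kWord32, and the
elided namespace qualifiers are assumptions for illustration; only CodeStubAssembler
methods declared in the header above are used.

void GenerateAbsStub(CodeStubAssembler* a) {
  // A variable that participates in a merge, and a label constructed with
  // that variable so the merge phi is created when the label is bound.
  CodeStubAssembler::Variable result(a, MachineRepresentation::kWord32);
  CodeStubAssembler::Label if_negative(a);
  CodeStubAssembler::Label done(a, &result);

  Node* value = a->Parameter(0);

  // Bind |result| on the fall-through path, then branch. Each path reaching
  // |done| carries its own binding of |result|; Label::Bind() merges them.
  result.Bind(value);
  a->Branch(a->Int32GreaterThanOrEqual(value, a->Int32Constant(0)),
            &done, &if_negative);

  a->Bind(&if_negative);
  result.Bind(a->Int32Sub(a->Int32Constant(0), value));
  a->Goto(&done);

  a->Bind(&done);
  a->Return(result.value());  // Here result.value() is the merge phi.
}
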
diff --git a/src/compiler/common-operator.cc b/src/compiler/common-operator.cc
index be77309..c92bae9 100644
--- a/src/compiler/common-operator.cc
+++ b/src/compiler/common-operator.cc
@@ -803,11 +803,6 @@
 }
 
 
-const Operator* CommonOperatorBuilder::LazyBailout() {
-  return Call(Linkage::GetLazyBailoutDescriptor(zone()));
-}
-
-
 const Operator* CommonOperatorBuilder::TailCall(
     const CallDescriptor* descriptor) {
   class TailCallOperator final : public Operator1<const CallDescriptor*> {
@@ -866,11 +861,9 @@
 const FrameStateFunctionInfo*
 CommonOperatorBuilder::CreateFrameStateFunctionInfo(
     FrameStateType type, int parameter_count, int local_count,
-    Handle<SharedFunctionInfo> shared_info,
-    ContextCallingMode context_calling_mode) {
+    Handle<SharedFunctionInfo> shared_info) {
   return new (zone()->New(sizeof(FrameStateFunctionInfo)))
-      FrameStateFunctionInfo(type, parameter_count, local_count, shared_info,
-                             context_calling_mode);
+      FrameStateFunctionInfo(type, parameter_count, local_count, shared_info);
 }
 
 }  // namespace compiler
diff --git a/src/compiler/common-operator.h b/src/compiler/common-operator.h
index 83cb5b2..7c3f3da 100644
--- a/src/compiler/common-operator.h
+++ b/src/compiler/common-operator.h
@@ -14,11 +14,7 @@
 
 // Forward declarations.
 class ExternalReference;
-template <class>
-class TypeImpl;
-struct ZoneTypeConfig;
-typedef TypeImpl<ZoneTypeConfig> Type;
-
+class Type;
 
 namespace compiler {
 
@@ -174,7 +170,6 @@
   const Operator* Call(const CallDescriptor* descriptor);
   const Operator* TailCall(const CallDescriptor* descriptor);
   const Operator* Projection(size_t index);
-  const Operator* LazyBailout();
 
   // Constructs a new merge or phi operator with the same opcode as {op}, but
   // with {size} inputs.
@@ -183,8 +178,7 @@
   // Constructs function info for frame state construction.
   const FrameStateFunctionInfo* CreateFrameStateFunctionInfo(
       FrameStateType type, int parameter_count, int local_count,
-      Handle<SharedFunctionInfo> shared_info,
-      ContextCallingMode context_calling_mode);
+      Handle<SharedFunctionInfo> shared_info);
 
  private:
   Zone* zone() const { return zone_; }
diff --git a/src/compiler/escape-analysis-reducer.cc b/src/compiler/escape-analysis-reducer.cc
index df8b65d..313b639 100644
--- a/src/compiler/escape-analysis-reducer.cc
+++ b/src/compiler/escape-analysis-reducer.cc
@@ -11,6 +11,15 @@
 namespace internal {
 namespace compiler {
 
+#ifdef DEBUG
+#define TRACE(...)                                    \
+  do {                                                \
+    if (FLAG_trace_turbo_escape) PrintF(__VA_ARGS__); \
+  } while (false)
+#else
+#define TRACE(...)
+#endif  // DEBUG
+
 EscapeAnalysisReducer::EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
                                              EscapeAnalysis* escape_analysis,
                                              Zone* zone)
@@ -18,10 +27,16 @@
       jsgraph_(jsgraph),
       escape_analysis_(escape_analysis),
       zone_(zone),
-      visited_(static_cast<int>(jsgraph->graph()->NodeCount()), zone) {}
+      fully_reduced_(static_cast<int>(jsgraph->graph()->NodeCount() * 2), zone),
+      exists_virtual_allocate_(true) {}
 
 
 Reduction EscapeAnalysisReducer::Reduce(Node* node) {
+  if (node->id() < static_cast<NodeId>(fully_reduced_.length()) &&
+      fully_reduced_.Contains(node->id())) {
+    return NoChange();
+  }
+
   switch (node->opcode()) {
     case IrOpcode::kLoadField:
     case IrOpcode::kLoadElement:
@@ -37,11 +52,44 @@
       return ReduceReferenceEqual(node);
     case IrOpcode::kObjectIsSmi:
       return ReduceObjectIsSmi(node);
+    // FrameState and StateValues nodes are preprocessed here, and visited
+    // via ReduceFrameStateUses from their user nodes.
+    case IrOpcode::kFrameState:
+    case IrOpcode::kStateValues: {
+      if (node->id() >= static_cast<NodeId>(fully_reduced_.length()) ||
+          fully_reduced_.Contains(node->id())) {
+        break;
+      }
+      bool depends_on_object_state = false;
+      for (int i = 0; i < node->InputCount(); i++) {
+        Node* input = node->InputAt(i);
+        switch (input->opcode()) {
+          case IrOpcode::kAllocate:
+          case IrOpcode::kFinishRegion:
+            depends_on_object_state =
+                depends_on_object_state || escape_analysis()->IsVirtual(input);
+            break;
+          case IrOpcode::kFrameState:
+          case IrOpcode::kStateValues:
+            depends_on_object_state =
+                depends_on_object_state ||
+                input->id() >= static_cast<NodeId>(fully_reduced_.length()) ||
+                !fully_reduced_.Contains(input->id());
+            break;
+          default:
+            break;
+        }
+      }
+      if (!depends_on_object_state) {
+        fully_reduced_.Add(node->id());
+      }
+      return NoChange();
+    }
     default:
       // TODO(sigurds): Change this to GetFrameStateInputCount once
       // it is working. For now we use EffectInputCount > 0 to determine
       // whether a node might have a frame state input.
-      if (node->op()->EffectInputCount() > 0) {
+      if (exists_virtual_allocate_ && node->op()->EffectInputCount() > 0) {
         return ReduceFrameStateUses(node);
       }
       break;
@@ -53,17 +101,15 @@
 Reduction EscapeAnalysisReducer::ReduceLoad(Node* node) {
   DCHECK(node->opcode() == IrOpcode::kLoadField ||
          node->opcode() == IrOpcode::kLoadElement);
-  if (visited_.Contains(node->id())) return NoChange();
-  visited_.Add(node->id());
+  if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
+    fully_reduced_.Add(node->id());
+  }
   if (Node* rep = escape_analysis()->GetReplacement(node)) {
-    visited_.Add(node->id());
     counters()->turbo_escape_loads_replaced()->Increment();
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Replaced #%d (%s) with #%d (%s)\n", node->id(),
-             node->op()->mnemonic(), rep->id(), rep->op()->mnemonic());
-    }
+    TRACE("Replaced #%d (%s) with #%d (%s)\n", node->id(),
+          node->op()->mnemonic(), rep->id(), rep->op()->mnemonic());
     ReplaceWithValue(node, rep);
-    return Changed(rep);
+    return Replace(rep);
   }
   return NoChange();
 }
@@ -72,13 +118,12 @@
 Reduction EscapeAnalysisReducer::ReduceStore(Node* node) {
   DCHECK(node->opcode() == IrOpcode::kStoreField ||
          node->opcode() == IrOpcode::kStoreElement);
-  if (visited_.Contains(node->id())) return NoChange();
-  visited_.Add(node->id());
+  if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
+    fully_reduced_.Add(node->id());
+  }
   if (escape_analysis()->IsVirtual(NodeProperties::GetValueInput(node, 0))) {
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Removed #%d (%s) from effect chain\n", node->id(),
-             node->op()->mnemonic());
-    }
+    TRACE("Removed #%d (%s) from effect chain\n", node->id(),
+          node->op()->mnemonic());
     RelaxEffectsAndControls(node);
     return Changed(node);
   }
@@ -88,14 +133,13 @@
 
 Reduction EscapeAnalysisReducer::ReduceAllocate(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
-  if (visited_.Contains(node->id())) return NoChange();
-  visited_.Add(node->id());
+  if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
+    fully_reduced_.Add(node->id());
+  }
   if (escape_analysis()->IsVirtual(node)) {
     RelaxEffectsAndControls(node);
     counters()->turbo_escape_allocs_replaced()->Increment();
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Removed allocate #%d from effect chain\n", node->id());
-    }
+    TRACE("Removed allocate #%d from effect chain\n", node->id());
     return Changed(node);
   }
   return NoChange();
@@ -106,8 +150,14 @@
   DCHECK_EQ(node->opcode(), IrOpcode::kFinishRegion);
   Node* effect = NodeProperties::GetEffectInput(node, 0);
   if (effect->opcode() == IrOpcode::kBeginRegion) {
+    // We only add it now to remove empty Begin/Finish region pairs
+    // in the process.
+    if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
+      fully_reduced_.Add(node->id());
+    }
     RelaxEffectsAndControls(effect);
     RelaxEffectsAndControls(node);
+#ifdef DEBUG
     if (FLAG_trace_turbo_escape) {
       PrintF("Removed region #%d / #%d from effect chain,", effect->id(),
              node->id());
@@ -117,6 +167,7 @@
       }
       PrintF("\n");
     }
+#endif  // DEBUG
     return Changed(node);
   }
   return NoChange();
@@ -131,22 +182,18 @@
     if (escape_analysis()->IsVirtual(right) &&
         escape_analysis()->CompareVirtualObjects(left, right)) {
       ReplaceWithValue(node, jsgraph()->TrueConstant());
-      if (FLAG_trace_turbo_escape) {
-        PrintF("Replaced ref eq #%d with true\n", node->id());
-      }
+      TRACE("Replaced ref eq #%d with true\n", node->id());
+      Replace(jsgraph()->TrueConstant());
     }
     // Right-hand side is not a virtual object, or a different one.
     ReplaceWithValue(node, jsgraph()->FalseConstant());
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Replaced ref eq #%d with false\n", node->id());
-    }
-    return Replace(node);
+    TRACE("Replaced ref eq #%d with false\n", node->id());
+    return Replace(jsgraph()->FalseConstant());
   } else if (escape_analysis()->IsVirtual(right)) {
     // Left-hand side is not a virtual object.
     ReplaceWithValue(node, jsgraph()->FalseConstant());
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Replaced ref eq #%d with false\n", node->id());
-    }
+    TRACE("Replaced ref eq #%d with false\n", node->id());
+    return Replace(jsgraph()->FalseConstant());
   }
   return NoChange();
 }
@@ -157,24 +204,23 @@
   Node* input = NodeProperties::GetValueInput(node, 0);
   if (escape_analysis()->IsVirtual(input)) {
     ReplaceWithValue(node, jsgraph()->FalseConstant());
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Replaced ObjectIsSmi #%d with false\n", node->id());
-    }
-    return Replace(node);
+    TRACE("Replaced ObjectIsSmi #%d with false\n", node->id());
+    return Replace(jsgraph()->FalseConstant());
   }
   return NoChange();
 }
 
 
 Reduction EscapeAnalysisReducer::ReduceFrameStateUses(Node* node) {
-  if (visited_.Contains(node->id())) return NoChange();
-  visited_.Add(node->id());
   DCHECK_GE(node->op()->EffectInputCount(), 1);
+  if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
+    fully_reduced_.Add(node->id());
+  }
   bool changed = false;
   for (int i = 0; i < node->InputCount(); ++i) {
     Node* input = node->InputAt(i);
     if (input->opcode() == IrOpcode::kFrameState) {
-      if (Node* ret = ReduceFrameState(input, node, false)) {
+      if (Node* ret = ReduceDeoptState(input, node, false)) {
         node->ReplaceInput(i, ret);
         changed = true;
       }
@@ -188,78 +234,56 @@
 
 
 // Returns the clone if it duplicated the node, and null otherwise.
-Node* EscapeAnalysisReducer::ReduceFrameState(Node* node, Node* effect,
+Node* EscapeAnalysisReducer::ReduceDeoptState(Node* node, Node* effect,
                                               bool multiple_users) {
-  DCHECK(node->opcode() == IrOpcode::kFrameState);
-  if (FLAG_trace_turbo_escape) {
-    PrintF("Reducing FrameState %d\n", node->id());
+  DCHECK(node->opcode() == IrOpcode::kFrameState ||
+         node->opcode() == IrOpcode::kStateValues);
+  if (node->id() < static_cast<NodeId>(fully_reduced_.length()) &&
+      fully_reduced_.Contains(node->id())) {
+    return nullptr;
   }
+  TRACE("Reducing %s %d\n", node->op()->mnemonic(), node->id());
   Node* clone = nullptr;
+  bool node_multiused = node->UseCount() > 1;
+  bool multiple_users_rec = multiple_users || node_multiused;
   for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
     Node* input = NodeProperties::GetValueInput(node, i);
-    Node* ret =
-        input->opcode() == IrOpcode::kStateValues
-            ? ReduceStateValueInputs(input, effect, node->UseCount() > 1)
-            : ReduceStateValueInput(node, i, effect, node->UseCount() > 1);
-    if (ret) {
-      if (node->UseCount() > 1 || multiple_users) {
-        if (FLAG_trace_turbo_escape) {
-          PrintF("  Cloning #%d", node->id());
-        }
-        node = clone = jsgraph()->graph()->CloneNode(node);
-        if (FLAG_trace_turbo_escape) {
-          PrintF(" to #%d\n", node->id());
-        }
-        multiple_users = false;  // Don't clone anymore.
-      }
-      NodeProperties::ReplaceValueInput(node, ret, i);
-    }
-  }
-  Node* outer_frame_state = NodeProperties::GetFrameStateInput(node, 0);
-  if (outer_frame_state->opcode() == IrOpcode::kFrameState) {
-    if (Node* ret =
-            ReduceFrameState(outer_frame_state, effect, node->UseCount() > 1)) {
-      if (node->UseCount() > 1 || multiple_users) {
-        if (FLAG_trace_turbo_escape) {
-          PrintF("  Cloning #%d", node->id());
-        }
-        node = clone = jsgraph()->graph()->CloneNode(node);
-        if (FLAG_trace_turbo_escape) {
-          PrintF(" to #%d\n", node->id());
-        }
-        multiple_users = false;
-      }
-      NodeProperties::ReplaceFrameStateInput(node, 0, ret);
-    }
-  }
-  return clone;
-}
-
-
-// Returns the clone if it duplicated the node, and null otherwise.
-Node* EscapeAnalysisReducer::ReduceStateValueInputs(Node* node, Node* effect,
-                                                    bool multiple_users) {
-  if (FLAG_trace_turbo_escape) {
-    PrintF("Reducing StateValue #%d\n", node->id());
-  }
-  DCHECK(node->opcode() == IrOpcode::kStateValues);
-  DCHECK_NOT_NULL(effect);
-  Node* clone = nullptr;
-  for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
-    Node* input = NodeProperties::GetValueInput(node, i);
-    Node* ret = nullptr;
     if (input->opcode() == IrOpcode::kStateValues) {
-      ret = ReduceStateValueInputs(input, effect, multiple_users);
+      if (Node* ret = ReduceDeoptState(input, effect, multiple_users_rec)) {
+        if (node_multiused || (multiple_users && !clone)) {
+          TRACE("  Cloning #%d", node->id());
+          node = clone = jsgraph()->graph()->CloneNode(node);
+          TRACE(" to #%d\n", node->id());
+          node_multiused = false;
+        }
+        NodeProperties::ReplaceValueInput(node, ret, i);
+      }
     } else {
-      ret = ReduceStateValueInput(node, i, effect, multiple_users);
+      if (Node* ret = ReduceStateValueInput(node, i, effect, node_multiused,
+                                            clone, multiple_users)) {
+        DCHECK_NULL(clone);
+        node_multiused = false;  // Don't clone anymore.
+        node = clone = ret;
+      }
     }
-    if (ret) {
-      node = ret;
-      DCHECK_NULL(clone);
-      clone = ret;
-      multiple_users = false;
+  }
+  if (node->opcode() == IrOpcode::kFrameState) {
+    Node* outer_frame_state = NodeProperties::GetFrameStateInput(node, 0);
+    if (outer_frame_state->opcode() == IrOpcode::kFrameState) {
+      if (Node* ret =
+              ReduceDeoptState(outer_frame_state, effect, multiple_users_rec)) {
+        if (node_multiused || (multiple_users && !clone)) {
+          TRACE("    Cloning #%d", node->id());
+          node = clone = jsgraph()->graph()->CloneNode(node);
+          TRACE(" to #%d\n", node->id());
+        }
+        NodeProperties::ReplaceFrameStateInput(node, 0, ret);
+      }
     }
   }
+  if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
+    fully_reduced_.Add(node->id());
+  }
   return clone;
 }
 
@@ -267,36 +291,36 @@
 // Returns the clone if it duplicated the node, and null otherwise.
 Node* EscapeAnalysisReducer::ReduceStateValueInput(Node* node, int node_index,
                                                    Node* effect,
+                                                   bool node_multiused,
+                                                   bool already_cloned,
                                                    bool multiple_users) {
   Node* input = NodeProperties::GetValueInput(node, node_index);
-  if (FLAG_trace_turbo_escape) {
-    PrintF("Reducing State Input #%d (%s)\n", input->id(),
-           input->op()->mnemonic());
+  if (node->id() < static_cast<NodeId>(fully_reduced_.length()) &&
+      fully_reduced_.Contains(node->id())) {
+    return nullptr;
   }
+  TRACE("Reducing State Input #%d (%s)\n", input->id(),
+        input->op()->mnemonic());
   Node* clone = nullptr;
   if (input->opcode() == IrOpcode::kFinishRegion ||
       input->opcode() == IrOpcode::kAllocate) {
     if (escape_analysis()->IsVirtual(input)) {
       if (Node* object_state =
               escape_analysis()->GetOrCreateObjectState(effect, input)) {
-        if (node->UseCount() > 1 || multiple_users) {
-          if (FLAG_trace_turbo_escape) {
-            PrintF("Cloning #%d", node->id());
-          }
+        if (node_multiused || (multiple_users && !already_cloned)) {
+          TRACE("Cloning #%d", node->id());
           node = clone = jsgraph()->graph()->CloneNode(node);
-          if (FLAG_trace_turbo_escape) {
-            PrintF(" to #%d\n", node->id());
-          }
+          TRACE(" to #%d\n", node->id());
+          node_multiused = false;
+          already_cloned = true;
         }
         NodeProperties::ReplaceValueInput(node, object_state, node_index);
-        if (FLAG_trace_turbo_escape) {
-          PrintF("Replaced state #%d input #%d with object state #%d\n",
-                 node->id(), input->id(), object_state->id());
-        }
+        TRACE("Replaced state #%d input #%d with object state #%d\n",
+              node->id(), input->id(), object_state->id());
       } else {
-        if (FLAG_trace_turbo_escape) {
-          PrintF("No object state replacement available.\n");
-        }
+        TRACE("No object state replacement for #%d at effect #%d available.\n",
+              input->id(), effect->id());
+        UNREACHABLE();
       }
     }
   }
@@ -308,6 +332,36 @@
   return jsgraph_->isolate()->counters();
 }
 
+
+class EscapeAnalysisVerifier final : public AdvancedReducer {
+ public:
+  EscapeAnalysisVerifier(Editor* editor, EscapeAnalysis* escape_analysis)
+      : AdvancedReducer(editor), escape_analysis_(escape_analysis) {}
+
+  Reduction Reduce(Node* node) final {
+    switch (node->opcode()) {
+      case IrOpcode::kAllocate:
+        CHECK(!escape_analysis_->IsVirtual(node));
+        break;
+      default:
+        break;
+    }
+    return NoChange();
+  }
+
+ private:
+  EscapeAnalysis* escape_analysis_;
+};
+
+void EscapeAnalysisReducer::VerifyReplacement() const {
+#ifdef DEBUG
+  GraphReducer graph_reducer(zone(), jsgraph()->graph());
+  EscapeAnalysisVerifier verifier(&graph_reducer, escape_analysis());
+  graph_reducer.AddReducer(&verifier);
+  graph_reducer.ReduceGraph();
+#endif  // DEBUG
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
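
A note on the repeated id/length checks in the reducer above: a node whose id lies at
or beyond the size of the fully_reduced_ bit vector was created after the vector was
sized (for example by cloning) and is treated as not yet reduced. A minimal sketch of
that guard, assuming V8's BitVector interface (length()/Contains()) and using a
hypothetical helper name:

bool AlreadyFullyReduced(const BitVector& fully_reduced, Node* node) {
  // Ids past the end of the bit vector belong to nodes created after the
  // vector was allocated; treat them conservatively as not reduced.
  return node->id() < static_cast<NodeId>(fully_reduced.length()) &&
         fully_reduced.Contains(node->id());
}
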
diff --git a/src/compiler/escape-analysis-reducer.h b/src/compiler/escape-analysis-reducer.h
index 1c0da16..12487b1 100644
--- a/src/compiler/escape-analysis-reducer.h
+++ b/src/compiler/escape-analysis-reducer.h
@@ -29,6 +29,10 @@
                         EscapeAnalysis* escape_analysis, Zone* zone);
 
   Reduction Reduce(Node* node) final;
+  void SetExistsVirtualAllocate(bool exists) {
+    exists_virtual_allocate_ = exists;
+  }
+  void VerifyReplacement() const;
 
  private:
   Reduction ReduceLoad(Node* node);
@@ -38,9 +42,9 @@
   Reduction ReduceReferenceEqual(Node* node);
   Reduction ReduceObjectIsSmi(Node* node);
   Reduction ReduceFrameStateUses(Node* node);
-  Node* ReduceFrameState(Node* node, Node* effect, bool multiple_users);
-  Node* ReduceStateValueInputs(Node* node, Node* effect, bool multiple_users);
+  Node* ReduceDeoptState(Node* node, Node* effect, bool multiple_users);
   Node* ReduceStateValueInput(Node* node, int node_index, Node* effect,
+                              bool node_multiused, bool already_cloned,
                               bool multiple_users);
 
   JSGraph* jsgraph() const { return jsgraph_; }
@@ -51,7 +55,10 @@
   JSGraph* const jsgraph_;
   EscapeAnalysis* escape_analysis_;
   Zone* const zone_;
-  BitVector visited_;
+  // fully_reduced_ marks nodes that we already processed (allocs, loads,
+  // stores) and nodes that do not need a visit from ReduceDeoptState etc.
+  BitVector fully_reduced_;
+  bool exists_virtual_allocate_;
 
   DISALLOW_COPY_AND_ASSIGN(EscapeAnalysisReducer);
 };
diff --git a/src/compiler/escape-analysis.cc b/src/compiler/escape-analysis.cc
index af0ba6a..b1a12b2 100644
--- a/src/compiler/escape-analysis.cc
+++ b/src/compiler/escape-analysis.cc
@@ -24,106 +24,134 @@
 namespace internal {
 namespace compiler {
 
-const EscapeAnalysis::Alias EscapeAnalysis::kNotReachable =
-    std::numeric_limits<Alias>::max();
-const EscapeAnalysis::Alias EscapeAnalysis::kUntrackable =
-    std::numeric_limits<Alias>::max() - 1;
+using Alias = EscapeStatusAnalysis::Alias;
 
+#ifdef DEBUG
+#define TRACE(...)                                    \
+  do {                                                \
+    if (FLAG_trace_turbo_escape) PrintF(__VA_ARGS__); \
+  } while (false)
+#else
+#define TRACE(...)
+#endif
+
+const Alias EscapeStatusAnalysis::kNotReachable =
+    std::numeric_limits<Alias>::max();
+const Alias EscapeStatusAnalysis::kUntrackable =
+    std::numeric_limits<Alias>::max() - 1;
 
 class VirtualObject : public ZoneObject {
  public:
-  enum Status { kUntracked = 0, kTracked = 1 };
-  VirtualObject(NodeId id, Zone* zone)
+  enum Status {
+    kInitial = 0,
+    kTracked = 1u << 0,
+    kInitialized = 1u << 1,
+    kCopyRequired = 1u << 2,
+  };
+  typedef base::Flags<Status, unsigned char> StatusFlags;
+
+  VirtualObject(NodeId id, VirtualState* owner, Zone* zone)
       : id_(id),
-        status_(kUntracked),
+        status_(kInitial),
         fields_(zone),
         phi_(zone),
-        object_state_(nullptr) {}
+        object_state_(nullptr),
+        owner_(owner) {}
 
-  VirtualObject(const VirtualObject& other)
+  VirtualObject(VirtualState* owner, const VirtualObject& other)
       : id_(other.id_),
-        status_(other.status_),
+        status_(other.status_ & ~kCopyRequired),
         fields_(other.fields_),
         phi_(other.phi_),
-        object_state_(other.object_state_) {}
+        object_state_(other.object_state_),
+        owner_(owner) {}
 
-  VirtualObject(NodeId id, Zone* zone, size_t field_number)
+  VirtualObject(NodeId id, VirtualState* owner, Zone* zone, size_t field_number,
+                bool initialized)
       : id_(id),
-        status_(kTracked),
+        status_(kTracked | (initialized ? kInitialized : kInitial)),
         fields_(zone),
         phi_(zone),
-        object_state_(nullptr) {
+        object_state_(nullptr),
+        owner_(owner) {
     fields_.resize(field_number);
     phi_.resize(field_number, false);
   }
 
-  Node* GetField(size_t offset) {
-    if (offset < fields_.size()) {
-      return fields_[offset];
-    }
-    return nullptr;
-  }
+  Node* GetField(size_t offset) { return fields_[offset]; }
 
-  bool IsCreatedPhi(size_t offset) {
-    if (offset < phi_.size()) {
-      return phi_[offset];
-    }
-    return false;
-  }
+  bool IsCreatedPhi(size_t offset) { return phi_[offset]; }
 
-  bool SetField(size_t offset, Node* node, bool created_phi = false) {
-    bool changed = fields_[offset] != node || phi_[offset] != created_phi;
+  void SetField(size_t offset, Node* node, bool created_phi = false) {
     fields_[offset] = node;
     phi_[offset] = created_phi;
-    if (changed && FLAG_trace_turbo_escape && node) {
-      PrintF("Setting field %zu of #%d to #%d (%s)\n", offset, id(), node->id(),
-             node->op()->mnemonic());
-    }
-    return changed;
   }
-  bool IsVirtual() const { return status_ == kTracked; }
-  bool IsTracked() const { return status_ != kUntracked; }
+  bool IsTracked() const { return status_ & kTracked; }
+  bool IsInitialized() const { return status_ & kInitialized; }
+  bool SetInitialized() { return status_ |= kInitialized; }
+  VirtualState* owner() const { return owner_; }
 
   Node** fields_array() { return &fields_.front(); }
   size_t field_count() { return fields_.size(); }
   bool ResizeFields(size_t field_count) {
-    if (field_count != fields_.size()) {
+    if (field_count > fields_.size()) {
       fields_.resize(field_count);
       phi_.resize(field_count);
       return true;
     }
     return false;
   }
-  bool ClearAllFields() {
-    bool changed = false;
+  void ClearAllFields() {
     for (size_t i = 0; i < fields_.size(); ++i) {
-      if (fields_[i] != nullptr) {
-        fields_[i] = nullptr;
-        changed = true;
-      }
+      fields_[i] = nullptr;
       phi_[i] = false;
     }
-    return changed;
+  }
+  bool AllFieldsClear() {
+    for (size_t i = 0; i < fields_.size(); ++i) {
+      if (fields_[i] != nullptr) {
+        return false;
+      }
+    }
+    return true;
   }
   bool UpdateFrom(const VirtualObject& other);
+  bool MergeFrom(MergeCache* cache, Node* at, Graph* graph,
+                 CommonOperatorBuilder* common);
   void SetObjectState(Node* node) { object_state_ = node; }
   Node* GetObjectState() const { return object_state_; }
+  bool IsCopyRequired() const { return status_ & kCopyRequired; }
+  void SetCopyRequired() { status_ |= kCopyRequired; }
+  bool NeedCopyForModification() {
+    if (!IsCopyRequired() || !IsInitialized()) {
+      return false;
+    }
+    return true;
+  }
 
   NodeId id() const { return id_; }
   void id(NodeId id) { id_ = id; }
 
  private:
+  bool MergeFields(size_t i, Node* at, MergeCache* cache, Graph* graph,
+                   CommonOperatorBuilder* common);
+
   NodeId id_;
-  Status status_;
+  StatusFlags status_;
   ZoneVector<Node*> fields_;
   ZoneVector<bool> phi_;
   Node* object_state_;
+  VirtualState* owner_;
+
+  DISALLOW_COPY_AND_ASSIGN(VirtualObject);
 };
 
+DEFINE_OPERATORS_FOR_FLAGS(VirtualObject::StatusFlags)
 
 bool VirtualObject::UpdateFrom(const VirtualObject& other) {
   bool changed = status_ != other.status_;
   status_ = other.status_;
+  phi_ = other.phi_;
   if (fields_.size() != other.fields_.size()) {
     fields_ = other.fields_;
     return true;
@@ -137,36 +165,49 @@
   return changed;
 }
 
-
 class VirtualState : public ZoneObject {
  public:
-  VirtualState(Zone* zone, size_t size);
-  VirtualState(const VirtualState& states);
+  VirtualState(Node* owner, Zone* zone, size_t size)
+      : info_(size, nullptr, zone), owner_(owner) {}
+
+  VirtualState(Node* owner, const VirtualState& state)
+      : info_(state.info_.size(), nullptr, state.info_.get_allocator().zone()),
+        owner_(owner) {
+    for (size_t i = 0; i < info_.size(); ++i) {
+      if (state.info_[i]) {
+        info_[i] = state.info_[i];
+      }
+    }
+  }
 
   VirtualObject* VirtualObjectFromAlias(size_t alias);
-  VirtualObject* GetOrCreateTrackedVirtualObject(EscapeAnalysis::Alias alias,
-                                                 NodeId id, Zone* zone);
-  void SetVirtualObject(EscapeAnalysis::Alias alias, VirtualObject* state);
-  void LastChangedAt(Node* node) { last_changed_ = node; }
-  Node* GetLastChanged() { return last_changed_; }
+  void SetVirtualObject(Alias alias, VirtualObject* state);
   bool UpdateFrom(VirtualState* state, Zone* zone);
   bool MergeFrom(MergeCache* cache, Zone* zone, Graph* graph,
-                 CommonOperatorBuilder* common, Node* control);
+                 CommonOperatorBuilder* common, Node* at);
   size_t size() const { return info_.size(); }
+  Node* owner() const { return owner_; }
+  VirtualObject* Copy(VirtualObject* obj, Alias alias);
+  void SetCopyRequired() {
+    for (VirtualObject* obj : info_) {
+      if (obj) obj->SetCopyRequired();
+    }
+  }
 
  private:
   ZoneVector<VirtualObject*> info_;
-  Node* last_changed_;
-};
+  Node* owner_;
 
+  DISALLOW_COPY_AND_ASSIGN(VirtualState);
+};
 
 class MergeCache : public ZoneObject {
  public:
   explicit MergeCache(Zone* zone)
       : states_(zone), objects_(zone), fields_(zone) {
-    states_.reserve(4);
-    objects_.reserve(4);
-    fields_.reserve(4);
+    states_.reserve(5);
+    objects_.reserve(5);
+    fields_.reserve(5);
   }
   ZoneVector<VirtualState*>& states() { return states_; }
   ZoneVector<VirtualObject*>& objects() { return objects_; }
@@ -176,20 +217,20 @@
     objects_.clear();
     fields_.clear();
   }
-  size_t LoadVirtualObjectsFromStatesFor(EscapeAnalysis::Alias alias);
-  void LoadVirtualObjectsForFieldsFrom(
-      VirtualState* state, const ZoneVector<EscapeAnalysis::Alias>& aliases);
+  size_t LoadVirtualObjectsFromStatesFor(Alias alias);
+  void LoadVirtualObjectsForFieldsFrom(VirtualState* state,
+                                       const ZoneVector<Alias>& aliases);
   Node* GetFields(size_t pos);
 
  private:
   ZoneVector<VirtualState*> states_;
   ZoneVector<VirtualObject*> objects_;
   ZoneVector<Node*> fields_;
+
+  DISALLOW_COPY_AND_ASSIGN(MergeCache);
 };
 
-
-size_t MergeCache::LoadVirtualObjectsFromStatesFor(
-    EscapeAnalysis::Alias alias) {
+size_t MergeCache::LoadVirtualObjectsFromStatesFor(Alias alias) {
   objects_.clear();
   DCHECK_GT(states_.size(), 0u);
   size_t min = std::numeric_limits<size_t>::max();
@@ -202,13 +243,12 @@
   return min;
 }
 
-
 void MergeCache::LoadVirtualObjectsForFieldsFrom(
-    VirtualState* state, const ZoneVector<EscapeAnalysis::Alias>& aliases) {
+    VirtualState* state, const ZoneVector<Alias>& aliases) {
   objects_.clear();
   size_t max_alias = state->size();
   for (Node* field : fields_) {
-    EscapeAnalysis::Alias alias = aliases[field->id()];
+    Alias alias = aliases[field->id()];
     if (alias >= max_alias) continue;
     if (VirtualObject* obj = state->VirtualObjectFromAlias(alias)) {
       objects_.push_back(obj);
@@ -216,11 +256,13 @@
   }
 }
 
-
 Node* MergeCache::GetFields(size_t pos) {
   fields_.clear();
-  Node* rep = objects_.front()->GetField(pos);
+  Node* rep = pos >= objects_.front()->field_count()
+                  ? nullptr
+                  : objects_.front()->GetField(pos);
   for (VirtualObject* obj : objects_) {
+    if (pos >= obj->field_count()) continue;
     Node* field = obj->GetField(pos);
     if (field) {
       fields_.push_back(field);
@@ -232,72 +274,48 @@
   return rep;
 }
 
-
-VirtualState::VirtualState(Zone* zone, size_t size)
-    : info_(size, nullptr, zone), last_changed_(nullptr) {}
-
-
-VirtualState::VirtualState(const VirtualState& state)
-    : info_(state.info_.size(), nullptr, state.info_.get_allocator().zone()),
-      last_changed_(state.last_changed_) {
-  for (size_t i = 0; i < state.info_.size(); ++i) {
-    if (state.info_[i]) {
-      info_[i] =
-          new (info_.get_allocator().zone()) VirtualObject(*state.info_[i]);
-    }
-  }
+VirtualObject* VirtualState::Copy(VirtualObject* obj, Alias alias) {
+  if (obj->owner() == this) return obj;
+  VirtualObject* new_obj =
+      new (info_.get_allocator().zone()) VirtualObject(this, *obj);
+  TRACE("At state %p, alias @%d (#%d), copying virtual object from %p to %p\n",
+        static_cast<void*>(this), alias, obj->id(), static_cast<void*>(obj),
+        static_cast<void*>(new_obj));
+  info_[alias] = new_obj;
+  return new_obj;
 }
 
-
 VirtualObject* VirtualState::VirtualObjectFromAlias(size_t alias) {
   return info_[alias];
 }
 
-
-VirtualObject* VirtualState::GetOrCreateTrackedVirtualObject(
-    EscapeAnalysis::Alias alias, NodeId id, Zone* zone) {
-  if (VirtualObject* obj = VirtualObjectFromAlias(alias)) {
-    return obj;
-  }
-  VirtualObject* obj = new (zone) VirtualObject(id, zone, 0);
-  SetVirtualObject(alias, obj);
-  return obj;
-}
-
-
-void VirtualState::SetVirtualObject(EscapeAnalysis::Alias alias,
-                                    VirtualObject* obj) {
+void VirtualState::SetVirtualObject(Alias alias, VirtualObject* obj) {
   info_[alias] = obj;
 }
 
-
 bool VirtualState::UpdateFrom(VirtualState* from, Zone* zone) {
+  if (from == this) return false;
   bool changed = false;
-  for (EscapeAnalysis::Alias alias = 0; alias < size(); ++alias) {
+  for (Alias alias = 0; alias < size(); ++alias) {
     VirtualObject* ls = VirtualObjectFromAlias(alias);
     VirtualObject* rs = from->VirtualObjectFromAlias(alias);
 
-    if (rs == nullptr) {
-      continue;
-    }
+    if (ls == rs || rs == nullptr) continue;
 
     if (ls == nullptr) {
-      ls = new (zone) VirtualObject(*rs);
+      ls = new (zone) VirtualObject(this, *rs);
       SetVirtualObject(alias, ls);
       changed = true;
       continue;
     }
 
-    if (FLAG_trace_turbo_escape) {
-      PrintF("  Updating fields of @%d\n", alias);
-    }
+    TRACE("  Updating fields of @%d\n", alias);
 
     changed = ls->UpdateFrom(*rs) || changed;
   }
   return false;
 }
 
-
 namespace {
 
 bool IsEquivalentPhi(Node* node1, Node* node2) {
@@ -316,7 +334,6 @@
   return true;
 }
 
-
 bool IsEquivalentPhi(Node* phi, ZoneVector<Node*>& inputs) {
   if (phi->opcode() != IrOpcode::kPhi) return false;
   if (phi->op()->ValueInputCount() != inputs.size()) {
@@ -333,186 +350,225 @@
 
 }  // namespace
 
-
-Node* EscapeAnalysis::GetReplacementIfSame(ZoneVector<VirtualObject*>& objs) {
-  Node* rep = GetReplacement(objs.front()->id());
-  for (VirtualObject* obj : objs) {
-    if (GetReplacement(obj->id()) != rep) {
-      return nullptr;
+bool VirtualObject::MergeFields(size_t i, Node* at, MergeCache* cache,
+                                Graph* graph, CommonOperatorBuilder* common) {
+  bool changed = false;
+  int value_input_count = static_cast<int>(cache->fields().size());
+  Node* rep = GetField(i);
+  if (!rep || !IsCreatedPhi(i)) {
+    Node* control = NodeProperties::GetControlInput(at);
+    cache->fields().push_back(control);
+    Node* phi = graph->NewNode(
+        common->Phi(MachineRepresentation::kTagged, value_input_count),
+        value_input_count + 1, &cache->fields().front());
+    SetField(i, phi, true);
+#ifdef DEBUG
+    if (FLAG_trace_turbo_escape) {
+      PrintF("    Creating Phi #%d as merge of", phi->id());
+      for (int i = 0; i < value_input_count; i++) {
+        PrintF(" #%d (%s)", cache->fields()[i]->id(),
+               cache->fields()[i]->op()->mnemonic());
+      }
+      PrintF("\n");
+    }
+#endif
+    changed = true;
+  } else {
+    DCHECK(rep->opcode() == IrOpcode::kPhi);
+    for (int n = 0; n < value_input_count; ++n) {
+      Node* old = NodeProperties::GetValueInput(rep, n);
+      if (old != cache->fields()[n]) {
+        changed = true;
+        NodeProperties::ReplaceValueInput(rep, cache->fields()[n], n);
+      }
     }
   }
-  return rep;
+  return changed;
 }
 
+bool VirtualObject::MergeFrom(MergeCache* cache, Node* at, Graph* graph,
+                              CommonOperatorBuilder* common) {
+  DCHECK(at->opcode() == IrOpcode::kEffectPhi ||
+         at->opcode() == IrOpcode::kPhi);
+  bool changed = false;
+  for (size_t i = 0; i < field_count(); ++i) {
+    if (Node* field = cache->GetFields(i)) {
+      changed = changed || GetField(i) != field;
+      SetField(i, field);
+      TRACE("    Field %zu agree on rep #%d\n", i, field->id());
+    } else {
+      int arity = at->opcode() == IrOpcode::kEffectPhi
+                      ? at->op()->EffectInputCount()
+                      : at->op()->ValueInputCount();
+      if (cache->fields().size() == arity) {
+        changed = MergeFields(i, at, cache, graph, common) || changed;
+      } else {
+        if (GetField(i) != nullptr) {
+          TRACE("    Field %zu cleared\n", i);
+          changed = true;
+        }
+        SetField(i, nullptr);
+      }
+    }
+  }
+  return changed;
+}
 
 bool VirtualState::MergeFrom(MergeCache* cache, Zone* zone, Graph* graph,
-                             CommonOperatorBuilder* common, Node* control) {
+                             CommonOperatorBuilder* common, Node* at) {
   DCHECK_GT(cache->states().size(), 0u);
   bool changed = false;
-  for (EscapeAnalysis::Alias alias = 0; alias < size(); ++alias) {
-    size_t fields = cache->LoadVirtualObjectsFromStatesFor(alias);
-    if (cache->objects().size() == cache->states().size()) {
-      if (FLAG_trace_turbo_escape) {
-        PrintF("  Merging virtual objects of @%d\n", alias);
-      }
-      VirtualObject* mergeObject = GetOrCreateTrackedVirtualObject(
-          alias, cache->objects().front()->id(), zone);
-      changed = mergeObject->ResizeFields(fields) || changed;
-      for (size_t i = 0; i < fields; ++i) {
-        if (Node* field = cache->GetFields(i)) {
-          changed = mergeObject->SetField(i, field) || changed;
-          if (FLAG_trace_turbo_escape) {
-            PrintF("    Field %zu agree on rep #%d\n", i, field->id());
-          }
-        } else {
-          int value_input_count = static_cast<int>(cache->fields().size());
-          if (cache->fields().size() == cache->objects().size()) {
-            Node* rep = mergeObject->GetField(i);
-            if (!rep || !mergeObject->IsCreatedPhi(i)) {
-              cache->fields().push_back(control);
-              Node* phi = graph->NewNode(
-                  common->Phi(MachineRepresentation::kTagged,
-                              value_input_count),
-                  value_input_count + 1, &cache->fields().front());
-              mergeObject->SetField(i, phi, true);
-              if (FLAG_trace_turbo_escape) {
-                PrintF("    Creating Phi #%d as merge of", phi->id());
-                for (int i = 0; i < value_input_count; i++) {
-                  PrintF(" #%d (%s)", cache->fields()[i]->id(),
-                         cache->fields()[i]->op()->mnemonic());
-                }
-                PrintF("\n");
-              }
-              changed = true;
-            } else {
-              DCHECK(rep->opcode() == IrOpcode::kPhi);
-              for (int n = 0; n < value_input_count; ++n) {
-                if (n < rep->op()->ValueInputCount()) {
-                  Node* old = NodeProperties::GetValueInput(rep, n);
-                  if (old != cache->fields()[n]) {
-                    changed = true;
-                    NodeProperties::ReplaceValueInput(rep, cache->fields()[n],
-                                                      n);
-                  }
-                } else {
-                  changed = true;
-                  rep->InsertInput(graph->zone(), n, cache->fields()[n]);
-                }
-              }
-              if (rep->op()->ValueInputCount() != value_input_count) {
-                if (FLAG_trace_turbo_escape) {
-                  PrintF("    Widening Phi #%d of arity %d to %d", rep->id(),
-                         rep->op()->ValueInputCount(), value_input_count);
-                }
-                NodeProperties::ChangeOp(
-                    rep, common->Phi(MachineRepresentation::kTagged,
-                                     value_input_count));
-              }
-            }
-          } else {
-            changed = mergeObject->SetField(i, nullptr) || changed;
-          }
+  for (Alias alias = 0; alias < size(); ++alias) {
+    cache->objects().clear();
+    VirtualObject* mergeObject = VirtualObjectFromAlias(alias);
+    bool copy_merge_object = false;
+    size_t fields = std::numeric_limits<size_t>::max();
+    for (VirtualState* state : cache->states()) {
+      if (VirtualObject* obj = state->VirtualObjectFromAlias(alias)) {
+        cache->objects().push_back(obj);
+        if (mergeObject == obj) {
+          copy_merge_object = true;
         }
+        fields = std::min(obj->field_count(), fields);
       }
+    }
+    if (cache->objects().size() == cache->states().size()) {
+      if (!mergeObject) {
+        VirtualObject* obj = new (zone)
+            VirtualObject(cache->objects().front()->id(), this, zone, fields,
+                          cache->objects().front()->IsInitialized());
+        SetVirtualObject(alias, obj);
+        mergeObject = obj;
+        changed = true;
+      } else if (copy_merge_object) {
+        VirtualObject* obj = new (zone) VirtualObject(this, *mergeObject);
+        SetVirtualObject(alias, obj);
+        mergeObject = obj;
+        changed = true;
+      } else {
+        changed = mergeObject->ResizeFields(fields) || changed;
+      }
+#ifdef DEBUG
+      if (FLAG_trace_turbo_escape) {
+        PrintF("  Alias @%d, merging into %p virtual objects", alias,
+               static_cast<void*>(mergeObject));
+        for (size_t i = 0; i < cache->objects().size(); i++) {
+          PrintF(" %p", static_cast<void*>(cache->objects()[i]));
+        }
+        PrintF("\n");
+      }
+#endif  // DEBUG
+      changed = mergeObject->MergeFrom(cache, at, graph, common) || changed;
     } else {
+      if (mergeObject) {
+        TRACE("  Alias %d, virtual object removed\n", alias);
+        changed = true;
+      }
       SetVirtualObject(alias, nullptr);
     }
   }
   return changed;
 }
 
-
 EscapeStatusAnalysis::EscapeStatusAnalysis(EscapeAnalysis* object_analysis,
                                            Graph* graph, Zone* zone)
-    : object_analysis_(object_analysis),
+    : stack_(zone),
+      object_analysis_(object_analysis),
       graph_(graph),
       zone_(zone),
-      status_(graph->NodeCount(), kUnknown, zone),
-      queue_(zone) {}
-
+      status_(zone),
+      next_free_alias_(0),
+      status_stack_(zone),
+      aliases_(zone) {}
 
 EscapeStatusAnalysis::~EscapeStatusAnalysis() {}
 
-
 bool EscapeStatusAnalysis::HasEntry(Node* node) {
   return status_[node->id()] & (kTracked | kEscaped);
 }
 
-
 bool EscapeStatusAnalysis::IsVirtual(Node* node) {
-  return (status_[node->id()] & kTracked) && !(status_[node->id()] & kEscaped);
+  return IsVirtual(node->id());
 }
 
+bool EscapeStatusAnalysis::IsVirtual(NodeId id) {
+  return (status_[id] & kTracked) && !(status_[id] & kEscaped);
+}
 
 bool EscapeStatusAnalysis::IsEscaped(Node* node) {
   return status_[node->id()] & kEscaped;
 }
 
-
 bool EscapeStatusAnalysis::IsAllocation(Node* node) {
   return node->opcode() == IrOpcode::kAllocate ||
          node->opcode() == IrOpcode::kFinishRegion;
 }
 
-
 bool EscapeStatusAnalysis::SetEscaped(Node* node) {
   bool changed = !(status_[node->id()] & kEscaped);
   status_[node->id()] |= kEscaped | kTracked;
   return changed;
 }
 
-
-void EscapeStatusAnalysis::Resize() {
-  status_.resize(graph()->NodeCount(), kUnknown);
+bool EscapeStatusAnalysis::IsInQueue(NodeId id) {
+  return status_[id] & kInQueue;
 }
 
-
-size_t EscapeStatusAnalysis::size() { return status_.size(); }
-
-
-void EscapeStatusAnalysis::Run() {
-  Resize();
-  queue_.push_back(graph()->end());
-  status_[graph()->end()->id()] |= kOnStack;
-  while (!queue_.empty()) {
-    Node* node = queue_.front();
-    queue_.pop_front();
-    status_[node->id()] &= ~kOnStack;
-    Process(node);
-    status_[node->id()] |= kVisited;
-    for (Edge edge : node->input_edges()) {
-      Node* input = edge.to();
-      if (!(status_[input->id()] & (kVisited | kOnStack))) {
-        queue_.push_back(input);
-        status_[input->id()] |= kOnStack;
-      }
-    }
+void EscapeStatusAnalysis::SetInQueue(NodeId id, bool on_stack) {
+  if (on_stack) {
+    status_[id] |= kInQueue;
+  } else {
+    status_[id] &= ~kInQueue;
   }
 }
 
+void EscapeStatusAnalysis::ResizeStatusVector() {
+  if (status_.size() <= graph()->NodeCount()) {
+    status_.resize(graph()->NodeCount() * 1.1, kUnknown);
+  }
+}
+
+size_t EscapeStatusAnalysis::GetStatusVectorSize() { return status_.size(); }
+
+void EscapeStatusAnalysis::RunStatusAnalysis() {
+  ResizeStatusVector();
+  while (!status_stack_.empty()) {
+    Node* node = status_stack_.back();
+    status_stack_.pop_back();
+    status_[node->id()] &= ~kOnStack;
+    Process(node);
+    status_[node->id()] |= kVisited;
+  }
+}
+
+void EscapeStatusAnalysis::EnqueueForStatusAnalysis(Node* node) {
+  DCHECK_NOT_NULL(node);
+  if (!(status_[node->id()] & kOnStack)) {
+    status_stack_.push_back(node);
+    status_[node->id()] |= kOnStack;
+  }
+}
 
 void EscapeStatusAnalysis::RevisitInputs(Node* node) {
   for (Edge edge : node->input_edges()) {
     Node* input = edge.to();
     if (!(status_[input->id()] & kOnStack)) {
-      queue_.push_back(input);
+      status_stack_.push_back(input);
       status_[input->id()] |= kOnStack;
     }
   }
 }
 
-
 void EscapeStatusAnalysis::RevisitUses(Node* node) {
   for (Edge edge : node->use_edges()) {
     Node* use = edge.from();
-    if (!(status_[use->id()] & kOnStack)) {
-      queue_.push_back(use);
+    if (!(status_[use->id()] & kOnStack) && !IsNotReachable(use)) {
+      status_stack_.push_back(use);
       status_[use->id()] |= kOnStack;
     }
   }
 }
 
-
 void EscapeStatusAnalysis::Process(Node* node) {
   switch (node->opcode()) {
     case IrOpcode::kAllocate:
@@ -535,15 +591,17 @@
           RevisitUses(rep);
         }
       }
+      RevisitUses(node);
       break;
     }
     case IrOpcode::kPhi:
       if (!HasEntry(node)) {
         status_[node->id()] |= kTracked;
-        if (!IsAllocationPhi(node)) {
-          SetEscaped(node);
-          RevisitUses(node);
-        }
+        RevisitUses(node);
+      }
+      if (!IsAllocationPhi(node) && SetEscaped(node)) {
+        RevisitInputs(node);
+        RevisitUses(node);
       }
       CheckUsesForEscape(node);
     default:
@@ -551,7 +609,6 @@
   }
 }
 
-
 bool EscapeStatusAnalysis::IsAllocationPhi(Node* node) {
   for (Edge edge : node->input_edges()) {
     Node* input = edge.to();
@@ -562,7 +619,6 @@
   return true;
 }
 
-
 void EscapeStatusAnalysis::ProcessStoreField(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kStoreField);
   Node* to = NodeProperties::GetValueInput(node, 0);
@@ -570,14 +626,11 @@
   if ((IsEscaped(to) || !IsAllocation(to)) && SetEscaped(val)) {
     RevisitUses(val);
     RevisitInputs(val);
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Setting #%d (%s) to escaped because of store to field of #%d\n",
-             val->id(), val->op()->mnemonic(), to->id());
-    }
+    TRACE("Setting #%d (%s) to escaped because of store to field of #%d\n",
+          val->id(), val->op()->mnemonic(), to->id());
   }
 }
 
-
 void EscapeStatusAnalysis::ProcessStoreElement(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kStoreElement);
   Node* to = NodeProperties::GetValueInput(node, 0);
@@ -585,34 +638,27 @@
   if ((IsEscaped(to) || !IsAllocation(to)) && SetEscaped(val)) {
     RevisitUses(val);
     RevisitInputs(val);
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Setting #%d (%s) to escaped because of store to field of #%d\n",
-             val->id(), val->op()->mnemonic(), to->id());
-    }
+    TRACE("Setting #%d (%s) to escaped because of store to field of #%d\n",
+          val->id(), val->op()->mnemonic(), to->id());
   }
 }
 
-
 void EscapeStatusAnalysis::ProcessAllocate(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
   if (!HasEntry(node)) {
     status_[node->id()] |= kTracked;
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Created status entry for node #%d (%s)\n", node->id(),
-             node->op()->mnemonic());
-    }
+    TRACE("Created status entry for node #%d (%s)\n", node->id(),
+          node->op()->mnemonic());
     NumberMatcher size(node->InputAt(0));
     DCHECK(node->InputAt(0)->opcode() != IrOpcode::kInt32Constant &&
            node->InputAt(0)->opcode() != IrOpcode::kInt64Constant &&
            node->InputAt(0)->opcode() != IrOpcode::kFloat32Constant &&
            node->InputAt(0)->opcode() != IrOpcode::kFloat64Constant);
+    RevisitUses(node);
     if (!size.HasValue() && SetEscaped(node)) {
-      RevisitUses(node);
-      if (FLAG_trace_turbo_escape) {
-        PrintF("Setting #%d to escaped because of non-const alloc\n",
-               node->id());
-      }
-      // This node is known to escape, uses do not have to be checked.
+      TRACE("Setting #%d to escaped because of non-const alloc\n", node->id());
+      // This node is already known to escape; its uses do not have to be
+      // checked for escape.
       return;
     }
   }
@@ -621,24 +667,22 @@
   }
 }
 
-
 bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
                                               bool phi_escaping) {
   for (Edge edge : uses->use_edges()) {
     Node* use = edge.from();
+    if (IsNotReachable(use)) continue;
     if (edge.index() >= use->op()->ValueInputCount() +
                             OperatorProperties::GetContextInputCount(use->op()))
       continue;
     switch (use->opcode()) {
       case IrOpcode::kPhi:
         if (phi_escaping && SetEscaped(rep)) {
-          if (FLAG_trace_turbo_escape) {
-            PrintF(
-                "Setting #%d (%s) to escaped because of use by phi node "
-                "#%d (%s)\n",
-                rep->id(), rep->op()->mnemonic(), use->id(),
-                use->op()->mnemonic());
-          }
+          TRACE(
+              "Setting #%d (%s) to escaped because of use by phi node "
+              "#%d (%s)\n",
+              rep->id(), rep->op()->mnemonic(), use->id(),
+              use->op()->mnemonic());
           return true;
         }
       // Fallthrough.
@@ -651,37 +695,41 @@
       case IrOpcode::kReferenceEqual:
       case IrOpcode::kFinishRegion:
         if (IsEscaped(use) && SetEscaped(rep)) {
-          if (FLAG_trace_turbo_escape) {
-            PrintF(
-                "Setting #%d (%s) to escaped because of use by escaping node "
-                "#%d (%s)\n",
-                rep->id(), rep->op()->mnemonic(), use->id(),
-                use->op()->mnemonic());
-          }
+          TRACE(
+              "Setting #%d (%s) to escaped because of use by escaping node "
+              "#%d (%s)\n",
+              rep->id(), rep->op()->mnemonic(), use->id(),
+              use->op()->mnemonic());
           return true;
         }
         break;
       case IrOpcode::kObjectIsSmi:
         if (!IsAllocation(rep) && SetEscaped(rep)) {
-          PrintF("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
-                 rep->id(), rep->op()->mnemonic(), use->id(),
-                 use->op()->mnemonic());
+          TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
+                rep->id(), rep->op()->mnemonic(), use->id(),
+                use->op()->mnemonic());
+          return true;
+        }
+        break;
+      case IrOpcode::kSelect:
+        if (SetEscaped(rep)) {
+          TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
+                rep->id(), rep->op()->mnemonic(), use->id(),
+                use->op()->mnemonic());
           return true;
         }
         break;
       default:
         if (use->op()->EffectInputCount() == 0 &&
             uses->op()->EffectInputCount() > 0) {
-          PrintF("Encountered unaccounted use by #%d (%s)\n", use->id(),
-                 use->op()->mnemonic());
+          TRACE("Encountered unaccounted use by #%d (%s)\n", use->id(),
+                use->op()->mnemonic());
           UNREACHABLE();
         }
         if (SetEscaped(rep)) {
-          if (FLAG_trace_turbo_escape) {
-            PrintF("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
-                   rep->id(), rep->op()->mnemonic(), use->id(),
-                   use->op()->mnemonic());
-          }
+          TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
+                rep->id(), rep->op()->mnemonic(), use->id(),
+                use->op()->mnemonic());
           return true;
         }
     }
@@ -689,7 +737,6 @@
   return false;
 }
 
-
 void EscapeStatusAnalysis::ProcessFinishRegion(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kFinishRegion);
   if (!HasEntry(node)) {
@@ -701,7 +748,6 @@
   }
 }
 
-
 void EscapeStatusAnalysis::DebugPrint() {
   for (NodeId id = 0; id < status_.size(); id++) {
     if (status_[id] & kTracked) {
@@ -711,58 +757,69 @@
   }
 }
 
-
 EscapeAnalysis::EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common,
                                Zone* zone)
-    : graph_(graph),
+    : status_analysis_(this, graph, zone),
       common_(common),
-      zone_(zone),
       virtual_states_(zone),
       replacements_(zone),
-      escape_status_(this, graph, zone),
-      cache_(new (zone) MergeCache(zone)),
-      aliases_(zone),
-      next_free_alias_(0) {}
-
+      cache_(nullptr) {}
 
 EscapeAnalysis::~EscapeAnalysis() {}
 
-
 void EscapeAnalysis::Run() {
   replacements_.resize(graph()->NodeCount());
-  AssignAliases();
-  RunObjectAnalysis();
-  escape_status_.Run();
+  status_analysis_.AssignAliases();
+  if (status_analysis_.AliasCount() > 0) {
+    cache_ = new (zone()) MergeCache(zone());
+    replacements_.resize(graph()->NodeCount());
+    status_analysis_.ResizeStatusVector();
+    RunObjectAnalysis();
+    status_analysis_.RunStatusAnalysis();
+  }
 }
 
-
-void EscapeAnalysis::AssignAliases() {
-  ZoneVector<Node*> stack(zone());
-  stack.push_back(graph()->end());
+void EscapeStatusAnalysis::AssignAliases() {
+  size_t max_size = 1024;
+  size_t min_size = 32;
+  size_t stack_size =
+      std::min(std::max(graph()->NodeCount() / 5, min_size), max_size);
+  stack_.reserve(stack_size);
+  ResizeStatusVector();
+  stack_.push_back(graph()->end());
   CHECK_LT(graph()->NodeCount(), kUntrackable);
   aliases_.resize(graph()->NodeCount(), kNotReachable);
   aliases_[graph()->end()->id()] = kUntrackable;
-  while (!stack.empty()) {
-    Node* node = stack.back();
-    stack.pop_back();
+  status_stack_.reserve(8);
+  TRACE("Discovering trackable nodes");
+  while (!stack_.empty()) {
+    Node* node = stack_.back();
+    stack_.pop_back();
     switch (node->opcode()) {
       case IrOpcode::kAllocate:
         if (aliases_[node->id()] >= kUntrackable) {
           aliases_[node->id()] = NextAlias();
+          TRACE(" @%d:%s#%u", aliases_[node->id()], node->op()->mnemonic(),
+                node->id());
+          EnqueueForStatusAnalysis(node);
         }
         break;
       case IrOpcode::kFinishRegion: {
         Node* allocate = NodeProperties::GetValueInput(node, 0);
+        DCHECK_NOT_NULL(allocate);
         if (allocate->opcode() == IrOpcode::kAllocate) {
           if (aliases_[allocate->id()] >= kUntrackable) {
             if (aliases_[allocate->id()] == kNotReachable) {
-              stack.push_back(allocate);
+              stack_.push_back(allocate);
             }
             aliases_[allocate->id()] = NextAlias();
+            TRACE(" @%d:%s#%u", aliases_[allocate->id()],
+                  allocate->op()->mnemonic(), allocate->id());
+            EnqueueForStatusAnalysis(allocate);
           }
           aliases_[node->id()] = aliases_[allocate->id()];
-        } else {
-          aliases_[node->id()] = NextAlias();
+          TRACE(" @%d:%s#%u", aliases_[node->id()], node->op()->mnemonic(),
+                node->id());
         }
         break;
       }
@@ -773,81 +830,119 @@
     for (Edge edge : node->input_edges()) {
       Node* input = edge.to();
       if (aliases_[input->id()] == kNotReachable) {
-        stack.push_back(input);
+        stack_.push_back(input);
         aliases_[input->id()] = kUntrackable;
       }
     }
   }
-
-  if (FLAG_trace_turbo_escape) {
-    PrintF("Discovered trackable nodes");
-    for (EscapeAnalysis::Alias id = 0; id < graph()->NodeCount(); ++id) {
-      if (aliases_[id] < kUntrackable) {
-        if (FLAG_trace_turbo_escape) {
-          PrintF(" #%u", id);
-        }
-      }
-    }
-    PrintF("\n");
-  }
+  TRACE("\n");
 }
 
+bool EscapeStatusAnalysis::IsNotReachable(Node* node) {
+  if (node->id() >= aliases_.size()) {
+    return false;
+  }
+  return aliases_[node->id()] == kNotReachable;
+}
 
 void EscapeAnalysis::RunObjectAnalysis() {
   virtual_states_.resize(graph()->NodeCount());
-  ZoneVector<Node*> stack(zone());
-  stack.push_back(graph()->start());
-  while (!stack.empty()) {
-    Node* node = stack.back();
-    stack.pop_back();
-    if (aliases_[node->id()] != kNotReachable && Process(node)) {
+  ZoneDeque<Node*> queue(zone());
+  queue.push_back(graph()->start());
+  ZoneVector<Node*> danglers(zone());
+  while (!queue.empty()) {
+    Node* node = queue.back();
+    queue.pop_back();
+    status_analysis_.SetInQueue(node->id(), false);
+    if (Process(node)) {
       for (Edge edge : node->use_edges()) {
+        Node* use = edge.from();
+        if (IsNotReachable(use)) {
+          continue;
+        }
         if (NodeProperties::IsEffectEdge(edge)) {
-          Node* use = edge.from();
-          if ((use->opcode() != IrOpcode::kLoadField &&
-               use->opcode() != IrOpcode::kLoadElement) ||
-              !IsDanglingEffectNode(use)) {
-            stack.push_back(use);
+          // Iteration order: depth first, but delay phis.
+          // We need DFS to avoid some duplication of VirtualStates and
+          // VirtualObjects, and we want to delay phis to improve performance.
+          if (use->opcode() == IrOpcode::kEffectPhi) {
+            if (!status_analysis_.IsInQueue(use->id())) {
+              queue.push_front(use);
+            }
+          } else if ((use->opcode() != IrOpcode::kLoadField &&
+                      use->opcode() != IrOpcode::kLoadElement) ||
+                     !IsDanglingEffectNode(use)) {
+            if (!status_analysis_.IsInQueue(use->id())) {
+              status_analysis_.SetInQueue(use->id(), true);
+              queue.push_back(use);
+            }
+          } else {
+            danglers.push_back(use);
           }
         }
       }
-      // First process loads: dangling loads are a problem otherwise.
-      for (Edge edge : node->use_edges()) {
-        if (NodeProperties::IsEffectEdge(edge)) {
-          Node* use = edge.from();
-          if ((use->opcode() == IrOpcode::kLoadField ||
-               use->opcode() == IrOpcode::kLoadElement) &&
-              IsDanglingEffectNode(use)) {
-            stack.push_back(use);
-          }
-        }
-      }
+      // Danglers need to be processed immediately, even if they are
+      // on the stack. Since they do not have effect outputs,
+      // we don't have to track whether they are on the stack.
+      queue.insert(queue.end(), danglers.begin(), danglers.end());
+      danglers.clear();
     }
   }
+#ifdef DEBUG
   if (FLAG_trace_turbo_escape) {
     DebugPrint();
   }
+#endif
 }
 
-
-bool EscapeAnalysis::IsDanglingEffectNode(Node* node) {
-  if (node->op()->EffectInputCount() == 0) return false;
-  if (node->op()->EffectOutputCount() == 0) return false;
-  if (node->op()->EffectInputCount() == 1 &&
-      NodeProperties::GetEffectInput(node)->opcode() == IrOpcode::kStart) {
+bool EscapeStatusAnalysis::IsDanglingEffectNode(Node* node) {
+  if (status_[node->id()] & kDanglingComputed) {
+    return status_[node->id()] & kDangling;
+  }
+  if (node->op()->EffectInputCount() == 0 ||
+      node->op()->EffectOutputCount() == 0 ||
+      (node->op()->EffectInputCount() == 1 &&
+       NodeProperties::GetEffectInput(node)->opcode() == IrOpcode::kStart)) {
     // The start node is used as sentinel for nodes that are in general
     // effectful, but of which an analysis has determined that they do not
     // produce effects in this instance. We don't consider these nodes dangling.
+    status_[node->id()] |= kDanglingComputed;
     return false;
   }
   for (Edge edge : node->use_edges()) {
+    Node* use = edge.from();
+    if (aliases_[use->id()] == kNotReachable) continue;
     if (NodeProperties::IsEffectEdge(edge)) {
+      status_[node->id()] |= kDanglingComputed;
       return false;
     }
   }
+  status_[node->id()] |= kDanglingComputed | kDangling;
   return true;
 }
 
+bool EscapeStatusAnalysis::IsEffectBranchPoint(Node* node) {
+  if (status_[node->id()] & kBranchPointComputed) {
+    return status_[node->id()] & kBranchPoint;
+  }
+  int count = 0;
+  for (Edge edge : node->use_edges()) {
+    Node* use = edge.from();
+    if (aliases_[use->id()] == kNotReachable) continue;
+    if (NodeProperties::IsEffectEdge(edge)) {
+      if ((use->opcode() == IrOpcode::kLoadField ||
+           use->opcode() == IrOpcode::kLoadElement ||
+           use->opcode() == IrOpcode::kLoad) &&
+          IsDanglingEffectNode(use))
+        continue;
+      if (++count > 1) {
+        status_[node->id()] |= kBranchPointComputed | kBranchPoint;
+        return true;
+      }
+    }
+  }
+  status_[node->id()] |= kBranchPointComputed;
+  return false;
+}
 
 bool EscapeAnalysis::Process(Node* node) {
   switch (node->opcode()) {
@@ -888,12 +983,12 @@
   return true;
 }
 
-
 void EscapeAnalysis::ProcessAllocationUsers(Node* node) {
   for (Edge edge : node->input_edges()) {
     Node* input = edge.to();
-    if (!NodeProperties::IsValueEdge(edge) &&
-        !NodeProperties::IsContextEdge(edge))
+    Node* use = edge.from();
+    if (edge.index() >= use->op()->ValueInputCount() +
+                            OperatorProperties::GetContextInputCount(use->op()))
       continue;
     switch (node->opcode()) {
       case IrOpcode::kStoreField:
@@ -904,13 +999,17 @@
       case IrOpcode::kStateValues:
       case IrOpcode::kReferenceEqual:
       case IrOpcode::kFinishRegion:
-      case IrOpcode::kPhi:
+      case IrOpcode::kObjectIsSmi:
         break;
       default:
         VirtualState* state = virtual_states_[node->id()];
-        if (VirtualObject* obj = ResolveVirtualObject(state, input)) {
-          if (obj->ClearAllFields()) {
-            state->LastChangedAt(node);
+        if (VirtualObject* obj =
+                GetVirtualObject(state, ResolveReplacement(input))) {
+          if (!obj->AllFieldsClear()) {
+            obj = CopyForModificationAt(obj, state, node);
+            obj->ClearAllFields();
+            TRACE("Cleared all fields of @%d:#%d\n", GetAlias(obj->id()),
+                  obj->id());
           }
         }
         break;
@@ -918,22 +1017,32 @@
   }
 }
 
-
-bool EscapeAnalysis::IsEffectBranchPoint(Node* node) {
-  int count = 0;
-  for (Edge edge : node->use_edges()) {
-    if (NodeProperties::IsEffectEdge(edge)) {
-      if (++count > 1) {
-        return true;
-      }
-    }
+VirtualState* EscapeAnalysis::CopyForModificationAt(VirtualState* state,
+                                                    Node* node) {
+  if (state->owner() != node) {
+    VirtualState* new_state = new (zone()) VirtualState(node, *state);
+    virtual_states_[node->id()] = new_state;
+    TRACE("Copying virtual state %p to new state %p at node %s#%d\n",
+          static_cast<void*>(state), static_cast<void*>(new_state),
+          node->op()->mnemonic(), node->id());
+    return new_state;
   }
-  return false;
+  return state;
 }
 
+VirtualObject* EscapeAnalysis::CopyForModificationAt(VirtualObject* obj,
+                                                     VirtualState* state,
+                                                     Node* node) {
+  if (obj->NeedCopyForModification()) {
+    state = CopyForModificationAt(state, node);
+    return state->Copy(obj, GetAlias(obj->id()));
+  }
+  return obj;
+}
 
 void EscapeAnalysis::ForwardVirtualState(Node* node) {
   DCHECK_EQ(node->op()->EffectInputCount(), 1);
+#ifdef DEBUG
   if (node->opcode() != IrOpcode::kLoadField &&
       node->opcode() != IrOpcode::kLoadElement &&
       node->opcode() != IrOpcode::kLoad && IsDanglingEffectNode(node)) {
@@ -941,189 +1050,154 @@
            node->op()->mnemonic());
     UNREACHABLE();
   }
+#endif  // DEBUG
   Node* effect = NodeProperties::GetEffectInput(node);
-  // Break the cycle for effect phis.
-  if (effect->opcode() == IrOpcode::kEffectPhi) {
-    if (virtual_states_[effect->id()] == nullptr) {
-      virtual_states_[effect->id()] =
-          new (zone()) VirtualState(zone(), AliasCount());
-    }
-  }
   DCHECK_NOT_NULL(virtual_states_[effect->id()]);
-  if (IsEffectBranchPoint(effect)) {
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Copying object state %p from #%d (%s) to #%d (%s)\n",
-             static_cast<void*>(virtual_states_[effect->id()]), effect->id(),
-             effect->op()->mnemonic(), node->id(), node->op()->mnemonic());
-    }
-    if (!virtual_states_[node->id()]) {
-      virtual_states_[node->id()] =
-          new (zone()) VirtualState(*virtual_states_[effect->id()]);
-    } else {
-      virtual_states_[node->id()]->UpdateFrom(virtual_states_[effect->id()],
-                                              zone());
-    }
+  if (virtual_states_[node->id()]) {
+    virtual_states_[node->id()]->UpdateFrom(virtual_states_[effect->id()],
+                                            zone());
   } else {
     virtual_states_[node->id()] = virtual_states_[effect->id()];
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Forwarding object state %p from #%d (%s) to #%d (%s)\n",
-             static_cast<void*>(virtual_states_[effect->id()]), effect->id(),
-             effect->op()->mnemonic(), node->id(), node->op()->mnemonic());
+    TRACE("Forwarding object state %p from %s#%d to %s#%d",
+          static_cast<void*>(virtual_states_[effect->id()]),
+          effect->op()->mnemonic(), effect->id(), node->op()->mnemonic(),
+          node->id());
+    if (IsEffectBranchPoint(effect) ||
+        OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
+      virtual_states_[node->id()]->SetCopyRequired();
+      TRACE(", effect input %s#%d is branch point", effect->op()->mnemonic(),
+            effect->id());
     }
+    TRACE("\n");
   }
 }
 
-
 void EscapeAnalysis::ProcessStart(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kStart);
-  virtual_states_[node->id()] = new (zone()) VirtualState(zone(), AliasCount());
+  virtual_states_[node->id()] =
+      new (zone()) VirtualState(node, zone(), AliasCount());
 }
 
-
 bool EscapeAnalysis::ProcessEffectPhi(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kEffectPhi);
   bool changed = false;
 
   VirtualState* mergeState = virtual_states_[node->id()];
   if (!mergeState) {
-    mergeState = new (zone()) VirtualState(zone(), AliasCount());
+    mergeState = new (zone()) VirtualState(node, zone(), AliasCount());
     virtual_states_[node->id()] = mergeState;
     changed = true;
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Effect Phi #%d got new states map %p.\n", node->id(),
-             static_cast<void*>(mergeState));
-    }
-  } else if (mergeState->GetLastChanged() != node) {
-    changed = true;
+    TRACE("Effect Phi #%d got new virtual state %p.\n", node->id(),
+          static_cast<void*>(mergeState));
   }
 
   cache_->Clear();
 
-  if (FLAG_trace_turbo_escape) {
-    PrintF("At Effect Phi #%d, merging states into %p:", node->id(),
-           static_cast<void*>(mergeState));
-  }
+  TRACE("At Effect Phi #%d, merging states into %p:", node->id(),
+        static_cast<void*>(mergeState));
 
   for (int i = 0; i < node->op()->EffectInputCount(); ++i) {
     Node* input = NodeProperties::GetEffectInput(node, i);
     VirtualState* state = virtual_states_[input->id()];
     if (state) {
       cache_->states().push_back(state);
+      if (state == mergeState) {
+        mergeState = new (zone()) VirtualState(node, zone(), AliasCount());
+        virtual_states_[node->id()] = mergeState;
+        changed = true;
+      }
     }
-    if (FLAG_trace_turbo_escape) {
-      PrintF(" %p (from %d %s)", static_cast<void*>(state), input->id(),
-             input->op()->mnemonic());
-    }
+    TRACE(" %p (from %d %s)", static_cast<void*>(state), input->id(),
+          input->op()->mnemonic());
   }
-  if (FLAG_trace_turbo_escape) {
-    PrintF("\n");
-  }
+  TRACE("\n");
 
   if (cache_->states().size() == 0) {
     return changed;
   }
 
-  changed = mergeState->MergeFrom(cache_, zone(), graph(), common(),
-                                  NodeProperties::GetControlInput(node)) ||
-            changed;
+  changed =
+      mergeState->MergeFrom(cache_, zone(), graph(), common(), node) || changed;
 
-  if (FLAG_trace_turbo_escape) {
-    PrintF("Merge %s the node.\n", changed ? "changed" : "did not change");
-  }
+  TRACE("Merge %s the node.\n", changed ? "changed" : "did not change");
 
   if (changed) {
-    mergeState->LastChangedAt(node);
-    escape_status_.Resize();
+    status_analysis_.ResizeStatusVector();
   }
   return changed;
 }
 
-
 void EscapeAnalysis::ProcessAllocation(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
   ForwardVirtualState(node);
+  VirtualState* state = virtual_states_[node->id()];
+  Alias alias = GetAlias(node->id());
 
   // Check if we have already processed this node.
-  if (virtual_states_[node->id()]->VirtualObjectFromAlias(
-          aliases_[node->id()])) {
+  if (state->VirtualObjectFromAlias(alias)) {
     return;
   }
 
+  if (state->owner()->opcode() == IrOpcode::kEffectPhi) {
+    state = CopyForModificationAt(state, node);
+  }
+
   NumberMatcher size(node->InputAt(0));
   DCHECK(node->InputAt(0)->opcode() != IrOpcode::kInt32Constant &&
          node->InputAt(0)->opcode() != IrOpcode::kInt64Constant &&
          node->InputAt(0)->opcode() != IrOpcode::kFloat32Constant &&
          node->InputAt(0)->opcode() != IrOpcode::kFloat64Constant);
   if (size.HasValue()) {
-    virtual_states_[node->id()]->SetVirtualObject(
-        aliases_[node->id()],
-        new (zone())
-            VirtualObject(node->id(), zone(), size.Value() / kPointerSize));
+    VirtualObject* obj = new (zone()) VirtualObject(
+        node->id(), state, zone(), size.Value() / kPointerSize, false);
+    state->SetVirtualObject(alias, obj);
   } else {
-    virtual_states_[node->id()]->SetVirtualObject(
-        aliases_[node->id()], new (zone()) VirtualObject(node->id(), zone()));
+    state->SetVirtualObject(
+        alias, new (zone()) VirtualObject(node->id(), state, zone()));
   }
-  virtual_states_[node->id()]->LastChangedAt(node);
 }
 
-
 void EscapeAnalysis::ProcessFinishRegion(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kFinishRegion);
   ForwardVirtualState(node);
   Node* allocation = NodeProperties::GetValueInput(node, 0);
   if (allocation->opcode() == IrOpcode::kAllocate) {
     VirtualState* state = virtual_states_[node->id()];
-    if (!state->VirtualObjectFromAlias(aliases_[node->id()])) {
-      VirtualObject* vobj_alloc =
-          state->VirtualObjectFromAlias(aliases_[allocation->id()]);
-      DCHECK_NOT_NULL(vobj_alloc);
-      state->SetVirtualObject(aliases_[node->id()], vobj_alloc);
-      if (FLAG_trace_turbo_escape) {
-        PrintF("Linked finish region node #%d to node #%d\n", node->id(),
-               allocation->id());
-      }
-      state->LastChangedAt(node);
-    }
+    VirtualObject* obj = state->VirtualObjectFromAlias(GetAlias(node->id()));
+    DCHECK_NOT_NULL(obj);
+    obj->SetInitialized();
   }
 }
 
-
 Node* EscapeAnalysis::replacement(NodeId id) {
   if (id >= replacements_.size()) return nullptr;
   return replacements_[id];
 }
 
-
 Node* EscapeAnalysis::replacement(Node* node) {
   return replacement(node->id());
 }
 
-
 bool EscapeAnalysis::SetReplacement(Node* node, Node* rep) {
   bool changed = replacements_[node->id()] != rep;
   replacements_[node->id()] = rep;
   return changed;
 }
 
-
 bool EscapeAnalysis::UpdateReplacement(VirtualState* state, Node* node,
                                        Node* rep) {
   if (SetReplacement(node, rep)) {
-    state->LastChangedAt(node);
-    if (FLAG_trace_turbo_escape) {
-      if (rep) {
-        PrintF("Replacement of #%d is #%d (%s)\n", node->id(), rep->id(),
-               rep->op()->mnemonic());
-      } else {
-        PrintF("Replacement of #%d cleared\n", node->id());
-      }
+    if (rep) {
+      TRACE("Replacement of #%d is #%d (%s)\n", node->id(), rep->id(),
+            rep->op()->mnemonic());
+    } else {
+      TRACE("Replacement of #%d cleared\n", node->id());
     }
     return true;
   }
   return false;
 }
 
-
 Node* EscapeAnalysis::ResolveReplacement(Node* node) {
   while (replacement(node)) {
     node = replacement(node);
@@ -1131,12 +1205,10 @@
   return node;
 }
 
-
 Node* EscapeAnalysis::GetReplacement(Node* node) {
   return GetReplacement(node->id());
 }
 
-
 Node* EscapeAnalysis::GetReplacement(NodeId id) {
   Node* node = nullptr;
   while (replacement(id)) {
@@ -1146,50 +1218,31 @@
   return node;
 }
 
-
 bool EscapeAnalysis::IsVirtual(Node* node) {
-  if (node->id() >= escape_status_.size()) {
+  if (node->id() >= status_analysis_.GetStatusVectorSize()) {
     return false;
   }
-  return escape_status_.IsVirtual(node);
+  return status_analysis_.IsVirtual(node);
 }
 
-
 bool EscapeAnalysis::IsEscaped(Node* node) {
-  if (node->id() >= escape_status_.size()) {
+  if (node->id() >= status_analysis_.GetStatusVectorSize()) {
     return false;
   }
-  return escape_status_.IsEscaped(node);
+  return status_analysis_.IsEscaped(node);
 }
 
-
 bool EscapeAnalysis::SetEscaped(Node* node) {
-  return escape_status_.SetEscaped(node);
+  return status_analysis_.SetEscaped(node);
 }
 
-
 VirtualObject* EscapeAnalysis::GetVirtualObject(Node* at, NodeId id) {
   if (VirtualState* states = virtual_states_[at->id()]) {
-    return states->VirtualObjectFromAlias(aliases_[id]);
+    return states->VirtualObjectFromAlias(GetAlias(id));
   }
   return nullptr;
 }
 
-
-VirtualObject* EscapeAnalysis::ResolveVirtualObject(VirtualState* state,
-                                                    Node* node) {
-  VirtualObject* obj = GetVirtualObject(state, ResolveReplacement(node));
-  while (obj && replacement(obj->id())) {
-    if (VirtualObject* next = GetVirtualObject(state, replacement(obj->id()))) {
-      obj = next;
-    } else {
-      break;
-    }
-  }
-  return obj;
-}
-
-
 bool EscapeAnalysis::CompareVirtualObjects(Node* left, Node* right) {
   DCHECK(IsVirtual(left) && IsVirtual(right));
   left = ResolveReplacement(left);
@@ -1200,83 +1253,78 @@
   return false;
 }
 
-
 int EscapeAnalysis::OffsetFromAccess(Node* node) {
   DCHECK(OpParameter<FieldAccess>(node).offset % kPointerSize == 0);
   return OpParameter<FieldAccess>(node).offset / kPointerSize;
 }
 
-
-void EscapeAnalysis::ProcessLoadFromPhi(int offset, Node* from, Node* node,
+void EscapeAnalysis::ProcessLoadFromPhi(int offset, Node* from, Node* load,
                                         VirtualState* state) {
-  if (FLAG_trace_turbo_escape) {
-    PrintF("Load #%d from phi #%d", node->id(), from->id());
-  }
+  TRACE("Load #%d from phi #%d", load->id(), from->id());
 
   cache_->fields().clear();
-  for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
-    Node* input = NodeProperties::GetValueInput(node, i);
+  for (int i = 0; i < load->op()->ValueInputCount(); ++i) {
+    Node* input = NodeProperties::GetValueInput(load, i);
     cache_->fields().push_back(input);
   }
 
-  cache_->LoadVirtualObjectsForFieldsFrom(state, aliases_);
+  cache_->LoadVirtualObjectsForFieldsFrom(state,
+                                          status_analysis_.GetAliasMap());
   if (cache_->objects().size() == cache_->fields().size()) {
     cache_->GetFields(offset);
     if (cache_->fields().size() == cache_->objects().size()) {
-      Node* rep = replacement(node);
+      Node* rep = replacement(load);
       if (!rep || !IsEquivalentPhi(rep, cache_->fields())) {
         int value_input_count = static_cast<int>(cache_->fields().size());
         cache_->fields().push_back(NodeProperties::GetControlInput(from));
         Node* phi = graph()->NewNode(
             common()->Phi(MachineRepresentation::kTagged, value_input_count),
             value_input_count + 1, &cache_->fields().front());
-        escape_status_.Resize();
-        SetReplacement(node, phi);
-        state->LastChangedAt(node);
-        if (FLAG_trace_turbo_escape) {
-          PrintF(" got phi created.\n");
-        }
-      } else if (FLAG_trace_turbo_escape) {
-        PrintF(" has already phi #%d.\n", rep->id());
+        status_analysis_.ResizeStatusVector();
+        SetReplacement(load, phi);
+        TRACE(" got phi created.\n");
+      } else {
+        TRACE(" has already phi #%d.\n", rep->id());
       }
-    } else if (FLAG_trace_turbo_escape) {
-      PrintF(" has incomplete field info.\n");
+    } else {
+      TRACE(" has incomplete field info.\n");
     }
-  } else if (FLAG_trace_turbo_escape) {
-    PrintF(" has incomplete virtual object info.\n");
+  } else {
+    TRACE(" has incomplete virtual object info.\n");
   }
 }
 
-
 void EscapeAnalysis::ProcessLoadField(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kLoadField);
   ForwardVirtualState(node);
-  Node* from = NodeProperties::GetValueInput(node, 0);
+  Node* from = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
   VirtualState* state = virtual_states_[node->id()];
-  if (VirtualObject* object = ResolveVirtualObject(state, from)) {
+  if (VirtualObject* object = GetVirtualObject(state, from)) {
     int offset = OffsetFromAccess(node);
-    if (!object->IsTracked()) return;
+    if (!object->IsTracked() ||
+        static_cast<size_t>(offset) >= object->field_count()) {
+      return;
+    }
     Node* value = object->GetField(offset);
     if (value) {
       value = ResolveReplacement(value);
     }
     // Record that the load has this alias.
     UpdateReplacement(state, node, value);
+  } else if (from->opcode() == IrOpcode::kPhi &&
+             OpParameter<FieldAccess>(node).offset % kPointerSize == 0) {
+    int offset = OffsetFromAccess(node);
+    // Only binary phis are supported for now.
+    ProcessLoadFromPhi(offset, from, node, state);
   } else {
-    if (from->opcode() == IrOpcode::kPhi &&
-        OpParameter<FieldAccess>(node).offset % kPointerSize == 0) {
-      int offset = OffsetFromAccess(node);
-      // Only binary phis are supported for now.
-      ProcessLoadFromPhi(offset, from, node, state);
-    }
+    UpdateReplacement(state, node, nullptr);
   }
 }
 
-
 void EscapeAnalysis::ProcessLoadElement(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kLoadElement);
   ForwardVirtualState(node);
-  Node* from = NodeProperties::GetValueInput(node, 0);
+  Node* from = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
   VirtualState* state = virtual_states_[node->id()];
   Node* index_node = node->InputAt(1);
   NumberMatcher index(index_node);
@@ -1287,12 +1335,16 @@
   ElementAccess access = OpParameter<ElementAccess>(node);
   if (index.HasValue()) {
     int offset = index.Value() + access.header_size / kPointerSize;
-    if (VirtualObject* object = ResolveVirtualObject(state, from)) {
+    if (VirtualObject* object = GetVirtualObject(state, from)) {
       CHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
                kPointerSizeLog2);
       CHECK_EQ(access.header_size % kPointerSize, 0);
 
-      if (!object->IsTracked()) return;
+      if (!object->IsTracked() ||
+          static_cast<size_t>(offset) >= object->field_count()) {
+        return;
+      }
+
       Node* value = object->GetField(offset);
       if (value) {
         value = ResolveReplacement(value);
@@ -1303,43 +1355,42 @@
       ElementAccess access = OpParameter<ElementAccess>(node);
       int offset = index.Value() + access.header_size / kPointerSize;
       ProcessLoadFromPhi(offset, from, node, state);
+    } else {
+      UpdateReplacement(state, node, nullptr);
     }
   } else {
     // We have a load from a non-const index, cannot eliminate object.
     if (SetEscaped(from)) {
-      if (FLAG_trace_turbo_escape) {
-        PrintF(
-            "Setting #%d (%s) to escaped because store element #%d to "
-            "non-const "
-            "index #%d (%s)\n",
-            from->id(), from->op()->mnemonic(), node->id(), index_node->id(),
-            index_node->op()->mnemonic());
-      }
+      TRACE(
+          "Setting #%d (%s) to escaped because load element #%d from non-const "
+          "index #%d (%s)\n",
+          from->id(), from->op()->mnemonic(), node->id(), index_node->id(),
+          index_node->op()->mnemonic());
     }
   }
 }
 
-
 void EscapeAnalysis::ProcessStoreField(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kStoreField);
   ForwardVirtualState(node);
-  Node* to = NodeProperties::GetValueInput(node, 0);
-  Node* val = NodeProperties::GetValueInput(node, 1);
+  Node* to = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
   VirtualState* state = virtual_states_[node->id()];
-  if (VirtualObject* obj = ResolveVirtualObject(state, to)) {
-    if (!obj->IsTracked()) return;
-    int offset = OffsetFromAccess(node);
-    if (obj->SetField(offset, ResolveReplacement(val))) {
-      state->LastChangedAt(node);
+  VirtualObject* obj = GetVirtualObject(state, to);
+  int offset = OffsetFromAccess(node);
+  if (obj && obj->IsTracked() &&
+      static_cast<size_t>(offset) < obj->field_count()) {
+    Node* val = ResolveReplacement(NodeProperties::GetValueInput(node, 1));
+    if (obj->GetField(offset) != val) {
+      obj = CopyForModificationAt(obj, state, node);
+      obj->SetField(offset, val);
     }
   }
 }
 
-
 void EscapeAnalysis::ProcessStoreElement(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kStoreElement);
   ForwardVirtualState(node);
-  Node* to = NodeProperties::GetValueInput(node, 0);
+  Node* to = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
   Node* index_node = node->InputAt(1);
   NumberMatcher index(index_node);
   DCHECK(index_node->opcode() != IrOpcode::kInt32Constant &&
@@ -1347,41 +1398,47 @@
          index_node->opcode() != IrOpcode::kFloat32Constant &&
          index_node->opcode() != IrOpcode::kFloat64Constant);
   ElementAccess access = OpParameter<ElementAccess>(node);
-  Node* val = NodeProperties::GetValueInput(node, 2);
+  VirtualState* state = virtual_states_[node->id()];
+  VirtualObject* obj = GetVirtualObject(state, to);
   if (index.HasValue()) {
     int offset = index.Value() + access.header_size / kPointerSize;
-    VirtualState* states = virtual_states_[node->id()];
-    if (VirtualObject* obj = ResolveVirtualObject(states, to)) {
-      if (!obj->IsTracked()) return;
+    if (obj && obj->IsTracked() &&
+        static_cast<size_t>(offset) < obj->field_count()) {
       CHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
                kPointerSizeLog2);
       CHECK_EQ(access.header_size % kPointerSize, 0);
-      if (obj->SetField(offset, ResolveReplacement(val))) {
-        states->LastChangedAt(node);
+      Node* val = ResolveReplacement(NodeProperties::GetValueInput(node, 2));
+      if (obj->GetField(offset) != val) {
+        obj = CopyForModificationAt(obj, state, node);
+        obj->SetField(offset, val);
       }
     }
   } else {
     // We have a store to a non-const index, cannot eliminate object.
     if (SetEscaped(to)) {
-      if (FLAG_trace_turbo_escape) {
-        PrintF(
-            "Setting #%d (%s) to escaped because store element #%d to "
-            "non-const "
-            "index #%d (%s)\n",
-            to->id(), to->op()->mnemonic(), node->id(), index_node->id(),
-            index_node->op()->mnemonic());
+      TRACE(
+          "Setting #%d (%s) to escaped because store element #%d to non-const "
+          "index #%d (%s)\n",
+          to->id(), to->op()->mnemonic(), node->id(), index_node->id(),
+          index_node->op()->mnemonic());
+    }
+    if (obj && obj->IsTracked()) {
+      if (!obj->AllFieldsClear()) {
+        obj = CopyForModificationAt(obj, state, node);
+        obj->ClearAllFields();
+        TRACE("Cleared all fields of @%d:#%d\n", GetAlias(obj->id()),
+              obj->id());
       }
     }
   }
 }
 
-
 Node* EscapeAnalysis::GetOrCreateObjectState(Node* effect, Node* node) {
   if ((node->opcode() == IrOpcode::kFinishRegion ||
        node->opcode() == IrOpcode::kAllocate) &&
       IsVirtual(node)) {
-    if (VirtualObject* vobj =
-            ResolveVirtualObject(virtual_states_[effect->id()], node)) {
+    if (VirtualObject* vobj = GetVirtualObject(virtual_states_[effect->id()],
+                                               ResolveReplacement(node))) {
       if (Node* object_state = vobj->GetObjectState()) {
         return object_state;
       } else {
@@ -1396,13 +1453,11 @@
             graph()->NewNode(common()->ObjectState(input_count, vobj->id()),
                              input_count, &cache_->fields().front());
         vobj->SetObjectState(new_object_state);
-        if (FLAG_trace_turbo_escape) {
-          PrintF(
-              "Creating object state #%d for vobj %p (from node #%d) at effect "
-              "#%d\n",
-              new_object_state->id(), static_cast<void*>(vobj), node->id(),
-              effect->id());
-        }
+        TRACE(
+            "Creating object state #%d for vobj %p (from node #%d) at effect "
+            "#%d\n",
+            new_object_state->id(), static_cast<void*>(vobj), node->id(),
+            effect->id());
         // Now fix uses of other objects.
         for (size_t i = 0; i < vobj->field_count(); ++i) {
           if (Node* field = vobj->GetField(i)) {
@@ -1420,7 +1475,6 @@
   return nullptr;
 }
 
-
 void EscapeAnalysis::DebugPrintObject(VirtualObject* object, Alias alias) {
   PrintF("  Alias @%d: Object #%d with %zu fields\n", alias, object->id(),
          object->field_count());
@@ -1431,9 +1485,8 @@
   }
 }
 
-
 void EscapeAnalysis::DebugPrintState(VirtualState* state) {
-  PrintF("Dumping object state %p\n", static_cast<void*>(state));
+  PrintF("Dumping virtual state %p\n", static_cast<void*>(state));
   for (Alias alias = 0; alias < AliasCount(); ++alias) {
     if (VirtualObject* object = state->VirtualObjectFromAlias(alias)) {
       DebugPrintObject(object, alias);
@@ -1441,7 +1494,6 @@
   }
 }
 
-
 void EscapeAnalysis::DebugPrint() {
   ZoneVector<VirtualState*> object_states(zone());
   for (NodeId id = 0; id < virtual_states_.size(); id++) {
@@ -1457,15 +1509,26 @@
   }
 }
 
-
 VirtualObject* EscapeAnalysis::GetVirtualObject(VirtualState* state,
                                                 Node* node) {
-  if (node->id() >= aliases_.size()) return nullptr;
-  Alias alias = aliases_[node->id()];
+  if (node->id() >= status_analysis_.GetAliasMap().size()) return nullptr;
+  Alias alias = GetAlias(node->id());
   if (alias >= state->size()) return nullptr;
   return state->VirtualObjectFromAlias(alias);
 }
 
+bool EscapeAnalysis::ExistsVirtualAllocate() {
+  for (size_t id = 0; id < status_analysis_.GetAliasMap().size(); ++id) {
+    Alias alias = GetAlias(static_cast<NodeId>(id));
+    if (alias < EscapeStatusAnalysis::kUntrackable) {
+      if (status_analysis_.IsVirtual(static_cast<int>(id))) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
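
A note on the CopyForModificationAt pair added above: virtual states are now
shared along the effect chain and only duplicated once a node actually mutates
a tracked object, which is why ForwardVirtualState merely marks states as
requiring a copy at effect branch points instead of copying them eagerly. A
minimal standalone sketch of this copy-on-write idea, using simplified
hypothetical types rather than the real VirtualState/VirtualObject classes:

#include <cstdio>
#include <memory>
#include <vector>

// Simplified stand-ins for VirtualObject / VirtualState (hypothetical types).
struct Obj {
  int owner_id;  // id of the node that made this copy
  std::vector<int> fields;
};

struct State {
  int owner_id;  // id of the node that owns (may mutate) this state
  std::vector<std::shared_ptr<Obj>> objects;
};

// Copy-on-write: clone the state only if another node owns it.
std::shared_ptr<State> CopyForModificationAt(std::shared_ptr<State> state,
                                             int node_id) {
  if (state->owner_id != node_id) {
    auto copy = std::make_shared<State>(*state);  // shares the Obj pointers
    copy->owner_id = node_id;
    return copy;
  }
  return state;
}

// Clone the object (and, if needed, the state) before mutating a field.
Obj* CopyObjectForModificationAt(std::shared_ptr<State>* state, size_t alias,
                                 int node_id) {
  *state = CopyForModificationAt(*state, node_id);
  std::shared_ptr<Obj>& slot = (*state)->objects[alias];
  if (slot->owner_id != node_id) {
    slot = std::make_shared<Obj>(*slot);
    slot->owner_id = node_id;
  }
  return slot.get();
}

int main() {
  auto s0 = std::make_shared<State>(
      State{0, {std::make_shared<Obj>(Obj{0, {1, 2}})}});
  auto s1 = s0;  // forwarded along an effect edge, still shared

  Obj* obj = CopyObjectForModificationAt(&s1, /*alias=*/0, /*node_id=*/7);
  obj->fields[0] = 42;  // s0's view of the object is unaffected

  std::printf("s0: %d  s1: %d\n", s0->objects[0]->fields[0], obj->fields[0]);
  return 0;
}
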
diff --git a/src/compiler/escape-analysis.h b/src/compiler/escape-analysis.h
index ea7b11e..c3f236d 100644
--- a/src/compiler/escape-analysis.h
+++ b/src/compiler/escape-analysis.h
@@ -18,34 +18,63 @@
 class VirtualState;
 class VirtualObject;
 
-
 // EscapeStatusAnalysis determines for each allocation whether it escapes.
 class EscapeStatusAnalysis {
  public:
+  typedef NodeId Alias;
   ~EscapeStatusAnalysis();
 
-  enum EscapeStatusFlag {
+  enum Status {
     kUnknown = 0u,
     kTracked = 1u << 0,
     kEscaped = 1u << 1,
     kOnStack = 1u << 2,
     kVisited = 1u << 3,
+    // A node is dangling if it is a load of some kind and does not have
+    // an effect successor.
+    kDanglingComputed = 1u << 4,
+    kDangling = 1u << 5,
+    // A node is an effect branch point if it has more than one non-dangling
+    // effect successor.
+    kBranchPointComputed = 1u << 6,
+    kBranchPoint = 1u << 7,
+    kInQueue = 1u << 8
   };
-  typedef base::Flags<EscapeStatusFlag, unsigned char> EscapeStatusFlags;
+  typedef base::Flags<Status, uint16_t> StatusFlags;
 
-  void Run();
+  void RunStatusAnalysis();
 
   bool IsVirtual(Node* node);
   bool IsEscaped(Node* node);
   bool IsAllocation(Node* node);
 
+  bool IsInQueue(NodeId id);
+  void SetInQueue(NodeId id, bool on_stack);
+
   void DebugPrint();
 
-  friend class EscapeAnalysis;
-
- private:
   EscapeStatusAnalysis(EscapeAnalysis* object_analysis, Graph* graph,
                        Zone* zone);
+  void EnqueueForStatusAnalysis(Node* node);
+  bool SetEscaped(Node* node);
+  bool IsEffectBranchPoint(Node* node);
+  bool IsDanglingEffectNode(Node* node);
+  void ResizeStatusVector();
+  size_t GetStatusVectorSize();
+  bool IsVirtual(NodeId id);
+
+  Graph* graph() const { return graph_; }
+  Zone* zone() const { return zone_; }
+  void AssignAliases();
+  Alias GetAlias(NodeId id) const { return aliases_[id]; }
+  const ZoneVector<Alias>& GetAliasMap() const { return aliases_; }
+  Alias AliasCount() const { return next_free_alias_; }
+  static const Alias kNotReachable;
+  static const Alias kUntrackable;
+
+  bool IsNotReachable(Node* node);
+
+ private:
   void Process(Node* node);
   void ProcessAllocate(Node* node);
   void ProcessFinishRegion(Node* node);
@@ -57,38 +86,35 @@
   bool CheckUsesForEscape(Node* node, Node* rep, bool phi_escaping = false);
   void RevisitUses(Node* node);
   void RevisitInputs(Node* node);
-  bool SetEscaped(Node* node);
+
+  Alias NextAlias() { return next_free_alias_++; }
+
   bool HasEntry(Node* node);
-  void Resize();
-  size_t size();
+
   bool IsAllocationPhi(Node* node);
 
-  Graph* graph() const { return graph_; }
-  Zone* zone() const { return zone_; }
-
+  ZoneVector<Node*> stack_;
   EscapeAnalysis* object_analysis_;
   Graph* const graph_;
   Zone* const zone_;
-  ZoneVector<EscapeStatusFlags> status_;
-  ZoneDeque<Node*> queue_;
+  ZoneVector<StatusFlags> status_;
+  Alias next_free_alias_;
+  ZoneVector<Node*> status_stack_;
+  ZoneVector<Alias> aliases_;
 
   DISALLOW_COPY_AND_ASSIGN(EscapeStatusAnalysis);
 };
 
-
-DEFINE_OPERATORS_FOR_FLAGS(EscapeStatusAnalysis::EscapeStatusFlags)
-
+DEFINE_OPERATORS_FOR_FLAGS(EscapeStatusAnalysis::StatusFlags)
 
 // Forward Declaration.
 class MergeCache;
 
-
 // EscapeAnalysis simulates stores to determine values of loads if
 // an object is virtual and eliminated.
 class EscapeAnalysis {
  public:
-  typedef NodeId Alias;
-
+  using Alias = EscapeStatusAnalysis::Alias;
   EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common, Zone* zone);
   ~EscapeAnalysis();
 
@@ -99,10 +125,10 @@
   bool IsEscaped(Node* node);
   bool CompareVirtualObjects(Node* left, Node* right);
   Node* GetOrCreateObjectState(Node* effect, Node* node);
+  bool ExistsVirtualAllocate();
 
  private:
   void RunObjectAnalysis();
-  void AssignAliases();
   bool Process(Node* node);
   void ProcessLoadField(Node* node);
   void ProcessStoreField(Node* node);
@@ -118,13 +144,11 @@
                           VirtualState* states);
 
   void ForwardVirtualState(Node* node);
-  bool IsEffectBranchPoint(Node* node);
-  bool IsDanglingEffectNode(Node* node);
   int OffsetFromAccess(Node* node);
-
+  VirtualState* CopyForModificationAt(VirtualState* state, Node* node);
+  VirtualObject* CopyForModificationAt(VirtualObject* obj, VirtualState* state,
+                                       Node* node);
   VirtualObject* GetVirtualObject(Node* at, NodeId id);
-  VirtualObject* ResolveVirtualObject(VirtualState* state, Node* node);
-  Node* GetReplacementIfSame(ZoneVector<VirtualObject*>& objs);
 
   bool SetEscaped(Node* node);
   Node* replacement(NodeId id);
@@ -140,24 +164,26 @@
   void DebugPrintState(VirtualState* state);
   void DebugPrintObject(VirtualObject* state, Alias id);
 
-  Alias NextAlias() { return next_free_alias_++; }
-  Alias AliasCount() const { return next_free_alias_; }
-
-  Graph* graph() const { return graph_; }
+  Graph* graph() const { return status_analysis_.graph(); }
+  Zone* zone() const { return status_analysis_.zone(); }
   CommonOperatorBuilder* common() const { return common_; }
-  Zone* zone() const { return zone_; }
+  bool IsEffectBranchPoint(Node* node) {
+    return status_analysis_.IsEffectBranchPoint(node);
+  }
+  bool IsDanglingEffectNode(Node* node) {
+    return status_analysis_.IsDanglingEffectNode(node);
+  }
+  bool IsNotReachable(Node* node) {
+    return status_analysis_.IsNotReachable(node);
+  }
+  Alias GetAlias(NodeId id) const { return status_analysis_.GetAlias(id); }
+  Alias AliasCount() const { return status_analysis_.AliasCount(); }
 
-  static const Alias kNotReachable;
-  static const Alias kUntrackable;
-  Graph* const graph_;
+  EscapeStatusAnalysis status_analysis_;
   CommonOperatorBuilder* const common_;
-  Zone* const zone_;
   ZoneVector<VirtualState*> virtual_states_;
   ZoneVector<Node*> replacements_;
-  EscapeStatusAnalysis escape_status_;
   MergeCache* cache_;
-  ZoneVector<Alias> aliases_;
-  Alias next_free_alias_;
 
   DISALLOW_COPY_AND_ASSIGN(EscapeAnalysis);
 };
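
The kDanglingComputed/kDangling and kBranchPointComputed/kBranchPoint pairs
above let IsDanglingEffectNode and IsEffectBranchPoint cache their answers in
the per-node status word: one bit records that the predicate has been
evaluated, the other records the result. A small standalone sketch of that
memoization pattern (hypothetical code, not the classes above):

#include <cstdint>
#include <cstdio>
#include <vector>

// Per-node status bits, mirroring the "computed"/"value" pairs above.
enum Status : uint16_t {
  kUnknown = 0u,
  kDanglingComputed = 1u << 4,
  kDangling = 1u << 5,
};

struct Analysis {
  std::vector<uint16_t> status_;

  // Stand-in for the expensive check; the real one walks effect use edges.
  bool ComputeDangling(int id) { return id % 2 == 0; }

  bool IsDangling(int id) {
    if (status_[id] & kDanglingComputed) {
      return (status_[id] & kDangling) != 0;  // cached answer
    }
    bool result = ComputeDangling(id);
    status_[id] |= kDanglingComputed | (result ? kDangling : kUnknown);
    return result;
  }
};

int main() {
  Analysis a{std::vector<uint16_t>(4, kUnknown)};
  std::printf("%d %d\n", a.IsDangling(2), a.IsDangling(3));  // prints "1 0"
  return 0;
}
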
diff --git a/src/compiler/fast-accessor-assembler.cc b/src/compiler/fast-accessor-assembler.cc
index 09d513f..518003b 100644
--- a/src/compiler/fast-accessor-assembler.cc
+++ b/src/compiler/fast-accessor-assembler.cc
@@ -5,6 +5,7 @@
 #include "src/compiler/fast-accessor-assembler.h"
 
 #include "src/base/logging.h"
+#include "src/code-stubs.h"  // For CallApiFunctionStub.
 #include "src/compiler/graph.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/pipeline.h"
@@ -166,6 +167,46 @@
   assembler_->Bind(&pass);
 }
 
+FastAccessorAssembler::ValueId FastAccessorAssembler::Call(
+    FunctionCallback callback_function, ValueId arg) {
+  CHECK_EQ(kBuilding, state_);
+
+  // Create API function stub.
+  CallApiFunctionStub stub(assembler_->isolate(), true);
+
+  // Wrap the FunctionCallback in an ExternalReference.
+  ApiFunction callback_api_function(FUNCTION_ADDR(callback_function));
+  ExternalReference callback(&callback_api_function,
+                             ExternalReference::DIRECT_API_CALL,
+                             assembler_->isolate());
+
+  // The stub has 5 parameters, and kStackParam (here: 1) parameters to pass
+  // through to the callback.
+  // See: ApiFunctionDescriptor::BuildCallInterfaceDescriptorFunctionType
+  static const int kStackParam = 1;
+  Node* args[] = {
+      // Stub/register parameters:
+      assembler_->Parameter(0),                /* receiver (use accessor's) */
+      assembler_->UndefinedConstant(),         /* call_data (undefined) */
+      assembler_->NullConstant(),              /* holder (null) */
+      assembler_->ExternalConstant(callback),  /* API callback function */
+      assembler_->IntPtrConstant(kStackParam), /* # JS arguments */
+
+      // kStackParam stack parameter(s):
+      FromId(arg),
+
+      // Context parameter. (See Linkage::GetStubCallDescriptor.)
+      assembler_->UndefinedConstant()};
+  CHECK_EQ(5 + kStackParam + 1, arraysize(args));
+
+  Node* call = assembler_->CallN(
+      Linkage::GetStubCallDescriptor(
+          assembler_->isolate(), zone(), stub.GetCallInterfaceDescriptor(),
+          kStackParam + stub.GetStackParameterCount(),
+          CallDescriptor::kNoFlags),
+      assembler_->HeapConstant(stub.GetCode()), args);
+  return FromRaw(call);
+}
 
 MaybeHandle<Code> FastAccessorAssembler::Build() {
   CHECK_EQ(kBuilding, state_);
@@ -176,9 +217,10 @@
 
   // Export the schedule and call the compiler.
   Schedule* schedule = assembler_->Export();
+  Code::Flags flags = Code::ComputeFlags(Code::STUB);
   MaybeHandle<Code> code = Pipeline::GenerateCodeForCodeStub(
       assembler_->isolate(), assembler_->call_descriptor(), assembler_->graph(),
-      schedule, Code::STUB, "FastAccessorAssembler");
+      schedule, flags, "FastAccessorAssembler");
 
   // Update state & return.
   state_ = !code.is_null() ? kBuilt : kError;
diff --git a/src/compiler/fast-accessor-assembler.h b/src/compiler/fast-accessor-assembler.h
index a9df3f0..1cb751d 100644
--- a/src/compiler/fast-accessor-assembler.h
+++ b/src/compiler/fast-accessor-assembler.h
@@ -48,6 +48,7 @@
  public:
   typedef v8::experimental::FastAccessorBuilder::ValueId ValueId;
   typedef v8::experimental::FastAccessorBuilder::LabelId LabelId;
+  typedef v8::FunctionCallback FunctionCallback;
 
   explicit FastAccessorAssembler(Isolate* isolate);
   ~FastAccessorAssembler();
@@ -63,15 +64,13 @@
   void ReturnValue(ValueId value_id);
   void CheckFlagSetOrReturnNull(ValueId value_id, int mask);
   void CheckNotZeroOrReturnNull(ValueId value_id);
-
-  // TODO(vogelheim): Implement a C++ callback.
-  //  void CheckNotNullOrCallback(ValueId value_id, ..c++-callback type...,
-  //     ValueId arg1, ValueId arg2, ...);
-
   LabelId MakeLabel();
   void SetLabel(LabelId label_id);
   void CheckNotZeroOrJump(ValueId value_id, LabelId label_id);
 
+  // C++ callback.
+  ValueId Call(FunctionCallback callback, ValueId arg);
+
   // Assemble the code.
   MaybeHandle<Code> Build();
 
diff --git a/src/compiler/frame-states.h b/src/compiler/frame-states.h
index ddb55c3..60ff9b5 100644
--- a/src/compiler/frame-states.h
+++ b/src/compiler/frame-states.h
@@ -83,31 +83,20 @@
 };
 
 
-enum ContextCallingMode {
-  CALL_MAINTAINS_NATIVE_CONTEXT,
-  CALL_CHANGES_NATIVE_CONTEXT
-};
-
-
 class FrameStateFunctionInfo {
  public:
   FrameStateFunctionInfo(FrameStateType type, int parameter_count,
                          int local_count,
-                         Handle<SharedFunctionInfo> shared_info,
-                         ContextCallingMode context_calling_mode)
+                         Handle<SharedFunctionInfo> shared_info)
       : type_(type),
         parameter_count_(parameter_count),
         local_count_(local_count),
-        shared_info_(shared_info),
-        context_calling_mode_(context_calling_mode) {}
+        shared_info_(shared_info) {}
 
   int local_count() const { return local_count_; }
   int parameter_count() const { return parameter_count_; }
   Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
   FrameStateType type() const { return type_; }
-  ContextCallingMode context_calling_mode() const {
-    return context_calling_mode_;
-  }
 
   static bool IsJSFunctionType(FrameStateType type) {
     return type == FrameStateType::kJavaScriptFunction ||
@@ -119,7 +108,6 @@
   int const parameter_count_;
   int const local_count_;
   Handle<SharedFunctionInfo> const shared_info_;
-  ContextCallingMode context_calling_mode_;
 };
 
 
diff --git a/src/compiler/frame.h b/src/compiler/frame.h
index 72f756b..011a0f0 100644
--- a/src/compiler/frame.h
+++ b/src/compiler/frame.h
@@ -34,19 +34,10 @@
 //   determined after register allocation once the number of used callee-saved
 //   register is certain.
 //
-// Every pointer in a frame has a slot id. On 32-bit platforms, doubles consume
-// two slots.
-//
-// Stack slot indices >= 0 access the callee stack with slot 0 corresponding to
-// the callee's saved return address and 1 corresponding to the saved frame
-// pointer. Some frames have additional information stored in the fixed header,
-// for example JSFunctions store the function context and marker in the fixed
-// header, with slot index 2 corresponding to the current function context and 3
-// corresponding to the frame marker/JSFunction. The frame region immediately
-// below the fixed header contains spill slots starting at 4 for JsFunctions.
-// The callee-saved frame region below that starts at 4+spill_slot_count_.
-// Callee stack slots corresponding to parameters are accessible through
-// negative slot ids.
+// The frame region immediately below the fixed header contains spill slots
+// starting at slot 4 for JSFunctions.  The callee-saved frame region below that
+// starts at 4+spill_slot_count_.  Callee stack slots corresponding to
+// parameters are accessible through negative slot ids.
 //
 // Every slot of a caller or callee frame is accessible by the register
 // allocator and gap resolver with a SpillSlotOperand containing its
@@ -76,13 +67,13 @@
 //       |- - - - - - - - -|   |                   frame slots
 //  ...  |      ...        | Spill slots           (slot >= 0)
 //       |- - - - - - - - -|   |                        |
-//  m+4  |    spill m      |   v                        |
+//  m+3  |    spill m      |   v                        |
 //       +-----------------+----                        |
-//  m+5  |  callee-saved 1 |   ^                        |
+//  m+4  |  callee-saved 1 |   ^                        |
 //       |- - - - - - - - -|   |                        |
 //       |      ...        | Callee-saved               |
 //       |- - - - - - - - -|   |                        |
-// m+r+4 |  callee-saved r |   v                        v
+// m+r+3 |  callee-saved r |   v                        v
 //  -----+-----------------+----- <-- stack ptr -------------
 //
 class Frame : public ZoneObject {
@@ -90,16 +81,6 @@
   explicit Frame(int fixed_frame_size_in_slots,
                  const CallDescriptor* descriptor);
 
-  static int FPOffsetToSlot(int frame_offset) {
-    return StandardFrameConstants::kFixedSlotCountAboveFp - 1 -
-           frame_offset / kPointerSize;
-  }
-
-  static int SlotToFPOffset(int slot) {
-    return (StandardFrameConstants::kFixedSlotCountAboveFp - 1 - slot) *
-           kPointerSize;
-  }
-
   inline bool needs_frame() const { return needs_frame_; }
   inline void MarkNeedsFrame() { needs_frame_ = true; }
 
diff --git a/src/compiler/graph-trimmer.cc b/src/compiler/graph-trimmer.cc
index 5fae425..75071c6 100644
--- a/src/compiler/graph-trimmer.cc
+++ b/src/compiler/graph-trimmer.cc
@@ -24,7 +24,8 @@
   MarkAsLive(graph()->end());
   // Compute transitive closure of live nodes.
   for (size_t i = 0; i < live_.size(); ++i) {
-    for (Node* const input : live_[i]->inputs()) MarkAsLive(input);
+    Node* const live = live_[i];
+    for (Node* const input : live->inputs()) MarkAsLive(input);
   }
   // Remove dead->live edges.
   for (Node* const live : live_) {
diff --git a/src/compiler/graph-trimmer.h b/src/compiler/graph-trimmer.h
index d8258be..98d335a 100644
--- a/src/compiler/graph-trimmer.h
+++ b/src/compiler/graph-trimmer.h
@@ -28,14 +28,18 @@
   // or any of the roots in the sequence [{begin},{end}[.
   template <typename ForwardIterator>
   void TrimGraph(ForwardIterator begin, ForwardIterator end) {
-    while (begin != end) MarkAsLive(*begin++);
+    while (begin != end) {
+      Node* const node = *begin++;
+      if (!node->IsDead()) MarkAsLive(node);
+    }
     TrimGraph();
   }
 
  private:
   V8_INLINE bool IsLive(Node* const node) { return is_live_.Get(node); }
   V8_INLINE void MarkAsLive(Node* const node) {
-    if (!node->IsDead() && !IsLive(node)) {
+    DCHECK(!node->IsDead());
+    if (!IsLive(node)) {
       is_live_.Set(node, true);
       live_.push_back(node);
     }
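
GraphTrimmer computes the live set as the transitive closure over input edges
starting from the end node (plus any explicitly supplied roots, with dead
roots now filtered out before marking), and live_ doubles as the worklist. A
toy standalone sketch of that marking phase over a hypothetical
adjacency-list graph:

#include <cstdio>
#include <vector>

// Toy graph: node i lists the indices of its inputs (hypothetical structure).
struct ToyGraph {
  std::vector<std::vector<int>> inputs;
};

// Mark everything transitively reachable from `root` through input edges.
// The live vector doubles as the worklist, as in GraphTrimmer::TrimGraph.
std::vector<bool> MarkLive(const ToyGraph& g, int root) {
  std::vector<bool> is_live(g.inputs.size(), false);
  std::vector<int> live;
  is_live[root] = true;
  live.push_back(root);
  for (size_t i = 0; i < live.size(); ++i) {  // live grows while we iterate
    for (int input : g.inputs[live[i]]) {
      if (!is_live[input]) {
        is_live[input] = true;
        live.push_back(input);
      }
    }
  }
  return is_live;
}

int main() {
  // 0 <- 1 <- 3 (end); node 2 is unreachable from 3 and would be trimmed.
  ToyGraph g{{{}, {0}, {0}, {1}}};
  std::vector<bool> live = MarkLive(g, /*root=*/3);
  for (size_t i = 0; i < live.size(); ++i) {
    std::printf("node %zu live: %d\n", i, static_cast<int>(live[i]));
  }
  return 0;
}
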
diff --git a/src/compiler/graph.cc b/src/compiler/graph.cc
index 3d4d6da..ba69617 100644
--- a/src/compiler/graph.cc
+++ b/src/compiler/graph.cc
@@ -42,17 +42,15 @@
   decorators_.erase(it);
 }
 
-
-Node* Graph::NewNode(const Operator* op, int input_count, Node** inputs,
+Node* Graph::NewNode(const Operator* op, int input_count, Node* const* inputs,
                      bool incomplete) {
   Node* node = NewNodeUnchecked(op, input_count, inputs, incomplete);
   Verifier::VerifyNode(node);
   return node;
 }
 
-
 Node* Graph::NewNodeUnchecked(const Operator* op, int input_count,
-                              Node** inputs, bool incomplete) {
+                              Node* const* inputs, bool incomplete) {
   Node* const node =
       Node::New(zone(), NextNodeId(), op, input_count, inputs, incomplete);
   Decorate(node);
diff --git a/src/compiler/graph.h b/src/compiler/graph.h
index b53c7fd..958a15d 100644
--- a/src/compiler/graph.h
+++ b/src/compiler/graph.h
@@ -34,16 +34,16 @@
   explicit Graph(Zone* zone);
 
   // Base implementation used by all factory methods.
-  Node* NewNodeUnchecked(const Operator* op, int input_count, Node** inputs,
-                         bool incomplete = false);
+  Node* NewNodeUnchecked(const Operator* op, int input_count,
+                         Node* const* inputs, bool incomplete = false);
 
   // Factory that checks the input count.
-  Node* NewNode(const Operator* op, int input_count, Node** inputs,
+  Node* NewNode(const Operator* op, int input_count, Node* const* inputs,
                 bool incomplete = false);
 
   // Factories for nodes with static input counts.
   Node* NewNode(const Operator* op) {
-    return NewNode(op, 0, static_cast<Node**>(nullptr));
+    return NewNode(op, 0, static_cast<Node* const*>(nullptr));
   }
   Node* NewNode(const Operator* op, Node* n1) { return NewNode(op, 1, &n1); }
   Node* NewNode(const Operator* op, Node* n1, Node* n2) {
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
index f63bc22..1f61af8 100644
--- a/src/compiler/ia32/code-generator-ia32.cc
+++ b/src/compiler/ia32/code-generator-ia32.cc
@@ -9,6 +9,7 @@
 #include "src/compiler/gap-resolver.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/osr.h"
+#include "src/frames.h"
 #include "src/ia32/assembler-ia32.h"
 #include "src/ia32/frames-ia32.h"
 #include "src/ia32/macro-assembler-ia32.h"
@@ -56,7 +57,7 @@
 
   Operand ToMaterializableOperand(int materializable_offset) {
     FrameOffset offset = frame_access_state()->GetFrameOffset(
-        Frame::FPOffsetToSlot(materializable_offset));
+        FPOffsetToFrameSlot(materializable_offset));
     return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
   }
 
@@ -241,15 +242,16 @@
     if (mode_ > RecordWriteMode::kValueIsPointer) {
       __ JumpIfSmi(value_, exit());
     }
-    if (mode_ > RecordWriteMode::kValueIsMap) {
-      __ CheckPageFlag(value_, scratch0_,
-                       MemoryChunk::kPointersToHereAreInterestingMask, zero,
-                       exit());
-    }
+    __ CheckPageFlag(value_, scratch0_,
+                     MemoryChunk::kPointersToHereAreInterestingMask, zero,
+                     exit());
+    RememberedSetAction const remembered_set_action =
+        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+                                             : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
     RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
-                         EMIT_REMEMBERED_SET, save_fp_mode);
+                         remembered_set_action, save_fp_mode);
     __ lea(scratch1_, operand_);
     __ CallStub(&stub);
   }
@@ -413,11 +415,6 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
-    case kArchLazyBailout: {
-      EnsureSpaceForLazyDeopt();
-      RecordCallPosition(instr);
-      break;
-    }
     case kArchPrepareCallCFunction: {
       // Frame alignment requires using FP-relative frame addressing.
       frame_access_state()->SetFrameAccessToFP();
@@ -471,6 +468,13 @@
     case kArchFramePointer:
       __ mov(i.OutputRegister(), ebp);
       break;
+    case kArchParentFramePointer:
+      if (frame_access_state()->frame()->needs_frame()) {
+        __ mov(i.OutputRegister(), Operand(ebp, 0));
+      } else {
+        __ mov(i.OutputRegister(), ebp);
+      }
+      break;
     case kArchTruncateDoubleToI: {
       auto result = i.OutputRegister();
       auto input = i.InputDoubleRegister(0);
@@ -499,6 +503,18 @@
       __ bind(ool->exit());
       break;
     }
+    case kArchStackSlot: {
+      FrameOffset offset =
+          frame_access_state()->GetFrameOffset(i.InputInt32(0));
+      Register base;
+      if (offset.from_stack_pointer()) {
+        base = esp;
+      } else {
+        base = ebp;
+      }
+      __ lea(i.OutputRegister(), Operand(base, offset.offset()));
+      break;
+    }
     case kIA32Add:
       if (HasImmediateInput(instr, 1)) {
         __ add(i.InputOperand(0), i.InputImmediate(1));
@@ -514,17 +530,37 @@
       }
       break;
     case kIA32Cmp:
-      if (HasImmediateInput(instr, 1)) {
-        __ cmp(i.InputOperand(0), i.InputImmediate(1));
+      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
+        size_t index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        if (HasImmediateInput(instr, index)) {
+          __ cmp(operand, i.InputImmediate(index));
+        } else {
+          __ cmp(operand, i.InputRegister(index));
+        }
       } else {
-        __ cmp(i.InputRegister(0), i.InputOperand(1));
+        if (HasImmediateInput(instr, 1)) {
+          __ cmp(i.InputOperand(0), i.InputImmediate(1));
+        } else {
+          __ cmp(i.InputRegister(0), i.InputOperand(1));
+        }
       }
       break;
     case kIA32Test:
-      if (HasImmediateInput(instr, 1)) {
-        __ test(i.InputOperand(0), i.InputImmediate(1));
+      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
+        size_t index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        if (HasImmediateInput(instr, index)) {
+          __ test(operand, i.InputImmediate(index));
+        } else {
+          __ test(i.InputRegister(index), operand);
+        }
       } else {
-        __ test(i.InputRegister(0), i.InputOperand(1));
+        if (HasImmediateInput(instr, 1)) {
+          __ test(i.InputOperand(0), i.InputImmediate(1));
+        } else {
+          __ test(i.InputRegister(0), i.InputOperand(1));
+        }
       }
       break;
     case kIA32Imul:
@@ -739,6 +775,21 @@
     case kSSEFloat64ToFloat32:
       __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
       break;
+    case kSSEFloat32ToInt32:
+      __ cvttss2si(i.OutputRegister(), i.InputOperand(0));
+      break;
+    case kSSEFloat32ToUint32: {
+      Label success;
+      __ cvttss2si(i.OutputRegister(), i.InputOperand(0));
+      __ test(i.OutputRegister(), i.OutputRegister());
+      __ j(positive, &success);
+      __ Move(kScratchDoubleReg, static_cast<float>(INT32_MIN));
+      __ addss(kScratchDoubleReg, i.InputOperand(0));
+      __ cvttss2si(i.OutputRegister(), kScratchDoubleReg);
+      __ or_(i.OutputRegister(), Immediate(0x80000000));
+      __ bind(&success);
+      break;
+    }
     case kSSEFloat64ToInt32:
       __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
       break;
@@ -749,6 +800,16 @@
       __ add(i.OutputRegister(), Immediate(0x80000000));
       break;
     }
+    case kSSEInt32ToFloat32:
+      __ cvtsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
+      break;
+    case kSSEUint32ToFloat32: {
+      Register scratch0 = i.TempRegister(0);
+      Register scratch1 = i.TempRegister(1);
+      __ mov(scratch0, i.InputOperand(0));
+      __ Cvtui2ss(i.OutputDoubleRegister(), scratch0, scratch1);
+      break;
+    }
     case kSSEInt32ToFloat64:
       __ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
       break;
@@ -1441,8 +1502,6 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    // TODO(titzer): cannot address target function == local #-1
-    __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
     stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   }
 
diff --git a/src/compiler/ia32/instruction-codes-ia32.h b/src/compiler/ia32/instruction-codes-ia32.h
index 816487d..61fd035 100644
--- a/src/compiler/ia32/instruction-codes-ia32.h
+++ b/src/compiler/ia32/instruction-codes-ia32.h
@@ -58,8 +58,12 @@
   V(SSEFloat64Round)               \
   V(SSEFloat32ToFloat64)           \
   V(SSEFloat64ToFloat32)           \
+  V(SSEFloat32ToInt32)             \
+  V(SSEFloat32ToUint32)            \
   V(SSEFloat64ToInt32)             \
   V(SSEFloat64ToUint32)            \
+  V(SSEInt32ToFloat32)             \
+  V(SSEUint32ToFloat32)            \
   V(SSEInt32ToFloat64)             \
   V(SSEUint32ToFloat64)            \
   V(SSEFloat64ExtractLowWord32)    \
diff --git a/src/compiler/ia32/instruction-scheduler-ia32.cc b/src/compiler/ia32/instruction-scheduler-ia32.cc
index 0a8fcac..093bc22 100644
--- a/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -61,8 +61,12 @@
     case kSSEFloat64Round:
     case kSSEFloat32ToFloat64:
     case kSSEFloat64ToFloat32:
+    case kSSEFloat32ToInt32:
+    case kSSEFloat32ToUint32:
     case kSSEFloat64ToInt32:
     case kSSEFloat64ToUint32:
+    case kSSEInt32ToFloat32:
+    case kSSEUint32ToFloat32:
     case kSSEInt32ToFloat64:
     case kSSEUint32ToFloat64:
     case kSSEFloat64ExtractLowWord32:
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
index 0906452..f649ba9 100644
--- a/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/src/compiler/ia32/instruction-selector-ia32.cc
@@ -190,7 +190,8 @@
     case MachineRepresentation::kWord32:
       opcode = kIA32Movl;
       break;
-    case MachineRepresentation::kWord64:  // Fall through.
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -275,7 +276,8 @@
       case MachineRepresentation::kWord32:
         opcode = kIA32Movl;
         break;
-      case MachineRepresentation::kWord64:  // Fall through.
+      case MachineRepresentation::kWord64:   // Fall through.
+      case MachineRepresentation::kSimd128:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -327,9 +329,10 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedLoadFloat64;
       break;
-    case MachineRepresentation::kBit:     // Fall through.
-    case MachineRepresentation::kTagged:  // Fall through.
-    case MachineRepresentation::kWord64:  // Fall through.
+    case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kTagged:   // Fall through.
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -373,9 +376,10 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedStoreFloat64;
       break;
-    case MachineRepresentation::kBit:     // Fall through.
-    case MachineRepresentation::kTagged:  // Fall through.
-    case MachineRepresentation::kWord64:  // Fall through.
+    case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kTagged:   // Fall through.
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -508,9 +512,10 @@
 void VisitMulHigh(InstructionSelector* selector, Node* node,
                   ArchOpcode opcode) {
   IA32OperandGenerator g(selector);
-  selector->Emit(opcode, g.DefineAsFixed(node, edx),
-                 g.UseFixed(node->InputAt(0), eax),
-                 g.UseUniqueRegister(node->InputAt(1)));
+  InstructionOperand temps[] = {g.TempRegister(eax)};
+  selector->Emit(
+      opcode, g.DefineAsFixed(node, edx), g.UseFixed(node->InputAt(0), eax),
+      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
 }
 
 
@@ -525,9 +530,10 @@
 
 void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
   IA32OperandGenerator g(selector);
+  InstructionOperand temps[] = {g.TempRegister(eax)};
   selector->Emit(opcode, g.DefineAsFixed(node, edx),
                  g.UseFixed(node->InputAt(0), eax),
-                 g.UseUnique(node->InputAt(1)));
+                 g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
 }
 
 void EmitLea(InstructionSelector* selector, Node* result, Node* index,
@@ -591,6 +597,9 @@
 }
 
 
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+
+
 void InstructionSelector::VisitWord32Popcnt(Node* node) {
   IA32OperandGenerator g(this);
   Emit(kIA32Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -695,6 +704,19 @@
 }
 
 
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+  VisitRO(this, node, kSSEInt32ToFloat32);
+}
+
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+  IA32OperandGenerator g(this);
+  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+  Emit(kSSEUint32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
+       arraysize(temps), temps);
+}
+
+
 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
   VisitRO(this, node, kSSEInt32ToFloat64);
 }
@@ -705,6 +727,16 @@
 }
 
 
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+  VisitRO(this, node, kSSEFloat32ToInt32);
+}
+
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+  VisitRO(this, node, kSSEFloat32ToUint32);
+}
+
+
 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
   VisitRO(this, node, kSSEFloat64ToInt32);
 }
@@ -958,6 +990,46 @@
 
 namespace {
 
+void VisitCompareWithMemoryOperand(InstructionSelector* selector,
+                                   InstructionCode opcode, Node* left,
+                                   InstructionOperand right,
+                                   FlagsContinuation* cont) {
+  DCHECK(left->opcode() == IrOpcode::kLoad);
+  IA32OperandGenerator g(selector);
+  size_t input_count = 0;
+  InstructionOperand inputs[6];
+  AddressingMode addressing_mode =
+      g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
+  opcode |= AddressingModeField::encode(addressing_mode);
+  opcode = cont->Encode(opcode);
+  inputs[input_count++] = right;
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+    selector->Emit(opcode, 0, nullptr, input_count, inputs);
+  } else {
+    DCHECK(cont->IsSet());
+    InstructionOperand output = g.DefineAsRegister(cont->result());
+    selector->Emit(opcode, 1, &output, input_count, inputs);
+  }
+}
+
+// Determines if {input} of {node} can be replaced by a memory operand.
+bool CanUseMemoryOperand(InstructionSelector* selector, InstructionCode opcode,
+                         Node* node, Node* input) {
+  if (input->opcode() != IrOpcode::kLoad || !selector->CanCover(node, input)) {
+    return false;
+  }
+  MachineRepresentation load_representation =
+      LoadRepresentationOf(input->op()).representation();
+  if (load_representation == MachineRepresentation::kWord32 ||
+      load_representation == MachineRepresentation::kTagged) {
+    return opcode == kIA32Cmp || opcode == kIA32Test;
+  }
+  return false;
+}
+
 // Shared routine for multiple compare operations.
 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                   InstructionOperand left, InstructionOperand right,
@@ -1003,26 +1075,41 @@
   VisitCompare(selector, kSSEFloat64Cmp, right, left, cont, false);
 }
 
-
 // Shared routine for multiple word compare operations.
 void VisitWordCompare(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
   IA32OperandGenerator g(selector);
-  Node* const left = node->InputAt(0);
-  Node* const right = node->InputAt(1);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
 
-  // Match immediates on left or right side of comparison.
-  if (g.CanBeImmediate(right)) {
-    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
-  } else if (g.CanBeImmediate(left)) {
+  // If one of the two inputs is an immediate, make sure it's on the right.
+  if (!g.CanBeImmediate(right) && g.CanBeImmediate(left)) {
     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
-    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
-  } else {
-    VisitCompare(selector, opcode, left, right, cont,
-                 node->op()->HasProperty(Operator::kCommutative));
+    std::swap(left, right);
   }
-}
 
+  // Match immediates on right side of comparison.
+  if (g.CanBeImmediate(right)) {
+    if (CanUseMemoryOperand(selector, opcode, node, left)) {
+      return VisitCompareWithMemoryOperand(selector, opcode, left,
+                                           g.UseImmediate(right), cont);
+    }
+    return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
+                        cont);
+  }
+
+  if (g.CanBeBetterLeftOperand(right)) {
+    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+    std::swap(left, right);
+  }
+
+  if (CanUseMemoryOperand(selector, opcode, node, left)) {
+    return VisitCompareWithMemoryOperand(selector, opcode, left,
+                                         g.UseRegister(right), cont);
+  }
+  return VisitCompare(selector, opcode, left, right, cont,
+                      node->op()->HasProperty(Operator::kCommutative));
+}
 
 void VisitWordCompare(InstructionSelector* selector, Node* node,
                       FlagsContinuation* cont) {
diff --git a/src/compiler/instruction-codes.h b/src/compiler/instruction-codes.h
index 6c31ac8..d2144cf 100644
--- a/src/compiler/instruction-codes.h
+++ b/src/compiler/instruction-codes.h
@@ -47,7 +47,6 @@
   V(ArchPrepareCallCFunction)      \
   V(ArchCallCFunction)             \
   V(ArchPrepareTailCall)           \
-  V(ArchLazyBailout)               \
   V(ArchJmp)                       \
   V(ArchLookupSwitch)              \
   V(ArchTableSwitch)               \
@@ -57,6 +56,7 @@
   V(ArchRet)                       \
   V(ArchStackPointer)              \
   V(ArchFramePointer)              \
+  V(ArchParentFramePointer)        \
   V(ArchTruncateDoubleToI)         \
   V(ArchStoreWithWriteBarrier)     \
   V(CheckedLoadInt8)               \
@@ -72,7 +72,8 @@
   V(CheckedStoreWord32)            \
   V(CheckedStoreWord64)            \
   V(CheckedStoreFloat32)           \
-  V(CheckedStoreFloat64)
+  V(CheckedStoreFloat64)           \
+  V(ArchStackSlot)
 
 #define ARCH_OPCODE_LIST(V)  \
   COMMON_ARCH_OPCODE_LIST(V) \
diff --git a/src/compiler/instruction-scheduler.cc b/src/compiler/instruction-scheduler.cc
index 2f329ea..adbfd5d 100644
--- a/src/compiler/instruction-scheduler.cc
+++ b/src/compiler/instruction-scheduler.cc
@@ -5,11 +5,57 @@
 #include "src/compiler/instruction-scheduler.h"
 
 #include "src/base/adapters.h"
+#include "src/base/utils/random-number-generator.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
+// Compare the two nodes and return true if node1 is a better candidate than
+// node2 (i.e. node1 should be scheduled before node2).
+bool InstructionScheduler::CriticalPathFirstQueue::CompareNodes(
+    ScheduleGraphNode *node1, ScheduleGraphNode *node2) const {
+  return node1->total_latency() > node2->total_latency();
+}
+
+
+InstructionScheduler::ScheduleGraphNode*
+InstructionScheduler::CriticalPathFirstQueue::PopBestCandidate(int cycle) {
+  DCHECK(!IsEmpty());
+  auto candidate = nodes_.end();
+  for (auto iterator = nodes_.begin(); iterator != nodes_.end(); ++iterator) {
+    // We only consider instructions that have all their operands ready and
+    // we try to schedule the critical path first.
+    if (cycle >= (*iterator)->start_cycle()) {
+      if ((candidate == nodes_.end()) || CompareNodes(*iterator, *candidate)) {
+        candidate = iterator;
+      }
+    }
+  }
+
+  if (candidate != nodes_.end()) {
+    ScheduleGraphNode *result = *candidate;
+    nodes_.erase(candidate);
+    return result;
+  }
+
+  return nullptr;
+}
+
+
+InstructionScheduler::ScheduleGraphNode*
+InstructionScheduler::StressSchedulerQueue::PopBestCandidate(int cycle) {
+  DCHECK(!IsEmpty());
+  // Choose a random element from the ready list.
+  auto candidate = nodes_.begin();
+  std::advance(candidate, isolate()->random_number_generator()->NextInt(
+      static_cast<int>(nodes_.size())));
+  ScheduleGraphNode *result = *candidate;
+  nodes_.erase(candidate);
+  return result;
+}
+
+
 InstructionScheduler::ScheduleGraphNode::ScheduleGraphNode(
     Zone* zone,
     Instruction* instr)
@@ -50,7 +96,11 @@
 
 
 void InstructionScheduler::EndBlock(RpoNumber rpo) {
-  ScheduleBlock();
+  if (FLAG_turbo_stress_instruction_scheduling) {
+    ScheduleBlock<StressSchedulerQueue>();
+  } else {
+    ScheduleBlock<CriticalPathFirstQueue>();
+  }
   sequence()->EndBlock(rpo);
   graph_.clear();
   last_side_effect_instr_ = nullptr;
@@ -110,14 +160,9 @@
 }
 
 
-bool InstructionScheduler::CompareNodes(ScheduleGraphNode *node1,
-                                        ScheduleGraphNode *node2) const {
-  return node1->total_latency() > node2->total_latency();
-}
-
-
+template <typename QueueType>
 void InstructionScheduler::ScheduleBlock() {
-  ZoneLinkedList<ScheduleGraphNode*> ready_list(zone());
+  QueueType ready_list(this);
 
   // Compute total latencies so that we can schedule the critical path first.
   ComputeTotalLatencies();
@@ -125,43 +170,28 @@
   // Add nodes which don't have dependencies to the ready list.
   for (auto node : graph_) {
     if (!node->HasUnscheduledPredecessor()) {
-      ready_list.push_back(node);
+      ready_list.AddNode(node);
     }
   }
 
   // Go through the ready list and schedule the instructions.
   int cycle = 0;
-  while (!ready_list.empty()) {
-    auto candidate = ready_list.end();
-    for (auto iterator = ready_list.begin(); iterator != ready_list.end();
-         ++iterator) {
-      // Look for the best candidate to schedule.
-      // We only consider instructions that have all their operands ready and
-      // we try to schedule the critical path first (we look for the instruction
-      // with the highest latency on the path to reach the end of the graph).
-      if (cycle >= (*iterator)->start_cycle()) {
-        if ((candidate == ready_list.end()) ||
-            CompareNodes(*iterator, *candidate)) {
-          candidate = iterator;
-        }
-      }
-    }
+  while (!ready_list.IsEmpty()) {
+    auto candidate = ready_list.PopBestCandidate(cycle);
 
-    if (candidate != ready_list.end()) {
-      sequence()->AddInstruction((*candidate)->instruction());
+    if (candidate != nullptr) {
+      sequence()->AddInstruction(candidate->instruction());
 
-      for (auto successor : (*candidate)->successors()) {
+      for (auto successor : candidate->successors()) {
         successor->DropUnscheduledPredecessor();
         successor->set_start_cycle(
             std::max(successor->start_cycle(),
-                     cycle + (*candidate)->latency()));
+                     cycle + candidate->latency()));
 
         if (!successor->HasUnscheduledPredecessor()) {
-          ready_list.push_back(successor);
+          ready_list.AddNode(successor);
         }
       }
-
-      ready_list.erase(candidate);
     }
 
     cycle++;
@@ -172,17 +202,22 @@
 int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
   switch (instr->arch_opcode()) {
     case kArchNop:
-    case kArchStackPointer:
     case kArchFramePointer:
+    case kArchParentFramePointer:
     case kArchTruncateDoubleToI:
+    case kArchStackSlot:
       return kNoOpcodeFlags;
 
+    case kArchStackPointer:
+      // ArchStackPointer instruction loads the current stack pointer value and
+      // must not be reordered with instructions that have side effects.
+      return kIsLoadOperation;
+
     case kArchPrepareCallCFunction:
     case kArchPrepareTailCall:
     case kArchCallCFunction:
     case kArchCallCodeObject:
     case kArchCallJSFunction:
-    case kArchLazyBailout:
       return kHasSideEffect;
 
     case kArchTailCallCodeObject:
diff --git a/src/compiler/instruction-scheduler.h b/src/compiler/instruction-scheduler.h
index fafbe47..104c0b9 100644
--- a/src/compiler/instruction-scheduler.h
+++ b/src/compiler/instruction-scheduler.h
@@ -90,11 +90,66 @@
     int start_cycle_;
   };
 
-  // Compare the two nodes and return true if node1 is a better candidate than
-  // node2 (i.e. node1 should be scheduled before node2).
-  bool CompareNodes(ScheduleGraphNode *node1, ScheduleGraphNode *node2) const;
+  // Keep track of all nodes ready to be scheduled (i.e. all their
+  // dependencies have been scheduled). Note that this class is intended to be
+  // extended by concrete implementations of the scheduling queue, which
+  // define the policy used to pop nodes from the queue.
+  class SchedulingQueueBase {
+   public:
+    explicit SchedulingQueueBase(InstructionScheduler* scheduler)
+      : scheduler_(scheduler),
+        nodes_(scheduler->zone()) {
+    }
 
-  // Perform scheduling for the current block.
+    void AddNode(ScheduleGraphNode* node) {
+      nodes_.push_back(node);
+    }
+
+    bool IsEmpty() const {
+      return nodes_.empty();
+    }
+
+   protected:
+    InstructionScheduler* scheduler_;
+    ZoneLinkedList<ScheduleGraphNode*> nodes_;
+  };
+
+  // A scheduling queue which prioritizes nodes on the critical path (we look
+  // for the instruction with the highest latency on the path to reach the end
+  // of the graph).
+  class CriticalPathFirstQueue : public SchedulingQueueBase  {
+   public:
+    explicit CriticalPathFirstQueue(InstructionScheduler* scheduler)
+      : SchedulingQueueBase(scheduler) { }
+
+    // Look for the best candidate to schedule, remove it from the queue and
+    // return it.
+    ScheduleGraphNode* PopBestCandidate(int cycle);
+
+   private:
+    // Compare the two nodes and return true if node1 is a better candidate than
+    // node2 (i.e. node1 should be scheduled before node2).
+    bool CompareNodes(ScheduleGraphNode *node1, ScheduleGraphNode *node2) const;
+  };
+
+  // A queue which pops a random node from the queue to perform stress tests on
+  // the scheduler.
+  class StressSchedulerQueue : public SchedulingQueueBase  {
+   public:
+    explicit StressSchedulerQueue(InstructionScheduler* scheduler)
+      : SchedulingQueueBase(scheduler) { }
+
+    ScheduleGraphNode* PopBestCandidate(int cycle);
+
+   private:
+    Isolate *isolate() {
+      return scheduler_->isolate();
+    }
+  };
+
+  // Perform scheduling for the current block specifying the queue type to
+  // use to determine the next best candidate.
+  template <typename QueueType>
   void ScheduleBlock();
 
   // Return the scheduling properties of the given instruction.
@@ -134,6 +189,7 @@
 
   Zone* zone() { return zone_; }
   InstructionSequence* sequence() { return sequence_; }
+  Isolate* isolate() { return sequence()->isolate(); }
 
   Zone* zone_;
   InstructionSequence* sequence_;
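The queue split above turns the scheduling policy into a template parameter of ScheduleBlock: CriticalPathFirstQueue keeps the old greedy heuristic, while StressSchedulerQueue picks a ready node at random. A self-contained toy of the critical-path-first policy (the ToyNode struct and container are illustrative, not the V8 types):

#include <list>

struct ToyNode {
  int total_latency;  // latency of the longest path from this node to the end
  int start_cycle;    // earliest cycle at which all operands are available
};

// Among the nodes whose operands are ready at `cycle`, pop the one with the
// largest accumulated latency; return nullptr if nothing is ready yet.
ToyNode* PopBestCandidate(std::list<ToyNode*>& nodes, int cycle) {
  auto best = nodes.end();
  for (auto it = nodes.begin(); it != nodes.end(); ++it) {
    if (cycle < (*it)->start_cycle) continue;
    if (best == nodes.end() ||
        (*it)->total_latency > (*best)->total_latency) {
      best = it;
    }
  }
  if (best == nodes.end()) return nullptr;
  ToyNode* result = *best;
  nodes.erase(best);
  return result;
}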
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
index 86868e5..0f27e50 100644
--- a/src/compiler/instruction-selector.cc
+++ b/src/compiler/instruction-selector.cc
@@ -21,7 +21,7 @@
 InstructionSelector::InstructionSelector(
     Zone* zone, size_t node_count, Linkage* linkage,
     InstructionSequence* sequence, Schedule* schedule,
-    SourcePositionTable* source_positions,
+    SourcePositionTable* source_positions, Frame* frame,
     SourcePositionMode source_position_mode, Features features)
     : zone_(zone),
       linkage_(linkage),
@@ -34,9 +34,11 @@
       instructions_(zone),
       defined_(node_count, false, zone),
       used_(node_count, false, zone),
+      effect_level_(node_count, 0, zone),
       virtual_registers_(node_count,
                          InstructionOperand::kInvalidVirtualRegister, zone),
-      scheduler_(nullptr) {
+      scheduler_(nullptr),
+      frame_(frame) {
   instructions_.reserve(node_count);
 }
 
@@ -217,10 +219,11 @@
 
 bool InstructionSelector::CanCover(Node* user, Node* node) const {
   return node->OwnedBy(user) &&
-         schedule()->block(node) == schedule()->block(user);
+         schedule()->block(node) == schedule()->block(user) &&
+         (node->op()->HasProperty(Operator::kPure) ||
+          GetEffectLevel(node) == GetEffectLevel(user));
 }
 
-
 int InstructionSelector::GetVirtualRegister(const Node* node) {
   DCHECK_NOT_NULL(node);
   size_t const id = node->id();
@@ -279,6 +282,19 @@
   used_[id] = true;
 }
 
+int InstructionSelector::GetEffectLevel(Node* node) const {
+  DCHECK_NOT_NULL(node);
+  size_t const id = node->id();
+  DCHECK_LT(id, effect_level_.size());
+  return effect_level_[id];
+}
+
+void InstructionSelector::SetEffectLevel(Node* node, int effect_level) {
+  DCHECK_NOT_NULL(node);
+  size_t const id = node->id();
+  DCHECK_LT(id, effect_level_.size());
+  effect_level_[id] = effect_level;
+}
 
 void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
                                                const InstructionOperand& op) {
@@ -567,10 +583,6 @@
           g.UseLocation(callee, buffer->descriptor->GetInputLocation(0),
                         buffer->descriptor->GetInputType(0).representation()));
       break;
-    case CallDescriptor::kLazyBailout:
-      // The target is ignored, but we still need to pass a value here.
-      buffer->instruction_args.push_back(g.UseImmediate(callee));
-      break;
   }
   DCHECK_EQ(1u, buffer->instruction_args.size());
 
@@ -581,13 +593,29 @@
   size_t frame_state_entries = 0;
   USE(frame_state_entries);  // frame_state_entries is only used for debug.
   if (buffer->frame_state_descriptor != nullptr) {
+    Node* frame_state =
+        call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
+
+    // If it was a syntactic tail call we need to drop the current frame and
+    // an arguments adaptor frame on top of it (if the latter is present).
+    if (buffer->descriptor->SupportsTailCalls()) {
+      frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+      buffer->frame_state_descriptor =
+          buffer->frame_state_descriptor->outer_state();
+
+      if (buffer->frame_state_descriptor != nullptr &&
+          buffer->frame_state_descriptor->type() ==
+              FrameStateType::kArgumentsAdaptor) {
+        frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+        buffer->frame_state_descriptor =
+            buffer->frame_state_descriptor->outer_state();
+      }
+    }
+
     InstructionSequence::StateId state_id =
         sequence()->AddFrameStateDescriptor(buffer->frame_state_descriptor);
     buffer->instruction_args.push_back(g.TempImmediate(state_id.ToInt()));
 
-    Node* frame_state =
-        call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
-
     StateObjectDeduplicator deduplicator(instruction_zone());
 
     frame_state_entries =
@@ -656,6 +684,16 @@
   current_block_ = block;
   int current_block_end = static_cast<int>(instructions_.size());
 
+  int effect_level = 0;
+  for (Node* const node : *block) {
+    if (node->opcode() == IrOpcode::kStore ||
+        node->opcode() == IrOpcode::kCheckedStore ||
+        node->opcode() == IrOpcode::kCall) {
+      ++effect_level;
+    }
+    SetEffectLevel(node, effect_level);
+  }
+
   // Generate code for the block control "top down", but schedule the code
   // "bottom up".
   VisitControl(block);
@@ -767,7 +805,7 @@
       DCHECK_EQ(IrOpcode::kThrow, input->opcode());
       return VisitThrow(input->InputAt(0));
     case BasicBlock::kNone: {
-      // TODO(titzer): exit block doesn't have control.
+      // Exit block doesn't have control.
       DCHECK_NULL(input);
       break;
     }
@@ -866,6 +904,8 @@
       return MarkAsWord32(node), VisitWord32Clz(node);
     case IrOpcode::kWord32Ctz:
       return MarkAsWord32(node), VisitWord32Ctz(node);
+    case IrOpcode::kWord32ReverseBits:
+      return MarkAsWord32(node), VisitWord32ReverseBits(node);
     case IrOpcode::kWord32Popcnt:
       return MarkAsWord32(node), VisitWord32Popcnt(node);
     case IrOpcode::kWord64Popcnt:
@@ -888,6 +928,8 @@
       return MarkAsWord64(node), VisitWord64Clz(node);
     case IrOpcode::kWord64Ctz:
       return MarkAsWord64(node), VisitWord64Ctz(node);
+    case IrOpcode::kWord64ReverseBits:
+      return MarkAsWord64(node), VisitWord64ReverseBits(node);
     case IrOpcode::kWord64Equal:
       return VisitWord64Equal(node);
     case IrOpcode::kInt32Add:
@@ -956,6 +998,10 @@
       return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
     case IrOpcode::kChangeFloat64ToUint32:
       return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
+    case IrOpcode::kTruncateFloat32ToInt32:
+      return MarkAsWord32(node), VisitTruncateFloat32ToInt32(node);
+    case IrOpcode::kTruncateFloat32ToUint32:
+      return MarkAsWord32(node), VisitTruncateFloat32ToUint32(node);
     case IrOpcode::kTryTruncateFloat32ToInt64:
       return MarkAsWord64(node), VisitTryTruncateFloat32ToInt64(node);
     case IrOpcode::kTryTruncateFloat64ToInt64:
@@ -976,10 +1022,14 @@
       return MarkAsWord32(node), VisitTruncateInt64ToInt32(node);
     case IrOpcode::kRoundInt64ToFloat32:
       return MarkAsFloat32(node), VisitRoundInt64ToFloat32(node);
+    case IrOpcode::kRoundInt32ToFloat32:
+      return MarkAsFloat32(node), VisitRoundInt32ToFloat32(node);
     case IrOpcode::kRoundInt64ToFloat64:
       return MarkAsFloat64(node), VisitRoundInt64ToFloat64(node);
     case IrOpcode::kBitcastFloat32ToInt32:
       return MarkAsWord32(node), VisitBitcastFloat32ToInt32(node);
+    case IrOpcode::kRoundUint32ToFloat32:
+      return MarkAsFloat32(node), VisitRoundUint32ToFloat32(node);
     case IrOpcode::kRoundUint64ToFloat32:
       return MarkAsFloat64(node), VisitRoundUint64ToFloat32(node);
     case IrOpcode::kRoundUint64ToFloat64:
@@ -1062,10 +1112,14 @@
       return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node);
     case IrOpcode::kFloat64InsertHighWord32:
       return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
+    case IrOpcode::kStackSlot:
+      return VisitStackSlot(node);
     case IrOpcode::kLoadStackPointer:
       return VisitLoadStackPointer(node);
     case IrOpcode::kLoadFramePointer:
       return VisitLoadFramePointer(node);
+    case IrOpcode::kLoadParentFramePointer:
+      return VisitLoadParentFramePointer(node);
     case IrOpcode::kCheckedLoad: {
       MachineRepresentation rep =
           CheckedLoadRepresentationOf(node->op()).representation();
@@ -1090,9 +1144,14 @@
 
 void InstructionSelector::VisitLoadFramePointer(Node* node) {
   OperandGenerator g(this);
+  frame_->MarkNeedsFrame();
   Emit(kArchFramePointer, g.DefineAsRegister(node));
 }
 
+void InstructionSelector::VisitLoadParentFramePointer(Node* node) {
+  OperandGenerator g(this);
+  Emit(kArchParentFramePointer, g.DefineAsRegister(node));
+}
 
 void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
                                           InstructionOperand& index_operand) {
@@ -1129,6 +1188,14 @@
   Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
 }
 
+void InstructionSelector::VisitStackSlot(Node* node) {
+  int size = 1 << ElementSizeLog2Of(StackSlotRepresentationOf(node->op()));
+  int slot = frame_->AllocateSpillSlot(size);
+  OperandGenerator g(this);
+
+  Emit(kArchStackSlot, g.DefineAsRegister(node),
+       sequence()->AddImmediate(Constant(slot)), 0, nullptr);
+}
 
 // 32 bit targets do not implement the following instructions.
 #if V8_TARGET_ARCH_32_BIT
@@ -1160,6 +1227,11 @@
 void InstructionSelector::VisitWord64Ctz(Node* node) { UNIMPLEMENTED(); }
 
 
+void InstructionSelector::VisitWord64ReverseBits(Node* node) {
+  UNIMPLEMENTED();
+}
+
+
 void InstructionSelector::VisitWord64Popcnt(Node* node) { UNIMPLEMENTED(); }
 
 
@@ -1412,6 +1484,13 @@
     buffer.instruction_args.push_back(g.Label(handler));
   }
 
+  // (arm64 only) caller uses JSSP but callee might destroy it.
+  if (descriptor->UseNativeStack() &&
+      !linkage()->GetIncomingDescriptor()->UseNativeStack()) {
+    flags |= CallDescriptor::kRestoreJSSP;
+  }
+
+
   // Select the appropriate opcode based on the call type.
   InstructionCode opcode = kArchNop;
   switch (descriptor->kind()) {
@@ -1426,9 +1505,6 @@
     case CallDescriptor::kCallJSFunction:
       opcode = kArchCallJSFunction | MiscField::encode(flags);
       break;
-    case CallDescriptor::kLazyBailout:
-      opcode = kArchLazyBailout | MiscField::encode(flags);
-      break;
   }
 
   // Emit the call instruction.
@@ -1585,7 +1661,7 @@
 
 void InstructionSelector::VisitThrow(Node* value) {
   OperandGenerator g(this);
-  Emit(kArchThrowTerminator, g.NoOutput());  // TODO(titzer)
+  Emit(kArchThrowTerminator, g.NoOutput());
 }
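The new effect levels make the folding decision in CanCover precise: within a block the level is bumped at every Store, CheckedStore and Call, and a non-pure node may only be covered by a user at the same level. A self-contained sketch of the bookkeeping (enum and container are illustrative; the real code walks the nodes of a BasicBlock):

#include <vector>

enum class ToyOpcode { kLoad, kStore, kCheckedStore, kCall, kWord32Equal };

// Mirror of the loop at the top of VisitBlock: one effect level per node.
std::vector<int> ComputeEffectLevels(const std::vector<ToyOpcode>& block) {
  std::vector<int> levels;
  int effect_level = 0;
  for (ToyOpcode op : block) {
    if (op == ToyOpcode::kStore || op == ToyOpcode::kCheckedStore ||
        op == ToyOpcode::kCall) {
      ++effect_level;
    }
    levels.push_back(effect_level);
  }
  return levels;
}

With this, a Load at level 0 followed by a Store (level 1) and a compare of that load (level 1) is not folded: the levels differ, so the memory-operand paths added for kIA32Cmp/kIA32Test cannot move the load past the store.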
 
 
diff --git a/src/compiler/instruction-selector.h b/src/compiler/instruction-selector.h
index 52aea70..a01cab4 100644
--- a/src/compiler/instruction-selector.h
+++ b/src/compiler/instruction-selector.h
@@ -52,7 +52,7 @@
   InstructionSelector(
       Zone* zone, size_t node_count, Linkage* linkage,
       InstructionSequence* sequence, Schedule* schedule,
-      SourcePositionTable* source_positions,
+      SourcePositionTable* source_positions, Frame* frame,
       SourcePositionMode source_position_mode = kCallSourcePositions,
       Features features = SupportedFeatures());
 
@@ -149,6 +149,9 @@
   // Checks if {node} is currently live.
   bool IsLive(Node* node) const { return !IsDefined(node) && IsUsed(node); }
 
+  // Gets the effect level of {node}.
+  int GetEffectLevel(Node* node) const;
+
   int GetVirtualRegister(const Node* node);
   const std::map<NodeId, int> GetVirtualRegistersForTesting() const;
 
@@ -168,6 +171,9 @@
   // will need to generate code for it.
   void MarkAsUsed(Node* node);
 
+  // Sets the effect level of {node}.
+  void SetEffectLevel(Node* node, int effect_level);
+
   // Inform the register allocation of the representation of the value produced
   // by {node}.
   void MarkAsRepresentation(MachineRepresentation rep, Node* node);
@@ -269,8 +275,10 @@
   ZoneVector<Instruction*> instructions_;
   BoolVector defined_;
   BoolVector used_;
+  IntVector effect_level_;
   IntVector virtual_registers_;
   InstructionScheduler* scheduler_;
+  Frame* frame_;
 };
 
 }  // namespace compiler
diff --git a/src/compiler/instruction.cc b/src/compiler/instruction.cc
index 383e27d..d4ec6bc 100644
--- a/src/compiler/instruction.cc
+++ b/src/compiler/instruction.cc
@@ -164,6 +164,9 @@
         case MachineRepresentation::kFloat64:
           os << "|f64";
           break;
+        case MachineRepresentation::kSimd128:
+          os << "|s128";
+          break;
         case MachineRepresentation::kTagged:
           os << "|t";
           break;
@@ -615,6 +618,20 @@
   return blocks;
 }
 
+void InstructionSequence::Validate() {
+  // Validate blocks are in edge-split form: no block with multiple successors
+  // has an edge to a block (== a successor) with more than one predecessor.
+  for (const InstructionBlock* block : instruction_blocks()) {
+    if (block->SuccessorCount() > 1) {
+      for (const RpoNumber& successor_id : block->successors()) {
+        const InstructionBlock* successor = InstructionBlockAt(successor_id);
+        // Expect precisely one predecessor: "block".
+        CHECK(successor->PredecessorCount() == 1 &&
+              successor->predecessors()[0] == block->rpo_number());
+      }
+    }
+  }
+}
 
 void InstructionSequence::ComputeAssemblyOrder(InstructionBlocks* blocks) {
   int ao = 0;
@@ -648,6 +665,10 @@
       representations_(zone()),
       deoptimization_entries_(zone()) {
   block_starts_.reserve(instruction_blocks_->size());
+
+#if DEBUG
+  Validate();
+#endif
 }
 
 
@@ -726,6 +747,7 @@
     case MachineRepresentation::kWord64:
     case MachineRepresentation::kFloat32:
     case MachineRepresentation::kFloat64:
+    case MachineRepresentation::kSimd128:
     case MachineRepresentation::kTagged:
       return rep;
     case MachineRepresentation::kNone:
@@ -819,6 +841,62 @@
   Print(config);
 }
 
+void InstructionSequence::PrintBlock(const RegisterConfiguration* config,
+                                     int block_id) const {
+  OFStream os(stdout);
+  RpoNumber rpo = RpoNumber::FromInt(block_id);
+  const InstructionBlock* block = InstructionBlockAt(rpo);
+  CHECK(block->rpo_number() == rpo);
+
+  os << "B" << block->rpo_number();
+  os << ": AO#" << block->ao_number();
+  if (block->IsDeferred()) os << " (deferred)";
+  if (!block->needs_frame()) os << " (no frame)";
+  if (block->must_construct_frame()) os << " (construct frame)";
+  if (block->must_deconstruct_frame()) os << " (deconstruct frame)";
+  if (block->IsLoopHeader()) {
+    os << " loop blocks: [" << block->rpo_number() << ", " << block->loop_end()
+       << ")";
+  }
+  os << "  instructions: [" << block->code_start() << ", " << block->code_end()
+     << ")\n  predecessors:";
+
+  for (auto pred : block->predecessors()) {
+    os << " B" << pred.ToInt();
+  }
+  os << "\n";
+
+  for (auto phi : block->phis()) {
+    PrintableInstructionOperand printable_op = {config, phi->output()};
+    os << "     phi: " << printable_op << " =";
+    for (auto input : phi->operands()) {
+      os << " v" << input;
+    }
+    os << "\n";
+  }
+
+  ScopedVector<char> buf(32);
+  PrintableInstruction printable_instr;
+  printable_instr.register_configuration_ = config;
+  for (int j = block->first_instruction_index();
+       j <= block->last_instruction_index(); j++) {
+    // TODO(svenpanne) Add some basic formatting to our streams.
+    SNPrintF(buf, "%5d", j);
+    printable_instr.instr_ = InstructionAt(j);
+    os << "   " << buf.start() << ": " << printable_instr << "\n";
+  }
+
+  for (auto succ : block->successors()) {
+    os << " B" << succ.ToInt();
+  }
+  os << "\n";
+}
+
+void InstructionSequence::PrintBlock(int block_id) const {
+  const RegisterConfiguration* config =
+      RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
+  PrintBlock(config, block_id);
+}
 
 FrameStateDescriptor::FrameStateDescriptor(
     Zone* zone, FrameStateType type, BailoutId bailout_id,
@@ -901,53 +979,7 @@
     os << "CST#" << i << ": v" << it->first << " = " << it->second << "\n";
   }
   for (int i = 0; i < code.InstructionBlockCount(); i++) {
-    RpoNumber rpo = RpoNumber::FromInt(i);
-    const InstructionBlock* block = code.InstructionBlockAt(rpo);
-    CHECK(block->rpo_number() == rpo);
-
-    os << "B" << block->rpo_number();
-    os << ": AO#" << block->ao_number();
-    if (block->IsDeferred()) os << " (deferred)";
-    if (!block->needs_frame()) os << " (no frame)";
-    if (block->must_construct_frame()) os << " (construct frame)";
-    if (block->must_deconstruct_frame()) os << " (deconstruct frame)";
-    if (block->IsLoopHeader()) {
-      os << " loop blocks: [" << block->rpo_number() << ", "
-         << block->loop_end() << ")";
-    }
-    os << "  instructions: [" << block->code_start() << ", "
-       << block->code_end() << ")\n  predecessors:";
-
-    for (auto pred : block->predecessors()) {
-      os << " B" << pred.ToInt();
-    }
-    os << "\n";
-
-    for (auto phi : block->phis()) {
-      PrintableInstructionOperand printable_op = {
-          printable.register_configuration_, phi->output()};
-      os << "     phi: " << printable_op << " =";
-      for (auto input : phi->operands()) {
-        os << " v" << input;
-      }
-      os << "\n";
-    }
-
-    ScopedVector<char> buf(32);
-    PrintableInstruction printable_instr;
-    printable_instr.register_configuration_ = printable.register_configuration_;
-    for (int j = block->first_instruction_index();
-         j <= block->last_instruction_index(); j++) {
-      // TODO(svenpanne) Add some basic formatting to our streams.
-      SNPrintF(buf, "%5d", j);
-      printable_instr.instr_ = code.InstructionAt(j);
-      os << "   " << buf.start() << ": " << printable_instr << "\n";
-    }
-
-    for (auto succ : block->successors()) {
-      os << " B" << succ.ToInt();
-    }
-    os << "\n";
+    printable.sequence_->PrintBlock(printable.register_configuration_, i);
   }
   return os;
 }
diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h
index 8a6a0ae..9c978ce 100644
--- a/src/compiler/instruction.h
+++ b/src/compiler/instruction.h
@@ -67,8 +67,10 @@
   inline bool IsAnyRegister() const;
   inline bool IsRegister() const;
   inline bool IsDoubleRegister() const;
+  inline bool IsSimd128Register() const;
   inline bool IsStackSlot() const;
   inline bool IsDoubleStackSlot() const;
+  inline bool IsSimd128StackSlot() const;
 
   template <typename SubKindOperand>
   static SubKindOperand* New(Zone* zone, const SubKindOperand& op) {
@@ -411,7 +413,7 @@
   }
 
   int index() const {
-    DCHECK(IsStackSlot() || IsDoubleStackSlot());
+    DCHECK(IsStackSlot() || IsDoubleStackSlot() || IsSimd128StackSlot());
     return static_cast<int64_t>(value_) >> IndexField::kShift;
   }
 
@@ -427,6 +429,12 @@
                                      IndexField::kShift);
   }
 
+  Simd128Register GetSimd128Register() const {
+    DCHECK(IsSimd128Register());
+    return Simd128Register::from_code(static_cast<int64_t>(value_) >>
+                                      IndexField::kShift);
+  }
+
   LocationKind location_kind() const {
     return LocationKindField::decode(value_);
   }
@@ -441,6 +449,7 @@
       case MachineRepresentation::kWord64:
       case MachineRepresentation::kFloat32:
       case MachineRepresentation::kFloat64:
+      case MachineRepresentation::kSimd128:
       case MachineRepresentation::kTagged:
         return true;
       case MachineRepresentation::kBit:
@@ -522,6 +531,12 @@
          IsFloatingPoint(LocationOperand::cast(this)->representation());
 }
 
+bool InstructionOperand::IsSimd128Register() const {
+  return IsAnyRegister() &&
+         LocationOperand::cast(this)->representation() ==
+             MachineRepresentation::kSimd128;
+}
+
 bool InstructionOperand::IsStackSlot() const {
   return (IsAllocated() || IsExplicit()) &&
          LocationOperand::cast(this)->location_kind() ==
@@ -536,6 +551,14 @@
          IsFloatingPoint(LocationOperand::cast(this)->representation());
 }
 
+bool InstructionOperand::IsSimd128StackSlot() const {
+  return (IsAllocated() || IsExplicit()) &&
+         LocationOperand::cast(this)->location_kind() ==
+             LocationOperand::STACK_SLOT &&
+         LocationOperand::cast(this)->representation() ==
+             MachineRepresentation::kSimd128;
+}
+
 uint64_t InstructionOperand::GetCanonicalizedValue() const {
   if (IsAllocated() || IsExplicit()) {
     // TODO(dcarney): put machine type last and mask.
@@ -633,8 +656,14 @@
 
   MoveOperands* AddMove(const InstructionOperand& from,
                         const InstructionOperand& to) {
-    auto zone = get_allocator().zone();
-    auto move = new (zone) MoveOperands(from, to);
+    Zone* zone = get_allocator().zone();
+    return AddMove(from, to, zone);
+  }
+
+  MoveOperands* AddMove(const InstructionOperand& from,
+                        const InstructionOperand& to,
+                        Zone* operand_allocation_zone) {
+    MoveOperands* move = new (operand_allocation_zone) MoveOperands(from, to);
     push_back(move);
     return move;
   }
@@ -732,7 +761,6 @@
     return FlagsConditionField::decode(opcode());
   }
 
-  // TODO(titzer): make call into a flags.
   static Instruction* New(Zone* zone, InstructionCode opcode) {
     return New(zone, opcode, 0, nullptr, 0, nullptr, 0, nullptr);
   }
@@ -1323,6 +1351,11 @@
   void Print(const RegisterConfiguration* config) const;
   void Print() const;
 
+  void PrintBlock(const RegisterConfiguration* config, int block_id) const;
+  void PrintBlock(int block_id) const;
+
+  void Validate();
+
  private:
   friend std::ostream& operator<<(std::ostream& os,
                                   const PrintableInstructionSequence& code);
diff --git a/src/compiler/int64-lowering.cc b/src/compiler/int64-lowering.cc
new file mode 100644
index 0000000..ff31abe
--- /dev/null
+++ b/src/compiler/int64-lowering.cc
@@ -0,0 +1,299 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/int64-lowering.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-properties.h"
+
+#include "src/compiler/node.h"
+#include "src/wasm/wasm-module.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Int64Lowering::Int64Lowering(Graph* graph, MachineOperatorBuilder* machine,
+                             CommonOperatorBuilder* common, Zone* zone,
+                             Signature<MachineRepresentation>* signature)
+    : zone_(zone),
+      graph_(graph),
+      machine_(machine),
+      common_(common),
+      state_(graph, 4),
+      stack_(zone),
+      replacements_(zone->NewArray<Replacement>(graph->NodeCount())),
+      signature_(signature) {
+  memset(replacements_, 0, sizeof(Replacement) * graph->NodeCount());
+}
+
+void Int64Lowering::LowerGraph() {
+  if (4 != kPointerSize) {
+    return;
+  }
+  stack_.push(graph()->end());
+  state_.Set(graph()->end(), State::kOnStack);
+
+  while (!stack_.empty()) {
+    Node* top = stack_.top();
+    if (state_.Get(top) == State::kInputsPushed) {
+      stack_.pop();
+      state_.Set(top, State::kVisited);
+      // All inputs of top have already been reduced, now reduce top.
+      LowerNode(top);
+    } else {
+      // Push all children onto the stack.
+      for (Node* input : top->inputs()) {
+        if (state_.Get(input) == State::kUnvisited) {
+          stack_.push(input);
+          state_.Set(input, State::kOnStack);
+        }
+      }
+      state_.Set(top, State::kInputsPushed);
+    }
+  }
+}
+
+static int GetParameterIndexAfterLowering(
+    Signature<MachineRepresentation>* signature, int old_index) {
+  int result = old_index;
+  for (int i = 0; i < old_index; i++) {
+    if (signature->GetParam(i) == MachineRepresentation::kWord64) {
+      result++;
+    }
+  }
+  return result;
+}
+
+static int GetParameterCountAfterLowering(
+    Signature<MachineRepresentation>* signature) {
+  return GetParameterIndexAfterLowering(
+      signature, static_cast<int>(signature->parameter_count()));
+}
+
+static int GetReturnCountAfterLowering(
+    Signature<MachineRepresentation>* signature) {
+  int result = static_cast<int>(signature->return_count());
+  for (int i = 0; i < static_cast<int>(signature->return_count()); i++) {
+    if (signature->GetReturn(i) == MachineRepresentation::kWord64) {
+      result++;
+    }
+  }
+  return result;
+}
+
+void Int64Lowering::LowerNode(Node* node) {
+  switch (node->opcode()) {
+    case IrOpcode::kInt64Constant: {
+      int64_t value = OpParameter<int64_t>(node);
+      Node* low_node = graph()->NewNode(
+          common()->Int32Constant(static_cast<int32_t>(value & 0xFFFFFFFF)));
+      Node* high_node = graph()->NewNode(
+          common()->Int32Constant(static_cast<int32_t>(value >> 32)));
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+    case IrOpcode::kLoad: {
+      LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+
+      if (load_rep.representation() == MachineRepresentation::kWord64) {
+        Node* base = node->InputAt(0);
+        Node* index = node->InputAt(1);
+        Node* index_high =
+            graph()->NewNode(machine()->Int32Add(), index,
+                             graph()->NewNode(common()->Int32Constant(4)));
+
+        const Operator* load_op = machine()->Load(MachineType::Int32());
+        Node* high_node;
+        if (node->InputCount() > 2) {
+          Node* effect_high = node->InputAt(2);
+          Node* control_high = node->InputAt(3);
+          high_node = graph()->NewNode(load_op, base, index_high, effect_high,
+                                       control_high);
+          // Change the effect chain from old_node --> old_effect to
+          // old_node --> high_node --> old_effect.
+          node->ReplaceInput(2, high_node);
+        } else {
+          high_node = graph()->NewNode(load_op, base, index_high);
+        }
+        NodeProperties::ChangeOp(node, load_op);
+        ReplaceNode(node, node, high_node);
+      }
+      break;
+    }
+    case IrOpcode::kStore: {
+      StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+      if (store_rep.representation() == MachineRepresentation::kWord64) {
+        // We change the original store node to store the low word, and create
+        // a new store node to store the high word. The effect and control edges
+        // are copied from the original store to the new store node, and the
+        // effect edge of the original store is redirected to the new store.
+        WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+
+        Node* base = node->InputAt(0);
+        Node* index = node->InputAt(1);
+        Node* index_high =
+            graph()->NewNode(machine()->Int32Add(), index,
+                             graph()->NewNode(common()->Int32Constant(4)));
+
+        Node* value = node->InputAt(2);
+        DCHECK(HasReplacementLow(value));
+        DCHECK(HasReplacementHigh(value));
+
+        const Operator* store_op = machine()->Store(StoreRepresentation(
+            MachineRepresentation::kWord32, write_barrier_kind));
+
+        Node* high_node;
+        if (node->InputCount() > 3) {
+          Node* effect_high = node->InputAt(3);
+          Node* control_high = node->InputAt(4);
+          high_node = graph()->NewNode(store_op, base, index_high,
+                                       GetReplacementHigh(value), effect_high,
+                                       control_high);
+          node->ReplaceInput(3, high_node);
+
+        } else {
+          high_node = graph()->NewNode(store_op, base, index_high,
+                                       GetReplacementHigh(value));
+        }
+
+        node->ReplaceInput(2, GetReplacementLow(value));
+        NodeProperties::ChangeOp(node, store_op);
+        ReplaceNode(node, node, high_node);
+      }
+      break;
+    }
+    case IrOpcode::kWord64And: {
+      DCHECK(node->InputCount() == 2);
+      Node* left = node->InputAt(0);
+      Node* right = node->InputAt(1);
+
+      Node* low_node =
+          graph()->NewNode(machine()->Word32And(), GetReplacementLow(left),
+                           GetReplacementLow(right));
+      Node* high_node =
+          graph()->NewNode(machine()->Word32And(), GetReplacementHigh(left),
+                           GetReplacementHigh(right));
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+    case IrOpcode::kTruncateInt64ToInt32: {
+      DCHECK(node->InputCount() == 1);
+      Node* input = node->InputAt(0);
+      ReplaceNode(node, GetReplacementLow(input), nullptr);
+      node->NullAllInputs();
+      break;
+    }
+    case IrOpcode::kStart: {
+      int parameter_count = GetParameterCountAfterLowering(signature());
+      // Only exchange the node if the parameter count actually changed.
+      if (parameter_count != signature()->parameter_count()) {
+        int delta =
+            parameter_count - static_cast<int>(signature()->parameter_count());
+        int new_output_count = node->op()->ValueOutputCount() + delta;
+        NodeProperties::ChangeOp(node, common()->Start(new_output_count));
+      }
+      break;
+    }
+    case IrOpcode::kParameter: {
+      DCHECK(node->InputCount() == 1);
+      // Only exchange the node if the parameter count actually changed. We do
+      // not even have to do the default lowering because the start node,
+      // the only input of a parameter node, only changes if the parameter count
+      // changes.
+      if (GetParameterCountAfterLowering(signature()) !=
+          signature()->parameter_count()) {
+        int old_index = ParameterIndexOf(node->op());
+        int new_index = GetParameterIndexAfterLowering(signature(), old_index);
+        NodeProperties::ChangeOp(node, common()->Parameter(new_index));
+
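+        // A lowered 64-bit parameter occupies two consecutive parameter
+        // slots; the high word is read from the slot at new_index + 1.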
+        Node* high_node = nullptr;
+        if (signature()->GetParam(old_index) ==
+            MachineRepresentation::kWord64) {
+          high_node = graph()->NewNode(common()->Parameter(new_index + 1),
+                                       graph()->start());
+        }
+        ReplaceNode(node, node, high_node);
+      }
+      break;
+    }
+    case IrOpcode::kReturn: {
+      DefaultLowering(node);
+      int new_return_count = GetReturnCountAfterLowering(signature());
+      if (signature()->return_count() != new_return_count) {
+        NodeProperties::ChangeOp(node, common()->Return(new_return_count));
+      }
+      break;
+    }
+    case IrOpcode::kCall: {
+      CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
+      if (DefaultLowering(node) ||
+          (descriptor->ReturnCount() == 1 &&
+           descriptor->GetReturnType(0) == MachineType::Int64())) {
+        // We have to adjust the call descriptor.
+        const Operator* op = common()->Call(
+            wasm::ModuleEnv::GetI32WasmCallDescriptor(zone(), descriptor));
+        NodeProperties::ChangeOp(node, op);
+      }
+      if (descriptor->ReturnCount() == 1 &&
+          descriptor->GetReturnType(0) == MachineType::Int64()) {
+        // We access the additional return values through projections.
+        Node* low_node = graph()->NewNode(common()->Projection(0), node);
+        Node* high_node = graph()->NewNode(common()->Projection(1), node);
+        ReplaceNode(node, low_node, high_node);
+      }
+      break;
+    }
+    default: { DefaultLowering(node); }
+  }
+}
+
+bool Int64Lowering::DefaultLowering(Node* node) {
+  bool something_changed = false;
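+  // Iterate the value inputs in reverse so that inserting a high-word input
+  // at i + 1 does not shift value inputs that have not been visited yet.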
+  for (int i = NodeProperties::PastValueIndex(node) - 1; i >= 0; i--) {
+    Node* input = node->InputAt(i);
+    if (HasReplacementLow(input)) {
+      something_changed = true;
+      node->ReplaceInput(i, GetReplacementLow(input));
+    }
+    if (HasReplacementHigh(input)) {
+      something_changed = true;
+      node->InsertInput(zone(), i + 1, GetReplacementHigh(input));
+    }
+  }
+  return something_changed;
+}
+
+void Int64Lowering::ReplaceNode(Node* old, Node* new_low, Node* new_high) {
+  // If new_low == nullptr, then new_high must also be nullptr.
+  DCHECK(new_low != nullptr || new_high == nullptr);
+  replacements_[old->id()].low = new_low;
+  replacements_[old->id()].high = new_high;
+}
+
+bool Int64Lowering::HasReplacementLow(Node* node) {
+  return replacements_[node->id()].low != nullptr;
+}
+
+Node* Int64Lowering::GetReplacementLow(Node* node) {
+  Node* result = replacements_[node->id()].low;
+  DCHECK(result);
+  return result;
+}
+
+bool Int64Lowering::HasReplacementHigh(Node* node) {
+  return replacements_[node->id()].high != nullptr;
+}
+
+Node* Int64Lowering::GetReplacementHigh(Node* node) {
+  Node* result = replacements_[node->id()].high;
+  DCHECK(result);
+  return result;
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/int64-lowering.h b/src/compiler/int64-lowering.h
new file mode 100644
index 0000000..79a95dc
--- /dev/null
+++ b/src/compiler/int64-lowering.h
@@ -0,0 +1,63 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INT64_REDUCER_H_
+#define V8_COMPILER_INT64_REDUCER_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-marker.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Int64Lowering {
+ public:
+  Int64Lowering(Graph* graph, MachineOperatorBuilder* machine,
+                CommonOperatorBuilder* common, Zone* zone,
+                Signature<MachineRepresentation>* signature);
+
+  void LowerGraph();
+
+ private:
+  enum class State : uint8_t { kUnvisited, kOnStack, kInputsPushed, kVisited };
+
+  struct Replacement {
+    Node* low;
+    Node* high;
+  };
+
+  Zone* zone() const { return zone_; }
+  Graph* graph() const { return graph_; }
+  MachineOperatorBuilder* machine() const { return machine_; }
+  CommonOperatorBuilder* common() const { return common_; }
+  Signature<MachineRepresentation>* signature() const { return signature_; }
+
+  void LowerNode(Node* node);
+  bool DefaultLowering(Node* node);
+
+  void ReplaceNode(Node* old, Node* new_low, Node* new_high);
+  bool HasReplacementLow(Node* node);
+  Node* GetReplacementLow(Node* node);
+  bool HasReplacementHigh(Node* node);
+  Node* GetReplacementHigh(Node* node);
+
+  Zone* zone_;
+  Graph* const graph_;
+  MachineOperatorBuilder* machine_;
+  CommonOperatorBuilder* common_;
+  NodeMarker<State> state_;
+  ZoneStack<Node*> stack_;
+  Replacement* replacements_;
+  Signature<MachineRepresentation>* signature_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_INT64_REDUCER_H_
diff --git a/src/compiler/interpreter-assembler.cc b/src/compiler/interpreter-assembler.cc
deleted file mode 100644
index 7080d02..0000000
--- a/src/compiler/interpreter-assembler.cc
+++ /dev/null
@@ -1,751 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/interpreter-assembler.h"
-
-#include <ostream>
-
-#include "src/code-factory.h"
-#include "src/compiler/graph.h"
-#include "src/compiler/instruction-selector.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/pipeline.h"
-#include "src/compiler/raw-machine-assembler.h"
-#include "src/compiler/schedule.h"
-#include "src/frames.h"
-#include "src/interface-descriptors.h"
-#include "src/interpreter/bytecodes.h"
-#include "src/machine-type.h"
-#include "src/macro-assembler.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
-InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
-                                           interpreter::Bytecode bytecode)
-    : bytecode_(bytecode),
-      raw_assembler_(new RawMachineAssembler(
-          isolate, new (zone) Graph(zone),
-          Linkage::GetInterpreterDispatchDescriptor(zone),
-          MachineType::PointerRepresentation(),
-          InstructionSelector::SupportedMachineOperatorFlags())),
-      accumulator_(
-          raw_assembler_->Parameter(Linkage::kInterpreterAccumulatorParameter)),
-      bytecode_offset_(raw_assembler_->Parameter(
-          Linkage::kInterpreterBytecodeOffsetParameter)),
-      context_(
-          raw_assembler_->Parameter(Linkage::kInterpreterContextParameter)),
-      code_generated_(false) {}
-
-
-InterpreterAssembler::~InterpreterAssembler() {}
-
-
-Handle<Code> InterpreterAssembler::GenerateCode() {
-  DCHECK(!code_generated_);
-
-  // Disallow empty handlers that never return.
-  DCHECK_NE(0, graph()->end()->InputCount());
-
-  const char* bytecode_name = interpreter::Bytecodes::ToString(bytecode_);
-  Schedule* schedule = raw_assembler_->Export();
-  Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
-      isolate(), raw_assembler_->call_descriptor(), graph(), schedule,
-      Code::STUB, bytecode_name);
-
-#ifdef ENABLE_DISASSEMBLER
-  if (FLAG_trace_ignition_codegen) {
-    OFStream os(stdout);
-    code->Disassemble(bytecode_name, os);
-    os << std::flush;
-  }
-#endif
-
-  code_generated_ = true;
-  return code;
-}
-
-
-Node* InterpreterAssembler::GetAccumulator() { return accumulator_; }
-
-
-void InterpreterAssembler::SetAccumulator(Node* value) { accumulator_ = value; }
-
-
-Node* InterpreterAssembler::GetContext() { return context_; }
-
-
-void InterpreterAssembler::SetContext(Node* value) { context_ = value; }
-
-
-Node* InterpreterAssembler::BytecodeOffset() { return bytecode_offset_; }
-
-
-Node* InterpreterAssembler::RegisterFileRawPointer() {
-  return raw_assembler_->Parameter(Linkage::kInterpreterRegisterFileParameter);
-}
-
-
-Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
-  return raw_assembler_->Parameter(Linkage::kInterpreterBytecodeArrayParameter);
-}
-
-
-Node* InterpreterAssembler::DispatchTableRawPointer() {
-  return raw_assembler_->Parameter(Linkage::kInterpreterDispatchTableParameter);
-}
-
-
-Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
-  return IntPtrAdd(RegisterFileRawPointer(), RegisterFrameOffset(reg_index));
-}
-
-
-Node* InterpreterAssembler::LoadRegister(int offset) {
-  return raw_assembler_->Load(MachineType::AnyTagged(),
-                              RegisterFileRawPointer(), Int32Constant(offset));
-}
-
-
-Node* InterpreterAssembler::LoadRegister(interpreter::Register reg) {
-  return LoadRegister(reg.ToOperand() << kPointerSizeLog2);
-}
-
-
-Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
-  return WordShl(index, kPointerSizeLog2);
-}
-
-
-Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
-  return raw_assembler_->Load(MachineType::AnyTagged(),
-                              RegisterFileRawPointer(),
-                              RegisterFrameOffset(reg_index));
-}
-
-
-Node* InterpreterAssembler::StoreRegister(Node* value, int offset) {
-  return raw_assembler_->Store(MachineRepresentation::kTagged,
-                               RegisterFileRawPointer(), Int32Constant(offset),
-                               value, kNoWriteBarrier);
-}
-
-
-Node* InterpreterAssembler::StoreRegister(Node* value,
-                                          interpreter::Register reg) {
-  return StoreRegister(value, reg.ToOperand() << kPointerSizeLog2);
-}
-
-
-Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
-  return raw_assembler_->Store(
-      MachineRepresentation::kTagged, RegisterFileRawPointer(),
-      RegisterFrameOffset(reg_index), value, kNoWriteBarrier);
-}
-
-
-Node* InterpreterAssembler::NextRegister(Node* reg_index) {
-  // Register indexes are negative, so the next index is minus one.
-  return IntPtrAdd(reg_index, Int32Constant(-1));
-}
-
-
-Node* InterpreterAssembler::BytecodeOperand(int operand_index) {
-  DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
-  DCHECK_EQ(interpreter::OperandSize::kByte,
-            interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
-  return raw_assembler_->Load(
-      MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-      IntPtrAdd(BytecodeOffset(),
-                Int32Constant(interpreter::Bytecodes::GetOperandOffset(
-                    bytecode_, operand_index))));
-}
-
-
-Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
-  DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
-  DCHECK_EQ(interpreter::OperandSize::kByte,
-            interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
-  Node* load = raw_assembler_->Load(
-      MachineType::Int8(), BytecodeArrayTaggedPointer(),
-      IntPtrAdd(BytecodeOffset(),
-                Int32Constant(interpreter::Bytecodes::GetOperandOffset(
-                    bytecode_, operand_index))));
-  // Ensure that we sign extend to full pointer size
-  if (kPointerSize == 8) {
-    load = raw_assembler_->ChangeInt32ToInt64(load);
-  }
-  return load;
-}
-
-
-Node* InterpreterAssembler::BytecodeOperandShort(int operand_index) {
-  DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
-  DCHECK_EQ(interpreter::OperandSize::kShort,
-            interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
-  if (TargetSupportsUnalignedAccess()) {
-    return raw_assembler_->Load(
-        MachineType::Uint16(), BytecodeArrayTaggedPointer(),
-        IntPtrAdd(BytecodeOffset(),
-                  Int32Constant(interpreter::Bytecodes::GetOperandOffset(
-                      bytecode_, operand_index))));
-  } else {
-    int offset =
-        interpreter::Bytecodes::GetOperandOffset(bytecode_, operand_index);
-    Node* first_byte = raw_assembler_->Load(
-        MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-        IntPtrAdd(BytecodeOffset(), Int32Constant(offset)));
-    Node* second_byte = raw_assembler_->Load(
-        MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-        IntPtrAdd(BytecodeOffset(), Int32Constant(offset + 1)));
-#if V8_TARGET_LITTLE_ENDIAN
-    return raw_assembler_->WordOr(WordShl(second_byte, kBitsPerByte),
-                                  first_byte);
-#elif V8_TARGET_BIG_ENDIAN
-    return raw_assembler_->WordOr(WordShl(first_byte, kBitsPerByte),
-                                  second_byte);
-#else
-#error "Unknown Architecture"
-#endif
-  }
-}
-
-
-Node* InterpreterAssembler::BytecodeOperandShortSignExtended(
-    int operand_index) {
-  DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
-  DCHECK_EQ(interpreter::OperandSize::kShort,
-            interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
-  int operand_offset =
-      interpreter::Bytecodes::GetOperandOffset(bytecode_, operand_index);
-  Node* load;
-  if (TargetSupportsUnalignedAccess()) {
-    load = raw_assembler_->Load(
-        MachineType::Int16(), BytecodeArrayTaggedPointer(),
-        IntPtrAdd(BytecodeOffset(), Int32Constant(operand_offset)));
-  } else {
-#if V8_TARGET_LITTLE_ENDIAN
-    Node* hi_byte_offset = Int32Constant(operand_offset + 1);
-    Node* lo_byte_offset = Int32Constant(operand_offset);
-#elif V8_TARGET_BIG_ENDIAN
-    Node* hi_byte_offset = Int32Constant(operand_offset);
-    Node* lo_byte_offset = Int32Constant(operand_offset + 1);
-#else
-#error "Unknown Architecture"
-#endif
-    Node* hi_byte =
-        raw_assembler_->Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
-                             IntPtrAdd(BytecodeOffset(), hi_byte_offset));
-    Node* lo_byte =
-        raw_assembler_->Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-                             IntPtrAdd(BytecodeOffset(), lo_byte_offset));
-    hi_byte = raw_assembler_->Word32Shl(hi_byte, Int32Constant(kBitsPerByte));
-    load = raw_assembler_->Word32Or(hi_byte, lo_byte);
-  }
-
-  // Ensure that we sign extend to full pointer size
-  if (kPointerSize == 8) {
-    load = raw_assembler_->ChangeInt32ToInt64(load);
-  }
-  return load;
-}
-
-
-Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
-  switch (interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index)) {
-    case interpreter::OperandSize::kByte:
-      DCHECK_EQ(
-          interpreter::OperandType::kCount8,
-          interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
-      return BytecodeOperand(operand_index);
-    case interpreter::OperandSize::kShort:
-      DCHECK_EQ(
-          interpreter::OperandType::kCount16,
-          interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
-      return BytecodeOperandShort(operand_index);
-    default:
-      UNREACHABLE();
-      return nullptr;
-  }
-}
-
-
-Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
-  DCHECK_EQ(interpreter::OperandType::kImm8,
-            interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
-  return BytecodeOperandSignExtended(operand_index);
-}
-
-
-Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
-  switch (interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index)) {
-    case interpreter::OperandSize::kByte:
-      DCHECK_EQ(
-          interpreter::OperandType::kIdx8,
-          interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
-      return BytecodeOperand(operand_index);
-    case interpreter::OperandSize::kShort:
-      DCHECK_EQ(
-          interpreter::OperandType::kIdx16,
-          interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
-      return BytecodeOperandShort(operand_index);
-    default:
-      UNREACHABLE();
-      return nullptr;
-  }
-}
-
-
-Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
-  switch (interpreter::Bytecodes::GetOperandType(bytecode_, operand_index)) {
-    case interpreter::OperandType::kReg8:
-    case interpreter::OperandType::kRegPair8:
-    case interpreter::OperandType::kMaybeReg8:
-      DCHECK_EQ(
-          interpreter::OperandSize::kByte,
-          interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
-      return BytecodeOperandSignExtended(operand_index);
-    case interpreter::OperandType::kReg16:
-      DCHECK_EQ(
-          interpreter::OperandSize::kShort,
-          interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
-      return BytecodeOperandShortSignExtended(operand_index);
-    default:
-      UNREACHABLE();
-      return nullptr;
-  }
-}
-
-
-Node* InterpreterAssembler::Int32Constant(int value) {
-  return raw_assembler_->Int32Constant(value);
-}
-
-
-Node* InterpreterAssembler::IntPtrConstant(intptr_t value) {
-  return raw_assembler_->IntPtrConstant(value);
-}
-
-
-Node* InterpreterAssembler::NumberConstant(double value) {
-  return raw_assembler_->NumberConstant(value);
-}
-
-
-Node* InterpreterAssembler::HeapConstant(Handle<HeapObject> object) {
-  return raw_assembler_->HeapConstant(object);
-}
-
-
-Node* InterpreterAssembler::BooleanConstant(bool value) {
-  return raw_assembler_->BooleanConstant(value);
-}
-
-
-Node* InterpreterAssembler::SmiShiftBitsConstant() {
-  return Int32Constant(kSmiShiftSize + kSmiTagSize);
-}
-
-
-Node* InterpreterAssembler::SmiTag(Node* value) {
-  return raw_assembler_->WordShl(value, SmiShiftBitsConstant());
-}
-
-
-Node* InterpreterAssembler::SmiUntag(Node* value) {
-  return raw_assembler_->WordSar(value, SmiShiftBitsConstant());
-}
-
-
-Node* InterpreterAssembler::IntPtrAdd(Node* a, Node* b) {
-  return raw_assembler_->IntPtrAdd(a, b);
-}
-
-
-Node* InterpreterAssembler::IntPtrSub(Node* a, Node* b) {
-  return raw_assembler_->IntPtrSub(a, b);
-}
-
-
-Node* InterpreterAssembler::WordShl(Node* value, int shift) {
-  return raw_assembler_->WordShl(value, Int32Constant(shift));
-}
-
-
-Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
-  Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
-                                        BytecodeArray::kConstantPoolOffset);
-  Node* entry_offset =
-      IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
-                WordShl(index, kPointerSizeLog2));
-  return raw_assembler_->Load(MachineType::AnyTagged(), constant_pool,
-                              entry_offset);
-}
-
-
-Node* InterpreterAssembler::LoadFixedArrayElement(Node* fixed_array,
-                                                  int index) {
-  Node* entry_offset =
-      IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
-                WordShl(Int32Constant(index), kPointerSizeLog2));
-  return raw_assembler_->Load(MachineType::AnyTagged(), fixed_array,
-                              entry_offset);
-}
-
-
-Node* InterpreterAssembler::LoadObjectField(Node* object, int offset) {
-  return raw_assembler_->Load(MachineType::AnyTagged(), object,
-                              IntPtrConstant(offset - kHeapObjectTag));
-}
-
-
-Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
-  return raw_assembler_->Load(MachineType::AnyTagged(), context,
-                              IntPtrConstant(Context::SlotOffset(slot_index)));
-}
-
-
-Node* InterpreterAssembler::LoadContextSlot(Node* context, Node* slot_index) {
-  Node* offset =
-      IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
-                Int32Constant(Context::kHeaderSize - kHeapObjectTag));
-  return raw_assembler_->Load(MachineType::AnyTagged(), context, offset);
-}
-
-
-Node* InterpreterAssembler::StoreContextSlot(Node* context, Node* slot_index,
-                                             Node* value) {
-  Node* offset =
-      IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
-                Int32Constant(Context::kHeaderSize - kHeapObjectTag));
-  return raw_assembler_->Store(MachineRepresentation::kTagged, context, offset,
-                               value, kFullWriteBarrier);
-}
-
-
-Node* InterpreterAssembler::LoadTypeFeedbackVector() {
-  Node* function = raw_assembler_->Load(
-      MachineType::AnyTagged(), RegisterFileRawPointer(),
-      IntPtrConstant(InterpreterFrameConstants::kFunctionFromRegisterPointer));
-  Node* shared_info =
-      LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
-  Node* vector =
-      LoadObjectField(shared_info, SharedFunctionInfo::kFeedbackVectorOffset);
-  return vector;
-}
-
-
-Node* InterpreterAssembler::Projection(int index, Node* node) {
-  return raw_assembler_->Projection(index, node);
-}
-
-
-Node* InterpreterAssembler::CallConstruct(Node* new_target, Node* constructor,
-                                          Node* first_arg, Node* arg_count) {
-  Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(isolate());
-  CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags);
-
-  Node* code_target = HeapConstant(callable.code());
-
-  Node** args = zone()->NewArray<Node*>(5);
-  args[0] = arg_count;
-  args[1] = new_target;
-  args[2] = constructor;
-  args[3] = first_arg;
-  args[4] = GetContext();
-
-  return CallN(descriptor, code_target, args);
-}
-
-
-void InterpreterAssembler::CallPrologue() {
-  StoreRegister(SmiTag(bytecode_offset_),
-                InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer);
-}
-
-
-void InterpreterAssembler::CallEpilogue() {
-  // Restore the bytecode offset from the stack frame.
-  bytecode_offset_ = SmiUntag(LoadRegister(
-      InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
-}
-
-
-Node* InterpreterAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
-                                  Node** args) {
-  CallPrologue();
-
-  Node* stack_pointer_before_call = nullptr;
-  if (FLAG_debug_code) {
-    stack_pointer_before_call = raw_assembler_->LoadStackPointer();
-  }
-  Node* return_val = raw_assembler_->CallN(descriptor, code_target, args);
-  if (FLAG_debug_code) {
-    Node* stack_pointer_after_call = raw_assembler_->LoadStackPointer();
-    AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
-                        kUnexpectedStackPointer);
-  }
-
-  CallEpilogue();
-  return return_val;
-}
-
-
-Node* InterpreterAssembler::CallJS(Node* function, Node* first_arg,
-                                   Node* arg_count) {
-  Callable callable = CodeFactory::InterpreterPushArgsAndCall(isolate());
-  CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags);
-
-  Node* code_target = HeapConstant(callable.code());
-
-  Node** args = zone()->NewArray<Node*>(4);
-  args[0] = arg_count;
-  args[1] = first_arg;
-  args[2] = function;
-  args[3] = GetContext();
-
-  return CallN(descriptor, code_target, args);
-}
-
-
-Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
-                                   Node* target, Node** args) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, 0, CallDescriptor::kNoFlags);
-  return CallN(call_descriptor, target, args);
-}
-
-
-Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
-                                   Node* target, Node* arg1, Node* arg2,
-                                   Node* arg3) {
-  Node** args = zone()->NewArray<Node*>(4);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = arg3;
-  args[3] = GetContext();
-  return CallIC(descriptor, target, args);
-}
-
-
-Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
-                                   Node* target, Node* arg1, Node* arg2,
-                                   Node* arg3, Node* arg4) {
-  Node** args = zone()->NewArray<Node*>(5);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = arg3;
-  args[3] = arg4;
-  args[4] = GetContext();
-  return CallIC(descriptor, target, args);
-}
-
-
-Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
-                                   Node* target, Node* arg1, Node* arg2,
-                                   Node* arg3, Node* arg4, Node* arg5) {
-  Node** args = zone()->NewArray<Node*>(6);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = arg3;
-  args[3] = arg4;
-  args[4] = arg5;
-  args[5] = GetContext();
-  return CallIC(descriptor, target, args);
-}
-
-
-Node* InterpreterAssembler::CallRuntime(Node* function_id, Node* first_arg,
-                                        Node* arg_count, int result_size) {
-  Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
-  CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags,
-      Operator::kNoProperties, MachineType::AnyTagged(), result_size);
-  Node* code_target = HeapConstant(callable.code());
-
-  // Get the function entry from the function id.
-  Node* function_table = raw_assembler_->ExternalConstant(
-      ExternalReference::runtime_function_table_address(isolate()));
-  Node* function_offset = raw_assembler_->Int32Mul(
-      function_id, Int32Constant(sizeof(Runtime::Function)));
-  Node* function = IntPtrAdd(function_table, function_offset);
-  Node* function_entry =
-      raw_assembler_->Load(MachineType::Pointer(), function,
-                           Int32Constant(offsetof(Runtime::Function, entry)));
-
-  Node** args = zone()->NewArray<Node*>(4);
-  args[0] = arg_count;
-  args[1] = first_arg;
-  args[2] = function_entry;
-  args[3] = GetContext();
-
-  return CallN(descriptor, code_target, args);
-}
-
-
-Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
-                                        Node* arg1) {
-  CallPrologue();
-  Node* return_val =
-      raw_assembler_->CallRuntime1(function_id, arg1, GetContext());
-  CallEpilogue();
-  return return_val;
-}
-
-
-Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
-                                        Node* arg1, Node* arg2) {
-  CallPrologue();
-  Node* return_val =
-      raw_assembler_->CallRuntime2(function_id, arg1, arg2, GetContext());
-  CallEpilogue();
-  return return_val;
-}
-
-
-Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
-                                        Node* arg1, Node* arg2, Node* arg3,
-                                        Node* arg4) {
-  CallPrologue();
-  Node* return_val = raw_assembler_->CallRuntime4(function_id, arg1, arg2, arg3,
-                                                  arg4, GetContext());
-  CallEpilogue();
-  return return_val;
-}
-
-
-void InterpreterAssembler::Return() {
-  Node* exit_trampoline_code_object =
-      HeapConstant(isolate()->builtins()->InterpreterExitTrampoline());
-  // If the order of the parameters changes, you need to change the call
-  // signature below.
-  STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
-  STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
-  STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
-  STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
-  STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
-  STATIC_ASSERT(5 == Linkage::kInterpreterContextParameter);
-  Node* args[] = { GetAccumulator(),
-                   RegisterFileRawPointer(),
-                   BytecodeOffset(),
-                   BytecodeArrayTaggedPointer(),
-                   DispatchTableRawPointer(),
-                   GetContext() };
-  raw_assembler_->TailCallN(call_descriptor(), exit_trampoline_code_object,
-                            args);
-}
-
-
-Node* InterpreterAssembler::Advance(int delta) {
-  return IntPtrAdd(BytecodeOffset(), Int32Constant(delta));
-}
-
-
-Node* InterpreterAssembler::Advance(Node* delta) {
-  return raw_assembler_->IntPtrAdd(BytecodeOffset(), delta);
-}
-
-
-void InterpreterAssembler::Jump(Node* delta) { DispatchTo(Advance(delta)); }
-
-
-void InterpreterAssembler::JumpIfWordEqual(Node* lhs, Node* rhs, Node* delta) {
-  RawMachineLabel match, no_match;
-  Node* condition = raw_assembler_->WordEqual(lhs, rhs);
-  raw_assembler_->Branch(condition, &match, &no_match);
-  raw_assembler_->Bind(&match);
-  DispatchTo(Advance(delta));
-  raw_assembler_->Bind(&no_match);
-  Dispatch();
-}
-
-
-void InterpreterAssembler::Dispatch() {
-  DispatchTo(Advance(interpreter::Bytecodes::Size(bytecode_)));
-}
-
-
-void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
-  Node* target_bytecode = raw_assembler_->Load(
-      MachineType::Uint8(), BytecodeArrayTaggedPointer(), new_bytecode_offset);
-
-  // TODO(rmcilroy): Create a code target dispatch table to avoid conversion
-  // from code object on every dispatch.
-  Node* target_code_object = raw_assembler_->Load(
-      MachineType::Pointer(), DispatchTableRawPointer(),
-      raw_assembler_->Word32Shl(target_bytecode,
-                                Int32Constant(kPointerSizeLog2)));
-
-  // If the order of the parameters changes, you need to change the call
-  // signature below.
-  STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
-  STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
-  STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
-  STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
-  STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
-  STATIC_ASSERT(5 == Linkage::kInterpreterContextParameter);
-  Node* args[] = { GetAccumulator(),
-                   RegisterFileRawPointer(),
-                   new_bytecode_offset,
-                   BytecodeArrayTaggedPointer(),
-                   DispatchTableRawPointer(),
-                   GetContext() };
-  raw_assembler_->TailCallN(call_descriptor(), target_code_object, args);
-}
-
-
-void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
-  Node* abort_id = SmiTag(Int32Constant(bailout_reason));
-  Node* ret_value = CallRuntime(Runtime::kAbort, abort_id);
-  // Unreached, but keeps turbofan happy.
-  raw_assembler_->Return(ret_value);
-}
-
-
-void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
-                                               BailoutReason bailout_reason) {
-  RawMachineLabel match, no_match;
-  Node* condition = raw_assembler_->WordEqual(lhs, rhs);
-  raw_assembler_->Branch(condition, &match, &no_match);
-  raw_assembler_->Bind(&no_match);
-  Abort(bailout_reason);
-  raw_assembler_->Bind(&match);
-}
-
-
-// static
-bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
-#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-  return false;
-#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
-  return CpuFeatures::IsSupported(UNALIGNED_ACCESSES);
-#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87
-  return true;
-#else
-#error "Unknown Architecture"
-#endif
-}
-
-
-// RawMachineAssembler delegate helpers:
-Isolate* InterpreterAssembler::isolate() { return raw_assembler_->isolate(); }
-
-
-Graph* InterpreterAssembler::graph() { return raw_assembler_->graph(); }
-
-
-CallDescriptor* InterpreterAssembler::call_descriptor() const {
-  return raw_assembler_->call_descriptor();
-}
-
-
-Zone* InterpreterAssembler::zone() { return raw_assembler_->zone(); }
-
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/interpreter-assembler.h b/src/compiler/interpreter-assembler.h
deleted file mode 100644
index fb79d3e..0000000
--- a/src/compiler/interpreter-assembler.h
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_INTERPRETER_ASSEMBLER_H_
-#define V8_COMPILER_INTERPRETER_ASSEMBLER_H_
-
-// Clients of this interface shouldn't depend on lots of compiler internals.
-// Do not include anything from src/compiler here!
-#include "src/allocation.h"
-#include "src/base/smart-pointers.h"
-#include "src/builtins.h"
-#include "src/frames.h"
-#include "src/interpreter/bytecodes.h"
-#include "src/runtime/runtime.h"
-
-namespace v8 {
-namespace internal {
-
-class CallInterfaceDescriptor;
-class Isolate;
-class Zone;
-
-namespace compiler {
-
-class CallDescriptor;
-class Graph;
-class Node;
-class Operator;
-class RawMachineAssembler;
-class Schedule;
-
-class InterpreterAssembler {
- public:
-  InterpreterAssembler(Isolate* isolate, Zone* zone,
-                       interpreter::Bytecode bytecode);
-  virtual ~InterpreterAssembler();
-
-  Handle<Code> GenerateCode();
-
-  // Returns the count immediate for bytecode operand |operand_index| in the
-  // current bytecode.
-  Node* BytecodeOperandCount(int operand_index);
-  // Returns the index immediate for bytecode operand |operand_index| in the
-  // current bytecode.
-  Node* BytecodeOperandIdx(int operand_index);
-  // Returns the Imm8 immediate for bytecode operand |operand_index| in the
-  // current bytecode.
-  Node* BytecodeOperandImm(int operand_index);
-  // Returns the register index for bytecode operand |operand_index| in the
-  // current bytecode.
-  Node* BytecodeOperandReg(int operand_index);
-
-  // Accumulator.
-  Node* GetAccumulator();
-  void SetAccumulator(Node* value);
-
-  // Context.
-  Node* GetContext();
-  void SetContext(Node* value);
-
-  // Loads from and stores to the interpreter register file.
-  Node* LoadRegister(int offset);
-  Node* LoadRegister(interpreter::Register reg);
-  Node* LoadRegister(Node* reg_index);
-  Node* StoreRegister(Node* value, int offset);
-  Node* StoreRegister(Node* value, interpreter::Register reg);
-  Node* StoreRegister(Node* value, Node* reg_index);
-
-  // Returns the next consecutive register.
-  Node* NextRegister(Node* reg_index);
-
-  // Returns the location in memory of the register |reg_index| in the
-  // interpreter register file.
-  Node* RegisterLocation(Node* reg_index);
-
-  // Constants.
-  Node* Int32Constant(int value);
-  Node* IntPtrConstant(intptr_t value);
-  Node* NumberConstant(double value);
-  Node* HeapConstant(Handle<HeapObject> object);
-  Node* BooleanConstant(bool value);
-
-  // Tag and untag Smi values.
-  Node* SmiTag(Node* value);
-  Node* SmiUntag(Node* value);
-
-  // Basic arithmetic operations.
-  Node* IntPtrAdd(Node* a, Node* b);
-  Node* IntPtrSub(Node* a, Node* b);
-  Node* WordShl(Node* value, int shift);
-
-  // Load constant at |index| in the constant pool.
-  Node* LoadConstantPoolEntry(Node* index);
-
-  // Load an element from a fixed array on the heap.
-  Node* LoadFixedArrayElement(Node* fixed_array, int index);
-
-  // Load a field from an object on the heap.
-  Node* LoadObjectField(Node* object, int offset);
-
-  // Load |slot_index| from |context|.
-  Node* LoadContextSlot(Node* context, int slot_index);
-  Node* LoadContextSlot(Node* context, Node* slot_index);
-  // Stores |value| into |slot_index| of |context|.
-  Node* StoreContextSlot(Node* context, Node* slot_index, Node* value);
-
-  // Load the TypeFeedbackVector for the current function.
-  Node* LoadTypeFeedbackVector();
-
-  // Project the output value at index |index|
-  Node* Projection(int index, Node* node);
-
-  // Call constructor |constructor| with |arg_count| arguments (not
-  // including receiver) and the first argument located at
-  // |first_arg|. The |new_target| is the same as the
-  // |constructor| for the new keyword, but differs for the super
-  // keyword.
-  Node* CallConstruct(Node* new_target, Node* constructor, Node* first_arg,
-                      Node* arg_count);
-
-  // Call JSFunction or Callable |function| with |arg_count|
-  // arguments (not including receiver) and the first argument
-  // located at |first_arg|.
-  Node* CallJS(Node* function, Node* first_arg, Node* arg_count);
-
-  // Call an IC code stub.
-  Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node* arg1,
-               Node* arg2, Node* arg3);
-  Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node* arg1,
-               Node* arg2, Node* arg3, Node* arg4);
-  Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node* arg1,
-               Node* arg2, Node* arg3, Node* arg4, Node* arg5);
-
-  // Call runtime function.
-  Node* CallRuntime(Node* function_id, Node* first_arg, Node* arg_count,
-                    int return_size = 1);
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1);
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1, Node* arg2);
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1, Node* arg2,
-                    Node* arg3, Node* arg4);
-
-  // Jump relative to the current bytecode by |jump_offset|.
-  void Jump(Node* jump_offset);
-
-  // Jump relative to the current bytecode by |jump_offset| if the
-  // word values |lhs| and |rhs| are equal.
-  void JumpIfWordEqual(Node* lhs, Node* rhs, Node* jump_offset);
-
-  // Returns from the function.
-  void Return();
-
-  // Dispatch to the bytecode.
-  void Dispatch();
-
-  // Abort with the given bailout reason.
-  void Abort(BailoutReason bailout_reason);
-
- protected:
-  static bool TargetSupportsUnalignedAccess();
-
-  // Protected helpers (for testing) which delegate to RawMachineAssembler.
-  CallDescriptor* call_descriptor() const;
-  Graph* graph();
-
- private:
-  // Returns a raw pointer to start of the register file on the stack.
-  Node* RegisterFileRawPointer();
-  // Returns a tagged pointer to the current function's BytecodeArray object.
-  Node* BytecodeArrayTaggedPointer();
-  // Returns the offset from the BytecodeArrayPointer of the current bytecode.
-  Node* BytecodeOffset();
-  // Returns a raw pointer to first entry in the interpreter dispatch table.
-  Node* DispatchTableRawPointer();
-
-  // Saves and restores interpreter bytecode offset to the interpreter stack
-  // frame when performing a call.
-  void CallPrologue();
-  void CallEpilogue();
-
-  // Returns the offset of register |index| relative to RegisterFilePointer().
-  Node* RegisterFrameOffset(Node* index);
-
-  Node* SmiShiftBitsConstant();
-  Node* BytecodeOperand(int operand_index);
-  Node* BytecodeOperandSignExtended(int operand_index);
-  Node* BytecodeOperandShort(int operand_index);
-  Node* BytecodeOperandShortSignExtended(int operand_index);
-
-  Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
-  Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node** args);
-
-  // Returns BytecodeOffset() advanced by delta bytecodes. Note: this does not
-  // update BytecodeOffset() itself.
-  Node* Advance(int delta);
-  Node* Advance(Node* delta);
-
-  // Starts next instruction dispatch at |new_bytecode_offset|.
-  void DispatchTo(Node* new_bytecode_offset);
-
-  // Abort operations for debug code.
-  void AbortIfWordNotEqual(Node* lhs, Node* rhs, BailoutReason bailout_reason);
-
-  // Private helpers which delegate to RawMachineAssembler.
-  Isolate* isolate();
-  Zone* zone();
-
-  interpreter::Bytecode bytecode_;
-  base::SmartPointer<RawMachineAssembler> raw_assembler_;
-
-  Node* accumulator_;
-  Node* bytecode_offset_;
-  Node* context_;
-
-  bool code_generated_;
-
-  DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);
-};
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_COMPILER_INTERPRETER_ASSEMBLER_H_
diff --git a/src/compiler/ir-operations.txt b/src/compiler/ir-operations.txt
deleted file mode 100644
index e69de29..0000000
--- a/src/compiler/ir-operations.txt
+++ /dev/null
diff --git a/src/compiler/js-builtin-reducer.cc b/src/compiler/js-builtin-reducer.cc
index a7a7da5..3023031 100644
--- a/src/compiler/js-builtin-reducer.cc
+++ b/src/compiler/js-builtin-reducer.cc
@@ -8,6 +8,7 @@
 #include "src/compiler/node-properties.h"
 #include "src/compiler/simplified-operator.h"
 #include "src/objects-inl.h"
+#include "src/type-cache.h"
 #include "src/types.h"
 
 namespace v8 {
@@ -85,10 +86,10 @@
   Node* node_;
 };
 
-
 JSBuiltinReducer::JSBuiltinReducer(Editor* editor, JSGraph* jsgraph)
-    : AdvancedReducer(editor), jsgraph_(jsgraph) {}
-
+    : AdvancedReducer(editor),
+      jsgraph_(jsgraph),
+      type_cache_(TypeCache::Get()) {}
 
 // ECMA-262, section 15.8.2.11.
 Reduction JSBuiltinReducer::ReduceMathMax(Node* node) {
@@ -141,6 +142,31 @@
   return NoChange();
 }
 
+// ES6 section 20.2.2.28 Math.round ( x )
+Reduction JSBuiltinReducer::ReduceMathRound(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(type_cache_.kIntegerOrMinusZeroOrNaN)) {
+    // Math.round(a:integer \/ -0 \/ NaN) -> a
+    return Replace(r.left());
+  }
+  if (r.InputsMatchOne(Type::Number()) &&
+      machine()->Float64RoundUp().IsSupported()) {
+    // Math.round(a:number) -> Select(Float64LessThan(#0.5, Float64Sub(i, a)),
+    //                                Float64Sub(i, #1.0), i)
+    //   where i = Float64RoundUp(a)
+    Node* value = r.left();
+    Node* integer = graph()->NewNode(machine()->Float64RoundUp().op(), value);
+    Node* real = graph()->NewNode(machine()->Float64Sub(), integer, value);
+    return Replace(graph()->NewNode(
+        common()->Select(MachineRepresentation::kFloat64),
+        graph()->NewNode(machine()->Float64LessThan(),
+                         jsgraph()->Float64Constant(0.5), real),
+        graph()->NewNode(machine()->Float64Sub(), integer,
+                         jsgraph()->Float64Constant(1.0)),
+        integer));
+  }
+  return NoChange();
+}
 
 Reduction JSBuiltinReducer::Reduce(Node* node) {
   Reduction reduction = NoChange();
@@ -158,6 +184,9 @@
     case kMathFround:
       reduction = ReduceMathFround(node);
       break;
+    case kMathRound:
+      reduction = ReduceMathRound(node);
+      break;
     default:
       break;
   }
diff --git a/src/compiler/js-builtin-reducer.h b/src/compiler/js-builtin-reducer.h
index cfacdc1..b64b335 100644
--- a/src/compiler/js-builtin-reducer.h
+++ b/src/compiler/js-builtin-reducer.h
@@ -9,6 +9,10 @@
 
 namespace v8 {
 namespace internal {
+
+// Forward declarations.
+class TypeCache;
+
 namespace compiler {
 
 // Forward declarations.
@@ -30,6 +34,7 @@
   Reduction ReduceMathMax(Node* node);
   Reduction ReduceMathImul(Node* node);
   Reduction ReduceMathFround(Node* node);
+  Reduction ReduceMathRound(Node* node);
 
   Graph* graph() const;
   JSGraph* jsgraph() const { return jsgraph_; }
@@ -38,7 +43,8 @@
   MachineOperatorBuilder* machine() const;
   SimplifiedOperatorBuilder* simplified() const;
 
-  JSGraph* jsgraph_;
+  JSGraph* const jsgraph_;
+  TypeCache const& type_cache_;
 };
 
 }  // namespace compiler
diff --git a/src/compiler/js-call-reducer.cc b/src/compiler/js-call-reducer.cc
index a15d6fd..34217e7 100644
--- a/src/compiler/js-call-reducer.cc
+++ b/src/compiler/js-call-reducer.cc
@@ -129,8 +129,7 @@
     // Get to the actual frame state from which to extract the arguments;
     // we can only optimize this in case the {node} was already inlined into
     // some other function (and same for the {arg_array}).
-    CreateArgumentsParameters const& p =
-        CreateArgumentsParametersOf(arg_array->op());
+    CreateArgumentsType type = CreateArgumentsTypeOf(arg_array->op());
     Node* frame_state = NodeProperties::GetFrameStateInput(arg_array, 0);
     Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
     if (outer_state->opcode() != IrOpcode::kFrameState) return NoChange();
@@ -140,17 +139,22 @@
       frame_state = outer_state;
     }
     FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
-    if (p.type() == CreateArgumentsParameters::kMappedArguments) {
+    int start_index = 0;
+    if (type == CreateArgumentsType::kMappedArguments) {
       // Mapped arguments (sloppy mode) cannot be handled if they are aliased.
       Handle<SharedFunctionInfo> shared;
       if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
       if (shared->internal_formal_parameter_count() != 0) return NoChange();
+    } else if (type == CreateArgumentsType::kRestParameter) {
+      Handle<SharedFunctionInfo> shared;
+      if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+      start_index = shared->internal_formal_parameter_count();
     }
     // Remove the argArray input from the {node}.
     node->RemoveInput(static_cast<int>(--arity));
     // Add the actual parameters to the {node}, skipping the receiver.
     Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
-    for (int i = p.start_index() + 1; i < state_info.parameter_count(); ++i) {
+    for (int i = start_index + 1; i < state_info.parameter_count(); ++i) {
       node->InsertInput(graph()->zone(), static_cast<int>(arity),
                         parameters->InputAt(i));
       ++arity;
@@ -163,8 +167,7 @@
   }
   // Change {node} to the new {JSCallFunction} operator.
   NodeProperties::ChangeOp(
-      node, javascript()->CallFunction(arity, p.language_mode(),
-                                       CallCountFeedback(p.feedback()),
+      node, javascript()->CallFunction(arity, CallCountFeedback(p.feedback()),
                                        convert_mode, p.tail_call_mode()));
   // Change context of {node} to the Function.prototype.apply context,
   // to ensure any exception is thrown in the correct context.
@@ -204,8 +207,7 @@
     --arity;
   }
   NodeProperties::ChangeOp(
-      node, javascript()->CallFunction(arity, p.language_mode(),
-                                       CallCountFeedback(p.feedback()),
+      node, javascript()->CallFunction(arity, CallCountFeedback(p.feedback()),
                                        convert_mode, p.tail_call_mode()));
   // Try to further reduce the JSCallFunction {node}.
   Reduction const reduction = ReduceJSCallFunction(node);
@@ -287,10 +289,9 @@
             jsgraph()->Constant(handle(bound_arguments->get(i), isolate())));
         arity++;
       }
-      NodeProperties::ChangeOp(
-          node, javascript()->CallFunction(arity, p.language_mode(),
-                                           CallCountFeedback(p.feedback()),
-                                           convert_mode, p.tail_call_mode()));
+      NodeProperties::ChangeOp(node, javascript()->CallFunction(
+                                         arity, CallCountFeedback(p.feedback()),
+                                         convert_mode, p.tail_call_mode()));
       // Try to further reduce the JSCallFunction {node}.
       Reduction const reduction = ReduceJSCallFunction(node);
       return reduction.Changed() ? reduction : Changed(node);
@@ -336,6 +337,7 @@
                          frame_state, effect, if_false);
     // TODO(bmeurer): This should be on the AdvancedReducer somehow.
     NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+    Revisit(graph()->end());
     control = graph()->NewNode(common()->IfTrue(), branch);
 
     // Turn the {node} into a {JSCreateArray} call.
@@ -361,6 +363,7 @@
                            frame_state, effect, if_false);
       // TODO(bmeurer): This should be on the AdvancedReducer somehow.
       NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+      Revisit(graph()->end());
       control = graph()->NewNode(common()->IfTrue(), branch);
 
       // Specialize the JSCallFunction node to the {target_function}.
@@ -404,8 +407,7 @@
         NodeProperties::RemoveFrameStateInput(node, 0);
         NodeProperties::ReplaceValueInputs(node, target);
         NodeProperties::ChangeOp(
-            node,
-            javascript()->CallRuntime(Runtime::kThrowCalledNonCallable, 1));
+            node, javascript()->CallRuntime(Runtime::kThrowCalledNonCallable));
         return Changed(node);
       }
 
@@ -479,6 +481,7 @@
                          frame_state, effect, if_false);
     // TODO(bmeurer): This should be on the AdvancedReducer somehow.
     NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+    Revisit(graph()->end());
     control = graph()->NewNode(common()->IfTrue(), branch);
 
     // Turn the {node} into a {JSCreateArray} call.
@@ -510,6 +513,7 @@
                            frame_state, effect, if_false);
       // TODO(bmeurer): This should be on the AdvancedReducer somehow.
       NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+      Revisit(graph()->end());
       control = graph()->NewNode(common()->IfTrue(), branch);
 
       // Specialize the JSCallConstruct node to the {target_function}.
diff --git a/src/compiler/js-call-reducer.h b/src/compiler/js-call-reducer.h
index 9ffae15..f40f05d 100644
--- a/src/compiler/js-call-reducer.h
+++ b/src/compiler/js-call-reducer.h
@@ -20,7 +20,7 @@
 
 // Performs strength reduction on {JSCallConstruct} and {JSCallFunction} nodes,
 // which might allow inlining or other optimizations to be performed afterwards.
-class JSCallReducer final : public Reducer {
+class JSCallReducer final : public AdvancedReducer {
  public:
   // Flags that control the mode of operation.
   enum Flag {
@@ -29,9 +29,12 @@
   };
   typedef base::Flags<Flag> Flags;
 
-  JSCallReducer(JSGraph* jsgraph, Flags flags,
+  JSCallReducer(Editor* editor, JSGraph* jsgraph, Flags flags,
                 MaybeHandle<Context> native_context)
-      : jsgraph_(jsgraph), flags_(flags), native_context_(native_context) {}
+      : AdvancedReducer(editor),
+        jsgraph_(jsgraph),
+        flags_(flags),
+        native_context_(native_context) {}
 
   Reduction Reduce(Node* node) final;
 
diff --git a/src/compiler/js-context-relaxation.cc b/src/compiler/js-context-relaxation.cc
deleted file mode 100644
index 0ca3c0c..0000000
--- a/src/compiler/js-context-relaxation.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/frame-states.h"
-#include "src/compiler/js-context-relaxation.h"
-#include "src/compiler/js-operator.h"
-#include "src/compiler/node.h"
-#include "src/compiler/node-properties.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-Reduction JSContextRelaxation::Reduce(Node* node) {
-  switch (node->opcode()) {
-    case IrOpcode::kJSCallFunction:
-    case IrOpcode::kJSToNumber: {
-      Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
-      Node* outer_frame = frame_state;
-      Node* original_context = NodeProperties::GetContextInput(node);
-      Node* candidate_new_context = original_context;
-      do {
-        FrameStateInfo frame_state_info(
-            OpParameter<FrameStateInfo>(outer_frame->op()));
-        const FrameStateFunctionInfo* function_info =
-            frame_state_info.function_info();
-        if (function_info == nullptr ||
-            (function_info->context_calling_mode() ==
-             CALL_CHANGES_NATIVE_CONTEXT)) {
-          break;
-        }
-        candidate_new_context = outer_frame->InputAt(kFrameStateContextInput);
-        outer_frame = outer_frame->InputAt(kFrameStateOuterStateInput);
-      } while (outer_frame->opcode() == IrOpcode::kFrameState);
-
-      while (true) {
-        switch (candidate_new_context->opcode()) {
-          case IrOpcode::kParameter:
-          case IrOpcode::kJSCreateModuleContext:
-          case IrOpcode::kJSCreateScriptContext:
-            if (candidate_new_context != original_context) {
-              NodeProperties::ReplaceContextInput(node, candidate_new_context);
-              return Changed(node);
-            } else {
-              return NoChange();
-            }
-          case IrOpcode::kJSCreateCatchContext:
-          case IrOpcode::kJSCreateWithContext:
-          case IrOpcode::kJSCreateBlockContext:
-            candidate_new_context =
-                NodeProperties::GetContextInput(candidate_new_context);
-            break;
-          default:
-            return NoChange();
-        }
-      }
-    }
-    default:
-      break;
-  }
-  return NoChange();
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/js-context-relaxation.h b/src/compiler/js-context-relaxation.h
deleted file mode 100644
index 4320e92..0000000
--- a/src/compiler/js-context-relaxation.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_JS_CONTEXT_RELAXATION_H_
-#define V8_COMPILER_JS_CONTEXT_RELAXATION_H_
-
-#include "src/compiler/graph-reducer.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// Ensures that operations that only need to access the native context use the
-// outer-most context rather than the specific context given by the AST graph
-// builder. This makes it possible to use these operations with context
-// specialization (e.g. for generating stubs) without forcing inner contexts to
-// be embedded in generated code thus causing leaks and potentially using the
-// wrong native context (i.e. stubs are shared between native contexts).
-class JSContextRelaxation final : public Reducer {
- public:
-  JSContextRelaxation() {}
-  ~JSContextRelaxation() final {}
-
-  Reduction Reduce(Node* node) final;
-};
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_COMPILER_JS_CONTEXT_RELAXATION_H_
diff --git a/src/compiler/js-create-lowering.cc b/src/compiler/js-create-lowering.cc
new file mode 100644
index 0000000..df5c8d0
--- /dev/null
+++ b/src/compiler/js-create-lowering.cc
@@ -0,0 +1,1096 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-create-lowering.h"
+
+#include "src/allocation-site-scopes.h"
+#include "src/code-factory.h"
+#include "src/compilation-dependencies.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/state-values-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+// A helper class to construct inline allocations on the simplified operator
+// level. This keeps track of the effect chain for initial stores on a newly
+// allocated object and also provides helpers for commonly allocated objects.
+class AllocationBuilder final {
+ public:
+  AllocationBuilder(JSGraph* jsgraph, Node* effect, Node* control)
+      : jsgraph_(jsgraph),
+        allocation_(nullptr),
+        effect_(effect),
+        control_(control) {}
+
+  // Primitive allocation of static size.
+  void Allocate(int size, PretenureFlag pretenure = NOT_TENURED) {
+    effect_ = graph()->NewNode(common()->BeginRegion(), effect_);
+    allocation_ =
+        graph()->NewNode(simplified()->Allocate(pretenure),
+                         jsgraph()->Constant(size), effect_, control_);
+    effect_ = allocation_;
+  }
+
+  // Primitive store into a field.
+  void Store(const FieldAccess& access, Node* value) {
+    effect_ = graph()->NewNode(simplified()->StoreField(access), allocation_,
+                               value, effect_, control_);
+  }
+
+  // Primitive store into an element.
+  void Store(ElementAccess const& access, Node* index, Node* value) {
+    effect_ = graph()->NewNode(simplified()->StoreElement(access), allocation_,
+                               index, value, effect_, control_);
+  }
+
+  // Compound allocation of a FixedArray.
+  void AllocateArray(int length, Handle<Map> map,
+                     PretenureFlag pretenure = NOT_TENURED) {
+    DCHECK(map->instance_type() == FIXED_ARRAY_TYPE ||
+           map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE);
+    int size = (map->instance_type() == FIXED_ARRAY_TYPE)
+                   ? FixedArray::SizeFor(length)
+                   : FixedDoubleArray::SizeFor(length);
+    Allocate(size, pretenure);
+    Store(AccessBuilder::ForMap(), map);
+    Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
+  }
+
+  // Compound store of a constant into a field.
+  void Store(const FieldAccess& access, Handle<Object> value) {
+    Store(access, jsgraph()->Constant(value));
+  }
+
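+  // Completes the allocation in place: {node} is turned into the FinishRegion
+  // node of this allocation and the allocation inherits {node}'s type.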
+  void FinishAndChange(Node* node) {
+    NodeProperties::SetType(allocation_, NodeProperties::GetType(node));
+    node->ReplaceInput(0, allocation_);
+    node->ReplaceInput(1, effect_);
+    node->TrimInputCount(2);
+    NodeProperties::ChangeOp(node, common()->FinishRegion());
+  }
+
+  Node* Finish() {
+    return graph()->NewNode(common()->FinishRegion(), allocation_, effect_);
+  }
+
+ protected:
+  JSGraph* jsgraph() { return jsgraph_; }
+  Graph* graph() { return jsgraph_->graph(); }
+  CommonOperatorBuilder* common() { return jsgraph_->common(); }
+  SimplifiedOperatorBuilder* simplified() { return jsgraph_->simplified(); }
+
+ private:
+  JSGraph* const jsgraph_;
+  Node* allocation_;
+  Node* effect_;
+  Node* control_;
+};
+
+// Retrieves the frame state holding actual argument values.
+Node* GetArgumentsFrameState(Node* frame_state) {
+  Node* const outer_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+  FrameStateInfo outer_state_info = OpParameter<FrameStateInfo>(outer_state);
+  return outer_state_info.type() == FrameStateType::kArgumentsAdaptor
+             ? outer_state
+             : frame_state;
+}
+
+// Checks whether allocation using the given target and new.target can be
+// inlined.
+bool IsAllocationInlineable(Handle<JSFunction> target,
+                            Handle<JSFunction> new_target) {
+  return new_target->has_initial_map() &&
+         new_target->initial_map()->constructor_or_backpointer() == *target;
+}
+
+// When initializing arrays, we'll unfold the loop if the number of elements
+// is known at compile time and does not exceed this limit.
+const int kElementLoopUnrollLimit = 16;
+
+// Limits up to which context allocations are inlined.
+const int kFunctionContextAllocationLimit = 16;
+const int kBlockContextAllocationLimit = 16;
+
+// Determines whether the given array or object literal boilerplate satisfies
+// all limits to be considered for fast deep-copying and computes the total
+// size of all objects that are part of the graph.
+bool IsFastLiteral(Handle<JSObject> boilerplate, int max_depth,
+                   int* max_properties) {
+  DCHECK_GE(max_depth, 0);
+  DCHECK_GE(*max_properties, 0);
+
+  // Make sure the boilerplate map is not deprecated.
+  if (!JSObject::TryMigrateInstance(boilerplate)) return false;
+
+  // Check for too deep nesting.
+  if (max_depth == 0) return false;
+
+  // Check the elements.
+  Isolate* const isolate = boilerplate->GetIsolate();
+  Handle<FixedArrayBase> elements(boilerplate->elements(), isolate);
+  if (elements->length() > 0 &&
+      elements->map() != isolate->heap()->fixed_cow_array_map()) {
+    if (boilerplate->HasFastSmiOrObjectElements()) {
+      Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
+      int length = elements->length();
+      for (int i = 0; i < length; i++) {
+        if ((*max_properties)-- == 0) return false;
+        Handle<Object> value(fast_elements->get(i), isolate);
+        if (value->IsJSObject()) {
+          Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+          if (!IsFastLiteral(value_object, max_depth - 1, max_properties)) {
+            return false;
+          }
+        }
+      }
+    } else if (!boilerplate->HasFastDoubleElements()) {
+      return false;
+    }
+  }
+
+  // TODO(turbofan): Do we want to support out-of-object properties?
+  Handle<FixedArray> properties(boilerplate->properties(), isolate);
+  if (properties->length() > 0) return false;
+
+  // Check the in-object properties.
+  Handle<DescriptorArray> descriptors(
+      boilerplate->map()->instance_descriptors(), isolate);
+  int limit = boilerplate->map()->NumberOfOwnDescriptors();
+  for (int i = 0; i < limit; i++) {
+    PropertyDetails details = descriptors->GetDetails(i);
+    if (details.type() != DATA) continue;
+    if ((*max_properties)-- == 0) return false;
+    FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
+    if (boilerplate->IsUnboxedDoubleField(field_index)) continue;
+    Handle<Object> value(boilerplate->RawFastPropertyAt(field_index), isolate);
+    if (value->IsJSObject()) {
+      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+      if (!IsFastLiteral(value_object, max_depth - 1, max_properties)) {
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+// Maximum depth and total number of elements and properties for literal
+// graphs to be considered for fast deep-copying.
+const int kMaxFastLiteralDepth = 3;
+const int kMaxFastLiteralProperties = 8;
+
+}  // namespace
+
+Reduction JSCreateLowering::Reduce(Node* node) {
+  switch (node->opcode()) {
+    case IrOpcode::kJSCreate:
+      return ReduceJSCreate(node);
+    case IrOpcode::kJSCreateArguments:
+      return ReduceJSCreateArguments(node);
+    case IrOpcode::kJSCreateArray:
+      return ReduceJSCreateArray(node);
+    case IrOpcode::kJSCreateIterResultObject:
+      return ReduceJSCreateIterResultObject(node);
+    case IrOpcode::kJSCreateLiteralArray:
+    case IrOpcode::kJSCreateLiteralObject:
+      return ReduceJSCreateLiteral(node);
+    case IrOpcode::kJSCreateFunctionContext:
+      return ReduceJSCreateFunctionContext(node);
+    case IrOpcode::kJSCreateWithContext:
+      return ReduceJSCreateWithContext(node);
+    case IrOpcode::kJSCreateCatchContext:
+      return ReduceJSCreateCatchContext(node);
+    case IrOpcode::kJSCreateBlockContext:
+      return ReduceJSCreateBlockContext(node);
+    default:
+      break;
+  }
+  return NoChange();
+}
+
+Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCreate, node->opcode());
+  Node* const target = NodeProperties::GetValueInput(node, 0);
+  Type* const target_type = NodeProperties::GetType(target);
+  Node* const new_target = NodeProperties::GetValueInput(node, 1);
+  Type* const new_target_type = NodeProperties::GetType(new_target);
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  // Extract constructor and original constructor function.
+  if (target_type->IsConstant() &&
+      new_target_type->IsConstant() &&
+      new_target_type->AsConstant()->Value()->IsJSFunction()) {
+    Handle<JSFunction> constructor =
+        Handle<JSFunction>::cast(target_type->AsConstant()->Value());
+    Handle<JSFunction> original_constructor =
+        Handle<JSFunction>::cast(new_target_type->AsConstant()->Value());
+    DCHECK(constructor->IsConstructor());
+    DCHECK(original_constructor->IsConstructor());
+
+    // Check if we can inline the allocation.
+    if (IsAllocationInlineable(constructor, original_constructor)) {
+      // Force completion of inobject slack tracking before
+      // generating code to finalize the instance size.
+      original_constructor->CompleteInobjectSlackTrackingIfActive();
+
+      // Compute instance size from initial map of {original_constructor}.
+      Handle<Map> initial_map(original_constructor->initial_map(), isolate());
+      int const instance_size = initial_map->instance_size();
+
+      // Add a dependency on the {initial_map} to make sure that this code is
+      // deoptimized whenever the {initial_map} of the {original_constructor}
+      // changes.
+      dependencies()->AssumeInitialMapCantChange(initial_map);
+
+      // Emit code to allocate the JSObject instance for the
+      // {original_constructor}.
+      AllocationBuilder a(jsgraph(), effect, graph()->start());
+      a.Allocate(instance_size);
+      a.Store(AccessBuilder::ForMap(), initial_map);
+      a.Store(AccessBuilder::ForJSObjectProperties(),
+              jsgraph()->EmptyFixedArrayConstant());
+      a.Store(AccessBuilder::ForJSObjectElements(),
+              jsgraph()->EmptyFixedArrayConstant());
+      for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
+        a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
+                jsgraph()->UndefinedConstant());
+      }
+      a.FinishAndChange(node);
+      return Changed(node);
+    }
+  }
+  return NoChange();
+}
+
+Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCreateArguments, node->opcode());
+  CreateArgumentsType type = CreateArgumentsTypeOf(node->op());
+  Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
+  Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+  FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+
+  // Use builtin stubs for materializing mapped and unmapped arguments objects
+  // and rest parameters, but only for non-inlined (i.e. outermost) frames.
+  if (outer_state->opcode() != IrOpcode::kFrameState) {
+    switch (type) {
+      case CreateArgumentsType::kMappedArguments: {
+        // TODO(mstarzinger): Duplicate parameters are not handled yet.
+        Handle<SharedFunctionInfo> shared_info;
+        if (!state_info.shared_info().ToHandle(&shared_info) ||
+            shared_info->has_duplicate_parameters()) {
+          return NoChange();
+        }
+        // TODO(bmeurer): Actually we don't need a frame state here.
+        Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+        CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+            isolate(), graph()->zone(), callable.descriptor(), 0,
+            CallDescriptor::kNeedsFrameState);
+        const Operator* new_op = common()->Call(desc);
+        Node* stub_code = jsgraph()->HeapConstant(callable.code());
+        node->InsertInput(graph()->zone(), 0, stub_code);
+        NodeProperties::ChangeOp(node, new_op);
+        return Changed(node);
+      }
+      case CreateArgumentsType::kUnmappedArguments: {
+        // TODO(bmeurer): Actually we don't need a frame state here.
+        Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+        CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+            isolate(), graph()->zone(), callable.descriptor(), 0,
+            CallDescriptor::kNeedsFrameState);
+        const Operator* new_op = common()->Call(desc);
+        Node* stub_code = jsgraph()->HeapConstant(callable.code());
+        node->InsertInput(graph()->zone(), 0, stub_code);
+        NodeProperties::ChangeOp(node, new_op);
+        return Changed(node);
+      }
+      case CreateArgumentsType::kRestParameter: {
+        // TODO(bmeurer): Actually we don't need a frame state here.
+        Callable callable = CodeFactory::FastNewRestParameter(isolate());
+        CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+            isolate(), graph()->zone(), callable.descriptor(), 0,
+            CallDescriptor::kNeedsFrameState);
+        const Operator* new_op = common()->Call(desc);
+        Node* stub_code = jsgraph()->HeapConstant(callable.code());
+        node->InsertInput(graph()->zone(), 0, stub_code);
+        NodeProperties::ChangeOp(node, new_op);
+        return Changed(node);
+      }
+    }
+    UNREACHABLE();
+  } else if (outer_state->opcode() == IrOpcode::kFrameState) {
+    // Use inline allocation for all mapped arguments objects within inlined
+    // (i.e. non-outermost) frames, independent of the object size.
+    if (type == CreateArgumentsType::kMappedArguments) {
+      Handle<SharedFunctionInfo> shared;
+      if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+      Node* const callee = NodeProperties::GetValueInput(node, 0);
+      Node* const control = NodeProperties::GetControlInput(node);
+      Node* const context = NodeProperties::GetContextInput(node);
+      Node* effect = NodeProperties::GetEffectInput(node);
+      // TODO(mstarzinger): Duplicate parameters are not handled yet.
+      if (shared->has_duplicate_parameters()) return NoChange();
+      // Choose the correct frame state and frame state info depending on
+      // whether there conceptually is an arguments adaptor frame in the call
+      // chain.
+      Node* const args_state = GetArgumentsFrameState(frame_state);
+      FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
+      // Prepare element backing store to be used by arguments object.
+      bool has_aliased_arguments = false;
+      Node* const elements = AllocateAliasedArguments(
+          effect, control, args_state, context, shared, &has_aliased_arguments);
+      effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
+      // Load the arguments object map from the current native context.
+      Node* const load_native_context = effect = graph()->NewNode(
+          javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+          context, context, effect);
+      Node* const load_arguments_map = effect = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForContextSlot(
+              has_aliased_arguments ? Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX
+                                    : Context::SLOPPY_ARGUMENTS_MAP_INDEX)),
+          load_native_context, effect, control);
+      // Actually allocate and initialize the arguments object.
+      AllocationBuilder a(jsgraph(), effect, control);
+      Node* properties = jsgraph()->EmptyFixedArrayConstant();
+      int length = args_state_info.parameter_count() - 1;  // Minus receiver.
+      STATIC_ASSERT(JSSloppyArgumentsObject::kSize == 5 * kPointerSize);
+      a.Allocate(JSSloppyArgumentsObject::kSize);
+      a.Store(AccessBuilder::ForMap(), load_arguments_map);
+      a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+      a.Store(AccessBuilder::ForJSObjectElements(), elements);
+      a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
+      a.Store(AccessBuilder::ForArgumentsCallee(), callee);
+      RelaxControls(node);
+      a.FinishAndChange(node);
+      return Changed(node);
+    } else if (type == CreateArgumentsType::kUnmappedArguments) {
+      // Use inline allocation for all unmapped arguments objects within inlined
+      // (i.e. non-outermost) frames, independent of the object size.
+      Node* const control = NodeProperties::GetControlInput(node);
+      Node* const context = NodeProperties::GetContextInput(node);
+      Node* effect = NodeProperties::GetEffectInput(node);
+      // Choose the correct frame state and frame state info depending on
+      // whether there conceptually is an arguments adaptor frame in the call
+      // chain.
+      Node* const args_state = GetArgumentsFrameState(frame_state);
+      FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
+      // Prepare element backing store to be used by arguments object.
+      Node* const elements = AllocateArguments(effect, control, args_state);
+      effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
+      // Load the arguments object map from the current native context.
+      Node* const load_native_context = effect = graph()->NewNode(
+          javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+          context, context, effect);
+      Node* const load_arguments_map = effect = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForContextSlot(
+              Context::STRICT_ARGUMENTS_MAP_INDEX)),
+          load_native_context, effect, control);
+      // Actually allocate and initialize the arguments object.
+      AllocationBuilder a(jsgraph(), effect, control);
+      Node* properties = jsgraph()->EmptyFixedArrayConstant();
+      int length = args_state_info.parameter_count() - 1;  // Minus receiver.
+      STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+      a.Allocate(JSStrictArgumentsObject::kSize);
+      a.Store(AccessBuilder::ForMap(), load_arguments_map);
+      a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+      a.Store(AccessBuilder::ForJSObjectElements(), elements);
+      a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
+      RelaxControls(node);
+      a.FinishAndChange(node);
+      return Changed(node);
+    } else if (type == CreateArgumentsType::kRestParameter) {
+      Handle<SharedFunctionInfo> shared;
+      if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+      int start_index = shared->internal_formal_parameter_count();
+      // Use inline allocation for all rest parameter arrays within inlined
+      // (i.e. non-outermost) frames, independent of the object size.
+      Node* const control = NodeProperties::GetControlInput(node);
+      Node* const context = NodeProperties::GetContextInput(node);
+      Node* effect = NodeProperties::GetEffectInput(node);
+      // Choose the correct frame state and frame state info depending on
+      // whether there conceptually is an arguments adaptor frame in the call
+      // chain.
+      Node* const args_state = GetArgumentsFrameState(frame_state);
+      FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
+      // Prepare element backing store to be used by the rest array.
+      Node* const elements =
+          AllocateRestArguments(effect, control, args_state, start_index);
+      effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
+      // Load the JSArray object map from the current native context.
+      Node* const load_native_context = effect = graph()->NewNode(
+          javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+          context, context, effect);
+      Node* const load_jsarray_map = effect = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForContextSlot(
+              Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX)),
+          load_native_context, effect, control);
+      // Actually allocate and initialize the jsarray.
+      AllocationBuilder a(jsgraph(), effect, control);
+      Node* properties = jsgraph()->EmptyFixedArrayConstant();
+
+      // Subtract one to exclude the receiver.
+      int argument_count = args_state_info.parameter_count() - 1;
+      int length = std::max(0, argument_count - start_index);
+      STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+      a.Allocate(JSArray::kSize);
+      a.Store(AccessBuilder::ForMap(), load_jsarray_map);
+      a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+      a.Store(AccessBuilder::ForJSObjectElements(), elements);
+      a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS),
+              jsgraph()->Constant(length));
+      RelaxControls(node);
+      a.FinishAndChange(node);
+      return Changed(node);
+    }
+  }
+
+  return NoChange();
+}
+
+Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
+                                           int capacity,
+                                           Handle<AllocationSite> site) {
+  DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
+  // Extract transition and tenuring feedback from the {site} and add
+  // appropriate code dependencies on the {site} if deoptimization is
+  // enabled.
+  PretenureFlag pretenure = site->GetPretenureMode();
+  ElementsKind elements_kind = site->GetElementsKind();
+  DCHECK(IsFastElementsKind(elements_kind));
+  dependencies()->AssumeTenuringDecision(site);
+  dependencies()->AssumeTransitionStable(site);
+
+  // Retrieve the initial map for the array from the appropriate native context.
+  Node* native_context = effect = graph()->NewNode(
+      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+      context, context, effect);
+  Node* js_array_map = effect = graph()->NewNode(
+      javascript()->LoadContext(0, Context::ArrayMapIndex(elements_kind), true),
+      native_context, native_context, effect);
+
+  // Setup elements and properties.
+  Node* elements;
+  if (capacity == 0) {
+    elements = jsgraph()->EmptyFixedArrayConstant();
+  } else {
+    elements = effect =
+        AllocateElements(effect, control, elements_kind, capacity, pretenure);
+  }
+  Node* properties = jsgraph()->EmptyFixedArrayConstant();
+
+  // Perform the allocation of the actual JSArray object.
+  AllocationBuilder a(jsgraph(), effect, control);
+  a.Allocate(JSArray::kSize, pretenure);
+  a.Store(AccessBuilder::ForMap(), js_array_map);
+  a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+  a.Store(AccessBuilder::ForJSObjectElements(), elements);
+  a.Store(AccessBuilder::ForJSArrayLength(elements_kind), length);
+  RelaxControls(node);
+  a.FinishAndChange(node);
+  return Changed(node);
+}
+
+Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
+  CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
+  Node* target = NodeProperties::GetValueInput(node, 0);
+  Node* new_target = NodeProperties::GetValueInput(node, 1);
+
+  // TODO(bmeurer): Optimize the subclassing case.
+  if (target != new_target) return NoChange();
+
+  // Check if we have a feedback {site} on the {node}.
+  Handle<AllocationSite> site = p.site();
+  if (p.site().is_null()) return NoChange();
+
+  // Attempt to inline calls to the Array constructor for the relevant cases
+  // where either no arguments are provided, or exactly one unsigned number
+  // argument is given.
+  if (site->CanInlineCall()) {
+    if (p.arity() == 0) {
+      Node* length = jsgraph()->ZeroConstant();
+      int capacity = JSArray::kPreallocatedArrayElements;
+      return ReduceNewArray(node, length, capacity, site);
+    } else if (p.arity() == 1) {
+      Node* length = NodeProperties::GetValueInput(node, 2);
+      Type* length_type = NodeProperties::GetType(length);
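+      // Only inline when the length is known to be a small non-negative Smi,
+      // so that element initialization can be unrolled in AllocateElements.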
+      if (length_type->Is(Type::SignedSmall()) &&
+          length_type->Min() >= 0 &&
+          length_type->Max() <= kElementLoopUnrollLimit) {
+        int capacity = static_cast<int>(length_type->Max());
+        return ReduceNewArray(node, length, capacity, site);
+      }
+    }
+  }
+
+  return NoChange();
+}
+
+Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCreateIterResultObject, node->opcode());
+  Node* value = NodeProperties::GetValueInput(node, 0);
+  Node* done = NodeProperties::GetValueInput(node, 1);
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* effect = NodeProperties::GetEffectInput(node);
+
+  // Load the JSIteratorResult map for the {context}.
+  Node* native_context = effect = graph()->NewNode(
+      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+      context, context, effect);
+  Node* iterator_result_map = effect = graph()->NewNode(
+      javascript()->LoadContext(0, Context::ITERATOR_RESULT_MAP_INDEX, true),
+      native_context, native_context, effect);
+
+  // Emit code to allocate the JSIteratorResult instance.
+  AllocationBuilder a(jsgraph(), effect, graph()->start());
+  a.Allocate(JSIteratorResult::kSize);
+  a.Store(AccessBuilder::ForMap(), iterator_result_map);
+  a.Store(AccessBuilder::ForJSObjectProperties(),
+          jsgraph()->EmptyFixedArrayConstant());
+  a.Store(AccessBuilder::ForJSObjectElements(),
+          jsgraph()->EmptyFixedArrayConstant());
+  a.Store(AccessBuilder::ForJSIteratorResultValue(), value);
+  a.Store(AccessBuilder::ForJSIteratorResultDone(), done);
+  STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
+  a.FinishAndChange(node);
+  return Changed(node);
+}
+
+Reduction JSCreateLowering::ReduceJSCreateLiteral(Node* node) {
+  DCHECK(node->opcode() == IrOpcode::kJSCreateLiteralArray ||
+         node->opcode() == IrOpcode::kJSCreateLiteralObject);
+  CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
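+  // Lookup the AllocationSite recorded for this literal and inline a deep
+  // copy of its boilerplate if it qualifies as a fast literal.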
+  Handle<LiteralsArray> literals_array;
+  if (GetSpecializationLiterals(node).ToHandle(&literals_array)) {
+    Handle<Object> literal(literals_array->literal(p.index()), isolate());
+    if (literal->IsAllocationSite()) {
+      Handle<AllocationSite> site = Handle<AllocationSite>::cast(literal);
+      Handle<JSObject> boilerplate(JSObject::cast(site->transition_info()),
+                                   isolate());
+      int max_properties = kMaxFastLiteralProperties;
+      if (IsFastLiteral(boilerplate, kMaxFastLiteralDepth, &max_properties)) {
+        AllocationSiteUsageContext site_context(isolate(), site, false);
+        site_context.EnterNewScope();
+        Node* value = effect =
+            AllocateFastLiteral(effect, control, boilerplate, &site_context);
+        site_context.ExitScope(site, boilerplate);
+        ReplaceWithValue(node, value, effect, control);
+        return Replace(value);
+      }
+    }
+  }
+
+  return NoChange();
+}
+
+Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
+  int slot_count = OpParameter<int>(node->op());
+  Node* const closure = NodeProperties::GetValueInput(node, 0);
+
+  // Use inline allocation for function contexts up to a size limit.
+  if (slot_count < kFunctionContextAllocationLimit) {
+    // JSCreateFunctionContext[slot_count < limit](fun)
+    Node* effect = NodeProperties::GetEffectInput(node);
+    Node* control = NodeProperties::GetControlInput(node);
+    Node* context = NodeProperties::GetContextInput(node);
+    Node* extension = jsgraph()->TheHoleConstant();
+    Node* native_context = effect = graph()->NewNode(
+        javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+        context, context, effect);
+    AllocationBuilder a(jsgraph(), effect, control);
+    STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
+    int context_length = slot_count + Context::MIN_CONTEXT_SLOTS;
+    a.AllocateArray(context_length, factory()->function_context_map());
+    a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
+    a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
+    a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
+    a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+            native_context);
+    for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
+      a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
+    }
+    RelaxControls(node);
+    a.FinishAndChange(node);
+    return Changed(node);
+  }
+
+  return NoChange();
+}
+
+Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCreateWithContext, node->opcode());
+  Node* object = NodeProperties::GetValueInput(node, 0);
+  Node* closure = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* native_context = effect = graph()->NewNode(
+      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+      context, context, effect);
+  AllocationBuilder a(jsgraph(), effect, control);
+  STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
+  a.AllocateArray(Context::MIN_CONTEXT_SLOTS, factory()->with_context_map());
+  a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
+  a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
+  a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), object);
+  a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+          native_context);
+  RelaxControls(node);
+  a.FinishAndChange(node);
+  return Changed(node);
+}
+
+Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCreateCatchContext, node->opcode());
+  Handle<String> name = OpParameter<Handle<String>>(node);
+  Node* exception = NodeProperties::GetValueInput(node, 0);
+  Node* closure = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* native_context = effect = graph()->NewNode(
+      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+      context, context, effect);
+  AllocationBuilder a(jsgraph(), effect, control);
+  STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
+  a.AllocateArray(Context::MIN_CONTEXT_SLOTS + 1,
+                  factory()->catch_context_map());
+  a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
+  a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
+  a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), name);
+  a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+          native_context);
+  a.Store(AccessBuilder::ForContextSlot(Context::THROWN_OBJECT_INDEX),
+          exception);
+  RelaxControls(node);
+  a.FinishAndChange(node);
+  return Changed(node);
+}
+
+Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCreateBlockContext, node->opcode());
+  Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
+  int const context_length = scope_info->ContextLength();
+  Node* const closure = NodeProperties::GetValueInput(node, 0);
+
+  // Use inline allocation for block contexts up to a size limit.
+  if (context_length < kBlockContextAllocationLimit) {
+    // JSCreateBlockContext[scope[length < limit]](fun)
+    Node* effect = NodeProperties::GetEffectInput(node);
+    Node* control = NodeProperties::GetControlInput(node);
+    Node* context = NodeProperties::GetContextInput(node);
+    Node* extension = jsgraph()->Constant(scope_info);
+    Node* native_context = effect = graph()->NewNode(
+        javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+        context, context, effect);
+    AllocationBuilder a(jsgraph(), effect, control);
+    STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
+    a.AllocateArray(context_length, factory()->block_context_map());
+    a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
+    a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
+    a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
+    a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+            native_context);
+    for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
+      a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
+    }
+    RelaxControls(node);
+    a.FinishAndChange(node);
+    return Changed(node);
+  }
+
+  return NoChange();
+}
+
+// Helper that allocates a FixedArray holding argument values recorded in the
+// given {frame_state}. Serves as backing store for JSCreateArguments nodes.
+Node* JSCreateLowering::AllocateArguments(Node* effect, Node* control,
+                                          Node* frame_state) {
+  FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+  int argument_count = state_info.parameter_count() - 1;  // Minus receiver.
+  if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
+
+  // Prepare an iterator over argument values recorded in the frame state.
+  Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+  StateValuesAccess parameters_access(parameters);
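+  // Advancing the iterator once skips the receiver, which is recorded first.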
+  auto parameters_it = ++parameters_access.begin();
+
+  // Actually allocate the backing store.
+  AllocationBuilder a(jsgraph(), effect, control);
+  a.AllocateArray(argument_count, factory()->fixed_array_map());
+  for (int i = 0; i < argument_count; ++i, ++parameters_it) {
+    a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
+  }
+  return a.Finish();
+}
+
+// Helper that allocates a FixedArray holding argument values recorded in the
+// given {frame_state}, skipping the first {start_index} values. Serves as
+// backing store for rest parameters of JSCreateArguments nodes.
+Node* JSCreateLowering::AllocateRestArguments(Node* effect, Node* control,
+                                              Node* frame_state,
+                                              int start_index) {
+  FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+  int argument_count = state_info.parameter_count() - 1;  // Minus receiver.
+  int num_elements = std::max(0, argument_count - start_index);
+  if (num_elements == 0) return jsgraph()->EmptyFixedArrayConstant();
+
+  // Prepare an iterator over argument values recorded in the frame state.
+  Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+  StateValuesAccess parameters_access(parameters);
+  auto parameters_it = ++parameters_access.begin();
+
+  // Skip unused arguments.
+  for (int i = 0; i < start_index; i++) {
+    ++parameters_it;
+  }
+
+  // Actually allocate the backing store.
+  AllocationBuilder a(jsgraph(), effect, control);
+  a.AllocateArray(num_elements, factory()->fixed_array_map());
+  for (int i = 0; i < num_elements; ++i, ++parameters_it) {
+    a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
+  }
+  return a.Finish();
+}
+
+// Helper that allocates a FixedArray serving as a parameter map for values
+// recorded in the given {frame_state}. Some elements map to slots within the
+// given {context}. Serves as backing store for JSCreateArguments nodes.
+Node* JSCreateLowering::AllocateAliasedArguments(
+    Node* effect, Node* control, Node* frame_state, Node* context,
+    Handle<SharedFunctionInfo> shared, bool* has_aliased_arguments) {
+  FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+  int argument_count = state_info.parameter_count() - 1;  // Minus receiver.
+  if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
+
+  // If there is no aliasing, the arguments object elements are not special in
+  // any way, we can just return an unmapped backing store instead.
+  int parameter_count = shared->internal_formal_parameter_count();
+  if (parameter_count == 0) {
+    return AllocateArguments(effect, control, frame_state);
+  }
+
+  // Calculate number of argument values being aliased/mapped.
+  int mapped_count = Min(argument_count, parameter_count);
+  *has_aliased_arguments = true;
+
+  // Prepare an iterator over argument values recorded in the frame state.
+  Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+  StateValuesAccess parameters_access(parameters);
+  auto parameters_it = ++parameters_access.begin();
+
+  // The unmapped argument values recorded in the frame state are stored yet
+  // another indirection away and then linked into the parameter map below,
+  // whereas mapped argument values are replaced with a hole instead.
+  AllocationBuilder aa(jsgraph(), effect, control);
+  aa.AllocateArray(argument_count, factory()->fixed_array_map());
+  for (int i = 0; i < mapped_count; ++i, ++parameters_it) {
+    aa.Store(AccessBuilder::ForFixedArraySlot(i), jsgraph()->TheHoleConstant());
+  }
+  for (int i = mapped_count; i < argument_count; ++i, ++parameters_it) {
+    aa.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
+  }
+  }
+  Node* arguments = aa.Finish();
+
+  // Actually allocate the backing store.
+  AllocationBuilder a(jsgraph(), arguments, control);
+  a.AllocateArray(mapped_count + 2, factory()->sloppy_arguments_elements_map());
+  a.Store(AccessBuilder::ForFixedArraySlot(0), context);
+  a.Store(AccessBuilder::ForFixedArraySlot(1), arguments);
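+  // Entry {i} of the parameter map points at the context slot holding
+  // parameter {i}; slot indices are assigned in reverse parameter order.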
+  for (int i = 0; i < mapped_count; ++i) {
+    int idx = Context::MIN_CONTEXT_SLOTS + parameter_count - 1 - i;
+    a.Store(AccessBuilder::ForFixedArraySlot(i + 2), jsgraph()->Constant(idx));
+  }
+  return a.Finish();
+}
+
+Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
+                                         ElementsKind elements_kind,
+                                         int capacity,
+                                         PretenureFlag pretenure) {
+  DCHECK_LE(1, capacity);
+  DCHECK_LE(capacity, JSArray::kInitialMaxFastElementArray);
+
+  Handle<Map> elements_map = IsFastDoubleElementsKind(elements_kind)
+                                 ? factory()->fixed_double_array_map()
+                                 : factory()->fixed_array_map();
+  ElementAccess access = IsFastDoubleElementsKind(elements_kind)
+                             ? AccessBuilder::ForFixedDoubleArrayElement()
+                             : AccessBuilder::ForFixedArrayElement();
+  Node* value =
+      IsFastDoubleElementsKind(elements_kind)
+          ? jsgraph()->Float64Constant(bit_cast<double>(kHoleNanInt64))
+          : jsgraph()->TheHoleConstant();
+
+  // Actually allocate the backing store.
+  AllocationBuilder a(jsgraph(), effect, control);
+  a.AllocateArray(capacity, elements_map, pretenure);
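+  // Fill the backing store with hole values so it is fully initialized.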
+  for (int i = 0; i < capacity; ++i) {
+    Node* index = jsgraph()->Constant(i);
+    a.Store(access, index, value);
+  }
+  return a.Finish();
+}
+
+Node* JSCreateLowering::AllocateFastLiteral(
+    Node* effect, Node* control, Handle<JSObject> boilerplate,
+    AllocationSiteUsageContext* site_context) {
+  Handle<AllocationSite> current_site(*site_context->current(), isolate());
+  dependencies()->AssumeTransitionStable(current_site);
+
+  PretenureFlag pretenure = NOT_TENURED;
+  if (FLAG_allocation_site_pretenuring) {
+    Handle<AllocationSite> top_site(*site_context->top(), isolate());
+    pretenure = top_site->GetPretenureMode();
+    if (current_site.is_identical_to(top_site)) {
+      // We install a dependency for pretenuring only on the outermost literal.
+      dependencies()->AssumeTenuringDecision(top_site);
+    }
+  }
+
+  // Setup the properties backing store.
+  Node* properties = jsgraph()->EmptyFixedArrayConstant();
+
+  // Setup the elements backing store.
+  Node* elements = AllocateFastLiteralElements(effect, control, boilerplate,
+                                               pretenure, site_context);
+  if (elements->op()->EffectOutputCount() > 0) effect = elements;
+
+  // Compute the in-object properties to store first (might have effects).
+  Handle<Map> boilerplate_map(boilerplate->map(), isolate());
+  ZoneVector<std::pair<FieldAccess, Node*>> inobject_fields(zone());
+  inobject_fields.reserve(boilerplate_map->GetInObjectProperties());
+  int const boilerplate_nof = boilerplate_map->NumberOfOwnDescriptors();
+  for (int i = 0; i < boilerplate_nof; ++i) {
+    PropertyDetails const property_details =
+        boilerplate_map->instance_descriptors()->GetDetails(i);
+    if (property_details.type() != DATA) continue;
+    Handle<Name> property_name(
+        boilerplate_map->instance_descriptors()->GetKey(i), isolate());
+    FieldIndex index = FieldIndex::ForDescriptor(*boilerplate_map, i);
+    FieldAccess access = {kTaggedBase, index.offset(), property_name,
+                          Type::Tagged(), MachineType::AnyTagged()};
+    Node* value;
+    if (boilerplate->IsUnboxedDoubleField(index)) {
+      access.machine_type = MachineType::Float64();
+      access.type = Type::Number();
+      value = jsgraph()->Constant(boilerplate->RawFastDoublePropertyAt(index));
+    } else {
+      Handle<Object> boilerplate_value(boilerplate->RawFastPropertyAt(index),
+                                       isolate());
+      if (boilerplate_value->IsJSObject()) {
+        Handle<JSObject> boilerplate_object =
+            Handle<JSObject>::cast(boilerplate_value);
+        Handle<AllocationSite> current_site = site_context->EnterNewScope();
+        value = effect = AllocateFastLiteral(effect, control,
+                                             boilerplate_object, site_context);
+        site_context->ExitScope(current_site, boilerplate_object);
+      } else if (property_details.representation().IsDouble()) {
+        // Allocate a mutable HeapNumber box and store the value into it.
+        value = effect = AllocateMutableHeapNumber(
+            Handle<HeapNumber>::cast(boilerplate_value)->value(),
+            effect, control);
+      } else if (property_details.representation().IsSmi()) {
+        // Ensure that value is stored as smi.
+        value = boilerplate_value->IsUninitialized()
+                    ? jsgraph()->ZeroConstant()
+                    : jsgraph()->Constant(boilerplate_value);
+      } else {
+        value = jsgraph()->Constant(boilerplate_value);
+      }
+    }
+    inobject_fields.push_back(std::make_pair(access, value));
+  }
+
+  // Fill slack at the end of the boilerplate object with filler maps.
+  int const boilerplate_length = boilerplate_map->GetInObjectProperties();
+  for (int index = static_cast<int>(inobject_fields.size());
+       index < boilerplate_length; ++index) {
+    FieldAccess access =
+        AccessBuilder::ForJSObjectInObjectProperty(boilerplate_map, index);
+    Node* value = jsgraph()->HeapConstant(factory()->one_pointer_filler_map());
+    inobject_fields.push_back(std::make_pair(access, value));
+  }
+
+  // Actually allocate and initialize the object.
+  AllocationBuilder builder(jsgraph(), effect, control);
+  builder.Allocate(boilerplate_map->instance_size(), pretenure);
+  builder.Store(AccessBuilder::ForMap(), boilerplate_map);
+  builder.Store(AccessBuilder::ForJSObjectProperties(), properties);
+  builder.Store(AccessBuilder::ForJSObjectElements(), elements);
+  if (boilerplate_map->IsJSArrayMap()) {
+    Handle<JSArray> boilerplate_array = Handle<JSArray>::cast(boilerplate);
+    builder.Store(
+        AccessBuilder::ForJSArrayLength(boilerplate_array->GetElementsKind()),
+        handle(boilerplate_array->length(), isolate()));
+  }
+  for (auto const inobject_field : inobject_fields) {
+    builder.Store(inobject_field.first, inobject_field.second);
+  }
+  return builder.Finish();
+}
+
+Node* JSCreateLowering::AllocateFastLiteralElements(
+    Node* effect, Node* control, Handle<JSObject> boilerplate,
+    PretenureFlag pretenure, AllocationSiteUsageContext* site_context) {
+  Handle<FixedArrayBase> boilerplate_elements(boilerplate->elements(),
+                                              isolate());
+
+  // Empty or copy-on-write elements just store a constant.
+  if (boilerplate_elements->length() == 0 ||
+      boilerplate_elements->map() == isolate()->heap()->fixed_cow_array_map()) {
+    if (pretenure == TENURED &&
+        isolate()->heap()->InNewSpace(*boilerplate_elements)) {
+      // If we would like to pretenure a fixed cow array, we must ensure that
+      // the array is already in old space, otherwise we'll create too many
+      // old-to-new-space pointers (overflowing the store buffer).
+      boilerplate_elements = Handle<FixedArrayBase>(
+          isolate()->factory()->CopyAndTenureFixedCOWArray(
+              Handle<FixedArray>::cast(boilerplate_elements)));
+      boilerplate->set_elements(*boilerplate_elements);
+    }
+    return jsgraph()->HeapConstant(boilerplate_elements);
+  }
+
+  // Compute the elements to store first (might have effects).
+  int const elements_length = boilerplate_elements->length();
+  Handle<Map> elements_map(boilerplate_elements->map(), isolate());
+  ZoneVector<Node*> elements_values(elements_length, zone());
+  if (elements_map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE) {
+    Handle<FixedDoubleArray> elements =
+        Handle<FixedDoubleArray>::cast(boilerplate_elements);
+    for (int i = 0; i < elements_length; ++i) {
+      if (elements->is_the_hole(i)) {
+        // TODO(turbofan): We cannot currently safely pass thru the (signaling)
+        // hole NaN in C++ code, as the C++ compiler on Intel might use FPU
+        // instructions/registers for doubles and therefore make the NaN quiet.
+        // We should consider passing doubles in the compiler as raw int64
+        // values to prevent this.
+        elements_values[i] = effect =
+            graph()->NewNode(simplified()->LoadElement(
+                                 AccessBuilder::ForFixedDoubleArrayElement()),
+                             jsgraph()->HeapConstant(elements),
+                             jsgraph()->Constant(i), effect, control);
+      } else {
+        elements_values[i] = jsgraph()->Constant(elements->get_scalar(i));
+      }
+    }
+  } else {
+    Handle<FixedArray> elements =
+        Handle<FixedArray>::cast(boilerplate_elements);
+    for (int i = 0; i < elements_length; ++i) {
+      if (elements->is_the_hole(i)) {
+        elements_values[i] = jsgraph()->TheHoleConstant();
+      } else {
+        Handle<Object> element_value(elements->get(i), isolate());
+        if (element_value->IsJSObject()) {
+          Handle<JSObject> boilerplate_object =
+              Handle<JSObject>::cast(element_value);
+          Handle<AllocationSite> current_site = site_context->EnterNewScope();
+          elements_values[i] = effect = AllocateFastLiteral(
+              effect, control, boilerplate_object, site_context);
+          site_context->ExitScope(current_site, boilerplate_object);
+        } else {
+          elements_values[i] = jsgraph()->Constant(element_value);
+        }
+      }
+    }
+  }
+
+  // Allocate the backing store array and store the elements.
+  AllocationBuilder builder(jsgraph(), effect, control);
+  builder.AllocateArray(elements_length, elements_map, pretenure);
+  ElementAccess const access =
+      (elements_map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE)
+          ? AccessBuilder::ForFixedDoubleArrayElement()
+          : AccessBuilder::ForFixedArrayElement();
+  for (int i = 0; i < elements_length; ++i) {
+    builder.Store(access, jsgraph()->Constant(i), elements_values[i]);
+  }
+  return builder.Finish();
+}
+
+Node* JSCreateLowering::AllocateMutableHeapNumber(double value, Node* effect,
+                                                  Node* control) {
+  // TODO(turbofan): Support inline allocation of MutableHeapNumber
+  // (requires proper alignment on Allocate, and Begin/FinishRegion).
+  Callable callable = CodeFactory::AllocateMutableHeapNumber(isolate());
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      isolate(), jsgraph()->zone(), callable.descriptor(), 0,
+      CallDescriptor::kNoFlags, Operator::kNoThrow);
+  Node* result = effect = graph()->NewNode(
+      common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
+      jsgraph()->NoContextConstant(), effect, control);
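+  // Initialize the freshly allocated mutable HeapNumber with {value}.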
+  effect = graph()->NewNode(
+      simplified()->StoreField(AccessBuilder::ForHeapNumberValue()), result,
+      jsgraph()->Constant(value), effect, control);
+  return result;
+}
+
+MaybeHandle<LiteralsArray> JSCreateLowering::GetSpecializationLiterals(
+    Node* node) {
+  Node* const closure = NodeProperties::GetValueInput(node, 0);
+  switch (closure->opcode()) {
+    case IrOpcode::kHeapConstant: {
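+      // A constant closure directly provides its literals array.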
+      Handle<HeapObject> object = OpParameter<Handle<HeapObject>>(closure);
+      return handle(Handle<JSFunction>::cast(object)->literals());
+    }
+    case IrOpcode::kParameter: {
+      int const index = ParameterIndexOf(closure->op());
+      // The closure is always the last parameter to a JavaScript function, and
+      // {Parameter} indices start at -1, so value outputs of {Start} look like
+      // this: closure, receiver, param0, ..., paramN, context.
+      if (index == -1) {
+        return literals_array_;
+      }
+      break;
+    }
+    default:
+      break;
+  }
+  return MaybeHandle<LiteralsArray>();
+}
+
+Factory* JSCreateLowering::factory() const { return isolate()->factory(); }
+
+Graph* JSCreateLowering::graph() const { return jsgraph()->graph(); }
+
+Isolate* JSCreateLowering::isolate() const { return jsgraph()->isolate(); }
+
+JSOperatorBuilder* JSCreateLowering::javascript() const {
+  return jsgraph()->javascript();
+}
+
+CommonOperatorBuilder* JSCreateLowering::common() const {
+  return jsgraph()->common();
+}
+
+SimplifiedOperatorBuilder* JSCreateLowering::simplified() const {
+  return jsgraph()->simplified();
+}
+
+MachineOperatorBuilder* JSCreateLowering::machine() const {
+  return jsgraph()->machine();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/js-create-lowering.h b/src/compiler/js-create-lowering.h
new file mode 100644
index 0000000..d9d184b
--- /dev/null
+++ b/src/compiler/js-create-lowering.h
@@ -0,0 +1,99 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_CREATE_LOWERING_H_
+#define V8_COMPILER_JS_CREATE_LOWERING_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class AllocationSiteUsageContext;
+class CompilationDependencies;
+class Factory;
+
+
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class JSGraph;
+class JSOperatorBuilder;
+class MachineOperatorBuilder;
+class SimplifiedOperatorBuilder;
+
+
+// Lowers JSCreate-level operators to fast (inline) allocations.
+class JSCreateLowering final : public AdvancedReducer {
+ public:
+  JSCreateLowering(Editor* editor, CompilationDependencies* dependencies,
+                   JSGraph* jsgraph, MaybeHandle<LiteralsArray> literals_array,
+                   Zone* zone)
+      : AdvancedReducer(editor),
+        dependencies_(dependencies),
+        jsgraph_(jsgraph),
+        literals_array_(literals_array),
+        zone_(zone) {}
+  ~JSCreateLowering() final {}
+
+  Reduction Reduce(Node* node) final;
+
+ private:
+  Reduction ReduceJSCreate(Node* node);
+  Reduction ReduceJSCreateArguments(Node* node);
+  Reduction ReduceJSCreateArray(Node* node);
+  Reduction ReduceJSCreateIterResultObject(Node* node);
+  Reduction ReduceJSCreateLiteral(Node* node);
+  Reduction ReduceJSCreateFunctionContext(Node* node);
+  Reduction ReduceJSCreateWithContext(Node* node);
+  Reduction ReduceJSCreateCatchContext(Node* node);
+  Reduction ReduceJSCreateBlockContext(Node* node);
+  Reduction ReduceNewArray(Node* node, Node* length, int capacity,
+                           Handle<AllocationSite> site);
+
+  Node* AllocateArguments(Node* effect, Node* control, Node* frame_state);
+  Node* AllocateRestArguments(Node* effect, Node* control, Node* frame_state,
+                              int start_index);
+  Node* AllocateAliasedArguments(Node* effect, Node* control, Node* frame_state,
+                                 Node* context, Handle<SharedFunctionInfo>,
+                                 bool* has_aliased_arguments);
+  Node* AllocateElements(Node* effect, Node* control,
+                         ElementsKind elements_kind, int capacity,
+                         PretenureFlag pretenure);
+  Node* AllocateFastLiteral(Node* effect, Node* control,
+                            Handle<JSObject> boilerplate,
+                            AllocationSiteUsageContext* site_context);
+  Node* AllocateFastLiteralElements(Node* effect, Node* control,
+                                    Handle<JSObject> boilerplate,
+                                    PretenureFlag pretenure,
+                                    AllocationSiteUsageContext* site_context);
+  Node* AllocateMutableHeapNumber(double value, Node* effect, Node* control);
+
+  // Infers the LiteralsArray to use for a given {node}.
+  MaybeHandle<LiteralsArray> GetSpecializationLiterals(Node* node);
+
+  Factory* factory() const;
+  Graph* graph() const;
+  JSGraph* jsgraph() const { return jsgraph_; }
+  Isolate* isolate() const;
+  JSOperatorBuilder* javascript() const;
+  CommonOperatorBuilder* common() const;
+  SimplifiedOperatorBuilder* simplified() const;
+  MachineOperatorBuilder* machine() const;
+  CompilationDependencies* dependencies() const { return dependencies_; }
+  Zone* zone() const { return zone_; }
+
+  CompilationDependencies* const dependencies_;
+  JSGraph* const jsgraph_;
+  MaybeHandle<LiteralsArray> const literals_array_;
+  Zone* const zone_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_JS_CREATE_LOWERING_H_
diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc
index 15ce908..df2d908 100644
--- a/src/compiler/js-generic-lowering.cc
+++ b/src/compiler/js-generic-lowering.cc
@@ -62,16 +62,11 @@
   return Changed(node);
 }
 
-
-#define REPLACE_BINARY_OP_IC_CALL(Op, token)                                  \
-  void JSGenericLowering::Lower##Op(Node* node) {                             \
-    BinaryOperationParameters const& p =                                      \
-        BinaryOperationParametersOf(node->op());                              \
-    CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);             \
-    ReplaceWithStubCall(node,                                                 \
-                        CodeFactory::BinaryOpIC(isolate(), token,             \
-                                                strength(p.language_mode())), \
-                        CallDescriptor::kPatchableCallSiteWithNop | flags);   \
+#define REPLACE_BINARY_OP_IC_CALL(Op, token)                                \
+  void JSGenericLowering::Lower##Op(Node* node) {                           \
+    CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);           \
+    ReplaceWithStubCall(node, CodeFactory::BinaryOpIC(isolate(), token),    \
+                        CallDescriptor::kPatchableCallSiteWithNop | flags); \
   }
 REPLACE_BINARY_OP_IC_CALL(JSBitwiseOr, Token::BIT_OR)
 REPLACE_BINARY_OP_IC_CALL(JSBitwiseXor, Token::BIT_XOR)
@@ -86,128 +81,22 @@
 REPLACE_BINARY_OP_IC_CALL(JSModulus, Token::MOD)
 #undef REPLACE_BINARY_OP_IC_CALL
 
-
-// These ops are not language mode dependent; we arbitrarily pass Strength::WEAK
-// here.
-#define REPLACE_COMPARE_IC_CALL(op, token)             \
-  void JSGenericLowering::Lower##op(Node* node) {      \
-    ReplaceWithCompareIC(node, token, Strength::WEAK); \
-  }
-REPLACE_COMPARE_IC_CALL(JSEqual, Token::EQ)
-REPLACE_COMPARE_IC_CALL(JSNotEqual, Token::NE)
-REPLACE_COMPARE_IC_CALL(JSStrictEqual, Token::EQ_STRICT)
-REPLACE_COMPARE_IC_CALL(JSStrictNotEqual, Token::NE_STRICT)
-#undef REPLACE_COMPARE_IC_CALL
-
-
-#define REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(op, token)        \
-  void JSGenericLowering::Lower##op(Node* node) {                    \
-    ReplaceWithCompareIC(node, token,                                \
-                         strength(OpParameter<LanguageMode>(node))); \
-  }
-REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(JSLessThan, Token::LT)
-REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(JSGreaterThan, Token::GT)
-REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(JSLessThanOrEqual, Token::LTE)
-REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(JSGreaterThanOrEqual, Token::GTE)
-#undef REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE
-
-
 #define REPLACE_RUNTIME_CALL(op, fun)             \
   void JSGenericLowering::Lower##op(Node* node) { \
     ReplaceWithRuntimeCall(node, fun);            \
   }
-REPLACE_RUNTIME_CALL(JSCreateFunctionContext, Runtime::kNewFunctionContext)
+REPLACE_RUNTIME_CALL(JSEqual, Runtime::kEqual)
+REPLACE_RUNTIME_CALL(JSNotEqual, Runtime::kNotEqual)
+REPLACE_RUNTIME_CALL(JSStrictEqual, Runtime::kStrictEqual)
+REPLACE_RUNTIME_CALL(JSStrictNotEqual, Runtime::kStrictNotEqual)
+REPLACE_RUNTIME_CALL(JSLessThan, Runtime::kLessThan)
+REPLACE_RUNTIME_CALL(JSGreaterThan, Runtime::kGreaterThan)
+REPLACE_RUNTIME_CALL(JSLessThanOrEqual, Runtime::kLessThanOrEqual)
+REPLACE_RUNTIME_CALL(JSGreaterThanOrEqual, Runtime::kGreaterThanOrEqual)
 REPLACE_RUNTIME_CALL(JSCreateWithContext, Runtime::kPushWithContext)
 REPLACE_RUNTIME_CALL(JSCreateModuleContext, Runtime::kPushModuleContext)
 REPLACE_RUNTIME_CALL(JSConvertReceiver, Runtime::kConvertReceiver)
-#undef REPLACE_RUNTIME
-
-
-static CallDescriptor::Flags FlagsForNode(Node* node) {
-  CallDescriptor::Flags result = CallDescriptor::kNoFlags;
-  if (OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
-    result |= CallDescriptor::kNeedsFrameState;
-  }
-  return result;
-}
-
-
-void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token,
-                                             Strength str) {
-  Callable callable = CodeFactory::CompareIC(isolate(), token, str);
-
-  // Create a new call node asking a CompareIC for help.
-  NodeVector inputs(zone());
-  inputs.reserve(node->InputCount() + 1);
-  inputs.push_back(jsgraph()->HeapConstant(callable.code()));
-  inputs.push_back(NodeProperties::GetValueInput(node, 0));
-  inputs.push_back(NodeProperties::GetValueInput(node, 1));
-  inputs.push_back(NodeProperties::GetContextInput(node));
-  // Some comparisons (StrictEqual) don't have an effect, control or frame
-  // state inputs, so handle those cases here.
-  if (OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
-    inputs.push_back(NodeProperties::GetFrameStateInput(node, 0));
-  }
-  Node* effect = (node->op()->EffectInputCount() > 0)
-                     ? NodeProperties::GetEffectInput(node)
-                     : graph()->start();
-  inputs.push_back(effect);
-  Node* control = (node->op()->ControlInputCount() > 0)
-                      ? NodeProperties::GetControlInput(node)
-                      : graph()->start();
-  inputs.push_back(control);
-  CallDescriptor* desc_compare = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), callable.descriptor(), 0,
-      CallDescriptor::kPatchableCallSiteWithNop | FlagsForNode(node),
-      Operator::kNoProperties, MachineType::IntPtr());
-  Node* compare =
-      graph()->NewNode(common()->Call(desc_compare),
-                       static_cast<int>(inputs.size()), &inputs.front());
-
-  // Decide how the return value from the above CompareIC can be converted into
-  // a JavaScript boolean oddball depending on the given token.
-  Node* false_value = jsgraph()->FalseConstant();
-  Node* true_value = jsgraph()->TrueConstant();
-  const Operator* op = nullptr;
-  switch (token) {
-    case Token::EQ:  // a == 0
-    case Token::EQ_STRICT:
-      op = machine()->WordEqual();
-      break;
-    case Token::NE:  // a != 0 becomes !(a == 0)
-    case Token::NE_STRICT:
-      op = machine()->WordEqual();
-      std::swap(true_value, false_value);
-      break;
-    case Token::LT:  // a < 0
-      op = machine()->IntLessThan();
-      break;
-    case Token::GT:  // a > 0 becomes !(a <= 0)
-      op = machine()->IntLessThanOrEqual();
-      std::swap(true_value, false_value);
-      break;
-    case Token::LTE:  // a <= 0
-      op = machine()->IntLessThanOrEqual();
-      break;
-    case Token::GTE:  // a >= 0 becomes !(a < 0)
-      op = machine()->IntLessThan();
-      std::swap(true_value, false_value);
-      break;
-    default:
-      UNREACHABLE();
-  }
-  Node* booleanize = graph()->NewNode(op, compare, jsgraph()->ZeroConstant());
-
-  // Finally patch the original node to select a boolean.
-  NodeProperties::ReplaceUses(node, node, compare, compare, compare);
-  node->TrimInputCount(3);
-  node->ReplaceInput(0, booleanize);
-  node->ReplaceInput(1, true_value);
-  node->ReplaceInput(2, false_value);
-  NodeProperties::ChangeOp(node,
-                           common()->Select(MachineRepresentation::kTagged));
-}
-
+#undef REPLACE_RUNTIME_CALL
 
 
 void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
                                             CallDescriptor::Flags flags) {
@@ -223,11 +112,12 @@
 void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
                                                Runtime::FunctionId f,
                                                int nargs_override) {
+  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
   Operator::Properties properties = node->op()->properties();
   const Runtime::Function* fun = Runtime::FunctionForId(f);
   int nargs = (nargs_override < 0) ? fun->nargs : nargs_override;
-  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
-      zone(), f, nargs, properties, CallDescriptor::kNeedsFrameState);
+  CallDescriptor* desc =
+      Linkage::GetRuntimeCallDescriptor(zone(), f, nargs, properties, flags);
   Node* ref = jsgraph()->ExternalConstant(ExternalReference(f, isolate()));
   Node* arity = jsgraph()->Int32Constant(nargs);
   node->InsertInput(zone(), 0, jsgraph()->CEntryStubConstant(fun->result_size));
@@ -267,7 +157,9 @@
 
 
 void JSGenericLowering::LowerJSToName(Node* node) {
-  ReplaceWithRuntimeCall(node, Runtime::kToName);
+  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  Callable callable = CodeFactory::ToName(isolate());
+  ReplaceWithStubCall(node, callable, flags);
 }
 
 
@@ -279,99 +171,187 @@
 
 
 void JSGenericLowering::LowerJSLoadProperty(Node* node) {
+  Node* closure = NodeProperties::GetValueInput(node, 2);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
   const PropertyAccess& p = PropertyAccessOf(node->op());
-  Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(
-      isolate(), p.language_mode(), UNINITIALIZED);
+  Callable callable =
+      CodeFactory::KeyedLoadICInOptimizedCode(isolate(), UNINITIALIZED);
+  // Load the type feedback vector from the closure.
+  Node* shared_info = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), closure,
+      jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
+                                kHeapObjectTag),
+      effect, control);
+  Node* vector = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), shared_info,
+      jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+                                kHeapObjectTag),
+      effect, control);
   node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+  node->ReplaceInput(3, vector);
+  node->ReplaceInput(6, effect);
   ReplaceWithStubCall(node, callable, flags);
 }
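
    Note: the two raw loads added here (closure -> SharedFunctionInfo ->
    feedback vector) recur verbatim in LowerJSLoadNamed, LowerJSLoadGlobal and
    the three store lowerings below. Each field offset is adjusted by
    kHeapObjectTag because V8 heap pointers carry a tag in the low bit. A
    standalone sketch of that untagging arithmetic, with made-up types and
    offsets (not the real V8 object layout):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Toy model of a tagged heap pointer: the real address with the low bit
    // set. Only the "field offset minus tag" arithmetic mirrors the loads
    // built above.
    constexpr std::intptr_t kHeapObjectTag = 1;

    struct ToyFunction { void* shared_info; };        // stand-in for JSFunction
    struct ToySharedInfo { void* feedback_vector; };  // stand-in for SharedFunctionInfo

    void* LoadField(std::intptr_t tagged, std::size_t field_offset) {
      // Subtracting the tag from the offset yields the untagged field address.
      return *reinterpret_cast<void**>(tagged + field_offset - kHeapObjectTag);
    }

    int main() {
      ToySharedInfo shared{reinterpret_cast<void*>(0xfeed)};
      ToyFunction closure{&shared};
      std::intptr_t tagged_closure =
          reinterpret_cast<std::intptr_t>(&closure) | kHeapObjectTag;
      void* loaded = LoadField(tagged_closure, offsetof(ToyFunction, shared_info));
      assert(loaded == &shared);  // closure -> shared info, as in the graph loads
    }
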
 
 
 void JSGenericLowering::LowerJSLoadNamed(Node* node) {
+  Node* closure = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
   NamedAccess const& p = NamedAccessOf(node->op());
   Callable callable = CodeFactory::LoadICInOptimizedCode(
-      isolate(), NOT_INSIDE_TYPEOF, p.language_mode(), UNINITIALIZED);
+      isolate(), NOT_INSIDE_TYPEOF, UNINITIALIZED);
+  // Load the type feedback vector from the closure.
+  Node* shared_info = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), closure,
+      jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
+                                kHeapObjectTag),
+      effect, control);
+  Node* vector = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), shared_info,
+      jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+                                kHeapObjectTag),
+      effect, control);
   node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
   node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+  node->ReplaceInput(3, vector);
+  node->ReplaceInput(6, effect);
   ReplaceWithStubCall(node, callable, flags);
 }
 
 
 void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
+  Node* closure = NodeProperties::GetValueInput(node, 0);
   Node* context = NodeProperties::GetContextInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
   const LoadGlobalParameters& p = LoadGlobalParametersOf(node->op());
   Callable callable = CodeFactory::LoadICInOptimizedCode(
-      isolate(), p.typeof_mode(), SLOPPY, UNINITIALIZED);
+      isolate(), p.typeof_mode(), UNINITIALIZED);
+  // Load the type feedback vector from the closure.
+  Node* shared_info = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), closure,
+      jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
+                                kHeapObjectTag),
+      effect, control);
+  Node* vector = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), shared_info,
+      jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+                                kHeapObjectTag),
+      effect, control);
   // Load global object from the context.
-  Node* native_context =
+  Node* native_context = effect =
       graph()->NewNode(machine()->Load(MachineType::AnyTagged()), context,
                        jsgraph()->IntPtrConstant(
                            Context::SlotOffset(Context::NATIVE_CONTEXT_INDEX)),
-                       effect, graph()->start());
-  Node* global = graph()->NewNode(
+                       effect, control);
+  Node* global = effect = graph()->NewNode(
       machine()->Load(MachineType::AnyTagged()), native_context,
       jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
-      effect, graph()->start());
+      effect, control);
   node->InsertInput(zone(), 0, global);
   node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
   node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+  node->ReplaceInput(3, vector);
+  node->ReplaceInput(6, effect);
   ReplaceWithStubCall(node, callable, flags);
 }
 
 
 void JSGenericLowering::LowerJSStoreProperty(Node* node) {
+  Node* closure = NodeProperties::GetValueInput(node, 3);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
   PropertyAccess const& p = PropertyAccessOf(node->op());
   LanguageMode language_mode = p.language_mode();
   Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
       isolate(), language_mode, UNINITIALIZED);
-  DCHECK(p.feedback().index() != -1);
+  // Load the type feedback vector from the closure.
+  Node* shared_info = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), closure,
+      jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
+                                kHeapObjectTag),
+      effect, control);
+  Node* vector = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), shared_info,
+      jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+                                kHeapObjectTag),
+      effect, control);
   node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
-  ReplaceWithStubCall(node, callable,
-                      CallDescriptor::kPatchableCallSite | flags);
+  node->ReplaceInput(4, vector);
+  node->ReplaceInput(7, effect);
+  ReplaceWithStubCall(node, callable, flags);
 }
 
 
 void JSGenericLowering::LowerJSStoreNamed(Node* node) {
+  Node* closure = NodeProperties::GetValueInput(node, 2);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
   NamedAccess const& p = NamedAccessOf(node->op());
   Callable callable = CodeFactory::StoreICInOptimizedCode(
       isolate(), p.language_mode(), UNINITIALIZED);
+  // Load the type feedback vector from the closure.
+  Node* shared_info = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), closure,
+      jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
+                                kHeapObjectTag),
+      effect, control);
+  Node* vector = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), shared_info,
+      jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+                                kHeapObjectTag),
+      effect, control);
   node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
-  DCHECK(p.feedback().index() != -1);
   node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
-  ReplaceWithStubCall(node, callable,
-                      CallDescriptor::kPatchableCallSite | flags);
+  node->ReplaceInput(4, vector);
+  node->ReplaceInput(7, effect);
+  ReplaceWithStubCall(node, callable, flags);
 }
 
 
 void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
+  Node* closure = NodeProperties::GetValueInput(node, 1);
   Node* context = NodeProperties::GetContextInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
   const StoreGlobalParameters& p = StoreGlobalParametersOf(node->op());
   Callable callable = CodeFactory::StoreICInOptimizedCode(
       isolate(), p.language_mode(), UNINITIALIZED);
+  // Load the type feedback vector from the closure.
+  Node* shared_info = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), closure,
+      jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
+                                kHeapObjectTag),
+      effect, control);
+  Node* vector = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), shared_info,
+      jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+                                kHeapObjectTag),
+      effect, control);
   // Load global object from the context.
-  Node* native_context =
+  Node* native_context = effect =
       graph()->NewNode(machine()->Load(MachineType::AnyTagged()), context,
                        jsgraph()->IntPtrConstant(
                            Context::SlotOffset(Context::NATIVE_CONTEXT_INDEX)),
-                       effect, graph()->start());
-  Node* global = graph()->NewNode(
+                       effect, control);
+  Node* global = effect = graph()->NewNode(
       machine()->Load(MachineType::AnyTagged()), native_context,
       jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
-      effect, graph()->start());
+      effect, control);
   node->InsertInput(zone(), 0, global);
   node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
-  DCHECK(p.feedback().index() != -1);
   node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
-  ReplaceWithStubCall(node, callable,
-                      CallDescriptor::kPatchableCallSite | flags);
+  node->ReplaceInput(4, vector);
+  node->ReplaceInput(7, effect);
+  ReplaceWithStubCall(node, callable, flags);
 }
 
 
@@ -433,38 +413,24 @@
 }
 
 
-void JSGenericLowering::LowerJSLoadDynamic(Node* node) {
-  const DynamicAccess& access = DynamicAccessOf(node->op());
-  Runtime::FunctionId function_id =
-      (access.typeof_mode() == NOT_INSIDE_TYPEOF)
-          ? Runtime::kLoadLookupSlot
-          : Runtime::kLoadLookupSlotNoReferenceError;
-  Node* projection = graph()->NewNode(common()->Projection(0), node);
-  NodeProperties::ReplaceUses(node, projection, node, node, node);
-  node->RemoveInput(NodeProperties::FirstValueIndex(node));
-  node->InsertInput(zone(), 1, jsgraph()->Constant(access.name()));
-  ReplaceWithRuntimeCall(node, function_id);
-  projection->ReplaceInput(0, node);
-}
-
-
 void JSGenericLowering::LowerJSCreate(Node* node) {
-  ReplaceWithRuntimeCall(node, Runtime::kNewObject);
+  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  Callable callable = CodeFactory::FastNewObject(isolate());
+  ReplaceWithStubCall(node, callable, flags);
 }
 
 
 void JSGenericLowering::LowerJSCreateArguments(Node* node) {
-  const CreateArgumentsParameters& p = CreateArgumentsParametersOf(node->op());
-  switch (p.type()) {
-    case CreateArgumentsParameters::kMappedArguments:
+  CreateArgumentsType const type = CreateArgumentsTypeOf(node->op());
+  switch (type) {
+    case CreateArgumentsType::kMappedArguments:
       ReplaceWithRuntimeCall(node, Runtime::kNewSloppyArguments_Generic);
       break;
-    case CreateArgumentsParameters::kUnmappedArguments:
-      ReplaceWithRuntimeCall(node, Runtime::kNewStrictArguments_Generic);
+    case CreateArgumentsType::kUnmappedArguments:
+      ReplaceWithRuntimeCall(node, Runtime::kNewStrictArguments);
       break;
-    case CreateArgumentsParameters::kRestArray:
-      node->InsertInput(zone(), 1, jsgraph()->Constant(p.start_index()));
-      ReplaceWithRuntimeCall(node, Runtime::kNewRestArguments_Generic);
+    case CreateArgumentsType::kRestParameter:
+      ReplaceWithRuntimeCall(node, Runtime::kNewRestParameter);
       break;
   }
 }
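
    Note: LowerJSCreateArguments above is a plain exhaustive switch over
    CreateArgumentsType. A self-contained sketch of that shape, with
    hypothetical names; leaving out the default case lets the compiler warn
    when a new enumerator is added:

    #include <string>

    enum class ArgsKind { kMapped, kUnmapped, kRestParameter };

    // Map each arguments-object kind to the name of a (hypothetical) runtime
    // function.
    std::string RuntimeFor(ArgsKind kind) {
      switch (kind) {
        case ArgsKind::kMapped:        return "NewSloppyArguments";
        case ArgsKind::kUnmapped:      return "NewStrictArguments";
        case ArgsKind::kRestParameter: return "NewRestParameter";
      }
      return "";  // unreachable for valid enumerators
    }
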
@@ -473,7 +439,8 @@
 void JSGenericLowering::LowerJSCreateArray(Node* node) {
   CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
   int const arity = static_cast<int>(p.arity());
-  Node* new_target = node->InputAt(1);
+  Handle<AllocationSite> const site = p.site();
+
   // TODO(turbofan): We embed the AllocationSite from the Operator at this
   // point, which we should not do once we want to both consume the feedback
   // but at the same time share the optimized code across native contexts,
@@ -481,21 +448,93 @@
   // stored in the type feedback vector after all). Once we go for cross
   // context code generation, we should somehow find a way to get to the
   // allocation site for the actual native context at runtime.
-  Node* type_info = p.site().is_null() ? jsgraph()->UndefinedConstant()
-                                       : jsgraph()->HeapConstant(p.site());
-  node->RemoveInput(1);
-  node->InsertInput(zone(), 1 + arity, new_target);
-  node->InsertInput(zone(), 2 + arity, type_info);
-  ReplaceWithRuntimeCall(node, Runtime::kNewArray, arity + 3);
+  if (!site.is_null()) {
+    // Reduce {node} to the appropriate ArrayConstructorStub backend.
+    // Note that these stubs "behave" like JSFunctions, which means they
+    // expect a receiver on the stack, which they remove. We just push
+    // undefined for the receiver.
+    ElementsKind elements_kind = site->GetElementsKind();
+    AllocationSiteOverrideMode override_mode =
+        (AllocationSite::GetMode(elements_kind) == TRACK_ALLOCATION_SITE)
+            ? DISABLE_ALLOCATION_SITES
+            : DONT_OVERRIDE;
+    if (arity == 0) {
+      ArrayNoArgumentConstructorStub stub(isolate(), elements_kind,
+                                          override_mode);
+      CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+          isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 1,
+          CallDescriptor::kNeedsFrameState);
+      node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+      node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+      node->InsertInput(graph()->zone(), 3, jsgraph()->UndefinedConstant());
+      NodeProperties::ChangeOp(node, common()->Call(desc));
+    } else if (arity == 1) {
+      // TODO(bmeurer): Optimize for the 0 length non-holey case?
+      ArraySingleArgumentConstructorStub stub(
+          isolate(), GetHoleyElementsKind(elements_kind), override_mode);
+      CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+          isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
+          CallDescriptor::kNeedsFrameState);
+      node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+      node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+      node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(1));
+      node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+      NodeProperties::ChangeOp(node, common()->Call(desc));
+    } else {
+      ArrayNArgumentsConstructorStub stub(isolate(), elements_kind,
+                                          override_mode);
+      CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+          isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
+          arity + 1, CallDescriptor::kNeedsFrameState);
+      node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+      node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+      node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
+      node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+      NodeProperties::ChangeOp(node, common()->Call(desc));
+    }
+  } else {
+    Node* new_target = node->InputAt(1);
+    Node* type_info = site.is_null() ? jsgraph()->UndefinedConstant()
+                                     : jsgraph()->HeapConstant(site);
+    node->RemoveInput(1);
+    node->InsertInput(zone(), 1 + arity, new_target);
+    node->InsertInput(zone(), 2 + arity, type_info);
+    ReplaceWithRuntimeCall(node, Runtime::kNewArray, arity + 3);
+  }
 }
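
    Note: LowerJSCreateArray picks among three constructor stubs by arity
    (0, 1, N), and only when an AllocationSite is known; otherwise it falls
    back to Runtime::kNewArray. A standalone sketch of that dispatch structure
    with hypothetical stand-ins for the stubs:

    #include <cstdio>

    // The functions below only model the shape of the dispatch, not the stubs.
    void NoArgumentStub()            { std::puts("Array()"); }
    void SingleArgumentStub(int len) { std::printf("Array(%d)\n", len); }
    void NArgumentsStub(int arity)   { std::printf("Array(...%d args)\n", arity); }
    void RuntimeNewArray(int arity)  { std::printf("runtime NewArray, %d args\n", arity); }

    void LowerCreateArray(bool has_allocation_site, int arity, int first_arg) {
      if (!has_allocation_site) {   // no site feedback: generic runtime path
        RuntimeNewArray(arity);
      } else if (arity == 0) {
        NoArgumentStub();
      } else if (arity == 1) {      // may still produce a holey array
        SingleArgumentStub(first_arg);
      } else {
        NArgumentsStub(arity);
      }
    }

    int main() {
      LowerCreateArray(true, 0, 0);
      LowerCreateArray(true, 1, 8);
      LowerCreateArray(false, 3, 0);
    }
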
 
 
 void JSGenericLowering::LowerJSCreateClosure(Node* node) {
-  CreateClosureParameters p = CreateClosureParametersOf(node->op());
-  node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.shared_info()));
-  ReplaceWithRuntimeCall(node, (p.pretenure() == TENURED)
-                                   ? Runtime::kNewClosure_Tenured
-                                   : Runtime::kNewClosure);
+  CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
+  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  Handle<SharedFunctionInfo> const shared_info = p.shared_info();
+  node->InsertInput(zone(), 0, jsgraph()->HeapConstant(shared_info));
+
+  // Use the FastNewClosureStub that allocates in new space only for nested
+  // functions that don't need literals cloning.
+  if (p.pretenure() == NOT_TENURED && shared_info->num_literals() == 0) {
+    Callable callable = CodeFactory::FastNewClosure(
+        isolate(), shared_info->language_mode(), shared_info->kind());
+    ReplaceWithStubCall(node, callable, flags);
+  } else {
+    ReplaceWithRuntimeCall(node, (p.pretenure() == TENURED)
+                                     ? Runtime::kNewClosure_Tenured
+                                     : Runtime::kNewClosure);
+  }
+}
+
+
+void JSGenericLowering::LowerJSCreateFunctionContext(Node* node) {
+  int const slot_count = OpParameter<int>(node->op());
+  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+
+  // Use the FastNewContextStub only for function contexts up to the maximum size.
+  if (slot_count <= FastNewContextStub::kMaximumSlots) {
+    Callable callable = CodeFactory::FastNewContext(isolate(), slot_count);
+    ReplaceWithStubCall(node, callable, flags);
+  } else {
+    ReplaceWithRuntimeCall(node, Runtime::kNewFunctionContext);
+  }
 }
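
    Note: LowerJSCreateClosure and LowerJSCreateFunctionContext follow the same
    policy: use the fast stub when the input fits the stub's limits, otherwise
    fall back to the runtime. A minimal sketch of that fallback policy with an
    assumed limit (the real bound is FastNewContextStub::kMaximumSlots):

    #include <cstdio>

    // Assumed limit, for illustration only.
    constexpr int kMaxFastSlots = 64;

    void LowerCreateFunctionContextSketch(int slot_count) {
      if (slot_count <= kMaxFastSlots) {
        std::printf("stub: FastNewContext(%d)\n", slot_count);         // fast path
      } else {
        std::printf("runtime: NewFunctionContext(%d)\n", slot_count);  // fallback
      }
    }
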
 
 
@@ -506,19 +545,42 @@
 
 void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
   CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  int const length = Handle<FixedArray>::cast(p.constant())->length();
   node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
   node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
-  node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
-  ReplaceWithRuntimeCall(node, Runtime::kCreateArrayLiteral);
+
+  // Use the FastCloneShallowArrayStub only for shallow boilerplates up to the
+  // initial length limit for arrays with "fast" elements kind.
+  if ((p.flags() & ArrayLiteral::kShallowElements) != 0 &&
+      (p.flags() & ArrayLiteral::kIsStrong) == 0 &&
+      length < JSArray::kInitialMaxFastElementArray) {
+    Callable callable = CodeFactory::FastCloneShallowArray(isolate());
+    ReplaceWithStubCall(node, callable, flags);
+  } else {
+    node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
+    ReplaceWithRuntimeCall(node, Runtime::kCreateArrayLiteral);
+  }
 }
 
 
 void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
   CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  int const length = Handle<FixedArray>::cast(p.constant())->length();
   node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
   node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
   node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
-  ReplaceWithRuntimeCall(node, Runtime::kCreateObjectLiteral);
+
+  // Use the FastCloneShallowObjectStub only for shallow boilerplates without
+  // elements up to the number of properties that the stubs can handle.
+  if ((p.flags() & ObjectLiteral::kShallowProperties) != 0 &&
+      length <= FastCloneShallowObjectStub::kMaximumClonedProperties) {
+    Callable callable = CodeFactory::FastCloneShallowObject(isolate(), length);
+    ReplaceWithStubCall(node, callable, flags);
+  } else {
+    ReplaceWithRuntimeCall(node, Runtime::kCreateObjectLiteral);
+  }
 }
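
    Note: the two literal lowerings above gate the fast-clone stubs on flag
    bits of the literal plus a size bound (shallow elements, not strong, and a
    length under the fast-array limit; shallow properties within the stub's
    property limit). A standalone sketch of combining bitmask tests with a
    bound, using made-up flag values and limit:

    #include <cstdio>

    // Made-up bits and limit; the real ones are ArrayLiteral::kShallowElements
    // etc. and JSArray::kInitialMaxFastElementArray.
    constexpr int kShallowElements = 1 << 0;
    constexpr int kIsStrong        = 1 << 1;
    constexpr int kMaxFastLength   = 1024;

    bool UseFastCloneShallowArray(int flags, int length) {
      return (flags & kShallowElements) != 0 &&  // boilerplate has no nested parts
             (flags & kIsStrong) == 0 &&         // strong-mode literals excluded
             length < kMaxFastLength;            // stays in the "fast" elements range
    }

    int main() {
      std::printf("%d\n", UseFastCloneShallowArray(kShallowElements, 8));     // 1
      std::printf("%d\n", UseFastCloneShallowArray(kShallowElements, 4096));  // 0
    }
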
 
 
@@ -614,173 +676,7 @@
 
 
 void JSGenericLowering::LowerJSForInPrepare(Node* node) {
-  Node* object = NodeProperties::GetValueInput(node, 0);
-  Node* context = NodeProperties::GetContextInput(node);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-  Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
-
-  // Get the set of properties to enumerate.
-  Runtime::Function const* function =
-      Runtime::FunctionForId(Runtime::kGetPropertyNamesFast);
-  CallDescriptor const* descriptor = Linkage::GetRuntimeCallDescriptor(
-      zone(), function->function_id, 1, Operator::kNoProperties,
-      CallDescriptor::kNeedsFrameState);
-  Node* cache_type = effect = graph()->NewNode(
-      common()->Call(descriptor),
-      jsgraph()->CEntryStubConstant(function->result_size), object,
-      jsgraph()->ExternalConstant(function->function_id),
-      jsgraph()->Int32Constant(1), context, frame_state, effect, control);
-  control = graph()->NewNode(common()->IfSuccess(), cache_type);
-
-  Node* object_map = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), object,
-      jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
-      effect, control);
-  Node* cache_type_map = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), cache_type,
-      jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
-      effect, control);
-  Node* meta_map = jsgraph()->HeapConstant(isolate()->factory()->meta_map());
-
-  // If we got a map from the GetPropertyNamesFast runtime call, we can do a
-  // fast modification check. Otherwise, we got a fixed array, and we have to
-  // perform a slow check on every iteration.
-  Node* check0 =
-      graph()->NewNode(machine()->WordEqual(), cache_type_map, meta_map);
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
-
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* cache_array_true0;
-  Node* cache_length_true0;
-  Node* cache_type_true0;
-  Node* etrue0;
-  {
-    // Enum cache case.
-    Node* cache_type_enum_length = etrue0 = graph()->NewNode(
-        machine()->Load(MachineType::Uint32()), cache_type,
-        jsgraph()->IntPtrConstant(Map::kBitField3Offset - kHeapObjectTag),
-        effect, if_true0);
-    cache_type_enum_length =
-        graph()->NewNode(machine()->Word32And(), cache_type_enum_length,
-                         jsgraph()->Uint32Constant(Map::EnumLengthBits::kMask));
-
-    Node* check1 =
-        graph()->NewNode(machine()->Word32Equal(), cache_type_enum_length,
-                         jsgraph()->Int32Constant(0));
-    Node* branch1 =
-        graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* cache_array_true1;
-    Node* etrue1;
-    {
-      // No properties to enumerate.
-      cache_array_true1 =
-          jsgraph()->HeapConstant(isolate()->factory()->empty_fixed_array());
-      etrue1 = etrue0;
-    }
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* cache_array_false1;
-    Node* efalse1;
-    {
-      // Load the enumeration cache from the instance descriptors of {object}.
-      Node* object_map_descriptors = efalse1 = graph()->NewNode(
-          machine()->Load(MachineType::AnyTagged()), object_map,
-          jsgraph()->IntPtrConstant(Map::kDescriptorsOffset - kHeapObjectTag),
-          etrue0, if_false1);
-      Node* object_map_enum_cache = efalse1 = graph()->NewNode(
-          machine()->Load(MachineType::AnyTagged()), object_map_descriptors,
-          jsgraph()->IntPtrConstant(DescriptorArray::kEnumCacheOffset -
-                                    kHeapObjectTag),
-          efalse1, if_false1);
-      cache_array_false1 = efalse1 = graph()->NewNode(
-          machine()->Load(MachineType::AnyTagged()), object_map_enum_cache,
-          jsgraph()->IntPtrConstant(
-              DescriptorArray::kEnumCacheBridgeCacheOffset - kHeapObjectTag),
-          efalse1, if_false1);
-    }
-
-    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    etrue0 =
-        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
-    cache_array_true0 =
-        graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                         cache_array_true1, cache_array_false1, if_true0);
-
-    cache_length_true0 = graph()->NewNode(
-        machine()->WordShl(),
-        machine()->Is64()
-            ? graph()->NewNode(machine()->ChangeUint32ToUint64(),
-                               cache_type_enum_length)
-            : cache_type_enum_length,
-        jsgraph()->Int32Constant(kSmiShiftSize + kSmiTagSize));
-    cache_type_true0 = cache_type;
-  }
-
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* cache_array_false0;
-  Node* cache_length_false0;
-  Node* cache_type_false0;
-  Node* efalse0;
-  {
-    // FixedArray case.
-    cache_type_false0 = jsgraph()->OneConstant();  // Smi means slow check
-    cache_array_false0 = cache_type;
-    cache_length_false0 = efalse0 = graph()->NewNode(
-        machine()->Load(MachineType::AnyTagged()), cache_array_false0,
-        jsgraph()->IntPtrConstant(FixedArray::kLengthOffset - kHeapObjectTag),
-        effect, if_false0);
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
-  Node* cache_array =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                       cache_array_true0, cache_array_false0, control);
-  Node* cache_length =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                       cache_length_true0, cache_length_false0, control);
-  cache_type =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                       cache_type_true0, cache_type_false0, control);
-
-  for (auto edge : node->use_edges()) {
-    if (NodeProperties::IsEffectEdge(edge)) {
-      edge.UpdateTo(effect);
-    } else if (NodeProperties::IsControlEdge(edge)) {
-      Node* const use = edge.from();
-      if (use->opcode() == IrOpcode::kIfSuccess) {
-        use->ReplaceUses(control);
-        use->Kill();
-      } else if (use->opcode() == IrOpcode::kIfException) {
-        edge.UpdateTo(cache_type_true0);
-      } else {
-        UNREACHABLE();
-      }
-    } else {
-      Node* const use = edge.from();
-      DCHECK(NodeProperties::IsValueEdge(edge));
-      DCHECK_EQ(IrOpcode::kProjection, use->opcode());
-      switch (ProjectionIndexOf(use->op())) {
-        case 0:
-          use->ReplaceUses(cache_type);
-          break;
-        case 1:
-          use->ReplaceUses(cache_array);
-          break;
-        case 2:
-          use->ReplaceUses(cache_length);
-          break;
-        default:
-          UNREACHABLE();
-          break;
-      }
-      use->Kill();
-    }
-  }
+  ReplaceWithRuntimeCall(node, Runtime::kForInPrepare);
 }
 
 
diff --git a/src/compiler/js-generic-lowering.h b/src/compiler/js-generic-lowering.h
index ffce912..5ee759b 100644
--- a/src/compiler/js-generic-lowering.h
+++ b/src/compiler/js-generic-lowering.h
@@ -36,7 +36,6 @@
 #undef DECLARE_LOWER
 
   // Helpers to replace existing nodes with a generic call.
-  void ReplaceWithCompareIC(Node* node, Token::Value token, Strength strength);
   void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags);
   void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1);
 
diff --git a/src/compiler/js-global-object-specialization.cc b/src/compiler/js-global-object-specialization.cc
index e6f01b3..132dec6 100644
--- a/src/compiler/js-global-object-specialization.cc
+++ b/src/compiler/js-global-object-specialization.cc
@@ -27,11 +27,10 @@
 
 
 JSGlobalObjectSpecialization::JSGlobalObjectSpecialization(
-    Editor* editor, JSGraph* jsgraph, Flags flags,
+    Editor* editor, JSGraph* jsgraph,
     MaybeHandle<Context> native_context, CompilationDependencies* dependencies)
     : AdvancedReducer(editor),
       jsgraph_(jsgraph),
-      flags_(flags),
       native_context_(native_context),
       dependencies_(dependencies),
       type_cache_(TypeCache::Get()) {}
@@ -49,7 +48,6 @@
   return NoChange();
 }
 
-
 Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
   DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
   Handle<Name> name = LoadGlobalParametersOf(node->op()).name();
@@ -88,47 +86,36 @@
     return Replace(value);
   }
 
-  // Load from non-configurable, data property on the global can be lowered to
-  // a field load, even without deoptimization, because the property cannot be
-  // deleted or reconfigured to an accessor/interceptor property.  Yet, if
-  // deoptimization support is available, we can constant-fold certain global
-  // properties or at least lower them to field loads annotated with more
-  // precise type feedback.
+  // Record a code dependency on the cell if we can benefit from the
+  // additional feedback, or the global property is configurable (i.e.
+  // can be deleted or reconfigured to an accessor property).
+  if (property_details.cell_type() != PropertyCellType::kMutable ||
+      property_details.IsConfigurable()) {
+    dependencies()->AssumePropertyCell(property_cell);
+  }
+
+  // Load from constant/undefined global property can be constant-folded.
+  if (property_details.cell_type() == PropertyCellType::kConstant ||
+      property_details.cell_type() == PropertyCellType::kUndefined) {
+    Node* value = jsgraph()->Constant(property_cell_value);
+    ReplaceWithValue(node, value);
+    return Replace(value);
+  }
+
+  // Load from constant type cell can benefit from type feedback.
   Type* property_cell_value_type = Type::Tagged();
-  if (flags() & kDeoptimizationEnabled) {
-    // Record a code dependency on the cell if we can benefit from the
-    // additional feedback, or the global property is configurable (i.e.
-    // can be deleted or reconfigured to an accessor property).
-    if (property_details.cell_type() != PropertyCellType::kMutable ||
-        property_details.IsConfigurable()) {
-      dependencies()->AssumePropertyCell(property_cell);
+  if (property_details.cell_type() == PropertyCellType::kConstantType) {
+    // Compute proper type based on the current value in the cell.
+    if (property_cell_value->IsSmi()) {
+      property_cell_value_type = type_cache_.kSmi;
+    } else if (property_cell_value->IsNumber()) {
+      property_cell_value_type = type_cache_.kHeapNumber;
+    } else {
+      Handle<Map> property_cell_value_map(
+          Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
+      property_cell_value_type =
+          Type::Class(property_cell_value_map, graph()->zone());
     }
-
-    // Load from constant/undefined global property can be constant-folded.
-    if ((property_details.cell_type() == PropertyCellType::kConstant ||
-         property_details.cell_type() == PropertyCellType::kUndefined)) {
-      Node* value = jsgraph()->Constant(property_cell_value);
-      ReplaceWithValue(node, value);
-      return Replace(value);
-    }
-
-    // Load from constant type cell can benefit from type feedback.
-    if (property_details.cell_type() == PropertyCellType::kConstantType) {
-      // Compute proper type based on the current value in the cell.
-      if (property_cell_value->IsSmi()) {
-        property_cell_value_type = type_cache_.kSmi;
-      } else if (property_cell_value->IsNumber()) {
-        property_cell_value_type = type_cache_.kHeapNumber;
-      } else {
-        Handle<Map> property_cell_value_map(
-            Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
-        property_cell_value_type =
-            Type::Class(property_cell_value_map, graph()->zone());
-      }
-    }
-  } else if (property_details.IsConfigurable()) {
-    // Access to configurable global properties requires deoptimization support.
-    return NoChange();
   }
   Node* value = effect = graph()->NewNode(
       simplified()->LoadField(
@@ -178,9 +165,8 @@
       return NoChange();
     }
     case PropertyCellType::kConstant: {
-      // Store to constant property cell requires deoptimization support,
-      // because we might even need to eager deoptimize for mismatch.
-      if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+      // Record a code dependency on the cell, and just deoptimize if the new
+      // value doesn't match the previous value stored inside the cell.
       dependencies()->AssumePropertyCell(property_cell);
       Node* check =
           graph()->NewNode(simplified()->ReferenceEqual(Type::Tagged()), value,
@@ -193,13 +179,13 @@
                            frame_state, effect, if_false);
       // TODO(bmeurer): This should be on the AdvancedReducer somehow.
       NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+      Revisit(graph()->end());
       control = graph()->NewNode(common()->IfTrue(), branch);
       break;
     }
     case PropertyCellType::kConstantType: {
-      // Store to constant-type property cell requires deoptimization support,
-      // because we might even need to eager deoptimize for mismatch.
-      if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+      // Record a code dependency on the cell, and just deoptimize if the new
+      // value's type doesn't match the type of the previous value in the cell.
       dependencies()->AssumePropertyCell(property_cell);
       Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
       Type* property_cell_value_type = Type::TaggedSigned();
@@ -213,6 +199,7 @@
                              frame_state, effect, if_true);
         // TODO(bmeurer): This should be on the AdvancedReducer somehow.
         NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+        Revisit(graph()->end());
         control = graph()->NewNode(common()->IfFalse(), branch);
 
         // Load the {value} map check against the {property_cell} map.
@@ -234,6 +221,7 @@
                            frame_state, effect, if_false);
       // TODO(bmeurer): This should be on the AdvancedReducer somehow.
       NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+      Revisit(graph()->end());
       control = graph()->NewNode(common()->IfTrue(), branch);
       effect = graph()->NewNode(
           simplified()->StoreField(
@@ -243,13 +231,11 @@
     }
     case PropertyCellType::kMutable: {
       // Store to non-configurable, data property on the global can be lowered
-      // to a field store, even without deoptimization, because the property
-      // cannot be deleted or reconfigured to an accessor/interceptor property.
+      // to a field store, even without recording a code dependency on the cell,
+      // because the property cannot be deleted or reconfigured to an accessor
+      // or interceptor property.
       if (property_details.IsConfigurable()) {
-        // With deoptimization support, we can lower stores even to configurable
-        // data properties on the global object, by adding a code dependency on
-        // the cell.
-        if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+        // Protect lowering by recording a code dependency on the cell.
         dependencies()->AssumePropertyCell(property_cell);
       }
       effect = graph()->NewNode(
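
    Note: after this change, ReduceJSLoadGlobal always records the property
    cell dependency when it helps, constant-folds kConstant/kUndefined cells,
    and narrows the type for kConstantType cells, instead of gating all of this
    on a deoptimization flag. A compact standalone sketch of that dispatch on
    the cell type, with hypothetical types standing in for V8's:

    #include <string>

    enum class CellType { kUndefined, kConstant, kConstantType, kMutable };

    // Decide how a global load through a property cell is lowered; the strings
    // stand in for graph nodes.
    std::string LowerGlobalLoad(CellType type, bool configurable) {
      std::string out;
      // Depend on the cell whenever extra feedback helps, or the property could
      // be reconfigured underneath the compiled code.
      if (type != CellType::kMutable || configurable) out += "AssumePropertyCell; ";
      switch (type) {
        case CellType::kUndefined:
        case CellType::kConstant:
          return out + "fold to constant";
        case CellType::kConstantType:
          return out + "field load with narrowed type";
        case CellType::kMutable:
          return out + "plain field load";
      }
      return out;  // unreachable for valid enumerators
    }
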
diff --git a/src/compiler/js-global-object-specialization.h b/src/compiler/js-global-object-specialization.h
index 83d890c..3ffc67a 100644
--- a/src/compiler/js-global-object-specialization.h
+++ b/src/compiler/js-global-object-specialization.h
@@ -5,7 +5,6 @@
 #ifndef V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
 #define V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
 
-#include "src/base/flags.h"
 #include "src/compiler/graph-reducer.h"
 
 namespace v8 {
@@ -30,14 +29,7 @@
 // nodes.
 class JSGlobalObjectSpecialization final : public AdvancedReducer {
  public:
-  // Flags that control the mode of operation.
-  enum Flag {
-    kNoFlags = 0u,
-    kDeoptimizationEnabled = 1u << 0,
-  };
-  typedef base::Flags<Flag> Flags;
-
-  JSGlobalObjectSpecialization(Editor* editor, JSGraph* jsgraph, Flags flags,
+  JSGlobalObjectSpecialization(Editor* editor, JSGraph* jsgraph,
                                MaybeHandle<Context> native_context,
                                CompilationDependencies* dependencies);
 
@@ -61,12 +53,10 @@
   CommonOperatorBuilder* common() const;
   JSOperatorBuilder* javascript() const;
   SimplifiedOperatorBuilder* simplified() const;
-  Flags flags() const { return flags_; }
   MaybeHandle<Context> native_context() const { return native_context_; }
   CompilationDependencies* dependencies() const { return dependencies_; }
 
   JSGraph* const jsgraph_;
-  Flags const flags_;
   MaybeHandle<Context> native_context_;
   CompilationDependencies* const dependencies_;
   TypeCache const& type_cache_;
@@ -74,8 +64,6 @@
   DISALLOW_COPY_AND_ASSIGN(JSGlobalObjectSpecialization);
 };
 
-DEFINE_OPERATORS_FOR_FLAGS(JSGlobalObjectSpecialization::Flags)
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc
index 99a1547..2244f9b 100644
--- a/src/compiler/js-inlining.cc
+++ b/src/compiler/js-inlining.cc
@@ -205,6 +205,7 @@
       case IrOpcode::kThrow:
         NodeProperties::MergeControlToEnd(jsgraph_->graph(), jsgraph_->common(),
                                           input);
+        Revisit(jsgraph_->graph()->end());
         break;
       default:
         UNREACHABLE();
@@ -243,8 +244,7 @@
                                             Handle<SharedFunctionInfo> shared) {
   const FrameStateFunctionInfo* state_info =
       jsgraph_->common()->CreateFrameStateFunctionInfo(
-          frame_state_type, parameter_count + 1, 0, shared,
-          CALL_MAINTAINS_NATIVE_CONTEXT);
+          frame_state_type, parameter_count + 1, 0, shared);
 
   const Operator* op = jsgraph_->common()->FrameState(
       BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
@@ -267,10 +267,18 @@
 namespace {
 
 // TODO(mstarzinger,verwaest): Move this predicate onto SharedFunctionInfo?
-bool NeedsImplicitReceiver(Handle<JSFunction> function, Isolate* isolate) {
-  Code* construct_stub = function->shared()->construct_stub();
-  return construct_stub != *isolate->builtins()->JSBuiltinsConstructStub() &&
-         construct_stub != *isolate->builtins()->ConstructedNonConstructable();
+bool NeedsImplicitReceiver(Handle<SharedFunctionInfo> shared_info) {
+  DisallowHeapAllocation no_gc;
+  Isolate* const isolate = shared_info->GetIsolate();
+  Code* const construct_stub = shared_info->construct_stub();
+  return construct_stub != *isolate->builtins()->JSBuiltinsConstructStub();
+}
+
+bool IsNonConstructible(Handle<SharedFunctionInfo> shared_info) {
+  DisallowHeapAllocation no_gc;
+  Isolate* const isolate = shared_info->GetIsolate();
+  Code* const construct_stub = shared_info->construct_stub();
+  return construct_stub == *isolate->builtins()->ConstructedNonConstructable();
 }
 
 }  // namespace
@@ -294,20 +302,21 @@
 Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
   DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
   JSCallAccessor call(node);
+  Handle<SharedFunctionInfo> shared_info(function->shared());
 
   // Function must be inlineable.
-  if (!function->shared()->IsInlineable()) {
+  if (!shared_info->IsInlineable()) {
     TRACE("Not inlining %s into %s because callee is not inlineable\n",
-          function->shared()->DebugName()->ToCString().get(),
+          shared_info->DebugName()->ToCString().get(),
           info_->shared_info()->DebugName()->ToCString().get());
     return NoChange();
   }
 
   // Constructor must be constructable.
   if (node->opcode() == IrOpcode::kJSCallConstruct &&
-      !function->IsConstructor()) {
+      IsNonConstructible(shared_info)) {
     TRACE("Not inlining %s into %s because constructor is not constructable.\n",
-          function->shared()->DebugName()->ToCString().get(),
+          shared_info->DebugName()->ToCString().get(),
           info_->shared_info()->DebugName()->ToCString().get());
     return NoChange();
   }
@@ -315,17 +324,17 @@
   // Class constructors are callable, but [[Call]] will raise an exception.
   // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
   if (node->opcode() == IrOpcode::kJSCallFunction &&
-      IsClassConstructor(function->shared()->kind())) {
+      IsClassConstructor(shared_info->kind())) {
     TRACE("Not inlining %s into %s because callee is a class constructor.\n",
-          function->shared()->DebugName()->ToCString().get(),
+          shared_info->DebugName()->ToCString().get(),
           info_->shared_info()->DebugName()->ToCString().get());
     return NoChange();
   }
 
   // Function contains break points.
-  if (function->shared()->HasDebugInfo()) {
+  if (shared_info->HasDebugInfo()) {
     TRACE("Not inlining %s into %s because callee may contain break points\n",
-          function->shared()->DebugName()->ToCString().get(),
+          shared_info->DebugName()->ToCString().get(),
           info_->shared_info()->DebugName()->ToCString().get());
     return NoChange();
   }
@@ -341,7 +350,7 @@
   if (function->context()->native_context() !=
       info_->context()->native_context()) {
     TRACE("Not inlining %s into %s because of different native contexts\n",
-          function->shared()->DebugName()->ToCString().get(),
+          shared_info->DebugName()->ToCString().get(),
           info_->shared_info()->DebugName()->ToCString().get());
     return NoChange();
   }
@@ -352,12 +361,12 @@
   for (Node* frame_state = call.frame_state_after();
        frame_state->opcode() == IrOpcode::kFrameState;
        frame_state = frame_state->InputAt(kFrameStateOuterStateInput)) {
-    FrameStateInfo const& info = OpParameter<FrameStateInfo>(frame_state);
-    Handle<SharedFunctionInfo> shared_info;
-    if (info.shared_info().ToHandle(&shared_info) &&
-        *shared_info == function->shared()) {
+    FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
+    Handle<SharedFunctionInfo> frame_shared_info;
+    if (frame_info.shared_info().ToHandle(&frame_shared_info) &&
+        *frame_shared_info == *shared_info) {
       TRACE("Not inlining %s into %s because call is recursive\n",
-            function->shared()->DebugName()->ToCString().get(),
+            shared_info->DebugName()->ToCString().get(),
             info_->shared_info()->DebugName()->ToCString().get());
       return NoChange();
     }
@@ -366,7 +375,7 @@
   // TODO(turbofan): Inlining into a try-block is not yet supported.
   if (NodeProperties::IsExceptionalCall(node)) {
     TRACE("Not inlining %s into %s because of surrounding try-block\n",
-          function->shared()->DebugName()->ToCString().get(),
+          shared_info->DebugName()->ToCString().get(),
           info_->shared_info()->DebugName()->ToCString().get());
     return NoChange();
   }
@@ -374,13 +383,11 @@
   Zone zone;
   ParseInfo parse_info(&zone, function);
   CompilationInfo info(&parse_info);
-  if (info_->is_deoptimization_enabled()) {
-    info.MarkAsDeoptimizationEnabled();
-  }
+  if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
 
   if (!Compiler::ParseAndAnalyze(info.parse_info())) {
     TRACE("Not inlining %s into %s because parsing failed\n",
-          function->shared()->DebugName()->ToCString().get(),
+          shared_info->DebugName()->ToCString().get(),
           info_->shared_info()->DebugName()->ToCString().get());
     if (info_->isolate()->has_pending_exception()) {
       info_->isolate()->clear_pending_exception();
@@ -394,28 +401,28 @@
   if (is_strong(info.language_mode()) &&
       call.formal_arguments() < parameter_count) {
     TRACE("Not inlining %s into %s because too few arguments for strong mode\n",
-          function->shared()->DebugName()->ToCString().get(),
+          shared_info->DebugName()->ToCString().get(),
           info_->shared_info()->DebugName()->ToCString().get());
     return NoChange();
   }
 
   if (!Compiler::EnsureDeoptimizationSupport(&info)) {
     TRACE("Not inlining %s into %s because deoptimization support failed\n",
-          function->shared()->DebugName()->ToCString().get(),
+          shared_info->DebugName()->ToCString().get(),
           info_->shared_info()->DebugName()->ToCString().get());
     return NoChange();
   }
   // Remember that we inlined this function. This needs to be called right
   // after we ensure deoptimization support so that the code flusher
   // does not remove the code with the deoptimization support.
-  info_->AddInlinedFunction(info.shared_info());
+  info_->AddInlinedFunction(shared_info);
 
   // ----------------------------------------------------------------
   // After this point, we've made a decision to inline this function.
   // We shall not bailout from inlining if we got here.
 
   TRACE("Inlining %s into %s\n",
-        function->shared()->DebugName()->ToCString().get(),
+        shared_info->DebugName()->ToCString().get(),
         info_->shared_info()->DebugName()->ToCString().get());
 
   // TODO(mstarzinger): We could use the temporary zone for the graph because
@@ -442,7 +449,7 @@
   // Note that the context has to be the callers context (input to call node).
   Node* receiver = jsgraph_->UndefinedConstant();  // Implicit receiver.
   if (node->opcode() == IrOpcode::kJSCallConstruct &&
-      NeedsImplicitReceiver(function, info_->isolate())) {
+      NeedsImplicitReceiver(shared_info)) {
     Node* effect = NodeProperties::GetEffectInput(node);
     Node* context = NodeProperties::GetContextInput(node);
     Node* create = jsgraph_->graph()->NewNode(
@@ -491,7 +498,7 @@
   // in that frame state tho, as the conversion of the receiver can be repeated
   // any number of times, it's not observable.
   if (node->opcode() == IrOpcode::kJSCallFunction &&
-      is_sloppy(info.language_mode()) && !function->shared()->native()) {
+      is_sloppy(info.language_mode()) && !shared_info->native()) {
     const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
     Node* effect = NodeProperties::GetEffectInput(node);
     Node* convert = jsgraph_->graph()->NewNode(
@@ -509,7 +516,7 @@
   if (call.formal_arguments() != parameter_count) {
     frame_state = CreateArtificialFrameState(
         node, frame_state, call.formal_arguments(),
-        FrameStateType::kArgumentsAdaptor, info.shared_info());
+        FrameStateType::kArgumentsAdaptor, shared_info);
   }
 
   return InlineCall(node, new_target, context, frame_state, start, end);
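
    Note: ReduceJSCall in js-inlining.cc is organized as a chain of early-out
    checks against the callee's SharedFunctionInfo (inlineable, constructable,
    not a class constructor, no debug info, same native context, not recursive,
    not inside a try-block), each bailing out with a trace message before the
    expensive parse/analyze work. A minimal sketch of that guard-clause shape
    with hypothetical predicates:

    #include <cstdio>

    struct CalleeInfo {            // hypothetical stand-in for SharedFunctionInfo
      bool inlineable;
      bool constructable;
      bool has_debug_info;
      bool same_native_context;
    };

    // Returns true when inlining proceeds; each guard mirrors one early bailout.
    bool TryInline(const CalleeInfo& callee, bool is_construct_call) {
      if (!callee.inlineable) { std::puts("not inlineable"); return false; }
      if (is_construct_call && !callee.constructable) {
        std::puts("not constructable");
        return false;
      }
      if (callee.has_debug_info) { std::puts("may contain breakpoints"); return false; }
      if (!callee.same_native_context) { std::puts("different native context"); return false; }
      // ...only now pay for parsing, analysis and deoptimization support.
      std::puts("inlining");
      return true;
    }
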
diff --git a/src/compiler/js-intrinsic-lowering.cc b/src/compiler/js-intrinsic-lowering.cc
index ca5cb93..abeb110 100644
--- a/src/compiler/js-intrinsic-lowering.cc
+++ b/src/compiler/js-intrinsic-lowering.cc
@@ -49,20 +49,14 @@
       return ReduceIncrementStatsCounter(node);
     case Runtime::kInlineIsArray:
       return ReduceIsInstanceType(node, JS_ARRAY_TYPE);
-    case Runtime::kInlineIsDate:
-      return ReduceIsInstanceType(node, JS_DATE_TYPE);
     case Runtime::kInlineIsTypedArray:
       return ReduceIsInstanceType(node, JS_TYPED_ARRAY_TYPE);
-    case Runtime::kInlineIsFunction:
-      return ReduceIsFunction(node);
     case Runtime::kInlineIsRegExp:
       return ReduceIsInstanceType(node, JS_REGEXP_TYPE);
     case Runtime::kInlineIsJSReceiver:
       return ReduceIsJSReceiver(node);
     case Runtime::kInlineIsSmi:
       return ReduceIsSmi(node);
-    case Runtime::kInlineJSValueGetValue:
-      return ReduceJSValueGetValue(node);
     case Runtime::kInlineMathClz32:
       return ReduceMathClz32(node);
     case Runtime::kInlineMathFloor:
@@ -71,8 +65,6 @@
       return ReduceMathSqrt(node);
     case Runtime::kInlineValueOf:
       return ReduceValueOf(node);
-    case Runtime::kInlineIsMinusZero:
-      return ReduceIsMinusZero(node);
     case Runtime::kInlineFixedArrayGet:
       return ReduceFixedArrayGet(node);
     case Runtime::kInlineFixedArraySet:
@@ -148,6 +140,7 @@
       graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
                        frame_state, effect, control);
   NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+  Revisit(graph()->end());
 
   node->TrimInputCount(0);
   NodeProperties::ChangeOp(node, common()->Dead());
@@ -229,89 +222,8 @@
 }
 
 
-Reduction JSIntrinsicLowering::ReduceIsFunction(Node* node) {
-  Node* value = NodeProperties::GetValueInput(node, 0);
-  Type* value_type = NodeProperties::GetType(value);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-  if (value_type->Is(Type::Function())) {
-    value = jsgraph()->TrueConstant();
-  } else {
-    // if (%_IsSmi(value)) {
-    //   return false;
-    // } else {
-    //   return FIRST_FUNCTION_TYPE <= %_GetInstanceType(%_GetMap(value))
-    // }
-    STATIC_ASSERT(LAST_TYPE == LAST_FUNCTION_TYPE);
-
-    Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
-    Node* branch = graph()->NewNode(common()->Branch(), check, control);
-
-    Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-    Node* etrue = effect;
-    Node* vtrue = jsgraph()->FalseConstant();
-
-    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-    Node* efalse = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
-        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                         value, effect, if_false),
-        effect, if_false);
-    Node* vfalse =
-        graph()->NewNode(machine()->Uint32LessThanOrEqual(),
-                         jsgraph()->Int32Constant(FIRST_FUNCTION_TYPE), efalse);
-
-    control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-    effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-    value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                             vtrue, vfalse, control);
-  }
-  ReplaceWithValue(node, node, effect, control);
-  return Replace(value);
-}
-
-
 Reduction JSIntrinsicLowering::ReduceIsJSReceiver(Node* node) {
-  Node* value = NodeProperties::GetValueInput(node, 0);
-  Type* value_type = NodeProperties::GetType(value);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-  if (value_type->Is(Type::Receiver())) {
-    value = jsgraph()->TrueConstant();
-  } else if (!value_type->Maybe(Type::Receiver())) {
-    value = jsgraph()->FalseConstant();
-  } else {
-    // if (%_IsSmi(value)) {
-    //   return false;
-    // } else {
-    //   return FIRST_JS_RECEIVER_TYPE <= %_GetInstanceType(%_GetMap(value))
-    // }
-    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-
-    Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
-    Node* branch = graph()->NewNode(common()->Branch(), check, control);
-
-    Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-    Node* etrue = effect;
-    Node* vtrue = jsgraph()->FalseConstant();
-
-    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-    Node* efalse = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
-        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                         value, effect, if_false),
-        effect, if_false);
-    Node* vfalse = graph()->NewNode(
-        machine()->Uint32LessThanOrEqual(),
-        jsgraph()->Int32Constant(FIRST_JS_RECEIVER_TYPE), efalse);
-
-    control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-    effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-    value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                             vtrue, vfalse, control);
-  }
-  ReplaceWithValue(node, node, effect, control);
-  return Replace(value);
+  return Change(node, simplified()->ObjectIsReceiver());
 }
 
 
@@ -320,15 +232,6 @@
 }
 
 
-Reduction JSIntrinsicLowering::ReduceJSValueGetValue(Node* node) {
-  Node* value = NodeProperties::GetValueInput(node, 0);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-  return Change(node, simplified()->LoadField(AccessBuilder::ForValue()), value,
-                effect, control);
-}
-
-
 Reduction JSIntrinsicLowering::ReduceMathClz32(Node* node) {
   return Change(node, machine()->Word32Clz());
 }
@@ -420,30 +323,6 @@
 }
 
 
-Reduction JSIntrinsicLowering::ReduceIsMinusZero(Node* node) {
-  Node* value = NodeProperties::GetValueInput(node, 0);
-  Node* effect = NodeProperties::GetEffectInput(node);
-
-  Node* double_lo =
-      graph()->NewNode(machine()->Float64ExtractLowWord32(), value);
-  Node* check1 = graph()->NewNode(machine()->Word32Equal(), double_lo,
-                                  jsgraph()->ZeroConstant());
-
-  Node* double_hi =
-      graph()->NewNode(machine()->Float64ExtractHighWord32(), value);
-  Node* check2 = graph()->NewNode(
-      machine()->Word32Equal(), double_hi,
-      jsgraph()->Int32Constant(static_cast<int32_t>(0x80000000)));
-
-  ReplaceWithValue(node, node, effect);
-
-  Node* and_result = graph()->NewNode(machine()->Word32And(), check1, check2);
-
-  return Change(node, machine()->Word32Equal(), and_result,
-                jsgraph()->Int32Constant(1));
-}
-
-
 Reduction JSIntrinsicLowering::ReduceFixedArrayGet(Node* node) {
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
@@ -507,12 +386,43 @@
 
 Reduction JSIntrinsicLowering::ReduceToInteger(Node* node) {
   Node* value = NodeProperties::GetValueInput(node, 0);
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
+  // ToInteger is a no-op on integer values and -0.
   Type* value_type = NodeProperties::GetType(value);
   if (value_type->Is(type_cache().kIntegerOrMinusZero)) {
     ReplaceWithValue(node, value);
     return Replace(value);
   }
-  return NoChange();
+
+  Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+  Node* branch =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* etrue = effect;
+  Node* vtrue = value;
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* efalse = effect;
+  Node* vfalse;
+  {
+    vfalse = efalse =
+        graph()->NewNode(javascript()->CallRuntime(Runtime::kToInteger), value,
+                         context, frame_state, efalse, if_false);
+    if_false = graph()->NewNode(common()->IfSuccess(), vfalse);
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+  value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                           vtrue, vfalse, control);
+  // TODO(bmeurer, mstarzinger): Rewire IfException inputs to {vfalse}.
+  ReplaceWithValue(node, value, effect, control);
+  return Changed(value);
 }
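
For context on the fast path added to ReduceToInteger above: integer values (including -0) pass through unchanged, and only non-integer inputs fall back to Runtime::kToInteger behind the Smi-check branch. A minimal stand-alone sketch of the ToInteger truncation semantics in plain C++ (illustrative only; ToIntegerSketch is a hypothetical name, not part of this patch or of V8):

#include <cmath>

// ES2015 ToInteger on a double: NaN maps to +0, +/-0 and +/-Infinity pass
// through unchanged, every other value is truncated toward zero.
double ToIntegerSketch(double value) {
  if (std::isnan(value)) return 0.0;
  if (!std::isfinite(value)) return value;
  return std::trunc(value);
}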
 
 
@@ -589,20 +499,20 @@
 
 Reduction JSIntrinsicLowering::ReduceCall(Node* node) {
   size_t const arity = CallRuntimeParametersOf(node->op()).arity();
-  NodeProperties::ChangeOp(
-      node, javascript()->CallFunction(arity, STRICT, VectorSlotPair(),
-                                       ConvertReceiverMode::kAny,
-                                       TailCallMode::kDisallow));
+  NodeProperties::ChangeOp(node,
+                           javascript()->CallFunction(arity, VectorSlotPair(),
+                                                      ConvertReceiverMode::kAny,
+                                                      TailCallMode::kDisallow));
   return Changed(node);
 }
 
 
 Reduction JSIntrinsicLowering::ReduceTailCall(Node* node) {
   size_t const arity = CallRuntimeParametersOf(node->op()).arity();
-  NodeProperties::ChangeOp(
-      node, javascript()->CallFunction(arity, STRICT, VectorSlotPair(),
-                                       ConvertReceiverMode::kAny,
-                                       TailCallMode::kAllow));
+  NodeProperties::ChangeOp(node,
+                           javascript()->CallFunction(arity, VectorSlotPair(),
+                                                      ConvertReceiverMode::kAny,
+                                                      TailCallMode::kAllow));
   return Changed(node);
 }
 
diff --git a/src/compiler/js-intrinsic-lowering.h b/src/compiler/js-intrinsic-lowering.h
index 1977a58..d8e1102 100644
--- a/src/compiler/js-intrinsic-lowering.h
+++ b/src/compiler/js-intrinsic-lowering.h
@@ -44,12 +44,9 @@
   Reduction ReduceDoubleHi(Node* node);
   Reduction ReduceDoubleLo(Node* node);
   Reduction ReduceIncrementStatsCounter(Node* node);
-  Reduction ReduceIsMinusZero(Node* node);
   Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
-  Reduction ReduceIsFunction(Node* node);
   Reduction ReduceIsJSReceiver(Node* node);
   Reduction ReduceIsSmi(Node* node);
-  Reduction ReduceJSValueGetValue(Node* node);
   Reduction ReduceMathClz32(Node* node);
   Reduction ReduceMathFloor(Node* node);
   Reduction ReduceMathSqrt(Node* node);
diff --git a/src/compiler/js-native-context-specialization.cc b/src/compiler/js-native-context-specialization.cc
index 06cf770..2c11794 100644
--- a/src/compiler/js-native-context-specialization.cc
+++ b/src/compiler/js-native-context-specialization.cc
@@ -38,6 +38,8 @@
 
 Reduction JSNativeContextSpecialization::Reduce(Node* node) {
   switch (node->opcode()) {
+    case IrOpcode::kJSLoadContext:
+      return ReduceJSLoadContext(node);
     case IrOpcode::kJSLoadNamed:
       return ReduceJSLoadNamed(node);
     case IrOpcode::kJSStoreNamed:
@@ -52,6 +54,21 @@
   return NoChange();
 }
 
+Reduction JSNativeContextSpecialization::ReduceJSLoadContext(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
+  ContextAccess const& access = ContextAccessOf(node->op());
+  Handle<Context> native_context;
+  // Specialize JSLoadContext(NATIVE_CONTEXT_INDEX) to the known native
+  // context (if any), so we can constant-fold those fields, which is
+  // safe, since the NATIVE_CONTEXT_INDEX slot is always immutable.
+  if (access.index() == Context::NATIVE_CONTEXT_INDEX &&
+      GetNativeContext(node).ToHandle(&native_context)) {
+    Node* value = jsgraph()->HeapConstant(native_context);
+    ReplaceWithValue(node, value);
+    return Replace(value);
+  }
+  return NoChange();
+}
 
 Reduction JSNativeContextSpecialization::ReduceNamedAccess(
     Node* node, Node* value, MapHandleList const& receiver_maps,
@@ -418,6 +435,7 @@
                        frame_state, exit_effect, exit_control);
   // TODO(bmeurer): This should be on the AdvancedReducer somehow.
   NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+  Revisit(graph()->end());
 
   // Generate the final merge point for all (polymorphic) branches.
   int const control_count = static_cast<int>(controls.size());
@@ -443,21 +461,49 @@
 }
 
 
+Reduction JSNativeContextSpecialization::ReduceNamedAccess(
+    Node* node, Node* value, FeedbackNexus const& nexus, Handle<Name> name,
+    AccessMode access_mode, LanguageMode language_mode) {
+  DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
+         node->opcode() == IrOpcode::kJSStoreNamed);
+
+  // Check if the {nexus} reports type feedback for the IC.
+  if (nexus.IsUninitialized()) {
+    if ((flags() & kDeoptimizationEnabled) &&
+        (flags() & kBailoutOnUninitialized)) {
+      // TODO(turbofan): Implement all eager bailout points correctly in
+      // the graph builder.
+      Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+      if (!OpParameter<FrameStateInfo>(frame_state).bailout_id().IsNone()) {
+        return ReduceSoftDeoptimize(node);
+      }
+    }
+    return NoChange();
+  }
+
+  // Extract receiver maps from the IC using the {nexus}.
+  MapHandleList receiver_maps;
+  if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
+  DCHECK_LT(0, receiver_maps.length());
+
+  // Try to lower the named access based on the {receiver_maps}.
+  return ReduceNamedAccess(node, value, receiver_maps, name, access_mode,
+                           language_mode);
+}
+
+
 Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
   DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
   NamedAccess const& p = NamedAccessOf(node->op());
   Node* const value = jsgraph()->Dead();
 
   // Extract receiver maps from the LOAD_IC using the LoadICNexus.
-  MapHandleList receiver_maps;
   if (!p.feedback().IsValid()) return NoChange();
   LoadICNexus nexus(p.feedback().vector(), p.feedback().slot());
-  if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
-  DCHECK_LT(0, receiver_maps.length());
 
   // Try to lower the named access based on the {receiver_maps}.
-  return ReduceNamedAccess(node, value, receiver_maps, p.name(),
-                           AccessMode::kLoad, p.language_mode());
+  return ReduceNamedAccess(node, value, nexus, p.name(), AccessMode::kLoad,
+                           p.language_mode());
 }
 
 
@@ -467,15 +513,12 @@
   Node* const value = NodeProperties::GetValueInput(node, 1);
 
   // Extract receiver maps from the STORE_IC using the StoreICNexus.
-  MapHandleList receiver_maps;
   if (!p.feedback().IsValid()) return NoChange();
   StoreICNexus nexus(p.feedback().vector(), p.feedback().slot());
-  if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
-  DCHECK_LT(0, receiver_maps.length());
 
   // Try to lower the named access based on the {receiver_maps}.
-  return ReduceNamedAccess(node, value, receiver_maps, p.name(),
-                           AccessMode::kStore, p.language_mode());
+  return ReduceNamedAccess(node, value, nexus, p.name(), AccessMode::kStore,
+                           p.language_mode());
 }
 
 
@@ -705,7 +748,7 @@
     Type* element_type = Type::Any();
     MachineType element_machine_type = MachineType::AnyTagged();
     if (IsFastDoubleElementsKind(elements_kind)) {
-      element_type = type_cache_.kFloat64;
+      element_type = Type::Number();
       element_machine_type = MachineType::Float64();
     } else if (IsFastSmiElementsKind(elements_kind)) {
       element_type = type_cache_.kSmi;
@@ -850,6 +893,7 @@
                        frame_state, exit_effect, exit_control);
   // TODO(bmeurer): This should be on the AdvancedReducer somehow.
   NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+  Revisit(graph()->end());
 
   // Generate the final merge point for all (polymorphic) branches.
   int const control_count = static_cast<int>(controls.size());
@@ -882,6 +926,20 @@
   DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
          node->opcode() == IrOpcode::kJSStoreProperty);
 
+  // Check if the {nexus} reports type feedback for the IC.
+  if (nexus.IsUninitialized()) {
+    if ((flags() & kDeoptimizationEnabled) &&
+        (flags() & kBailoutOnUninitialized)) {
+      // TODO(turbofan): Implement all eager bailout points correctly in
+      // the graph builder.
+      Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+      if (!OpParameter<FrameStateInfo>(frame_state).bailout_id().IsNone()) {
+        return ReduceSoftDeoptimize(node);
+      }
+    }
+    return NoChange();
+  }
+
   // Extract receiver maps from the {nexus}.
   MapHandleList receiver_maps;
   if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
@@ -921,6 +979,22 @@
 }
 
 
+Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(Node* node) {
+  Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  Node* deoptimize =
+      graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kSoft), frame_state,
+                       effect, control);
+  // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+  NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+  Revisit(graph()->end());
+  node->TrimInputCount(0);
+  NodeProperties::ChangeOp(node, common()->Dead());
+  return Changed(node);
+}
+
+
 Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
   DCHECK_EQ(IrOpcode::kJSLoadProperty, node->opcode());
   PropertyAccess const& p = PropertyAccessOf(node->op());
diff --git a/src/compiler/js-native-context-specialization.h b/src/compiler/js-native-context-specialization.h
index 45ff87f..4251d72 100644
--- a/src/compiler/js-native-context-specialization.h
+++ b/src/compiler/js-native-context-specialization.h
@@ -38,7 +38,8 @@
   // Flags that control the mode of operation.
   enum Flag {
     kNoFlags = 0u,
-    kDeoptimizationEnabled = 1u << 0,
+    kBailoutOnUninitialized = 1u << 0,
+    kDeoptimizationEnabled = 1u << 1,
   };
   typedef base::Flags<Flag> Flags;
 
@@ -50,6 +51,7 @@
   Reduction Reduce(Node* node) final;
 
  private:
+  Reduction ReduceJSLoadContext(Node* node);
   Reduction ReduceJSLoadNamed(Node* node);
   Reduction ReduceJSStoreNamed(Node* node);
   Reduction ReduceJSLoadProperty(Node* node);
@@ -66,11 +68,17 @@
                               LanguageMode language_mode,
                               KeyedAccessStoreMode store_mode);
   Reduction ReduceNamedAccess(Node* node, Node* value,
+                              FeedbackNexus const& nexus, Handle<Name> name,
+                              AccessMode access_mode,
+                              LanguageMode language_mode);
+  Reduction ReduceNamedAccess(Node* node, Node* value,
                               MapHandleList const& receiver_maps,
                               Handle<Name> name, AccessMode access_mode,
                               LanguageMode language_mode,
                               Node* index = nullptr);
 
+  Reduction ReduceSoftDeoptimize(Node* node);
+
   // Adds stability dependencies on all prototypes of every class in
   // {receiver_type} up to (and including) the {holder}.
   void AssumePrototypesStable(Type* receiver_type,
diff --git a/src/compiler/js-operator.cc b/src/compiler/js-operator.cc
index 1455f0a..5fcd519 100644
--- a/src/compiler/js-operator.cc
+++ b/src/compiler/js-operator.cc
@@ -52,63 +52,6 @@
 }
 
 
-size_t hash_value(TailCallMode mode) {
-  return base::hash_value(static_cast<unsigned>(mode));
-}
-
-
-std::ostream& operator<<(std::ostream& os, TailCallMode mode) {
-  switch (mode) {
-    case TailCallMode::kAllow:
-      return os << "ALLOW_TAIL_CALLS";
-    case TailCallMode::kDisallow:
-      return os << "DISALLOW_TAIL_CALLS";
-  }
-  UNREACHABLE();
-  return os;
-}
-
-
-bool operator==(BinaryOperationParameters const& lhs,
-                BinaryOperationParameters const& rhs) {
-  return lhs.language_mode() == rhs.language_mode() &&
-         lhs.hints() == rhs.hints();
-}
-
-
-bool operator!=(BinaryOperationParameters const& lhs,
-                BinaryOperationParameters const& rhs) {
-  return !(lhs == rhs);
-}
-
-
-size_t hash_value(BinaryOperationParameters const& p) {
-  return base::hash_combine(p.language_mode(), p.hints());
-}
-
-
-std::ostream& operator<<(std::ostream& os, BinaryOperationParameters const& p) {
-  return os << p.language_mode() << ", " << p.hints();
-}
-
-
-BinaryOperationParameters const& BinaryOperationParametersOf(
-    Operator const* op) {
-  DCHECK(op->opcode() == IrOpcode::kJSBitwiseOr ||
-         op->opcode() == IrOpcode::kJSBitwiseXor ||
-         op->opcode() == IrOpcode::kJSBitwiseAnd ||
-         op->opcode() == IrOpcode::kJSShiftLeft ||
-         op->opcode() == IrOpcode::kJSShiftRight ||
-         op->opcode() == IrOpcode::kJSShiftRightLogical ||
-         op->opcode() == IrOpcode::kJSAdd ||
-         op->opcode() == IrOpcode::kJSSubtract ||
-         op->opcode() == IrOpcode::kJSMultiply ||
-         op->opcode() == IrOpcode::kJSDivide ||
-         op->opcode() == IrOpcode::kJSModulus);
-  return OpParameter<BinaryOperationParameters>(op);
-}
-
-
 bool operator==(CallConstructParameters const& lhs,
                 CallConstructParameters const& rhs) {
   return lhs.arity() == rhs.arity() && lhs.feedback() == rhs.feedback();
@@ -138,8 +81,7 @@
 
 
 std::ostream& operator<<(std::ostream& os, CallFunctionParameters const& p) {
-  os << p.arity() << ", " << p.language_mode() << ", " << p.convert_mode()
-     << ", " << p.tail_call_mode();
+  os << p.arity() << ", " << p.convert_mode() << ", " << p.tail_call_mode();
   return os;
 }
 
@@ -216,38 +158,6 @@
 }
 
 
-DynamicAccess::DynamicAccess(const Handle<String>& name, TypeofMode typeof_mode)
-    : name_(name), typeof_mode_(typeof_mode) {}
-
-
-bool operator==(DynamicAccess const& lhs, DynamicAccess const& rhs) {
-  UNIMPLEMENTED();
-  return true;
-}
-
-
-bool operator!=(DynamicAccess const& lhs, DynamicAccess const& rhs) {
-  return !(lhs == rhs);
-}
-
-
-size_t hash_value(DynamicAccess const& access) {
-  UNIMPLEMENTED();
-  return 0;
-}
-
-
-std::ostream& operator<<(std::ostream& os, DynamicAccess const& access) {
-  return os << Brief(*access.name()) << ", " << access.typeof_mode();
-}
-
-
-DynamicAccess const& DynamicAccessOf(Operator const* op) {
-  DCHECK_EQ(IrOpcode::kJSLoadDynamic, op->opcode());
-  return OpParameter<DynamicAccess>(op);
-}
-
-
 bool operator==(NamedAccess const& lhs, NamedAccess const& rhs) {
   return lhs.name().location() == rhs.name().location() &&
          lhs.language_mode() == rhs.language_mode() &&
@@ -367,32 +277,9 @@
 }
 
 
-bool operator==(CreateArgumentsParameters const& lhs,
-                CreateArgumentsParameters const& rhs) {
-  return lhs.type() == rhs.type() && lhs.start_index() == rhs.start_index();
-}
-
-
-bool operator!=(CreateArgumentsParameters const& lhs,
-                CreateArgumentsParameters const& rhs) {
-  return !(lhs == rhs);
-}
-
-
-size_t hash_value(CreateArgumentsParameters const& p) {
-  return base::hash_combine(p.type(), p.start_index());
-}
-
-
-std::ostream& operator<<(std::ostream& os, CreateArgumentsParameters const& p) {
-  return os << p.type() << ", " << p.start_index();
-}
-
-
-const CreateArgumentsParameters& CreateArgumentsParametersOf(
-    const Operator* op) {
+CreateArgumentsType const& CreateArgumentsTypeOf(const Operator* op) {
   DCHECK_EQ(IrOpcode::kJSCreateArguments, op->opcode());
-  return OpParameter<CreateArgumentsParameters>(op);
+  return OpParameter<CreateArgumentsType>(op);
 }
 
 
@@ -486,12 +373,15 @@
   return OpParameter<CreateLiteralParameters>(op);
 }
 
-
 #define CACHED_OP_LIST(V)                                  \
   V(Equal, Operator::kNoProperties, 2, 1)                  \
   V(NotEqual, Operator::kNoProperties, 2, 1)               \
   V(StrictEqual, Operator::kNoThrow, 2, 1)                 \
   V(StrictNotEqual, Operator::kNoThrow, 2, 1)              \
+  V(LessThan, Operator::kNoProperties, 2, 1)               \
+  V(GreaterThan, Operator::kNoProperties, 2, 1)            \
+  V(LessThanOrEqual, Operator::kNoProperties, 2, 1)        \
+  V(GreaterThanOrEqual, Operator::kNoProperties, 2, 1)     \
   V(ToNumber, Operator::kNoProperties, 1, 1)               \
   V(ToString, Operator::kNoProperties, 1, 1)               \
   V(ToName, Operator::kNoProperties, 1, 1)                 \
@@ -512,14 +402,6 @@
   V(CreateWithContext, Operator::kNoProperties, 2, 1)      \
   V(CreateModuleContext, Operator::kNoProperties, 2, 1)
 
-
-#define CACHED_OP_LIST_WITH_LANGUAGE_MODE(V)        \
-  V(LessThan, Operator::kNoProperties, 2, 1)        \
-  V(GreaterThan, Operator::kNoProperties, 2, 1)     \
-  V(LessThanOrEqual, Operator::kNoProperties, 2, 1) \
-  V(GreaterThanOrEqual, Operator::kNoProperties, 2, 1)
-
-
 struct JSOperatorGlobalCache final {
 #define CACHED(Name, properties, value_input_count, value_output_count)  \
   struct Name##Operator final : public Operator {                        \
@@ -533,25 +415,6 @@
   Name##Operator k##Name##Operator;
   CACHED_OP_LIST(CACHED)
 #undef CACHED
-
-
-#define CACHED_WITH_LANGUAGE_MODE(Name, properties, value_input_count,        \
-                                  value_output_count)                         \
-  template <LanguageMode kLanguageMode>                                       \
-  struct Name##Operator final : public Operator1<LanguageMode> {              \
-    Name##Operator()                                                          \
-        : Operator1<LanguageMode>(                                            \
-              IrOpcode::kJS##Name, properties, "JS" #Name, value_input_count, \
-              Operator::ZeroIfPure(properties),                               \
-              Operator::ZeroIfEliminatable(properties), value_output_count,   \
-              Operator::ZeroIfPure(properties),                               \
-              Operator::ZeroIfNoThrow(properties), kLanguageMode) {}          \
-  };                                                                          \
-  Name##Operator<SLOPPY> k##Name##SloppyOperator;                             \
-  Name##Operator<STRICT> k##Name##StrictOperator;                             \
-  Name##Operator<STRONG> k##Name##StrongOperator;
-  CACHED_OP_LIST_WITH_LANGUAGE_MODE(CACHED_WITH_LANGUAGE_MODE)
-#undef CACHED_WITH_LANGUAGE_MODE
 };
 
 
@@ -570,156 +433,104 @@
 CACHED_OP_LIST(CACHED)
 #undef CACHED
 
-
-#define CACHED_WITH_LANGUAGE_MODE(Name, properties, value_input_count,  \
-                                  value_output_count)                   \
-  const Operator* JSOperatorBuilder::Name(LanguageMode language_mode) { \
-    switch (language_mode) {                                            \
-      case SLOPPY:                                                      \
-        return &cache_.k##Name##SloppyOperator;                         \
-      case STRICT:                                                      \
-        return &cache_.k##Name##StrictOperator;                         \
-      case STRONG:                                                      \
-        return &cache_.k##Name##StrongOperator;                         \
-      default:                                                          \
-        break; /* %*!%^$#@ */                                           \
-    }                                                                   \
-    UNREACHABLE();                                                      \
-    return nullptr;                                                     \
-  }
-CACHED_OP_LIST_WITH_LANGUAGE_MODE(CACHED_WITH_LANGUAGE_MODE)
-#undef CACHED_WITH_LANGUAGE_MODE
-
-
-const Operator* JSOperatorBuilder::BitwiseOr(LanguageMode language_mode,
-                                             BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::BitwiseOr(BinaryOperationHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
-  BinaryOperationParameters parameters(language_mode, hints);
-  return new (zone()) Operator1<BinaryOperationParameters>(  //--
-      IrOpcode::kJSBitwiseOr, Operator::kNoProperties,       // opcode
-      "JSBitwiseOr",                                         // name
-      2, 1, 1, 1, 1, 2,                                      // inputs/outputs
-      parameters);                                           // parameter
+  return new (zone()) Operator1<BinaryOperationHints>(  //--
+      IrOpcode::kJSBitwiseOr, Operator::kNoProperties,  // opcode
+      "JSBitwiseOr",                                    // name
+      2, 1, 1, 1, 1, 2,                                 // inputs/outputs
+      hints);                                           // parameter
 }
 
-
-const Operator* JSOperatorBuilder::BitwiseXor(LanguageMode language_mode,
-                                              BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::BitwiseXor(BinaryOperationHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
-  BinaryOperationParameters parameters(language_mode, hints);
-  return new (zone()) Operator1<BinaryOperationParameters>(  //--
-      IrOpcode::kJSBitwiseXor, Operator::kNoProperties,      // opcode
-      "JSBitwiseXor",                                        // name
-      2, 1, 1, 1, 1, 2,                                      // inputs/outputs
-      parameters);                                           // parameter
+  return new (zone()) Operator1<BinaryOperationHints>(   //--
+      IrOpcode::kJSBitwiseXor, Operator::kNoProperties,  // opcode
+      "JSBitwiseXor",                                    // name
+      2, 1, 1, 1, 1, 2,                                  // inputs/outputs
+      hints);                                            // parameter
 }
 
-
-const Operator* JSOperatorBuilder::BitwiseAnd(LanguageMode language_mode,
-                                              BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::BitwiseAnd(BinaryOperationHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
-  BinaryOperationParameters parameters(language_mode, hints);
-  return new (zone()) Operator1<BinaryOperationParameters>(  //--
-      IrOpcode::kJSBitwiseAnd, Operator::kNoProperties,      // opcode
-      "JSBitwiseAnd",                                        // name
-      2, 1, 1, 1, 1, 2,                                      // inputs/outputs
-      parameters);                                           // parameter
+  return new (zone()) Operator1<BinaryOperationHints>(   //--
+      IrOpcode::kJSBitwiseAnd, Operator::kNoProperties,  // opcode
+      "JSBitwiseAnd",                                    // name
+      2, 1, 1, 1, 1, 2,                                  // inputs/outputs
+      hints);                                            // parameter
 }
 
-
-const Operator* JSOperatorBuilder::ShiftLeft(LanguageMode language_mode,
-                                             BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::ShiftLeft(BinaryOperationHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
-  BinaryOperationParameters parameters(language_mode, hints);
-  return new (zone()) Operator1<BinaryOperationParameters>(  //--
-      IrOpcode::kJSShiftLeft, Operator::kNoProperties,       // opcode
-      "JSShiftLeft",                                         // name
-      2, 1, 1, 1, 1, 2,                                      // inputs/outputs
-      parameters);                                           // parameter
+  return new (zone()) Operator1<BinaryOperationHints>(  //--
+      IrOpcode::kJSShiftLeft, Operator::kNoProperties,  // opcode
+      "JSShiftLeft",                                    // name
+      2, 1, 1, 1, 1, 2,                                 // inputs/outputs
+      hints);                                           // parameter
 }
 
-
-const Operator* JSOperatorBuilder::ShiftRight(LanguageMode language_mode,
-                                              BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::ShiftRight(BinaryOperationHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
-  BinaryOperationParameters parameters(language_mode, hints);
-  return new (zone()) Operator1<BinaryOperationParameters>(  //--
-      IrOpcode::kJSShiftRight, Operator::kNoProperties,      // opcode
-      "JSShiftRight",                                        // name
-      2, 1, 1, 1, 1, 2,                                      // inputs/outputs
-      parameters);                                           // parameter
+  return new (zone()) Operator1<BinaryOperationHints>(   //--
+      IrOpcode::kJSShiftRight, Operator::kNoProperties,  // opcode
+      "JSShiftRight",                                    // name
+      2, 1, 1, 1, 1, 2,                                  // inputs/outputs
+      hints);                                            // parameter
 }
 
-
 const Operator* JSOperatorBuilder::ShiftRightLogical(
-    LanguageMode language_mode, BinaryOperationHints hints) {
+    BinaryOperationHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
-  BinaryOperationParameters parameters(language_mode, hints);
-  return new (zone()) Operator1<BinaryOperationParameters>(     //--
+  return new (zone()) Operator1<BinaryOperationHints>(          //--
       IrOpcode::kJSShiftRightLogical, Operator::kNoProperties,  // opcode
       "JSShiftRightLogical",                                    // name
       2, 1, 1, 1, 1, 2,  // inputs/outputs
-      parameters);       // parameter
+      hints);            // parameter
 }
 
-
-const Operator* JSOperatorBuilder::Add(LanguageMode language_mode,
-                                       BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::Add(BinaryOperationHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
-  BinaryOperationParameters parameters(language_mode, hints);
-  return new (zone()) Operator1<BinaryOperationParameters>(  //--
-      IrOpcode::kJSAdd, Operator::kNoProperties,             // opcode
-      "JSAdd",                                               // name
-      2, 1, 1, 1, 1, 2,                                      // inputs/outputs
-      parameters);                                           // parameter
+  return new (zone()) Operator1<BinaryOperationHints>(  //--
+      IrOpcode::kJSAdd, Operator::kNoProperties,        // opcode
+      "JSAdd",                                          // name
+      2, 1, 1, 1, 1, 2,                                 // inputs/outputs
+      hints);                                           // parameter
 }
 
-
-const Operator* JSOperatorBuilder::Subtract(LanguageMode language_mode,
-                                            BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::Subtract(BinaryOperationHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
-  BinaryOperationParameters parameters(language_mode, hints);
-  return new (zone()) Operator1<BinaryOperationParameters>(  //--
-      IrOpcode::kJSSubtract, Operator::kNoProperties,        // opcode
-      "JSSubtract",                                          // name
-      2, 1, 1, 1, 1, 2,                                      // inputs/outputs
-      parameters);                                           // parameter
+  return new (zone()) Operator1<BinaryOperationHints>(  //--
+      IrOpcode::kJSSubtract, Operator::kNoProperties,   // opcode
+      "JSSubtract",                                     // name
+      2, 1, 1, 1, 1, 2,                                 // inputs/outputs
+      hints);                                           // parameter
 }
 
-
-const Operator* JSOperatorBuilder::Multiply(LanguageMode language_mode,
-                                            BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::Multiply(BinaryOperationHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
-  BinaryOperationParameters parameters(language_mode, hints);
-  return new (zone()) Operator1<BinaryOperationParameters>(  //--
-      IrOpcode::kJSMultiply, Operator::kNoProperties,        // opcode
-      "JSMultiply",                                          // name
-      2, 1, 1, 1, 1, 2,                                      // inputs/outputs
-      parameters);                                           // parameter
+  return new (zone()) Operator1<BinaryOperationHints>(  //--
+      IrOpcode::kJSMultiply, Operator::kNoProperties,   // opcode
+      "JSMultiply",                                     // name
+      2, 1, 1, 1, 1, 2,                                 // inputs/outputs
+      hints);                                           // parameter
 }
 
-
-const Operator* JSOperatorBuilder::Divide(LanguageMode language_mode,
-                                          BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::Divide(BinaryOperationHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
-  BinaryOperationParameters parameters(language_mode, hints);
-  return new (zone()) Operator1<BinaryOperationParameters>(  //--
-      IrOpcode::kJSDivide, Operator::kNoProperties,          // opcode
-      "JSDivide",                                            // name
-      2, 1, 1, 1, 1, 2,                                      // inputs/outputs
-      parameters);                                           // parameter
+  return new (zone()) Operator1<BinaryOperationHints>(  //--
+      IrOpcode::kJSDivide, Operator::kNoProperties,     // opcode
+      "JSDivide",                                       // name
+      2, 1, 1, 1, 1, 2,                                 // inputs/outputs
+      hints);                                           // parameter
 }
 
-
-const Operator* JSOperatorBuilder::Modulus(LanguageMode language_mode,
-                                           BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::Modulus(BinaryOperationHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
-  BinaryOperationParameters parameters(language_mode, hints);
-  return new (zone()) Operator1<BinaryOperationParameters>(  //--
-      IrOpcode::kJSModulus, Operator::kNoProperties,         // opcode
-      "JSModulus",                                           // name
-      2, 1, 1, 1, 1, 2,                                      // inputs/outputs
-      parameters);                                           // parameter
+  return new (zone()) Operator1<BinaryOperationHints>(  //--
+      IrOpcode::kJSModulus, Operator::kNoProperties,    // opcode
+      "JSModulus",                                      // name
+      2, 1, 1, 1, 1, 2,                                 // inputs/outputs
+      hints);                                           // parameter
 }
 
 
@@ -732,12 +543,11 @@
       hints);                                           // parameter
 }
 
-
 const Operator* JSOperatorBuilder::CallFunction(
-    size_t arity, LanguageMode language_mode, VectorSlotPair const& feedback,
+    size_t arity, VectorSlotPair const& feedback,
     ConvertReceiverMode convert_mode, TailCallMode tail_call_mode) {
-  CallFunctionParameters parameters(arity, language_mode, feedback,
-                                    tail_call_mode, convert_mode);
+  CallFunctionParameters parameters(arity, feedback, tail_call_mode,
+                                    convert_mode);
   return new (zone()) Operator1<CallFunctionParameters>(   // --
       IrOpcode::kJSCallFunction, Operator::kNoProperties,  // opcode
       "JSCallFunction",                                    // name
@@ -746,10 +556,22 @@
 }
 
 
+const Operator* JSOperatorBuilder::CallRuntime(Runtime::FunctionId id) {
+  const Runtime::Function* f = Runtime::FunctionForId(id);
+  return CallRuntime(f, f->nargs);
+}
+
+
 const Operator* JSOperatorBuilder::CallRuntime(Runtime::FunctionId id,
                                                size_t arity) {
-  CallRuntimeParameters parameters(id, arity);
-  const Runtime::Function* f = Runtime::FunctionForId(parameters.id());
+  const Runtime::Function* f = Runtime::FunctionForId(id);
+  return CallRuntime(f, arity);
+}
+
+
+const Operator* JSOperatorBuilder::CallRuntime(const Runtime::Function* f,
+                                               size_t arity) {
+  CallRuntimeParameters parameters(f->function_id, arity);
   DCHECK(f->nargs == -1 || f->nargs == static_cast<int>(parameters.arity()));
   return new (zone()) Operator1<CallRuntimeParameters>(   // --
       IrOpcode::kJSCallRuntime, Operator::kNoProperties,  // opcode
@@ -779,11 +601,9 @@
       convert_mode);                                     // parameter
 }
 
-
-const Operator* JSOperatorBuilder::LoadNamed(LanguageMode language_mode,
-                                             Handle<Name> name,
+const Operator* JSOperatorBuilder::LoadNamed(Handle<Name> name,
                                              const VectorSlotPair& feedback) {
-  NamedAccess access(language_mode, name, feedback);
+  NamedAccess access(SLOPPY, name, feedback);
   return new (zone()) Operator1<NamedAccess>(           // --
       IrOpcode::kJSLoadNamed, Operator::kNoProperties,  // opcode
       "JSLoadNamed",                                    // name
@@ -791,10 +611,9 @@
       access);                                          // parameter
 }
 
-
 const Operator* JSOperatorBuilder::LoadProperty(
-    LanguageMode language_mode, VectorSlotPair const& feedback) {
-  PropertyAccess access(language_mode, feedback);
+    VectorSlotPair const& feedback) {
+  PropertyAccess access(SLOPPY, feedback);
   return new (zone()) Operator1<PropertyAccess>(           // --
       IrOpcode::kJSLoadProperty, Operator::kNoProperties,  // opcode
       "JSLoadProperty",                                    // name
@@ -882,26 +701,12 @@
 }
 
 
-const Operator* JSOperatorBuilder::LoadDynamic(const Handle<String>& name,
-                                               TypeofMode typeof_mode) {
-  DynamicAccess access(name, typeof_mode);
-  return new (zone()) Operator1<DynamicAccess>(           // --
-      IrOpcode::kJSLoadDynamic, Operator::kNoProperties,  // opcode
-      "JSLoadDynamic",                                    // name
-      2, 1, 1, 1, 1, 2,                                   // counts
-      access);                                            // parameter
-}
-
-
-const Operator* JSOperatorBuilder::CreateArguments(
-    CreateArgumentsParameters::Type type, int start_index) {
-  DCHECK_IMPLIES(start_index, type == CreateArgumentsParameters::kRestArray);
-  CreateArgumentsParameters parameters(type, start_index);
-  return new (zone()) Operator1<CreateArgumentsParameters>(  // --
-      IrOpcode::kJSCreateArguments, Operator::kNoThrow,      // opcode
-      "JSCreateArguments",                                   // name
-      1, 1, 1, 1, 1, 0,                                      // counts
-      parameters);                                           // parameter
+const Operator* JSOperatorBuilder::CreateArguments(CreateArgumentsType type) {
+  return new (zone()) Operator1<CreateArgumentsType>(    // --
+      IrOpcode::kJSCreateArguments, Operator::kNoThrow,  // opcode
+      "JSCreateArguments",                               // name
+      1, 1, 1, 1, 1, 0,                                  // counts
+      type);                                             // parameter
 }
 
 
diff --git a/src/compiler/js-operator.h b/src/compiler/js-operator.h
index ca7c7ea..070e71e 100644
--- a/src/compiler/js-operator.h
+++ b/src/compiler/js-operator.h
@@ -51,42 +51,6 @@
 ToBooleanHints ToBooleanHintsOf(Operator const* op);
 
 
-// Defines whether tail call optimization is allowed.
-enum class TailCallMode : unsigned { kAllow, kDisallow };
-
-size_t hash_value(TailCallMode);
-
-std::ostream& operator<<(std::ostream&, TailCallMode);
-
-
-// Defines the language mode and hints for a JavaScript binary operations.
-// This is used as parameter by JSAdd, JSSubtract, etc. operators.
-class BinaryOperationParameters final {
- public:
-  BinaryOperationParameters(LanguageMode language_mode,
-                            BinaryOperationHints hints)
-      : language_mode_(language_mode), hints_(hints) {}
-
-  LanguageMode language_mode() const { return language_mode_; }
-  BinaryOperationHints hints() const { return hints_; }
-
- private:
-  LanguageMode const language_mode_;
-  BinaryOperationHints const hints_;
-};
-
-bool operator==(BinaryOperationParameters const&,
-                BinaryOperationParameters const&);
-bool operator!=(BinaryOperationParameters const&,
-                BinaryOperationParameters const&);
-
-size_t hash_value(BinaryOperationParameters const&);
-
-std::ostream& operator<<(std::ostream&, BinaryOperationParameters const&);
-
-BinaryOperationParameters const& BinaryOperationParametersOf(Operator const*);
-
-
 // Defines the arity and the feedback for a JavaScript constructor call. This is
 // used as a parameter by JSCallConstruct operators.
 class CallConstructParameters final {
@@ -116,20 +80,15 @@
 // used as a parameter by JSCallFunction operators.
 class CallFunctionParameters final {
  public:
-  CallFunctionParameters(size_t arity, LanguageMode language_mode,
-                         VectorSlotPair const& feedback,
+  CallFunctionParameters(size_t arity, VectorSlotPair const& feedback,
                          TailCallMode tail_call_mode,
                          ConvertReceiverMode convert_mode)
       : bit_field_(ArityField::encode(arity) |
                    ConvertReceiverModeField::encode(convert_mode) |
-                   LanguageModeField::encode(language_mode) |
                    TailCallModeField::encode(tail_call_mode)),
         feedback_(feedback) {}
 
   size_t arity() const { return ArityField::decode(bit_field_); }
-  LanguageMode language_mode() const {
-    return LanguageModeField::decode(bit_field_);
-  }
   ConvertReceiverMode convert_mode() const {
     return ConvertReceiverModeField::decode(bit_field_);
   }
@@ -151,9 +110,8 @@
     return base::hash_combine(p.bit_field_, p.feedback_);
   }
 
-  typedef BitField<size_t, 0, 27> ArityField;
-  typedef BitField<ConvertReceiverMode, 27, 2> ConvertReceiverModeField;
-  typedef BitField<LanguageMode, 29, 2> LanguageModeField;
+  typedef BitField<size_t, 0, 29> ArityField;
+  typedef BitField<ConvertReceiverMode, 29, 2> ConvertReceiverModeField;
   typedef BitField<TailCallMode, 31, 1> TailCallModeField;
 
   const uint32_t bit_field_;
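
The repacked CallFunctionParameters bit field above spends the two bits freed by the removed LanguageModeField on a wider arity: 29 bits of arity, 2 bits of ConvertReceiverMode and 1 bit of TailCallMode still total exactly 32. A stand-alone sketch of that layout in plain C++ (Encode is a hypothetical helper for illustration, not part of this patch):

#include <cassert>
#include <cstdint>

constexpr uint32_t Encode(uint32_t arity, uint32_t convert_mode,
                          uint32_t tail_call_mode) {
  return (arity & 0x1FFFFFFFu) |           // bits 0..28: arity
         ((convert_mode & 0x3u) << 29) |   // bits 29..30: ConvertReceiverMode
         ((tail_call_mode & 0x1u) << 31);  // bit 31: TailCallMode
}

int main() {
  uint32_t bits = Encode(5, 2, 1);
  assert((bits & 0x1FFFFFFFu) == 5);   // arity round-trips
  assert(((bits >> 29) & 0x3u) == 2);  // convert mode round-trips
  assert((bits >> 31) == 1);           // tail call flag round-trips
  return 0;
}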
@@ -221,30 +179,6 @@
 ContextAccess const& ContextAccessOf(Operator const*);
 
 
-// Defines the name for a dynamic variable lookup. This is used as a parameter
-// by JSLoadDynamic and JSStoreDynamic operators.
-class DynamicAccess final {
- public:
-  DynamicAccess(const Handle<String>& name, TypeofMode typeof_mode);
-
-  const Handle<String>& name() const { return name_; }
-  TypeofMode typeof_mode() const { return typeof_mode_; }
-
- private:
-  const Handle<String> name_;
-  const TypeofMode typeof_mode_;
-};
-
-size_t hash_value(DynamicAccess const&);
-
-bool operator==(DynamicAccess const&, DynamicAccess const&);
-bool operator!=(DynamicAccess const&, DynamicAccess const&);
-
-std::ostream& operator<<(std::ostream&, DynamicAccess const&);
-
-DynamicAccess const& DynamicAccessOf(Operator const*);
-
-
 // Defines the property of an object for a named access. This is
 // used as a parameter by the JSLoadNamed and JSStoreNamed operators.
 class NamedAccess final {
@@ -356,33 +290,8 @@
 PropertyAccess const& PropertyAccessOf(const Operator* op);
 
 
-// Defines specifics about arguments object or rest parameter creation. This is
-// used as a parameter by JSCreateArguments operators.
-class CreateArgumentsParameters final {
- public:
-  enum Type { kMappedArguments, kUnmappedArguments, kRestArray };
-  CreateArgumentsParameters(Type type, int start_index)
-      : type_(type), start_index_(start_index) {}
-
-  Type type() const { return type_; }
-  int start_index() const { return start_index_; }
-
- private:
-  const Type type_;
-  const int start_index_;
-};
-
-bool operator==(CreateArgumentsParameters const&,
-                CreateArgumentsParameters const&);
-bool operator!=(CreateArgumentsParameters const&,
-                CreateArgumentsParameters const&);
-
-size_t hash_value(CreateArgumentsParameters const&);
-
-std::ostream& operator<<(std::ostream&, CreateArgumentsParameters const&);
-
-const CreateArgumentsParameters& CreateArgumentsParametersOf(
-    const Operator* op);
+// CreateArgumentsType is used as a parameter by JSCreateArguments nodes.

+CreateArgumentsType const& CreateArgumentsTypeOf(const Operator* op);
 
 
 // Defines shared information for the array that should be created. This is
@@ -475,31 +384,21 @@
   const Operator* NotEqual();
   const Operator* StrictEqual();
   const Operator* StrictNotEqual();
-  const Operator* LessThan(LanguageMode language_mode);
-  const Operator* GreaterThan(LanguageMode language_mode);
-  const Operator* LessThanOrEqual(LanguageMode language_mode);
-  const Operator* GreaterThanOrEqual(LanguageMode language_mode);
-  const Operator* BitwiseOr(LanguageMode language_mode,
-                            BinaryOperationHints hints);
-  const Operator* BitwiseXor(LanguageMode language_mode,
-                             BinaryOperationHints hints);
-  const Operator* BitwiseAnd(LanguageMode language_mode,
-                             BinaryOperationHints hints);
-  const Operator* ShiftLeft(LanguageMode language_mode,
-                            BinaryOperationHints hints);
-  const Operator* ShiftRight(LanguageMode language_mode,
-                             BinaryOperationHints hints);
-  const Operator* ShiftRightLogical(LanguageMode language_mode,
-                                    BinaryOperationHints hints);
-  const Operator* Add(LanguageMode language_mode, BinaryOperationHints hints);
-  const Operator* Subtract(LanguageMode language_mode,
-                           BinaryOperationHints hints);
-  const Operator* Multiply(LanguageMode language_mode,
-                           BinaryOperationHints hints);
-  const Operator* Divide(LanguageMode language_mode,
-                         BinaryOperationHints hints);
-  const Operator* Modulus(LanguageMode language_mode,
-                          BinaryOperationHints hints);
+  const Operator* LessThan();
+  const Operator* GreaterThan();
+  const Operator* LessThanOrEqual();
+  const Operator* GreaterThanOrEqual();
+  const Operator* BitwiseOr(BinaryOperationHints hints);
+  const Operator* BitwiseXor(BinaryOperationHints hints);
+  const Operator* BitwiseAnd(BinaryOperationHints hints);
+  const Operator* ShiftLeft(BinaryOperationHints hints);
+  const Operator* ShiftRight(BinaryOperationHints hints);
+  const Operator* ShiftRightLogical(BinaryOperationHints hints);
+  const Operator* Add(BinaryOperationHints hints);
+  const Operator* Subtract(BinaryOperationHints hints);
+  const Operator* Multiply(BinaryOperationHints hints);
+  const Operator* Divide(BinaryOperationHints hints);
+  const Operator* Modulus(BinaryOperationHints hints);
 
   const Operator* ToBoolean(ToBooleanHints hints);
   const Operator* ToNumber();
@@ -509,8 +408,7 @@
   const Operator* Yield();
 
   const Operator* Create();
-  const Operator* CreateArguments(CreateArgumentsParameters::Type type,
-                                  int start_index);
+  const Operator* CreateArguments(CreateArgumentsType type);
   const Operator* CreateArray(size_t arity, Handle<AllocationSite> site);
   const Operator* CreateClosure(Handle<SharedFunctionInfo> shared_info,
                                 PretenureFlag pretenure);
@@ -523,19 +421,18 @@
                                       int literal_flags, int literal_index);
 
   const Operator* CallFunction(
-      size_t arity, LanguageMode language_mode,
-      VectorSlotPair const& feedback = VectorSlotPair(),
+      size_t arity, VectorSlotPair const& feedback = VectorSlotPair(),
       ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
       TailCallMode tail_call_mode = TailCallMode::kDisallow);
+  const Operator* CallRuntime(Runtime::FunctionId id);
   const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
+  const Operator* CallRuntime(const Runtime::Function* function, size_t arity);
   const Operator* CallConstruct(size_t arity, VectorSlotPair const& feedback);
 
   const Operator* ConvertReceiver(ConvertReceiverMode convert_mode);
 
-  const Operator* LoadProperty(LanguageMode language_mode,
-                               VectorSlotPair const& feedback);
-  const Operator* LoadNamed(LanguageMode language_mode, Handle<Name> name,
-                            VectorSlotPair const& feedback);
+  const Operator* LoadProperty(VectorSlotPair const& feedback);
+  const Operator* LoadNamed(Handle<Name> name, VectorSlotPair const& feedback);
 
   const Operator* StoreProperty(LanguageMode language_mode,
                                 VectorSlotPair const& feedback);
@@ -556,9 +453,6 @@
   const Operator* LoadContext(size_t depth, size_t index, bool immutable);
   const Operator* StoreContext(size_t depth, size_t index);
 
-  const Operator* LoadDynamic(const Handle<String>& name,
-                              TypeofMode typeof_mode);
-
   const Operator* TypeOf();
   const Operator* InstanceOf();
 
diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc
index 5e0712a..11ae3a9 100644
--- a/src/compiler/js-typed-lowering.cc
+++ b/src/compiler/js-typed-lowering.cc
@@ -11,7 +11,6 @@
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/operator-properties.h"
-#include "src/compiler/state-values-utils.h"
 #include "src/type-cache.h"
 #include "src/types.h"
 
@@ -19,86 +18,6 @@
 namespace internal {
 namespace compiler {
 
-namespace {
-
-// A helper class to construct inline allocations on the simplified operator
-// level. This keeps track of the effect chain for initial stores on a newly
-// allocated object and also provides helpers for commonly allocated objects.
-class AllocationBuilder final {
- public:
-  AllocationBuilder(JSGraph* jsgraph, Node* effect, Node* control)
-      : jsgraph_(jsgraph),
-        allocation_(nullptr),
-        effect_(effect),
-        control_(control) {}
-
-  // Primitive allocation of static size.
-  void Allocate(int size, PretenureFlag pretenure = NOT_TENURED) {
-    effect_ = graph()->NewNode(common()->BeginRegion(), effect_);
-    allocation_ =
-        graph()->NewNode(simplified()->Allocate(pretenure),
-                         jsgraph()->Constant(size), effect_, control_);
-    effect_ = allocation_;
-  }
-
-  // Primitive store into a field.
-  void Store(const FieldAccess& access, Node* value) {
-    effect_ = graph()->NewNode(simplified()->StoreField(access), allocation_,
-                               value, effect_, control_);
-  }
-
-  // Primitive store into an element.
-  void Store(ElementAccess const& access, Node* index, Node* value) {
-    effect_ = graph()->NewNode(simplified()->StoreElement(access), allocation_,
-                               index, value, effect_, control_);
-  }
-
-  // Compound allocation of a FixedArray.
-  void AllocateArray(int length, Handle<Map> map,
-                     PretenureFlag pretenure = NOT_TENURED) {
-    DCHECK(map->instance_type() == FIXED_ARRAY_TYPE ||
-           map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE);
-    int size = (map->instance_type() == FIXED_ARRAY_TYPE)
-                   ? FixedArray::SizeFor(length)
-                   : FixedDoubleArray::SizeFor(length);
-    Allocate(size, pretenure);
-    Store(AccessBuilder::ForMap(), map);
-    Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
-  }
-
-  // Compound store of a constant into a field.
-  void Store(const FieldAccess& access, Handle<Object> value) {
-    Store(access, jsgraph()->Constant(value));
-  }
-
-  void FinishAndChange(Node* node) {
-    NodeProperties::SetType(allocation_, NodeProperties::GetType(node));
-    node->ReplaceInput(0, allocation_);
-    node->ReplaceInput(1, effect_);
-    node->TrimInputCount(2);
-    NodeProperties::ChangeOp(node, common()->FinishRegion());
-  }
-
-  Node* Finish() {
-    return graph()->NewNode(common()->FinishRegion(), allocation_, effect_);
-  }
-
- protected:
-  JSGraph* jsgraph() { return jsgraph_; }
-  Graph* graph() { return jsgraph_->graph(); }
-  CommonOperatorBuilder* common() { return jsgraph_->common(); }
-  SimplifiedOperatorBuilder* simplified() { return jsgraph_->simplified(); }
-
- private:
-  JSGraph* const jsgraph_;
-  Node* allocation_;
-  Node* effect_;
-  Node* control_;
-};
-
-}  // namespace
-
-
 // A helper class to simplify the process of reducing a single binop node with a
 // JSOperator. This class manages the rewriting of context, control, and effect
 // dependencies during lowering of a binop and contains numerous helper
@@ -218,17 +137,6 @@
     return ChangeToPureOperator(op, false, type);
   }
 
-  // TODO(turbofan): Strong mode should be killed soonish!
-  bool IsStrong() const {
-    if (node_->opcode() == IrOpcode::kJSLessThan ||
-        node_->opcode() == IrOpcode::kJSLessThanOrEqual ||
-        node_->opcode() == IrOpcode::kJSGreaterThan ||
-        node_->opcode() == IrOpcode::kJSGreaterThanOrEqual) {
-      return is_strong(OpParameter<LanguageMode>(node_));
-    }
-    return is_strong(BinaryOperationParametersOf(node_->op()).language_mode());
-  }
-
   bool LeftInputIs(Type* t) { return left_type()->Is(t); }
 
   bool RightInputIs(Type* t) { return right_type()->Is(t); }
@@ -457,7 +365,7 @@
     // JSAdd(x:number, y:number) => NumberAdd(x, y)
     return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
   }
-  if (r.NeitherInputCanBe(Type::StringOrReceiver()) && !r.IsStrong()) {
+  if (r.NeitherInputCanBe(Type::StringOrReceiver())) {
     // JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
     Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
     r.ConvertInputsToNumber(frame_state);
@@ -499,7 +407,7 @@
   if (flags() & kDisableBinaryOpReduction) return NoChange();
 
   JSBinopReduction r(this, node);
-  if (r.IsStrong() || numberOp == simplified()->NumberModulus()) {
+  if (numberOp == simplified()->NumberModulus()) {
     if (r.BothInputsAre(Type::Number())) {
       return r.ChangeToPureOperator(numberOp, Type::Number());
     }
@@ -515,13 +423,6 @@
   if (flags() & kDisableBinaryOpReduction) return NoChange();
 
   JSBinopReduction r(this, node);
-  if (r.IsStrong()) {
-    if (r.BothInputsAre(Type::Number())) {
-      r.ConvertInputsToUI32(kSigned, kSigned);
-      return r.ChangeToPureOperator(intOp, Type::Integral32());
-    }
-    return NoChange();
-  }
   Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
   r.ConvertInputsToNumber(frame_state);
   r.ConvertInputsToUI32(kSigned, kSigned);
@@ -535,13 +436,6 @@
   if (flags() & kDisableBinaryOpReduction) return NoChange();
 
   JSBinopReduction r(this, node);
-  if (r.IsStrong()) {
-    if (r.BothInputsAre(Type::Number())) {
-      r.ConvertInputsToUI32(left_signedness, kUnsigned);
-      return r.ChangeToPureOperator(shift_op);
-    }
-    return NoChange();
-  }
   Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
   r.ConvertInputsToNumber(frame_state);
   r.ConvertInputsToUI32(left_signedness, kUnsigned);
@@ -588,9 +482,6 @@
       less_than_or_equal = machine()->Int32LessThanOrEqual();
     } else {
       // TODO(turbofan): mixed signed/unsigned int32 comparisons.
-      if (r.IsStrong() && !r.BothInputsAre(Type::Number())) {
-        return NoChange();
-      }
       Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
       r.ConvertInputsToNumber(frame_state);
       less_than = simplified()->NumberLessThan();
@@ -780,8 +671,18 @@
       }
     }
   }
-  // Check if we have a cached conversion.
+  // Try constant-folding of JSToNumber with constant inputs.
   Type* input_type = NodeProperties::GetType(input);
+  if (input_type->IsConstant()) {
+    Handle<Object> input_value = input_type->AsConstant()->Value();
+    if (input_value->IsString()) {
+      return Replace(jsgraph()->Constant(
+          String::ToNumber(Handle<String>::cast(input_value))));
+    } else if (input_value->IsOddball()) {
+      return Replace(jsgraph()->Constant(
+          Oddball::ToNumber(Handle<Oddball>::cast(input_value))));
+    }
+  }
   if (input_type->Is(Type::Number())) {
     // JSToNumber(x:number) => x
     return Changed(input);
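
The constant folding added above evaluates ToNumber at compile time whenever the input is a known constant string or oddball, so the conversion node is replaced by a numeric constant outright. A simplified stand-alone sketch of the idea in plain C++ (FoldToNumberConstant is hypothetical and not part of the patch; the real String::ToNumber grammar also handles whitespace, hex literals, "Infinity", etc.):

#include <cmath>
#include <cstdlib>
#include <string>

double FoldToNumberConstant(const std::string& constant) {
  if (constant == "true") return 1.0;                // oddball true  -> 1
  if (constant == "false" || constant == "null")     // false / null  -> +0
    return 0.0;
  if (constant == "undefined") return std::nan("");  // undefined     -> NaN
  return std::strtod(constant.c_str(), nullptr);     // numeric string, simplified
}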
@@ -1221,7 +1122,7 @@
   // If we need an access check or the object is a Proxy, make a runtime call
   // to finish the lowering.
   Node* bool_result_runtime_has_in_proto_chain_case = graph()->NewNode(
-      javascript()->CallRuntime(Runtime::kHasInPrototypeChain, 2), r.left(),
+      javascript()->CallRuntime(Runtime::kHasInPrototypeChain), r.left(),
       prototype, context, frame_state, effect, control);
 
   control = graph()->NewNode(common()->IfFalse(), branch_is_proxy);
@@ -1422,663 +1323,6 @@
 }
 
 
-namespace {
-
-// Maximum instance size for which allocations will be inlined.
-const int kMaxInlineInstanceSize = 64 * kPointerSize;
-
-
-// Checks whether allocation using the given constructor can be inlined.
-bool IsAllocationInlineable(Handle<JSFunction> constructor) {
-  // TODO(bmeurer): Further relax restrictions on inlining, i.e.
-  // instance type and maybe instance size (inobject properties
-  // are limited anyways by the runtime).
-  return constructor->has_initial_map() &&
-         constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
-         constructor->initial_map()->instance_size() < kMaxInlineInstanceSize;
-}
-
-}  // namespace
-
-
-Reduction JSTypedLowering::ReduceJSCreate(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreate, node->opcode());
-  Node* const target = NodeProperties::GetValueInput(node, 0);
-  Type* const target_type = NodeProperties::GetType(target);
-  Node* const new_target = NodeProperties::GetValueInput(node, 1);
-  Node* const effect = NodeProperties::GetEffectInput(node);
-  // TODO(turbofan): Add support for NewTarget passed to JSCreate.
-  if (target != new_target) return NoChange();
-  // Extract constructor function.
-  if (target_type->IsConstant() &&
-      target_type->AsConstant()->Value()->IsJSFunction()) {
-    Handle<JSFunction> constructor =
-        Handle<JSFunction>::cast(target_type->AsConstant()->Value());
-    DCHECK(constructor->IsConstructor());
-    // Force completion of inobject slack tracking before
-    // generating code to finalize the instance size.
-    constructor->CompleteInobjectSlackTrackingIfActive();
-
-    // TODO(bmeurer): We fall back to the runtime in case we cannot inline
-    // the allocation here, which is sort of expensive. We should think about
-    // a soft fallback to some NewObjectCodeStub.
-    if (IsAllocationInlineable(constructor)) {
-      // Compute instance size from initial map of {constructor}.
-      Handle<Map> initial_map(constructor->initial_map(), isolate());
-      int const instance_size = initial_map->instance_size();
-
-      // Add a dependency on the {initial_map} to make sure that this code is
-      // deoptimized whenever the {initial_map} of the {constructor} changes.
-      dependencies()->AssumeInitialMapCantChange(initial_map);
-
-      // Emit code to allocate the JSObject instance for the {constructor}.
-      AllocationBuilder a(jsgraph(), effect, graph()->start());
-      a.Allocate(instance_size);
-      a.Store(AccessBuilder::ForMap(), initial_map);
-      a.Store(AccessBuilder::ForJSObjectProperties(),
-              jsgraph()->EmptyFixedArrayConstant());
-      a.Store(AccessBuilder::ForJSObjectElements(),
-              jsgraph()->EmptyFixedArrayConstant());
-      for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
-        a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
-                jsgraph()->UndefinedConstant());
-      }
-      a.FinishAndChange(node);
-      return Changed(node);
-    }
-  }
-  return NoChange();
-}
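
For reference, the removed ReduceJSCreate gated inline allocation on the constructor's initial map. A minimal sketch of that eligibility predicate with stand-in types (not the V8 classes; kPointerSize is assumed to be 8 for a 64-bit target):

#include <cstdio>

constexpr int kPointerSize = 8;  // assumption: 64-bit target
constexpr int kMaxInlineInstanceSize = 64 * kPointerSize;

struct InitialMap {
  bool is_js_object;  // stands in for instance_type() == JS_OBJECT_TYPE
  int instance_size;  // bytes
};

struct Constructor {
  bool has_initial_map;
  InitialMap initial_map;
};

static bool IsAllocationInlineable(const Constructor& c) {
  return c.has_initial_map && c.initial_map.is_js_object &&
         c.initial_map.instance_size < kMaxInlineInstanceSize;
}

int main() {
  Constructor small{true, {true, 6 * kPointerSize}};
  Constructor huge{true, {true, 128 * kPointerSize}};
  std::printf("small inlineable: %d, huge inlineable: %d\n",
              IsAllocationInlineable(small), IsAllocationInlineable(huge));
}
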
-
-
-namespace {
-
-// Retrieves the frame state holding actual argument values.
-Node* GetArgumentsFrameState(Node* frame_state) {
-  Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
-  FrameStateInfo outer_state_info = OpParameter<FrameStateInfo>(outer_state);
-  return outer_state_info.type() == FrameStateType::kArgumentsAdaptor
-             ? outer_state
-             : frame_state;
-}
-
-}  // namespace
-
-
-Reduction JSTypedLowering::ReduceJSCreateArguments(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreateArguments, node->opcode());
-  CreateArgumentsParameters const& p = CreateArgumentsParametersOf(node->op());
-  Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
-  Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
-  FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
-
-  // Use the ArgumentsAccessStub for materializing both mapped and unmapped
-  // arguments object, but only for non-inlined (i.e. outermost) frames.
-  if (outer_state->opcode() != IrOpcode::kFrameState) {
-    Isolate* isolate = jsgraph()->isolate();
-    int parameter_count = state_info.parameter_count() - 1;
-    int parameter_offset = parameter_count * kPointerSize;
-    int offset = StandardFrameConstants::kCallerSPOffset + parameter_offset;
-    Node* parameter_pointer = graph()->NewNode(
-        machine()->IntAdd(), graph()->NewNode(machine()->LoadFramePointer()),
-        jsgraph()->IntPtrConstant(offset));
-
-    if (p.type() != CreateArgumentsParameters::kRestArray) {
-      Handle<SharedFunctionInfo> shared;
-      if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
-      bool unmapped = p.type() == CreateArgumentsParameters::kUnmappedArguments;
-      Callable callable = CodeFactory::ArgumentsAccess(
-          isolate, unmapped, shared->has_duplicate_parameters());
-      CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-          isolate, graph()->zone(), callable.descriptor(), 0,
-          CallDescriptor::kNeedsFrameState);
-      const Operator* new_op = common()->Call(desc);
-      Node* stub_code = jsgraph()->HeapConstant(callable.code());
-      node->InsertInput(graph()->zone(), 0, stub_code);
-      node->InsertInput(graph()->zone(), 2,
-                        jsgraph()->Constant(parameter_count));
-      node->InsertInput(graph()->zone(), 3, parameter_pointer);
-      NodeProperties::ChangeOp(node, new_op);
-      return Changed(node);
-    } else {
-      Callable callable = CodeFactory::RestArgumentsAccess(isolate);
-      CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-          isolate, graph()->zone(), callable.descriptor(), 0,
-          CallDescriptor::kNeedsFrameState);
-      const Operator* new_op = common()->Call(desc);
-      Node* stub_code = jsgraph()->HeapConstant(callable.code());
-      node->InsertInput(graph()->zone(), 0, stub_code);
-      node->ReplaceInput(1, jsgraph()->Constant(parameter_count));
-      node->InsertInput(graph()->zone(), 2, parameter_pointer);
-      node->InsertInput(graph()->zone(), 3,
-                        jsgraph()->Constant(p.start_index()));
-      NodeProperties::ChangeOp(node, new_op);
-      return Changed(node);
-    }
-  } else if (outer_state->opcode() == IrOpcode::kFrameState) {
-    // Use inline allocation for all mapped arguments objects within inlined
-    // (i.e. non-outermost) frames, independent of the object size.
-    if (p.type() == CreateArgumentsParameters::kMappedArguments) {
-      Handle<SharedFunctionInfo> shared;
-      if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
-      Node* const callee = NodeProperties::GetValueInput(node, 0);
-      Node* const control = NodeProperties::GetControlInput(node);
-      Node* const context = NodeProperties::GetContextInput(node);
-      Node* effect = NodeProperties::GetEffectInput(node);
-      // TODO(mstarzinger): Duplicate parameters are not handled yet.
-      if (shared->has_duplicate_parameters()) return NoChange();
-      // Choose the correct frame state and frame state info depending on
-      // whether there conceptually is an arguments adaptor frame in the call
-      // chain.
-      Node* const args_state = GetArgumentsFrameState(frame_state);
-      FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
-      // Prepare element backing store to be used by arguments object.
-      bool has_aliased_arguments = false;
-      Node* const elements = AllocateAliasedArguments(
-          effect, control, args_state, context, shared, &has_aliased_arguments);
-      effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
-      // Load the arguments object map from the current native context.
-      Node* const load_native_context = effect = graph()->NewNode(
-          javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-          context, context, effect);
-      Node* const load_arguments_map = effect = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForContextSlot(
-              has_aliased_arguments ? Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX
-                                    : Context::SLOPPY_ARGUMENTS_MAP_INDEX)),
-          load_native_context, effect, control);
-      // Actually allocate and initialize the arguments object.
-      AllocationBuilder a(jsgraph(), effect, control);
-      Node* properties = jsgraph()->EmptyFixedArrayConstant();
-      int length = args_state_info.parameter_count() - 1;  // Minus receiver.
-      STATIC_ASSERT(Heap::kSloppyArgumentsObjectSize == 5 * kPointerSize);
-      a.Allocate(Heap::kSloppyArgumentsObjectSize);
-      a.Store(AccessBuilder::ForMap(), load_arguments_map);
-      a.Store(AccessBuilder::ForJSObjectProperties(), properties);
-      a.Store(AccessBuilder::ForJSObjectElements(), elements);
-      a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
-      a.Store(AccessBuilder::ForArgumentsCallee(), callee);
-      RelaxControls(node);
-      a.FinishAndChange(node);
-      return Changed(node);
-    } else if (p.type() == CreateArgumentsParameters::kUnmappedArguments) {
-      // Use inline allocation for all unmapped arguments objects within inlined
-      // (i.e. non-outermost) frames, independent of the object size.
-      Node* const control = NodeProperties::GetControlInput(node);
-      Node* const context = NodeProperties::GetContextInput(node);
-      Node* effect = NodeProperties::GetEffectInput(node);
-      // Choose the correct frame state and frame state info depending on
-      // whether there conceptually is an arguments adaptor frame in the call
-      // chain.
-      Node* const args_state = GetArgumentsFrameState(frame_state);
-      FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
-      // Prepare element backing store to be used by arguments object.
-      Node* const elements = AllocateArguments(effect, control, args_state);
-      effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
-      // Load the arguments object map from the current native context.
-      Node* const load_native_context = effect = graph()->NewNode(
-          javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-          context, context, effect);
-      Node* const load_arguments_map = effect = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForContextSlot(
-              Context::STRICT_ARGUMENTS_MAP_INDEX)),
-          load_native_context, effect, control);
-      // Actually allocate and initialize the arguments object.
-      AllocationBuilder a(jsgraph(), effect, control);
-      Node* properties = jsgraph()->EmptyFixedArrayConstant();
-      int length = args_state_info.parameter_count() - 1;  // Minus receiver.
-      STATIC_ASSERT(Heap::kStrictArgumentsObjectSize == 4 * kPointerSize);
-      a.Allocate(Heap::kStrictArgumentsObjectSize);
-      a.Store(AccessBuilder::ForMap(), load_arguments_map);
-      a.Store(AccessBuilder::ForJSObjectProperties(), properties);
-      a.Store(AccessBuilder::ForJSObjectElements(), elements);
-      a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
-      RelaxControls(node);
-      a.FinishAndChange(node);
-      return Changed(node);
-    } else if (p.type() == CreateArgumentsParameters::kRestArray) {
-      // Use inline allocation for all unmapped arguments objects within inlined
-      // (i.e. non-outermost) frames, independent of the object size.
-      Node* const control = NodeProperties::GetControlInput(node);
-      Node* const context = NodeProperties::GetContextInput(node);
-      Node* effect = NodeProperties::GetEffectInput(node);
-      // Choose the correct frame state and frame state info depending on
-      // whether there conceptually is an arguments adaptor frame in the call
-      // chain.
-      Node* const args_state = GetArgumentsFrameState(frame_state);
-      FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
-      // Prepare element backing store to be used by the rest array.
-      Node* const elements =
-          AllocateRestArguments(effect, control, args_state, p.start_index());
-      effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
-      // Load the JSArray object map from the current native context.
-      Node* const load_native_context = effect = graph()->NewNode(
-          javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-          context, context, effect);
-      Node* const load_jsarray_map = effect = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForContextSlot(
-              Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX)),
-          load_native_context, effect, control);
-      // Actually allocate and initialize the jsarray.
-      AllocationBuilder a(jsgraph(), effect, control);
-      Node* properties = jsgraph()->EmptyFixedArrayConstant();
-
-      // -1 to minus receiver
-      int argument_count = args_state_info.parameter_count() - 1;
-      int length = std::max(0, argument_count - p.start_index());
-      STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
-      a.Allocate(JSArray::kSize);
-      a.Store(AccessBuilder::ForMap(), load_jsarray_map);
-      a.Store(AccessBuilder::ForJSObjectProperties(), properties);
-      a.Store(AccessBuilder::ForJSObjectElements(), elements);
-      a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS),
-              jsgraph()->Constant(length));
-      RelaxControls(node);
-      a.FinishAndChange(node);
-      return Changed(node);
-    }
-  }
-
-  return NoChange();
-}
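
The removed arguments lowering derived both the arguments object length and the rest array length from the frame state's parameter count. A small standalone sketch of that arithmetic (FrameStateInfo here is a hypothetical stand-in):

#include <algorithm>
#include <cstdio>

struct FrameStateInfo {
  int parameter_count;  // includes the receiver
};

static int ArgumentsLength(const FrameStateInfo& info) {
  return info.parameter_count - 1;  // minus receiver
}

static int RestArrayLength(const FrameStateInfo& info, int start_index) {
  return std::max(0, ArgumentsLength(info) - start_index);
}

int main() {
  FrameStateInfo info{4};  // receiver + 3 arguments
  std::printf("arguments.length = %d\n", ArgumentsLength(info));        // 3
  std::printf("rest length (start_index=2) = %d\n",
              RestArrayLength(info, 2));                                // 1
}
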
-
-
-Reduction JSTypedLowering::ReduceNewArray(Node* node, Node* length,
-                                          int capacity,
-                                          Handle<AllocationSite> site) {
-  DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
-  Node* context = NodeProperties::GetContextInput(node);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-
-  // Extract transition and tenuring feedback from the {site} and add
-  // appropriate code dependencies on the {site} if deoptimization is
-  // enabled.
-  PretenureFlag pretenure = site->GetPretenureMode();
-  ElementsKind elements_kind = site->GetElementsKind();
-  DCHECK(IsFastElementsKind(elements_kind));
-  if (flags() & kDeoptimizationEnabled) {
-    dependencies()->AssumeTenuringDecision(site);
-    dependencies()->AssumeTransitionStable(site);
-  }
-
-  // Retrieve the initial map for the array from the appropriate native context.
-  Node* native_context = effect = graph()->NewNode(
-      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-      context, context, effect);
-  Node* js_array_map = effect = graph()->NewNode(
-      javascript()->LoadContext(0, Context::ArrayMapIndex(elements_kind), true),
-      native_context, native_context, effect);
-
-  // Setup elements and properties.
-  Node* elements;
-  if (capacity == 0) {
-    elements = jsgraph()->EmptyFixedArrayConstant();
-  } else {
-    elements = effect =
-        AllocateElements(effect, control, elements_kind, capacity, pretenure);
-  }
-  Node* properties = jsgraph()->EmptyFixedArrayConstant();
-
-  // Perform the allocation of the actual JSArray object.
-  AllocationBuilder a(jsgraph(), effect, control);
-  a.Allocate(JSArray::kSize, pretenure);
-  a.Store(AccessBuilder::ForMap(), js_array_map);
-  a.Store(AccessBuilder::ForJSObjectProperties(), properties);
-  a.Store(AccessBuilder::ForJSObjectElements(), elements);
-  a.Store(AccessBuilder::ForJSArrayLength(elements_kind), length);
-  RelaxControls(node);
-  a.FinishAndChange(node);
-  return Changed(node);
-}
-
-
-Reduction JSTypedLowering::ReduceJSCreateArray(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
-  CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
-  Node* target = NodeProperties::GetValueInput(node, 0);
-  Node* new_target = NodeProperties::GetValueInput(node, 1);
-
-  // TODO(bmeurer): Optimize the subclassing case.
-  if (target != new_target) return NoChange();
-
-  // Check if we have a feedback {site} on the {node}.
-  Handle<AllocationSite> site = p.site();
-  if (p.site().is_null()) return NoChange();
-
-  // Attempt to inline calls to the Array constructor for the relevant cases
-  // where either no arguments are provided, or exactly one unsigned number
-  // argument is given.
-  if (site->CanInlineCall()) {
-    if (p.arity() == 0) {
-      Node* length = jsgraph()->ZeroConstant();
-      int capacity = JSArray::kPreallocatedArrayElements;
-      return ReduceNewArray(node, length, capacity, site);
-    } else if (p.arity() == 1) {
-      Node* length = NodeProperties::GetValueInput(node, 2);
-      Type* length_type = NodeProperties::GetType(length);
-      if (length_type->Is(type_cache_.kElementLoopUnrollType)) {
-        int capacity = static_cast<int>(length_type->Max());
-        return ReduceNewArray(node, length, capacity, site);
-      }
-    }
-  }
-
-  // Reduce {node} to the appropriate ArrayConstructorStub backend.
-  // Note that these stubs "behave" like JSFunctions, which means they
-  // expect a receiver on the stack, which they remove. We just push
-  // undefined for the receiver.
-  ElementsKind elements_kind = site->GetElementsKind();
-  AllocationSiteOverrideMode override_mode =
-      (AllocationSite::GetMode(elements_kind) == TRACK_ALLOCATION_SITE)
-          ? DISABLE_ALLOCATION_SITES
-          : DONT_OVERRIDE;
-  if (p.arity() == 0) {
-    ArrayNoArgumentConstructorStub stub(isolate(), elements_kind,
-                                        override_mode);
-    CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-        isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 1,
-        CallDescriptor::kNeedsFrameState);
-    node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
-    node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
-    node->InsertInput(graph()->zone(), 3, jsgraph()->UndefinedConstant());
-    NodeProperties::ChangeOp(node, common()->Call(desc));
-    return Changed(node);
-  } else if (p.arity() == 1) {
-    // TODO(bmeurer): Optimize for the 0 length non-holey case?
-    ArraySingleArgumentConstructorStub stub(
-        isolate(), GetHoleyElementsKind(elements_kind), override_mode);
-    CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-        isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
-        CallDescriptor::kNeedsFrameState);
-    node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
-    node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
-    node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(1));
-    node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
-    NodeProperties::ChangeOp(node, common()->Call(desc));
-    return Changed(node);
-  } else {
-    int const arity = static_cast<int>(p.arity());
-    ArrayNArgumentsConstructorStub stub(isolate(), elements_kind,
-                                        override_mode);
-    CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-        isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
-        arity + 1, CallDescriptor::kNeedsFrameState);
-    node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
-    node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
-    node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
-    node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
-    NodeProperties::ChangeOp(node, common()->Call(desc));
-    return Changed(node);
-  }
-}
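
The removed ReduceJSCreateArray only inlined the one-argument Array(n) case when the length's type proved a small upper bound, otherwise it fell back to the constructor stubs. A rough sketch of that capacity decision (the limit constant below is illustrative, not the real bound behind kElementLoopUnrollType):

#include <cstdio>

struct Range {  // stands in for a numeric type with known bounds
  double min, max;
};

// Assumption for illustration: element initialization loops are only
// unrolled for very small arrays.
constexpr double kMaxUnrolledCapacity = 16;

static bool TryPickInlineCapacity(const Range& length_type, int* capacity) {
  if (length_type.min < 0 || length_type.max > kMaxUnrolledCapacity)
    return false;  // fall back to the ArrayConstructor stub
  *capacity = static_cast<int>(length_type.max);
  return true;
}

int main() {
  int capacity = 0;
  Range small{0, 8};
  bool ok = TryPickInlineCapacity(small, &capacity);
  std::printf("small: ok=%d capacity=%d\n", ok, capacity);
  Range large{0, 1024};
  ok = TryPickInlineCapacity(large, &capacity);
  std::printf("large: ok=%d (use stub)\n", ok);
}
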
-
-
-Reduction JSTypedLowering::ReduceJSCreateClosure(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
-  CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
-  Handle<SharedFunctionInfo> shared = p.shared_info();
-
-  // Use the FastNewClosureStub that allocates in new space only for nested
-  // functions that don't need literals cloning.
-  if (p.pretenure() == NOT_TENURED && shared->num_literals() == 0) {
-    Isolate* isolate = jsgraph()->isolate();
-    Callable callable = CodeFactory::FastNewClosure(
-        isolate, shared->language_mode(), shared->kind());
-    CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-        isolate, graph()->zone(), callable.descriptor(), 0,
-        CallDescriptor::kNoFlags);
-    const Operator* new_op = common()->Call(desc);
-    Node* stub_code = jsgraph()->HeapConstant(callable.code());
-    node->InsertInput(graph()->zone(), 0, stub_code);
-    node->InsertInput(graph()->zone(), 1, jsgraph()->HeapConstant(shared));
-    NodeProperties::ChangeOp(node, new_op);
-    return Changed(node);
-  }
-
-  return NoChange();
-}
-
-
-Reduction JSTypedLowering::ReduceJSCreateIterResultObject(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreateIterResultObject, node->opcode());
-  Node* value = NodeProperties::GetValueInput(node, 0);
-  Node* done = NodeProperties::GetValueInput(node, 1);
-  Node* context = NodeProperties::GetContextInput(node);
-  Node* effect = NodeProperties::GetEffectInput(node);
-
-  // Load the JSIteratorResult map for the {context}.
-  Node* native_context = effect = graph()->NewNode(
-      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-      context, context, effect);
-  Node* iterator_result_map = effect = graph()->NewNode(
-      javascript()->LoadContext(0, Context::ITERATOR_RESULT_MAP_INDEX, true),
-      native_context, native_context, effect);
-
-  // Emit code to allocate the JSIteratorResult instance.
-  AllocationBuilder a(jsgraph(), effect, graph()->start());
-  a.Allocate(JSIteratorResult::kSize);
-  a.Store(AccessBuilder::ForMap(), iterator_result_map);
-  a.Store(AccessBuilder::ForJSObjectProperties(),
-          jsgraph()->EmptyFixedArrayConstant());
-  a.Store(AccessBuilder::ForJSObjectElements(),
-          jsgraph()->EmptyFixedArrayConstant());
-  a.Store(AccessBuilder::ForJSIteratorResultValue(), value);
-  a.Store(AccessBuilder::ForJSIteratorResultDone(), done);
-  STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
-  a.FinishAndChange(node);
-  return Changed(node);
-}
-
-
-Reduction JSTypedLowering::ReduceJSCreateLiteralArray(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreateLiteralArray, node->opcode());
-  CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
-  Handle<FixedArray> const constants = Handle<FixedArray>::cast(p.constant());
-  int const length = constants->length();
-  int const flags = p.flags();
-
-  // Use the FastCloneShallowArrayStub only for shallow boilerplates up to the
-  // initial length limit for arrays with "fast" elements kind.
-  // TODO(rossberg): Teach strong mode to FastCloneShallowArrayStub.
-  if ((flags & ArrayLiteral::kShallowElements) != 0 &&
-      (flags & ArrayLiteral::kIsStrong) == 0 &&
-      length < JSArray::kInitialMaxFastElementArray) {
-    Isolate* isolate = jsgraph()->isolate();
-    Callable callable = CodeFactory::FastCloneShallowArray(isolate);
-    CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-        isolate, graph()->zone(), callable.descriptor(), 0,
-        (OperatorProperties::GetFrameStateInputCount(node->op()) != 0)
-            ? CallDescriptor::kNeedsFrameState
-            : CallDescriptor::kNoFlags);
-    const Operator* new_op = common()->Call(desc);
-    Node* stub_code = jsgraph()->HeapConstant(callable.code());
-    Node* literal_index = jsgraph()->SmiConstant(p.index());
-    Node* constant_elements = jsgraph()->HeapConstant(constants);
-    node->InsertInput(graph()->zone(), 0, stub_code);
-    node->InsertInput(graph()->zone(), 2, literal_index);
-    node->InsertInput(graph()->zone(), 3, constant_elements);
-    NodeProperties::ChangeOp(node, new_op);
-    return Changed(node);
-  }
-
-  return NoChange();
-}
-
-
-Reduction JSTypedLowering::ReduceJSCreateLiteralObject(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreateLiteralObject, node->opcode());
-  CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
-  Handle<FixedArray> const constants = Handle<FixedArray>::cast(p.constant());
-  // Constants are pairs, see ObjectLiteral::properties_count().
-  int const length = constants->length() / 2;
-  int const flags = p.flags();
-
-  // Use the FastCloneShallowObjectStub only for shallow boilerplates without
-  // elements up to the number of properties that the stubs can handle.
-  if ((flags & ObjectLiteral::kShallowProperties) != 0 &&
-      length <= FastCloneShallowObjectStub::kMaximumClonedProperties) {
-    Isolate* isolate = jsgraph()->isolate();
-    Callable callable = CodeFactory::FastCloneShallowObject(isolate, length);
-    CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-        isolate, graph()->zone(), callable.descriptor(), 0,
-        (OperatorProperties::GetFrameStateInputCount(node->op()) != 0)
-            ? CallDescriptor::kNeedsFrameState
-            : CallDescriptor::kNoFlags);
-    const Operator* new_op = common()->Call(desc);
-    Node* stub_code = jsgraph()->HeapConstant(callable.code());
-    Node* literal_index = jsgraph()->SmiConstant(p.index());
-    Node* literal_flags = jsgraph()->SmiConstant(flags);
-    Node* constant_elements = jsgraph()->HeapConstant(constants);
-    node->InsertInput(graph()->zone(), 0, stub_code);
-    node->InsertInput(graph()->zone(), 2, literal_index);
-    node->InsertInput(graph()->zone(), 3, constant_elements);
-    node->InsertInput(graph()->zone(), 4, literal_flags);
-    NodeProperties::ChangeOp(node, new_op);
-    return Changed(node);
-  }
-
-  return NoChange();
-}
-
-
-Reduction JSTypedLowering::ReduceJSCreateFunctionContext(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
-  int slot_count = OpParameter<int>(node->op());
-  Node* const closure = NodeProperties::GetValueInput(node, 0);
-
-  // Use inline allocation for function contexts up to a size limit.
-  if (slot_count < kFunctionContextAllocationLimit) {
-    // JSCreateFunctionContext[slot_count < limit]](fun)
-    Node* effect = NodeProperties::GetEffectInput(node);
-    Node* control = NodeProperties::GetControlInput(node);
-    Node* context = NodeProperties::GetContextInput(node);
-    Node* extension = jsgraph()->TheHoleConstant();
-    Node* native_context = effect = graph()->NewNode(
-        javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-        context, context, effect);
-    AllocationBuilder a(jsgraph(), effect, control);
-    STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
-    int context_length = slot_count + Context::MIN_CONTEXT_SLOTS;
-    a.AllocateArray(context_length, factory()->function_context_map());
-    a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
-    a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
-    a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
-    a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
-            native_context);
-    for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
-      a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
-    }
-    RelaxControls(node);
-    a.FinishAndChange(node);
-    return Changed(node);
-  }
-
-  // Use the FastNewContextStub only for function contexts up maximum size.
-  if (slot_count <= FastNewContextStub::kMaximumSlots) {
-    Isolate* isolate = jsgraph()->isolate();
-    Callable callable = CodeFactory::FastNewContext(isolate, slot_count);
-    CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-        isolate, graph()->zone(), callable.descriptor(), 0,
-        CallDescriptor::kNoFlags);
-    const Operator* new_op = common()->Call(desc);
-    Node* stub_code = jsgraph()->HeapConstant(callable.code());
-    node->InsertInput(graph()->zone(), 0, stub_code);
-    NodeProperties::ChangeOp(node, new_op);
-    return Changed(node);
-  }
-
-  return NoChange();
-}
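
The removed function-context lowering allocated slot_count + Context::MIN_CONTEXT_SLOTS slots inline, but only below a fixed limit. A standalone sketch of that sizing decision (the constants are stand-ins for the real V8 values):

#include <cstdio>

constexpr int kMinContextSlots = 4;  // closure, previous, extension, native context
constexpr int kFunctionContextAllocationLimit = 16;

// Returns the total number of slots to allocate inline, or -1 when the
// lowering should fall back to a stub or runtime call instead.
static int InlineContextLength(int slot_count) {
  if (slot_count >= kFunctionContextAllocationLimit) return -1;
  return slot_count + kMinContextSlots;
}

int main() {
  std::printf("5 slots  -> %d total (inline)\n", InlineContextLength(5));
  std::printf("40 slots -> %d (use stub/runtime)\n", InlineContextLength(40));
}
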
-
-
-Reduction JSTypedLowering::ReduceJSCreateWithContext(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreateWithContext, node->opcode());
-  Node* object = NodeProperties::GetValueInput(node, 0);
-  Node* closure = NodeProperties::GetValueInput(node, 1);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-  Node* context = NodeProperties::GetContextInput(node);
-  Node* native_context = effect = graph()->NewNode(
-      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-      context, context, effect);
-  AllocationBuilder a(jsgraph(), effect, control);
-  STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
-  a.AllocateArray(Context::MIN_CONTEXT_SLOTS, factory()->with_context_map());
-  a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
-  a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
-  a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), object);
-  a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
-          native_context);
-  RelaxControls(node);
-  a.FinishAndChange(node);
-  return Changed(node);
-}
-
-
-Reduction JSTypedLowering::ReduceJSCreateCatchContext(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreateCatchContext, node->opcode());
-  Handle<String> name = OpParameter<Handle<String>>(node);
-  Node* exception = NodeProperties::GetValueInput(node, 0);
-  Node* closure = NodeProperties::GetValueInput(node, 1);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-  Node* context = NodeProperties::GetContextInput(node);
-  Node* native_context = effect = graph()->NewNode(
-      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-      context, context, effect);
-  AllocationBuilder a(jsgraph(), effect, control);
-  STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
-  a.AllocateArray(Context::MIN_CONTEXT_SLOTS + 1,
-                  factory()->catch_context_map());
-  a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
-  a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
-  a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), name);
-  a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
-          native_context);
-  a.Store(AccessBuilder::ForContextSlot(Context::THROWN_OBJECT_INDEX),
-          exception);
-  RelaxControls(node);
-  a.FinishAndChange(node);
-  return Changed(node);
-}
-
-
-Reduction JSTypedLowering::ReduceJSCreateBlockContext(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreateBlockContext, node->opcode());
-  Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
-  int context_length = scope_info->ContextLength();
-  Node* const closure = NodeProperties::GetValueInput(node, 0);
-
-  // Use inline allocation for block contexts up to a size limit.
-  if (context_length < kBlockContextAllocationLimit) {
-    // JSCreateBlockContext[scope[length < limit]](fun)
-    Node* effect = NodeProperties::GetEffectInput(node);
-    Node* control = NodeProperties::GetControlInput(node);
-    Node* context = NodeProperties::GetContextInput(node);
-    Node* extension = jsgraph()->Constant(scope_info);
-    Node* native_context = effect = graph()->NewNode(
-        javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-        context, context, effect);
-    AllocationBuilder a(jsgraph(), effect, control);
-    STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
-    a.AllocateArray(context_length, factory()->block_context_map());
-    a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
-    a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
-    a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
-    a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
-            native_context);
-    for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
-      a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
-    }
-    RelaxControls(node);
-    a.FinishAndChange(node);
-    return Changed(node);
-  }
-
-  return NoChange();
-}
-
-
 Reduction JSTypedLowering::ReduceJSCallConstruct(Node* node) {
   DCHECK_EQ(IrOpcode::kJSCallConstruct, node->opcode());
   CallConstructParameters const& p = CallConstructParametersOf(node->op());
@@ -2252,9 +1496,8 @@
   // Maybe we did at least learn something about the {receiver}.
   if (p.convert_mode() != convert_mode) {
     NodeProperties::ChangeOp(
-        node,
-        javascript()->CallFunction(p.arity(), p.language_mode(), p.feedback(),
-                                   convert_mode, p.tail_call_mode()));
+        node, javascript()->CallFunction(p.arity(), p.feedback(), convert_mode,
+                                         p.tail_call_mode()));
     return Changed(node);
   }
 
@@ -2270,159 +1513,6 @@
 }
 
 
-Reduction JSTypedLowering::ReduceJSForInPrepare(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSForInPrepare, node->opcode());
-  Node* receiver = NodeProperties::GetValueInput(node, 0);
-  Node* context = NodeProperties::GetContextInput(node);
-  Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-
-  // Get the set of properties to enumerate.
-  Node* cache_type = effect = graph()->NewNode(
-      javascript()->CallRuntime(Runtime::kGetPropertyNamesFast, 1), receiver,
-      context, frame_state, effect, control);
-  control = graph()->NewNode(common()->IfSuccess(), cache_type);
-
-  Node* receiver_map = effect =
-      graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                       receiver, effect, control);
-  Node* cache_type_map = effect =
-      graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                       cache_type, effect, control);
-  Node* meta_map = jsgraph()->HeapConstant(factory()->meta_map());
-
-  // If we got a map from the GetPropertyNamesFast runtime call, we can do a
-  // fast modification check. Otherwise, we got a fixed array, and we have to
-  // perform a slow check on every iteration.
-  Node* check0 = graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
-                                  cache_type_map, meta_map);
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
-
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* cache_array_true0;
-  Node* cache_length_true0;
-  Node* cache_type_true0;
-  Node* etrue0;
-  {
-    // Enum cache case.
-    Node* cache_type_enum_length = etrue0 = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForMapBitField3()), cache_type,
-        effect, if_true0);
-    cache_length_true0 = graph()->NewNode(
-        simplified()->NumberBitwiseAnd(), cache_type_enum_length,
-        jsgraph()->Int32Constant(Map::EnumLengthBits::kMask));
-
-    Node* check1 =
-        graph()->NewNode(machine()->Word32Equal(), cache_length_true0,
-                         jsgraph()->Int32Constant(0));
-    Node* branch1 =
-        graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* cache_array_true1;
-    Node* etrue1;
-    {
-      // No properties to enumerate.
-      cache_array_true1 =
-          jsgraph()->HeapConstant(factory()->empty_fixed_array());
-      etrue1 = etrue0;
-    }
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* cache_array_false1;
-    Node* efalse1;
-    {
-      // Load the enumeration cache from the instance descriptors of {receiver}.
-      Node* receiver_map_descriptors = efalse1 = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForMapDescriptors()),
-          receiver_map, etrue0, if_false1);
-      Node* object_map_enum_cache = efalse1 = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForDescriptorArrayEnumCache()),
-          receiver_map_descriptors, efalse1, if_false1);
-      cache_array_false1 = efalse1 = graph()->NewNode(
-          simplified()->LoadField(
-              AccessBuilder::ForDescriptorArrayEnumCacheBridgeCache()),
-          object_map_enum_cache, efalse1, if_false1);
-    }
-
-    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    etrue0 =
-        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
-    cache_array_true0 =
-        graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                         cache_array_true1, cache_array_false1, if_true0);
-
-    cache_type_true0 = cache_type;
-  }
-
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* cache_array_false0;
-  Node* cache_length_false0;
-  Node* cache_type_false0;
-  Node* efalse0;
-  {
-    // FixedArray case.
-    cache_type_false0 = jsgraph()->OneConstant();  // Smi means slow check
-    cache_array_false0 = cache_type;
-    cache_length_false0 = efalse0 = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
-        cache_array_false0, effect, if_false0);
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
-  Node* cache_array =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                       cache_array_true0, cache_array_false0, control);
-  Node* cache_length =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                       cache_length_true0, cache_length_false0, control);
-  cache_type =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                       cache_type_true0, cache_type_false0, control);
-
-  for (auto edge : node->use_edges()) {
-    Node* const use = edge.from();
-    if (NodeProperties::IsEffectEdge(edge)) {
-      edge.UpdateTo(effect);
-      Revisit(use);
-    } else {
-      if (NodeProperties::IsControlEdge(edge)) {
-        if (use->opcode() == IrOpcode::kIfSuccess) {
-          Replace(use, control);
-        } else if (use->opcode() == IrOpcode::kIfException) {
-          edge.UpdateTo(cache_type_true0);
-          continue;
-        } else {
-          UNREACHABLE();
-        }
-      } else {
-        DCHECK(NodeProperties::IsValueEdge(edge));
-        DCHECK_EQ(IrOpcode::kProjection, use->opcode());
-        switch (ProjectionIndexOf(use->op())) {
-          case 0:
-            Replace(use, cache_type);
-            break;
-          case 1:
-            Replace(use, cache_array);
-            break;
-          case 2:
-            Replace(use, cache_length);
-            break;
-          default:
-            UNREACHABLE();
-            break;
-        }
-      }
-      use->Kill();
-    }
-  }
-  return NoChange();  // All uses were replaced already above.
-}
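
The removed ReduceJSForInPrepare read the enum cache length by masking the map's bit_field3 with Map::EnumLengthBits::kMask. A minimal sketch of that style of bit-field decode (the field layout below is made up for illustration, not V8's actual bit assignment):

#include <cstdint>
#include <cstdio>

struct EnumLengthBits {
  static constexpr uint32_t kShift = 0;
  static constexpr uint32_t kSize = 10;
  static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;

  static uint32_t decode(uint32_t bit_field3) {
    return (bit_field3 & kMask) >> kShift;
  }
};

int main() {
  uint32_t bit_field3 = 0xABCD0007;  // low bits hold the enum length here
  std::printf("enum length = %u\n", EnumLengthBits::decode(bit_field3));  // 7
}
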
-
-
 Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
   DCHECK_EQ(IrOpcode::kJSForInNext, node->opcode());
   Node* receiver = NodeProperties::GetValueInput(node, 0);
@@ -2464,38 +1554,12 @@
   Node* efalse0;
   Node* vfalse0;
   {
-    // Check if the {cache_type} is zero, which indicates proxy.
-    Node* check1 = graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
-                                    cache_type, jsgraph()->ZeroConstant());
-    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                     check1, if_false0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* etrue1;
-    Node* vtrue1;
-    {
-      // Don't do filtering for proxies.
-      etrue1 = effect;
-      vtrue1 = key;
-    }
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* efalse1;
-    Node* vfalse1;
-    {
-      // Filter the {key} to check if it's still a valid property of the
-      // {receiver} (does the ToName conversion implicitly).
-      vfalse1 = efalse1 = graph()->NewNode(
-          javascript()->CallRuntime(Runtime::kForInFilter, 2), receiver, key,
-          context, frame_state, effect, if_false1);
-      if_false1 = graph()->NewNode(common()->IfSuccess(), vfalse1);
-    }
-
-    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    efalse0 =
-        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
-    vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                               vtrue1, vfalse1, if_false0);
+    // Filter the {key} to check if it's still a valid property of the
+    // {receiver} (does the ToName conversion implicitly).
+    vfalse0 = efalse0 = graph()->NewNode(
+        javascript()->CallRuntime(Runtime::kForInFilter), receiver, key,
+        context, frame_state, effect, if_false0);
+    if_false0 = graph()->NewNode(common()->IfSuccess(), vfalse0);
   }
 
   control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
@@ -2640,28 +1704,6 @@
       return ReduceJSStoreContext(node);
     case IrOpcode::kJSConvertReceiver:
       return ReduceJSConvertReceiver(node);
-    case IrOpcode::kJSCreate:
-      return ReduceJSCreate(node);
-    case IrOpcode::kJSCreateArguments:
-      return ReduceJSCreateArguments(node);
-    case IrOpcode::kJSCreateArray:
-      return ReduceJSCreateArray(node);
-    case IrOpcode::kJSCreateClosure:
-      return ReduceJSCreateClosure(node);
-    case IrOpcode::kJSCreateIterResultObject:
-      return ReduceJSCreateIterResultObject(node);
-    case IrOpcode::kJSCreateLiteralArray:
-      return ReduceJSCreateLiteralArray(node);
-    case IrOpcode::kJSCreateLiteralObject:
-      return ReduceJSCreateLiteralObject(node);
-    case IrOpcode::kJSCreateFunctionContext:
-      return ReduceJSCreateFunctionContext(node);
-    case IrOpcode::kJSCreateWithContext:
-      return ReduceJSCreateWithContext(node);
-    case IrOpcode::kJSCreateCatchContext:
-      return ReduceJSCreateCatchContext(node);
-    case IrOpcode::kJSCreateBlockContext:
-      return ReduceJSCreateBlockContext(node);
     case IrOpcode::kJSCallConstruct:
       return ReduceJSCallConstruct(node);
     case IrOpcode::kJSCallFunction:
@@ -2670,8 +1712,6 @@
       return ReduceJSForInDone(node);
     case IrOpcode::kJSForInNext:
       return ReduceJSForInNext(node);
-    case IrOpcode::kJSForInPrepare:
-      return ReduceJSForInPrepare(node);
     case IrOpcode::kJSForInStep:
       return ReduceJSForInStep(node);
     case IrOpcode::kSelect:
@@ -2690,139 +1730,6 @@
 }
 
 
-// Helper that allocates a FixedArray holding argument values recorded in the
-// given {frame_state}. Serves as backing store for JSCreateArguments nodes.
-Node* JSTypedLowering::AllocateArguments(Node* effect, Node* control,
-                                         Node* frame_state) {
-  FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
-  int argument_count = state_info.parameter_count() - 1;  // Minus receiver.
-  if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
-
-  // Prepare an iterator over argument values recorded in the frame state.
-  Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
-  StateValuesAccess parameters_access(parameters);
-  auto parameters_it = ++parameters_access.begin();
-
-  // Actually allocate the backing store.
-  AllocationBuilder a(jsgraph(), effect, control);
-  a.AllocateArray(argument_count, factory()->fixed_array_map());
-  for (int i = 0; i < argument_count; ++i, ++parameters_it) {
-    a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
-  }
-  return a.Finish();
-}
-
-
-// Helper that allocates a FixedArray holding argument values recorded in the
-// given {frame_state}. Serves as backing store for JSCreateArguments nodes.
-Node* JSTypedLowering::AllocateRestArguments(Node* effect, Node* control,
-                                             Node* frame_state,
-                                             int start_index) {
-  FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
-  int argument_count = state_info.parameter_count() - 1;  // Minus receiver.
-  int num_elements = std::max(0, argument_count - start_index);
-  if (num_elements == 0) return jsgraph()->EmptyFixedArrayConstant();
-
-  // Prepare an iterator over argument values recorded in the frame state.
-  Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
-  StateValuesAccess parameters_access(parameters);
-  auto parameters_it = ++parameters_access.begin();
-
-  // Skip unused arguments.
-  for (int i = 0; i < start_index; i++) {
-    ++parameters_it;
-  }
-
-  // Actually allocate the backing store.
-  AllocationBuilder a(jsgraph(), effect, control);
-  a.AllocateArray(num_elements, factory()->fixed_array_map());
-  for (int i = 0; i < num_elements; ++i, ++parameters_it) {
-    a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
-  }
-  return a.Finish();
-}
-
-
-// Helper that allocates a FixedArray serving as a parameter map for values
-// recorded in the given {frame_state}. Some elements map to slots within the
-// given {context}. Serves as backing store for JSCreateArguments nodes.
-Node* JSTypedLowering::AllocateAliasedArguments(
-    Node* effect, Node* control, Node* frame_state, Node* context,
-    Handle<SharedFunctionInfo> shared, bool* has_aliased_arguments) {
-  FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
-  int argument_count = state_info.parameter_count() - 1;  // Minus receiver.
-  if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
-
-  // If there is no aliasing, the arguments object elements are not special in
-  // any way, we can just return an unmapped backing store instead.
-  int parameter_count = shared->internal_formal_parameter_count();
-  if (parameter_count == 0) {
-    return AllocateArguments(effect, control, frame_state);
-  }
-
-  // Calculate number of argument values being aliased/mapped.
-  int mapped_count = Min(argument_count, parameter_count);
-  *has_aliased_arguments = true;
-
-  // Prepare an iterator over argument values recorded in the frame state.
-  Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
-  StateValuesAccess parameters_access(parameters);
-  auto paratemers_it = ++parameters_access.begin();
-
-  // The unmapped argument values recorded in the frame state are stored yet
-  // another indirection away and then linked into the parameter map below,
-  // whereas mapped argument values are replaced with a hole instead.
-  AllocationBuilder aa(jsgraph(), effect, control);
-  aa.AllocateArray(argument_count, factory()->fixed_array_map());
-  for (int i = 0; i < mapped_count; ++i, ++paratemers_it) {
-    aa.Store(AccessBuilder::ForFixedArraySlot(i), jsgraph()->TheHoleConstant());
-  }
-  for (int i = mapped_count; i < argument_count; ++i, ++paratemers_it) {
-    aa.Store(AccessBuilder::ForFixedArraySlot(i), (*paratemers_it).node);
-  }
-  Node* arguments = aa.Finish();
-
-  // Actually allocate the backing store.
-  AllocationBuilder a(jsgraph(), arguments, control);
-  a.AllocateArray(mapped_count + 2, factory()->sloppy_arguments_elements_map());
-  a.Store(AccessBuilder::ForFixedArraySlot(0), context);
-  a.Store(AccessBuilder::ForFixedArraySlot(1), arguments);
-  for (int i = 0; i < mapped_count; ++i) {
-    int idx = Context::MIN_CONTEXT_SLOTS + parameter_count - 1 - i;
-    a.Store(AccessBuilder::ForFixedArraySlot(i + 2), jsgraph()->Constant(idx));
-  }
-  return a.Finish();
-}
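
The removed AllocateAliasedArguments helper stored, for each mapped argument, the index of the context slot that aliases it. A small sketch of that index arithmetic (kMinContextSlots is a stand-in for Context::MIN_CONTEXT_SLOTS; the real parameter map additionally keeps the context and the unmapped backing store in its first two slots):

#include <algorithm>
#include <cstdio>
#include <vector>

constexpr int kMinContextSlots = 4;  // illustrative stand-in

// For each mapped argument i, record the context slot that aliases it;
// unmapped arguments are represented here by -1 (the hole in the real map).
static std::vector<int> BuildParameterMap(int argument_count,
                                          int parameter_count) {
  int mapped_count = std::min(argument_count, parameter_count);
  std::vector<int> map(argument_count, -1);
  for (int i = 0; i < mapped_count; ++i) {
    map[i] = kMinContextSlots + parameter_count - 1 - i;
  }
  return map;
}

int main() {
  // 3 actual arguments, 2 formal parameters: arguments 0 and 1 are aliased.
  for (int slot : BuildParameterMap(3, 2)) std::printf("%d ", slot);
  std::printf("\n");  // prints: 5 4 -1
}
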
-
-
-Node* JSTypedLowering::AllocateElements(Node* effect, Node* control,
-                                        ElementsKind elements_kind,
-                                        int capacity, PretenureFlag pretenure) {
-  DCHECK_LE(1, capacity);
-  DCHECK_LE(capacity, JSArray::kInitialMaxFastElementArray);
-
-  Handle<Map> elements_map = IsFastDoubleElementsKind(elements_kind)
-                                 ? factory()->fixed_double_array_map()
-                                 : factory()->fixed_array_map();
-  ElementAccess access = IsFastDoubleElementsKind(elements_kind)
-                             ? AccessBuilder::ForFixedDoubleArrayElement()
-                             : AccessBuilder::ForFixedArrayElement();
-  Node* value =
-      IsFastDoubleElementsKind(elements_kind)
-          ? jsgraph()->Float64Constant(bit_cast<double>(kHoleNanInt64))
-          : jsgraph()->TheHoleConstant();
-
-  // Actually allocate the backing store.
-  AllocationBuilder a(jsgraph(), effect, control);
-  a.AllocateArray(capacity, elements_map, pretenure);
-  for (int i = 0; i < capacity; ++i) {
-    Node* index = jsgraph()->Constant(i);
-    a.Store(access, index, value);
-  }
-  return a.Finish();
-}
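
The removed AllocateElements helper filled double backing stores with a hole NaN obtained via bit_cast<double>(kHoleNanInt64). A standalone sketch of that bit-cast (the NaN payload below is illustrative, not V8's actual kHoleNanInt64):

#include <cstdint>
#include <cstdio>
#include <cstring>

static double BitCastToDouble(uint64_t bits) {
  double result;
  std::memcpy(&result, &bits, sizeof(result));  // well-defined type punning
  return result;
}

int main() {
  const uint64_t kIllustrativeHoleNan = 0x7FF8000000000001ull;  // a quiet NaN
  double hole = BitCastToDouble(kIllustrativeHoleNan);
  std::printf("hole is NaN: %d\n", hole != hole);  // NaN != NaN
}
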
-
-
 Factory* JSTypedLowering::factory() const { return jsgraph()->factory(); }
 
 
diff --git a/src/compiler/js-typed-lowering.h b/src/compiler/js-typed-lowering.h
index 68ce74e..4621a45 100644
--- a/src/compiler/js-typed-lowering.h
+++ b/src/compiler/js-typed-lowering.h
@@ -68,41 +68,18 @@
   Reduction ReduceJSToString(Node* node);
   Reduction ReduceJSToObject(Node* node);
   Reduction ReduceJSConvertReceiver(Node* node);
-  Reduction ReduceJSCreate(Node* node);
-  Reduction ReduceJSCreateArguments(Node* node);
-  Reduction ReduceJSCreateArray(Node* node);
-  Reduction ReduceJSCreateClosure(Node* node);
-  Reduction ReduceJSCreateIterResultObject(Node* node);
-  Reduction ReduceJSCreateLiteralArray(Node* node);
-  Reduction ReduceJSCreateLiteralObject(Node* node);
-  Reduction ReduceJSCreateFunctionContext(Node* node);
-  Reduction ReduceJSCreateWithContext(Node* node);
-  Reduction ReduceJSCreateCatchContext(Node* node);
-  Reduction ReduceJSCreateBlockContext(Node* node);
   Reduction ReduceJSCallConstruct(Node* node);
   Reduction ReduceJSCallFunction(Node* node);
   Reduction ReduceJSForInDone(Node* node);
   Reduction ReduceJSForInNext(Node* node);
-  Reduction ReduceJSForInPrepare(Node* node);
   Reduction ReduceJSForInStep(Node* node);
   Reduction ReduceSelect(Node* node);
   Reduction ReduceNumberBinop(Node* node, const Operator* numberOp);
   Reduction ReduceInt32Binop(Node* node, const Operator* intOp);
   Reduction ReduceUI32Shift(Node* node, Signedness left_signedness,
                             const Operator* shift_op);
-  Reduction ReduceNewArray(Node* node, Node* length, int capacity,
-                           Handle<AllocationSite> site);
 
   Node* Word32Shl(Node* const lhs, int32_t const rhs);
-  Node* AllocateArguments(Node* effect, Node* control, Node* frame_state);
-  Node* AllocateRestArguments(Node* effect, Node* control, Node* frame_state,
-                              int start_index);
-  Node* AllocateAliasedArguments(Node* effect, Node* control, Node* frame_state,
-                                 Node* context, Handle<SharedFunctionInfo>,
-                                 bool* has_aliased_arguments);
-  Node* AllocateElements(Node* effect, Node* control,
-                         ElementsKind elements_kind, int capacity,
-                         PretenureFlag pretenure);
 
   Factory* factory() const;
   Graph* graph() const;
@@ -115,10 +92,6 @@
   CompilationDependencies* dependencies() const;
   Flags flags() const { return flags_; }
 
-  // Limits up to which context allocations are inlined.
-  static const int kFunctionContextAllocationLimit = 16;
-  static const int kBlockContextAllocationLimit = 16;
-
   CompilationDependencies* dependencies_;
   Flags flags_;
   JSGraph* jsgraph_;
diff --git a/src/compiler/jump-threading.cc b/src/compiler/jump-threading.cc
index 7b53b5c..5abd346 100644
--- a/src/compiler/jump-threading.cc
+++ b/src/compiler/jump-threading.cc
@@ -53,10 +53,10 @@
   RpoNumber onstack() { return RpoNumber::FromInt(-2); }
 };
 
-
 bool JumpThreading::ComputeForwarding(Zone* local_zone,
                                       ZoneVector<RpoNumber>& result,
-                                      InstructionSequence* code) {
+                                      InstructionSequence* code,
+                                      bool frame_at_start) {
   ZoneStack<RpoNumber> stack(local_zone);
   JumpThreadingState state = {false, result, stack};
   state.Clear(code->InstructionBlockCount());
@@ -91,7 +91,14 @@
         } else if (instr->arch_opcode() == kArchJmp) {
           // try to forward the jump instruction.
           TRACE("  jmp\n");
-          fw = code->InputRpo(instr, 0);
+          // If this block deconstructs the frame, we can't forward it.
+          // TODO(mtrofin): we can still forward if we end up building
+          // the frame at start. So we should move the decision of whether
+          // to build a frame or not into the register allocator, and
+          // trickle it down here and to the code generator.
+          if (frame_at_start || !block->must_deconstruct_frame()) {
+            fw = code->InputRpo(instr, 0);
+          }
           fallthru = false;
         } else {
           // can't skip other instructions.
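
The change above stops forwarding jumps out of blocks that deconstruct the frame (unless the frame is built at start). For reference, a minimal sketch of the basic forwarding idea itself: follow chains of blocks that contain nothing but an unconditional jump so predecessors can target the final destination directly (plain arrays, not InstructionSequence):

#include <cstdio>
#include <vector>

// jump_target[b] is the block that block b unconditionally jumps to,
// or -1 if b contains real work and cannot be skipped.
static std::vector<int> ComputeForwarding(const std::vector<int>& jump_target) {
  std::vector<int> forward(jump_target.size());
  for (size_t b = 0; b < jump_target.size(); ++b) {
    int dest = static_cast<int>(b);
    // Thread through trivial jumps. (Cycles of trivial blocks are not
    // handled in this sketch; the real pass uses an on-stack marker.)
    while (jump_target[dest] != -1 && jump_target[dest] != dest) {
      dest = jump_target[dest];
    }
    forward[b] = dest;
  }
  return forward;
}

int main() {
  // Block 0 jumps to 1, 1 jumps to 2; blocks 2 and 3 do real work.
  std::vector<int> jumps = {1, 2, -1, -1};
  for (int dest : ComputeForwarding(jumps)) std::printf("%d ", dest);
  std::printf("\n");  // prints: 2 2 2 3
}
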
diff --git a/src/compiler/jump-threading.h b/src/compiler/jump-threading.h
index fa74ee9..84520ba 100644
--- a/src/compiler/jump-threading.h
+++ b/src/compiler/jump-threading.h
@@ -18,7 +18,7 @@
   // Compute the forwarding map of basic blocks to their ultimate destination.
   // Returns {true} if there is at least one block that is forwarded.
   static bool ComputeForwarding(Zone* local_zone, ZoneVector<RpoNumber>& result,
-                                InstructionSequence* code);
+                                InstructionSequence* code, bool frame_at_start);
 
   // Rewrite the instructions to forward jumps and branches.
   // May also negate some branches.
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
index 2eef929..d4a3665 100644
--- a/src/compiler/linkage.cc
+++ b/src/compiler/linkage.cc
@@ -63,9 +63,6 @@
     case CallDescriptor::kCallAddress:
       os << "Addr";
       break;
-    case CallDescriptor::kLazyBailout:
-      os << "LazyBail";
-      break;
   }
   return os;
 }
@@ -120,14 +117,7 @@
 
 
 CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
-  if (info->code_stub() != nullptr) {
-    // Use the code stub interface descriptor.
-    CodeStub* stub = info->code_stub();
-    CallInterfaceDescriptor descriptor = stub->GetCallInterfaceDescriptor();
-    return GetStubCallDescriptor(
-        info->isolate(), zone, descriptor, stub->GetStackParameterCount(),
-        CallDescriptor::kNoFlags, Operator::kNoProperties);
-  }
+  DCHECK(!info->IsStub());
   if (info->has_literal()) {
     // If we already have the function literal, use the number of parameters
     // plus the receiver.
@@ -155,13 +145,14 @@
   switch (function) {
     case Runtime::kAllocateInTargetSpace:
     case Runtime::kCreateIterResultObject:
-    case Runtime::kDefineClassMethod:              // TODO(jarin): Is it safe?
+    case Runtime::kDefineDataPropertyInLiteral:
     case Runtime::kDefineGetterPropertyUnchecked:  // TODO(jarin): Is it safe?
     case Runtime::kDefineSetterPropertyUnchecked:  // TODO(jarin): Is it safe?
     case Runtime::kFinalizeClassDefinition:        // TODO(conradw): Is it safe?
     case Runtime::kForInDone:
     case Runtime::kForInStep:
     case Runtime::kGetSuperConstructor:
+    case Runtime::kIsFunction:
     case Runtime::kNewClosure:
     case Runtime::kNewClosure_Tenured:
     case Runtime::kNewFunctionContext:
@@ -174,8 +165,6 @@
     case Runtime::kTraceEnter:
     case Runtime::kTraceExit:
       return 0;
-    case Runtime::kInlineArguments:
-    case Runtime::kInlineArgumentsLength:
     case Runtime::kInlineGetPrototype:
     case Runtime::kInlineRegExpConstructResult:
     case Runtime::kInlineRegExpExec:
@@ -242,6 +231,9 @@
   if (locations.return_count_ > 1) {
     locations.AddReturn(regloc(kReturnRegister1));
   }
+  if (locations.return_count_ > 2) {
+    locations.AddReturn(regloc(kReturnRegister2));
+  }
   for (size_t i = 0; i < return_count; i++) {
     types.AddReturn(MachineType::AnyTagged());
   }
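
The hunk above adds kReturnRegister2 so call descriptors can return up to three values in registers. A toy sketch of that register hand-out (register names are placeholders, not a real target description):

#include <cassert>
#include <cstdio>
#include <vector>

static std::vector<const char*> AssignReturnRegisters(size_t return_count) {
  static const char* kReturnRegisters[] = {"ret0", "ret1", "ret2"};
  assert(return_count <= 3 && "only three return registers are described");
  std::vector<const char*> locations;
  for (size_t i = 0; i < return_count; ++i) {
    locations.push_back(kReturnRegisters[i]);
  }
  return locations;
}

int main() {
  for (const char* reg : AssignReturnRegisters(3)) std::printf("%s ", reg);
  std::printf("\n");  // prints: ret0 ret1 ret2
}
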
@@ -287,31 +279,6 @@
 }
 
 
-CallDescriptor* Linkage::GetLazyBailoutDescriptor(Zone* zone) {
-  const size_t return_count = 0;
-  const size_t parameter_count = 0;
-
-  LocationSignature::Builder locations(zone, return_count, parameter_count);
-  MachineSignature::Builder types(zone, return_count, parameter_count);
-
-  // The target is ignored, but we need to give some values here.
-  MachineType target_type = MachineType::AnyTagged();
-  LinkageLocation target_loc = regloc(kJSFunctionRegister);
-  return new (zone) CallDescriptor(      // --
-      CallDescriptor::kLazyBailout,      // kind
-      target_type,                       // target MachineType
-      target_loc,                        // target location
-      types.Build(),                     // machine_sig
-      locations.Build(),                 // location_sig
-      0,                                 // stack_parameter_count
-      Operator::kNoThrow,                // properties
-      kNoCalleeSaved,                    // callee-saved
-      kNoCalleeSaved,                    // callee-saved fp
-      CallDescriptor::kNeedsFrameState,  // flags
-      "lazy-bailout");
-}
-
-
 CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
                                              int js_parameter_count,
                                              CallDescriptor::Flags flags) {
@@ -350,10 +317,10 @@
 
   // The target for JS function calls is the JSFunction object.
   MachineType target_type = MachineType::AnyTagged();
-  // TODO(titzer): When entering into an OSR function from unoptimized code,
-  // the JSFunction is not in a register, but it is on the stack in an
-  // unaddressable spill slot. We hack this in the OSR prologue. Fix.
-  LinkageLocation target_loc = regloc(kJSFunctionRegister);
+  // When entering into an OSR function from unoptimized code the JSFunction
+  // is not in a register, but it is on the stack in the marker spill slot.
+  LinkageLocation target_loc = is_osr ? LinkageLocation::ForSavedCallerMarker()
+                                      : regloc(kJSFunctionRegister);
   return new (zone) CallDescriptor(     // --
       CallDescriptor::kCallJSFunction,  // kind
       target_type,                      // target MachineType
@@ -369,60 +336,6 @@
       "js-call");
 }
 
-
-CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(Zone* zone) {
-  MachineSignature::Builder types(zone, 0, 6);
-  LocationSignature::Builder locations(zone, 0, 6);
-
-  // Add registers for fixed parameters passed via interpreter dispatch.
-  STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
-  types.AddParam(MachineType::AnyTagged());
-  locations.AddParam(regloc(kInterpreterAccumulatorRegister));
-
-  STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
-  types.AddParam(MachineType::Pointer());
-  locations.AddParam(regloc(kInterpreterRegisterFileRegister));
-
-  STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
-  types.AddParam(MachineType::IntPtr());
-  locations.AddParam(regloc(kInterpreterBytecodeOffsetRegister));
-
-  STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
-  types.AddParam(MachineType::AnyTagged());
-  locations.AddParam(regloc(kInterpreterBytecodeArrayRegister));
-
-  STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
-  types.AddParam(MachineType::Pointer());
-#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87)
-  // TODO(rmcilroy): Make the context param the one spilled to the stack once
-  // Turbofan supports modified stack arguments in tail calls.
-  locations.AddParam(
-      LinkageLocation::ForCallerFrameSlot(kInterpreterDispatchTableSpillSlot));
-#else
-  locations.AddParam(regloc(kInterpreterDispatchTableRegister));
-#endif
-
-  STATIC_ASSERT(5 == Linkage::kInterpreterContextParameter);
-  types.AddParam(MachineType::AnyTagged());
-  locations.AddParam(regloc(kContextRegister));
-
-  LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
-  return new (zone) CallDescriptor(         // --
-      CallDescriptor::kCallCodeObject,      // kind
-      MachineType::None(),                  // target MachineType
-      target_loc,                           // target location
-      types.Build(),                        // machine_sig
-      locations.Build(),                    // location_sig
-      0,                                    // stack_parameter_count
-      Operator::kNoProperties,              // properties
-      kNoCalleeSaved,                       // callee-saved registers
-      kNoCalleeSaved,                       // callee-saved fp regs
-      CallDescriptor::kSupportsTailCalls |  // flags
-          CallDescriptor::kCanUseRoots,     // flags
-      "interpreter-dispatch");
-}
-
-
 // TODO(all): Add support for return representations/locations to
 // CallInterfaceDescriptor.
 // TODO(turbofan): cache call descriptors for code stub calls.
@@ -448,6 +361,9 @@
   if (locations.return_count_ > 1) {
     locations.AddReturn(regloc(kReturnRegister1));
   }
+  if (locations.return_count_ > 2) {
+    locations.AddReturn(regloc(kReturnRegister2));
+  }
   for (size_t i = 0; i < return_count; i++) {
     types.AddReturn(return_type);
   }
diff --git a/src/compiler/linkage.h b/src/compiler/linkage.h
index 252f044..3012f56 100644
--- a/src/compiler/linkage.h
+++ b/src/compiler/linkage.h
@@ -76,6 +76,12 @@
                               kPointerSize);
   }
 
+  static LinkageLocation ForSavedCallerMarker() {
+    return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
+                               StandardFrameConstants::kMarkerOffset) /
+                              kPointerSize);
+  }
+
   static LinkageLocation ConvertToTailCallerLocation(
       LinkageLocation caller_location, int stack_param_delta) {
     if (!caller_location.IsRegister()) {
@@ -140,8 +146,7 @@
   enum Kind {
     kCallCodeObject,  // target is a Code object
     kCallJSFunction,  // target is a JSFunction object
-    kCallAddress,     // target is a machine pointer
-    kLazyBailout      // the call is no-op, only used for lazy bailout
+    kCallAddress      // target is a machine pointer
   };
 
   enum Flag {
@@ -153,9 +158,12 @@
     kHasLocalCatchHandler = 1u << 4,
     kSupportsTailCalls = 1u << 5,
     kCanUseRoots = 1u << 6,
-    // Indicates that the native stack should be used for a code object. This
-    // information is important for native calls on arm64.
+    // (arm64 only) native stack should be used for arguments.
     kUseNativeStack = 1u << 7,
+    // (arm64 only) call instruction has to restore JSSP.
+    kRestoreJSSP = 1u << 8,
+    // Causes the code generator to initialize the root register.
+    kInitializeRootRegister = 1u << 9,
     kPatchableCallSiteWithNop = kPatchableCallSite | kNeedsNopAfterCall
   };
   typedef base::Flags<Flag> Flags;
@@ -222,6 +230,9 @@
   bool NeedsFrameState() const { return flags() & kNeedsFrameState; }
   bool SupportsTailCalls() const { return flags() & kSupportsTailCalls; }
   bool UseNativeStack() const { return flags() & kUseNativeStack; }
+  bool InitializeRootRegister() const {
+    return flags() & kInitializeRootRegister;
+  }
 
   LinkageLocation GetReturnLocation(size_t index) const {
     return location_sig_->GetReturn(index);
@@ -313,8 +324,6 @@
       Zone* zone, Runtime::FunctionId function, int parameter_count,
       Operator::Properties properties, CallDescriptor::Flags flags);
 
-  static CallDescriptor* GetLazyBailoutDescriptor(Zone* zone);
-
   static CallDescriptor* GetStubCallDescriptor(
       Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
       int stack_parameter_count, CallDescriptor::Flags flags,
@@ -326,13 +335,9 @@
   // for the host platform. This simplified calling convention only supports
   // integers and pointers of one word size each, i.e. no floating point,
   // structs, pointers to members, etc.
-  static CallDescriptor* GetSimplifiedCDescriptor(Zone* zone,
-                                                  const MachineSignature* sig);
-
-  // Creates a call descriptor for interpreter handler code stubs. These are not
-  // intended to be called directly but are instead dispatched to by the
-  // interpreter.
-  static CallDescriptor* GetInterpreterDispatchDescriptor(Zone* zone);
+  static CallDescriptor* GetSimplifiedCDescriptor(
+      Zone* zone, const MachineSignature* sig,
+      bool set_initialize_root_flag = false);
 
   // Get the location of an (incoming) parameter to this function.
   LinkageLocation GetParameterLocation(int index) const {
@@ -383,15 +388,6 @@
   // A special {OsrValue} index to indicate the context spill slot.
   static const int kOsrContextSpillSlotIndex = -1;
 
-  // Special parameter indices used to pass fixed register data through
-  // interpreter dispatches.
-  static const int kInterpreterAccumulatorParameter = 0;
-  static const int kInterpreterRegisterFileParameter = 1;
-  static const int kInterpreterBytecodeOffsetParameter = 2;
-  static const int kInterpreterBytecodeArrayParameter = 3;
-  static const int kInterpreterDispatchTableParameter = 4;
-  static const int kInterpreterContextParameter = 5;
-
  private:
   CallDescriptor* const incoming_;
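
ForSavedCallerMarker() above addresses the marker slot of the standard frame as a callee frame slot, with the index derived from the distance between the caller-PC slot and the marker slot. A small worked example of the same arithmetic; the concrete StandardFrameConstants values are not shown in this patch, so the offsets below are illustrative assumptions only:

    // Assumed 64-bit frame layout; not the real StandardFrameConstants values.
    constexpr int kPointerSize = 8;
    constexpr int kCallerPCOffset = 1 * kPointerSize;  // return address, above fp
    constexpr int kMarkerOffset = -2 * kPointerSize;   // frame marker, below fp

    // Same computation as ForSavedCallerMarker(): slot index measured in
    // pointers from the caller-PC slot down to the marker slot.
    constexpr int kSavedCallerMarkerSlot =
        (kCallerPCOffset - kMarkerOffset) / kPointerSize;  // == 3 here
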
 
diff --git a/src/compiler/live-range-separator.cc b/src/compiler/live-range-separator.cc
index 980c944..e3cd0a3 100644
--- a/src/compiler/live-range-separator.cc
+++ b/src/compiler/live-range-separator.cc
@@ -119,8 +119,10 @@
 
 
 void LiveRangeMerger::MarkRangesSpilledInDeferredBlocks() {
+  const InstructionSequence *code = data()->code();
   for (TopLevelLiveRange *top : data()->live_ranges()) {
-    if (top == nullptr || top->IsEmpty() || top->splinter() == nullptr) {
+    if (top == nullptr || top->IsEmpty() || top->splinter() == nullptr ||
+        top->HasSpillOperand() || !top->splinter()->HasSpillRange()) {
       continue;
     }
 
@@ -131,7 +133,10 @@
         break;
       }
     }
-    if (child == nullptr) top->MarkSpilledInDeferredBlock();
+    if (child == nullptr) {
+      top->TreatAsSpilledInDeferredBlock(data()->allocation_zone(),
+                                         code->InstructionBlockCount());
+    }
   }
 }
 
diff --git a/src/compiler/liveness-analyzer.h b/src/compiler/liveness-analyzer.h
index 1e2f85b..9b09724 100644
--- a/src/compiler/liveness-analyzer.h
+++ b/src/compiler/liveness-analyzer.h
@@ -85,6 +85,10 @@
   void Bind(int var) { entries_.push_back(Entry(Entry::kBind, var)); }
   void Checkpoint(Node* node) { entries_.push_back(Entry(node)); }
   void AddPredecessor(LivenessAnalyzerBlock* b) { predecessors_.push_back(b); }
+  LivenessAnalyzerBlock* GetPredecessor() {
+    DCHECK(predecessors_.size() == 1);
+    return predecessors_[0];
+  }
 
  private:
   class Entry {
diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc
index 511a10d..3b6f21b 100644
--- a/src/compiler/machine-operator.cc
+++ b/src/compiler/machine-operator.cc
@@ -91,6 +91,10 @@
   return OpParameter<CheckedStoreRepresentation>(op);
 }
 
+MachineRepresentation StackSlotRepresentationOf(Operator const* op) {
+  DCHECK_EQ(IrOpcode::kStackSlot, op->opcode());
+  return OpParameter<MachineRepresentation>(op);
+}
 
 #define PURE_OP_LIST(V)                                                       \
   V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
@@ -144,13 +148,17 @@
   V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1)                 \
   V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1)                   \
   V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1)                  \
+  V(TruncateFloat32ToInt32, Operator::kNoProperties, 1, 0, 1)                 \
+  V(TruncateFloat32ToUint32, Operator::kNoProperties, 1, 0, 1)                \
   V(TryTruncateFloat32ToInt64, Operator::kNoProperties, 1, 0, 2)              \
   V(TryTruncateFloat64ToInt64, Operator::kNoProperties, 1, 0, 2)              \
   V(TryTruncateFloat32ToUint64, Operator::kNoProperties, 1, 0, 2)             \
   V(TryTruncateFloat64ToUint64, Operator::kNoProperties, 1, 0, 2)             \
   V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1)                   \
+  V(RoundInt32ToFloat32, Operator::kNoProperties, 1, 0, 1)                    \
   V(RoundInt64ToFloat32, Operator::kNoProperties, 1, 0, 1)                    \
   V(RoundInt64ToFloat64, Operator::kNoProperties, 1, 0, 1)                    \
+  V(RoundUint32ToFloat32, Operator::kNoProperties, 1, 0, 1)                   \
   V(RoundUint64ToFloat32, Operator::kNoProperties, 1, 0, 1)                   \
   V(RoundUint64ToFloat64, Operator::kNoProperties, 1, 0, 1)                   \
   V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 0, 1)                     \
@@ -186,11 +194,14 @@
   V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1)                 \
   V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1)                \
   V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1)                       \
-  V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1)
+  V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1)                       \
+  V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1)
 
 #define PURE_OPTIONAL_OP_LIST(V)                            \
   V(Word32Ctz, Operator::kNoProperties, 1, 0, 1)            \
   V(Word64Ctz, Operator::kNoProperties, 1, 0, 1)            \
+  V(Word32ReverseBits, Operator::kNoProperties, 1, 0, 1)    \
+  V(Word64ReverseBits, Operator::kNoProperties, 1, 0, 1)    \
   V(Word32Popcnt, Operator::kNoProperties, 1, 0, 1)         \
   V(Word64Popcnt, Operator::kNoProperties, 1, 0, 1)         \
   V(Float32Max, Operator::kNoProperties, 2, 0, 1)           \
@@ -207,10 +218,10 @@
   V(Float32RoundTiesEven, Operator::kNoProperties, 1, 0, 1) \
   V(Float64RoundTiesEven, Operator::kNoProperties, 1, 0, 1)
 
-
 #define MACHINE_TYPE_LIST(V) \
   V(Float32)                 \
   V(Float64)                 \
+  V(Simd128)                 \
   V(Int8)                    \
   V(Uint8)                   \
   V(Int16)                   \
@@ -222,17 +233,16 @@
   V(Pointer)                 \
   V(AnyTagged)
 
-
 #define MACHINE_REPRESENTATION_LIST(V) \
   V(kFloat32)                          \
   V(kFloat64)                          \
+  V(kSimd128)                          \
   V(kWord8)                            \
   V(kWord16)                           \
   V(kWord32)                           \
   V(kWord64)                           \
   V(kTagged)
 
-
 struct MachineOperatorGlobalCache {
 #define PURE(Name, properties, value_input_count, control_input_count,         \
              output_count)                                                     \
@@ -279,6 +289,18 @@
   MACHINE_TYPE_LIST(LOAD)
 #undef LOAD
 
+#define STACKSLOT(Type)                                                       \
+  struct StackSlot##Type##Operator final                                      \
+      : public Operator1<MachineRepresentation> {                             \
+    StackSlot##Type##Operator()                                               \
+        : Operator1<MachineRepresentation>(                                   \
+              IrOpcode::kStackSlot, Operator::kNoThrow, "StackSlot", 0, 0, 0, \
+              1, 0, 0, MachineType::Type().representation()) {}               \
+  };                                                                          \
+  StackSlot##Type##Operator kStackSlot##Type;
+  MACHINE_TYPE_LIST(STACKSLOT)
+#undef STACKSLOT
+
 #define STORE(Type)                                                            \
   struct Store##Type##Operator : public Operator1<StoreRepresentation> {       \
     explicit Store##Type##Operator(WriteBarrierKind write_barrier_kind)        \
@@ -379,6 +401,16 @@
   return nullptr;
 }
 
+const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep) {
+#define STACKSLOT(Type)                              \
+  if (rep == MachineType::Type().representation()) { \
+    return &cache_.kStackSlot##Type;                 \
+  }
+  MACHINE_TYPE_LIST(STACKSLOT)
+#undef STACKSLOT
+  UNREACHABLE();
+  return nullptr;
+}
 
 const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
   switch (store_rep.representation()) {
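
The STACKSLOT macro above instantiates one statically cached Operator1<MachineRepresentation> per machine type, and StackSlot(rep) returns the cached instance whose representation matches. A sketch of the same cache-per-parameter pattern with simplified, hypothetical types (the real code generates these instances via MACHINE_TYPE_LIST):

    // Hypothetical simplification of the cached-operator pattern.
    enum class Rep { kWord32, kWord64, kFloat64 };

    struct Op {
      const char* mnemonic;
      Rep rep;
    };

    // One shared, immutable operator per representation.
    const Op kStackSlotWord32{"StackSlot", Rep::kWord32};
    const Op kStackSlotWord64{"StackSlot", Rep::kWord64};
    const Op kStackSlotFloat64{"StackSlot", Rep::kFloat64};

    const Op* StackSlot(Rep rep) {
      switch (rep) {
        case Rep::kWord32:  return &kStackSlotWord32;
        case Rep::kWord64:  return &kStackSlotWord64;
        case Rep::kFloat64: return &kStackSlotFloat64;
      }
      return nullptr;  // unreachable for valid Rep values
    }
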
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
index 00fefe3..c5a80aa 100644
--- a/src/compiler/machine-operator.h
+++ b/src/compiler/machine-operator.h
@@ -102,6 +102,7 @@
 
 CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const*);
 
+MachineRepresentation StackSlotRepresentationOf(Operator const* op);
 
 // Interface for building machine-level operators. These operators are
 // machine-level but machine-independent and thus define a language suitable
@@ -134,12 +135,15 @@
     kWord64Ctz = 1u << 17,
     kWord32Popcnt = 1u << 18,
     kWord64Popcnt = 1u << 19,
+    kWord32ReverseBits = 1u << 20,
+    kWord64ReverseBits = 1u << 21,
     kAllOptionalOps = kFloat32Max | kFloat32Min | kFloat64Max | kFloat64Min |
                       kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
                       kFloat64RoundUp | kFloat32RoundTruncate |
                       kFloat64RoundTruncate | kFloat64RoundTiesAway |
                       kFloat32RoundTiesEven | kFloat64RoundTiesEven |
-                      kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt
+                      kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt |
+                      kWord32ReverseBits | kWord64ReverseBits
   };
   typedef base::Flags<Flag, unsigned> Flags;
 
@@ -160,6 +164,8 @@
   const OptionalOperator Word32Ctz();
   const OptionalOperator Word32Popcnt();
   const OptionalOperator Word64Popcnt();
+  const OptionalOperator Word32ReverseBits();
+  const OptionalOperator Word64ReverseBits();
   bool Word32ShiftIsSafe() const { return flags_ & kWord32ShiftIsSafe; }
 
   const Operator* Word64And();
@@ -213,6 +219,8 @@
   const Operator* ChangeFloat32ToFloat64();
   const Operator* ChangeFloat64ToInt32();   // narrowing
   const Operator* ChangeFloat64ToUint32();  // narrowing
+  const Operator* TruncateFloat32ToInt32();
+  const Operator* TruncateFloat32ToUint32();
   const Operator* TryTruncateFloat32ToInt64();
   const Operator* TryTruncateFloat64ToInt64();
   const Operator* TryTruncateFloat32ToUint64();
@@ -227,8 +235,10 @@
   const Operator* TruncateFloat64ToFloat32();
   const Operator* TruncateFloat64ToInt32(TruncationMode);
   const Operator* TruncateInt64ToInt32();
+  const Operator* RoundInt32ToFloat32();
   const Operator* RoundInt64ToFloat32();
   const Operator* RoundInt64ToFloat64();
+  const Operator* RoundUint32ToFloat32();
   const Operator* RoundUint64ToFloat32();
   const Operator* RoundUint64ToFloat64();
 
@@ -303,9 +313,12 @@
   // store [base + index], value
   const Operator* Store(StoreRepresentation rep);
 
+  const Operator* StackSlot(MachineRepresentation rep);
+
   // Access to the machine stack.
   const Operator* LoadStackPointer();
   const Operator* LoadFramePointer();
+  const Operator* LoadParentFramePointer();
 
   // checked-load heap, index, length
   const Operator* CheckedLoad(CheckedLoadRepresentation);
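
The new optional operators are only legal to emit when a backend advertises them through the Flag bitmask above; the MIPS selectors later in this patch do exactly that in SupportedMachineOperatorFlags(). A tiny sketch of the flag test, reusing a bit value visible in this hunk (the helper name is hypothetical, not part of the real MachineOperatorBuilder API):

    #include <cstdint>

    using Flags = uint32_t;
    constexpr Flags kWord32Popcnt = 1u << 18;  // value from the enum above

    // Hypothetical helper: a selector would emit Word32Popcnt only when the
    // backend sets the corresponding flag.
    bool SupportsWord32Popcnt(Flags supported) {
      return (supported & kWord32Popcnt) != 0;
    }
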
diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips/code-generator-mips.cc
index 75e4b9e..cdd7e34 100644
--- a/src/compiler/mips/code-generator-mips.cc
+++ b/src/compiler/mips/code-generator-mips.cc
@@ -227,19 +227,25 @@
     if (mode_ > RecordWriteMode::kValueIsPointer) {
       __ JumpIfSmi(value_, exit());
     }
-    if (mode_ > RecordWriteMode::kValueIsMap) {
-      __ CheckPageFlag(value_, scratch0_,
-                       MemoryChunk::kPointersToHereAreInterestingMask, eq,
-                       exit());
-    }
+    __ CheckPageFlag(value_, scratch0_,
+                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
+                     exit());
+    RememberedSetAction const remembered_set_action =
+        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+                                             : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
-    // TODO(turbofan): Once we get frame elision working, we need to save
-    // and restore lr properly here if the frame was elided.
+    if (!frame()->needs_frame()) {
+      // We need to save and restore ra if the frame was elided.
+      __ Push(ra);
+    }
     RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
-                         EMIT_REMEMBERED_SET, save_fp_mode);
+                         remembered_set_action, save_fp_mode);
     __ Addu(scratch1_, object_, index_);
     __ CallStub(&stub);
+    if (!frame()->needs_frame()) {
+      __ Pop(ra);
+    }
   }
 
  private:
@@ -546,11 +552,6 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
-    case kArchLazyBailout: {
-      EnsureSpaceForLazyDeopt();
-      RecordCallPosition(instr);
-      break;
-    }
     case kArchPrepareCallCFunction: {
       int const num_parameters = MiscField::decode(instr->opcode());
       __ PrepareCallCFunction(num_parameters, kScratchReg);
@@ -604,6 +605,13 @@
     case kArchFramePointer:
       __ mov(i.OutputRegister(), fp);
       break;
+    case kArchParentFramePointer:
+      if (frame_access_state()->frame()->needs_frame()) {
+        __ lw(i.OutputRegister(), MemOperand(fp, 0));
+      } else {
+        __ mov(i.OutputRegister(), fp);
+      }
+      break;
     case kArchTruncateDoubleToI:
       __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
       break;
@@ -625,6 +633,13 @@
       __ bind(ool->exit());
       break;
     }
+    case kArchStackSlot: {
+      FrameOffset offset =
+          frame_access_state()->GetFrameOffset(i.InputInt32(0));
+      __ Addu(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
+              Operand(offset.offset()));
+      break;
+    }
     case kMipsAdd:
       __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
       break;
@@ -688,6 +703,70 @@
     case kMipsClz:
       __ Clz(i.OutputRegister(), i.InputRegister(0));
       break;
+    case kMipsCtz: {
+      Register reg1 = kScratchReg;
+      Register reg2 = kScratchReg2;
+      Label skip_for_zero;
+      Label end;
+      // Branch if the operand is zero
+      __ Branch(&skip_for_zero, eq, i.InputRegister(0), Operand(zero_reg));
+      // Find the number of bits before the last bit set to 1.
+      __ Subu(reg2, zero_reg, i.InputRegister(0));
+      __ And(reg2, reg2, i.InputRegister(0));
+      __ clz(reg2, reg2);
+      // Get the number of bits after the last bit set to 1.
+      __ li(reg1, 0x1F);
+      __ Subu(i.OutputRegister(), reg1, reg2);
+      __ Branch(&end);
+      __ bind(&skip_for_zero);
+      // If the operand is zero, return word length as the result.
+      __ li(i.OutputRegister(), 0x20);
+      __ bind(&end);
+    } break;
+    case kMipsPopcnt: {
+      Register reg1 = kScratchReg;
+      Register reg2 = kScratchReg2;
+      uint32_t m1 = 0x55555555;
+      uint32_t m2 = 0x33333333;
+      uint32_t m4 = 0x0f0f0f0f;
+      uint32_t m8 = 0x00ff00ff;
+      uint32_t m16 = 0x0000ffff;
+
+      // Put count of ones in every 2 bits into those 2 bits.
+      __ li(at, m1);
+      __ srl(reg1, i.InputRegister(0), 1);
+      __ And(reg2, i.InputRegister(0), at);
+      __ And(reg1, reg1, at);
+      __ addu(reg1, reg1, reg2);
+
+      // Put count of ones in every 4 bits into those 4 bits.
+      __ li(at, m2);
+      __ srl(reg2, reg1, 2);
+      __ And(reg2, reg2, at);
+      __ And(reg1, reg1, at);
+      __ addu(reg1, reg1, reg2);
+
+      // Put count of ones in every 8 bits into those 8 bits.
+      __ li(at, m4);
+      __ srl(reg2, reg1, 4);
+      __ And(reg2, reg2, at);
+      __ And(reg1, reg1, at);
+      __ addu(reg1, reg1, reg2);
+
+      // Put count of ones in every 16 bits into those 16 bits.
+      __ li(at, m8);
+      __ srl(reg2, reg1, 8);
+      __ And(reg2, reg2, at);
+      __ And(reg1, reg1, at);
+      __ addu(reg1, reg1, reg2);
+
+      // Calculate total number of ones.
+      __ li(at, m16);
+      __ srl(reg2, reg1, 16);
+      __ And(reg2, reg2, at);
+      __ And(reg1, reg1, at);
+      __ addu(i.OutputRegister(), reg1, reg2);
+    } break;
     case kMipsShl:
       if (instr->InputAt(1)->IsRegister()) {
         __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
@@ -950,6 +1029,12 @@
       __ cvt_s_w(i.OutputDoubleRegister(), scratch);
       break;
     }
+    case kMipsCvtSUw: {
+      FPURegister scratch = kScratchDoubleReg;
+      __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
+      __ cvt_s_d(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+      break;
+    }
     case kMipsCvtDUw: {
       FPURegister scratch = kScratchDoubleReg;
       __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
@@ -1010,6 +1095,12 @@
       __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
       break;
     }
+    case kMipsTruncUwS: {
+      FPURegister scratch = kScratchDoubleReg;
+      // TODO(plind): Fix wrong param order of Trunc_uw_s() macro-asm function.
+      __ Trunc_uw_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
+      break;
+    }
     case kMipsFloat64ExtractLowWord32:
       __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
       break;
@@ -1416,19 +1507,10 @@
   MipsOperandConverter i(this, instr);
   Register input = i.InputRegister(0);
   size_t const case_count = instr->InputCount() - 2;
-  Label here;
   __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
-  __ BlockTrampolinePoolFor(case_count + 6);
-  __ bal(&here);
-  __ sll(at, input, 2);  // Branch delay slot.
-  __ bind(&here);
-  __ addu(at, at, ra);
-  __ lw(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
-  __ jr(at);
-  __ nop();  // Branch delay slot nop.
-  for (size_t index = 0; index < case_count; ++index) {
-    __ dd(GetLabel(i.InputRpo(index + 2)));
-  }
+  __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) {
+    return GetLabel(i.InputRpo(index + 2));
+  });
 }
 
 
@@ -1465,8 +1547,6 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    // TODO(titzer): cannot address target function == local #-1
-    __ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
     stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   }
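
The kMipsCtz sequence above computes trailing zeros as 31 - clz(x & -x), returning 32 for a zero input, and kMipsPopcnt is the classic parallel bit count over masks of width 2, 4, 8, and 16 bits. A portable reference of both, intended only as a cross-check against the emitted sequences, not as the generated code:

    #include <cstdint>

    uint32_t CountTrailingZeros32(uint32_t x) {
      if (x == 0) return 32;                // matches the skip_for_zero path
      uint32_t lowest = x & (0u - x);       // isolate lowest set bit (Subu + And)
      uint32_t clz = 0;                     // portable stand-in for the clz insn
      for (uint32_t bit = 0x80000000u; (lowest & bit) == 0; bit >>= 1) ++clz;
      return 31 - clz;                      // li 0x1F; Subu
    }

    uint32_t PopCount32(uint32_t x) {
      x = (x & 0x55555555u) + ((x >> 1) & 0x55555555u);   // 2-bit sums
      x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);   // 4-bit sums
      x = (x & 0x0f0f0f0fu) + ((x >> 4) & 0x0f0f0f0fu);   // 8-bit sums
      x = (x & 0x00ff00ffu) + ((x >> 8) & 0x00ff00ffu);   // 16-bit sums
      x = (x & 0x0000ffffu) + ((x >> 16) & 0x0000ffffu);  // final 32-bit sum
      return x;
    }
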
 
diff --git a/src/compiler/mips/instruction-codes-mips.h b/src/compiler/mips/instruction-codes-mips.h
index c938177..64aecd0 100644
--- a/src/compiler/mips/instruction-codes-mips.h
+++ b/src/compiler/mips/instruction-codes-mips.h
@@ -28,6 +28,8 @@
   V(MipsNor)                       \
   V(MipsXor)                       \
   V(MipsClz)                       \
+  V(MipsCtz)                       \
+  V(MipsPopcnt)                    \
   V(MipsShl)                       \
   V(MipsShr)                       \
   V(MipsSar)                       \
@@ -76,9 +78,11 @@
   V(MipsFloorWS)                   \
   V(MipsCeilWS)                    \
   V(MipsTruncUwD)                  \
+  V(MipsTruncUwS)                  \
   V(MipsCvtDW)                     \
   V(MipsCvtDUw)                    \
   V(MipsCvtSW)                     \
+  V(MipsCvtSUw)                    \
   V(MipsLb)                        \
   V(MipsLbu)                       \
   V(MipsSb)                        \
@@ -103,7 +107,6 @@
   V(MipsStoreToStackSlot)          \
   V(MipsStackClaim)
 
-
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
 // are encoded into the InstructionCode of the instruction and tell the
diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc
index 61cea76..df972f7 100644
--- a/src/compiler/mips/instruction-selector-mips.cc
+++ b/src/compiler/mips/instruction-selector-mips.cc
@@ -151,7 +151,8 @@
     case MachineRepresentation::kWord32:
       opcode = kMipsLw;
       break;
-    case MachineRepresentation::kWord64:  // Fall through.
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -231,7 +232,8 @@
       case MachineRepresentation::kWord32:
         opcode = kMipsSw;
         break;
-      case MachineRepresentation::kWord64:  // Fall through.
+      case MachineRepresentation::kWord64:   // Fall through.
+      case MachineRepresentation::kSimd128:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -401,10 +403,19 @@
 }
 
 
-void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
 
 
-void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32Ctz(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsCtz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsPopcnt, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
 
 
 void InstructionSelector::VisitInt32Add(Node* node) {
@@ -503,6 +514,16 @@
 }
 
 
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+  VisitRR(this, kMipsCvtSW, node);
+}
+
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+  VisitRR(this, kMipsCvtSUw, node);
+}
+
+
 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
   VisitRR(this, kMipsCvtDW, node);
 }
@@ -513,6 +534,16 @@
 }
 
 
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+  VisitRR(this, kMipsTruncWS, node);
+}
+
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+  VisitRR(this, kMipsTruncUwS, node);
+}
+
+
 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
   MipsOperandGenerator g(this);
   Node* value = node->InputAt(0);
@@ -821,9 +852,11 @@
     // Poke any stack arguments.
     int slot = kCArgSlotCount;
     for (PushParameter input : (*arguments)) {
-      Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
-           g.TempImmediate(slot << kPointerSizeLog2));
-      ++slot;
+      if (input.node()) {
+        Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+             g.TempImmediate(slot << kPointerSizeLog2));
+        ++slot;
+      }
     }
   } else {
     // Possibly align stack here for functions.
@@ -869,9 +902,10 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedLoadFloat64;
       break;
-    case MachineRepresentation::kBit:     // Fall through.
-    case MachineRepresentation::kTagged:  // Fall through.
-    case MachineRepresentation::kWord64:  // Fall through.
+    case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kTagged:   // Fall through.
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -1318,7 +1352,9 @@
              MachineOperatorBuilder::kFloat64RoundTruncate |
              MachineOperatorBuilder::kFloat64RoundTiesEven;
   }
-  return flags | MachineOperatorBuilder::kInt32DivIsSafe |
+  return flags | MachineOperatorBuilder::kWord32Ctz |
+         MachineOperatorBuilder::kWord32Popcnt |
+         MachineOperatorBuilder::kInt32DivIsSafe |
          MachineOperatorBuilder::kUint32DivIsSafe |
          MachineOperatorBuilder::kWord32ShiftIsSafe |
          MachineOperatorBuilder::kFloat64Min |
diff --git a/src/compiler/mips64/code-generator-mips64.cc b/src/compiler/mips64/code-generator-mips64.cc
index 1b81aa5..373a1a6 100644
--- a/src/compiler/mips64/code-generator-mips64.cc
+++ b/src/compiler/mips64/code-generator-mips64.cc
@@ -227,19 +227,25 @@
     if (mode_ > RecordWriteMode::kValueIsPointer) {
       __ JumpIfSmi(value_, exit());
     }
-    if (mode_ > RecordWriteMode::kValueIsMap) {
-      __ CheckPageFlag(value_, scratch0_,
-                       MemoryChunk::kPointersToHereAreInterestingMask, eq,
-                       exit());
-    }
+    __ CheckPageFlag(value_, scratch0_,
+                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
+                     exit());
+    RememberedSetAction const remembered_set_action =
+        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+                                             : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
-    // TODO(turbofan): Once we get frame elision working, we need to save
-    // and restore lr properly here if the frame was elided.
+    if (!frame()->needs_frame()) {
+      // We need to save and restore ra if the frame was elided.
+      __ Push(ra);
+    }
     RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
-                         EMIT_REMEMBERED_SET, save_fp_mode);
+                         remembered_set_action, save_fp_mode);
     __ Daddu(scratch1_, object_, index_);
     __ CallStub(&stub);
+    if (!frame()->needs_frame()) {
+      __ Pop(ra);
+    }
   }
 
  private:
@@ -556,11 +562,6 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
-    case kArchLazyBailout: {
-      EnsureSpaceForLazyDeopt();
-      RecordCallPosition(instr);
-      break;
-    }
     case kArchPrepareCallCFunction: {
       int const num_parameters = MiscField::decode(instr->opcode());
       __ PrepareCallCFunction(num_parameters, kScratchReg);
@@ -614,6 +615,13 @@
     case kArchFramePointer:
       __ mov(i.OutputRegister(), fp);
       break;
+    case kArchParentFramePointer:
+      if (frame_access_state()->frame()->needs_frame()) {
+        __ ld(i.OutputRegister(), MemOperand(fp, 0));
+      } else {
+        __ mov(i.OutputRegister(), fp);
+      }
+      break;
     case kArchTruncateDoubleToI:
       __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
       break;
@@ -635,6 +643,13 @@
       __ bind(ool->exit());
       break;
     }
+    case kArchStackSlot: {
+      FrameOffset offset =
+          frame_access_state()->GetFrameOffset(i.InputInt32(0));
+      __ Daddu(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
+               Operand(offset.offset()));
+      break;
+    }
     case kMips64Add:
       __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
       break;
@@ -735,6 +750,142 @@
     case kMips64Dclz:
       __ dclz(i.OutputRegister(), i.InputRegister(0));
       break;
+    case kMips64Ctz: {
+      Register reg1 = kScratchReg;
+      Register reg2 = kScratchReg2;
+      Label skip_for_zero;
+      Label end;
+      // Branch if the operand is zero
+      __ Branch(&skip_for_zero, eq, i.InputRegister(0), Operand(zero_reg));
+      // Find the number of bits before the last bit set to 1.
+      __ Subu(reg2, zero_reg, i.InputRegister(0));
+      __ And(reg2, reg2, i.InputRegister(0));
+      __ clz(reg2, reg2);
+      // Get the number of bits after the last bit set to 1.
+      __ li(reg1, 0x1F);
+      __ Subu(i.OutputRegister(), reg1, reg2);
+      __ Branch(&end);
+      __ bind(&skip_for_zero);
+      // If the operand is zero, return word length as the result.
+      __ li(i.OutputRegister(), 0x20);
+      __ bind(&end);
+    } break;
+    case kMips64Dctz: {
+      Register reg1 = kScratchReg;
+      Register reg2 = kScratchReg2;
+      Label skip_for_zero;
+      Label end;
+      // Branch if the operand is zero
+      __ Branch(&skip_for_zero, eq, i.InputRegister(0), Operand(zero_reg));
+      // Find the number of bits before the last bit set to 1.
+      __ Dsubu(reg2, zero_reg, i.InputRegister(0));
+      __ And(reg2, reg2, i.InputRegister(0));
+      __ dclz(reg2, reg2);
+      // Get the number of bits after the last bit set to 1.
+      __ li(reg1, 0x3F);
+      __ Subu(i.OutputRegister(), reg1, reg2);
+      __ Branch(&end);
+      __ bind(&skip_for_zero);
+      // If the operand is zero, return word length as the result.
+      __ li(i.OutputRegister(), 0x40);
+      __ bind(&end);
+    } break;
+    case kMips64Popcnt: {
+      Register reg1 = kScratchReg;
+      Register reg2 = kScratchReg2;
+      uint32_t m1 = 0x55555555;
+      uint32_t m2 = 0x33333333;
+      uint32_t m4 = 0x0f0f0f0f;
+      uint32_t m8 = 0x00ff00ff;
+      uint32_t m16 = 0x0000ffff;
+
+      // Put count of ones in every 2 bits into those 2 bits.
+      __ li(at, m1);
+      __ dsrl(reg1, i.InputRegister(0), 1);
+      __ And(reg2, i.InputRegister(0), at);
+      __ And(reg1, reg1, at);
+      __ Daddu(reg1, reg1, reg2);
+
+      // Put count of ones in every 4 bits into those 4 bits.
+      __ li(at, m2);
+      __ dsrl(reg2, reg1, 2);
+      __ And(reg2, reg2, at);
+      __ And(reg1, reg1, at);
+      __ Daddu(reg1, reg1, reg2);
+
+      // Put count of ones in every 8 bits into those 8 bits.
+      __ li(at, m4);
+      __ dsrl(reg2, reg1, 4);
+      __ And(reg2, reg2, at);
+      __ And(reg1, reg1, at);
+      __ Daddu(reg1, reg1, reg2);
+
+      // Put count of ones in every 16 bits into those 16 bits.
+      __ li(at, m8);
+      __ dsrl(reg2, reg1, 8);
+      __ And(reg2, reg2, at);
+      __ And(reg1, reg1, at);
+      __ Daddu(reg1, reg1, reg2);
+
+      // Calculate total number of ones.
+      __ li(at, m16);
+      __ dsrl(reg2, reg1, 16);
+      __ And(reg2, reg2, at);
+      __ And(reg1, reg1, at);
+      __ Daddu(i.OutputRegister(), reg1, reg2);
+    } break;
+    case kMips64Dpopcnt: {
+      Register reg1 = kScratchReg;
+      Register reg2 = kScratchReg2;
+      uint64_t m1 = 0x5555555555555555;
+      uint64_t m2 = 0x3333333333333333;
+      uint64_t m4 = 0x0f0f0f0f0f0f0f0f;
+      uint64_t m8 = 0x00ff00ff00ff00ff;
+      uint64_t m16 = 0x0000ffff0000ffff;
+      uint64_t m32 = 0x00000000ffffffff;
+
+      // Put count of ones in every 2 bits into those 2 bits.
+      __ li(at, m1);
+      __ dsrl(reg1, i.InputRegister(0), 1);
+      __ and_(reg2, i.InputRegister(0), at);
+      __ and_(reg1, reg1, at);
+      __ Daddu(reg1, reg1, reg2);
+
+      // Put count of ones in every 4 bits into those 4 bits.
+      __ li(at, m2);
+      __ dsrl(reg2, reg1, 2);
+      __ and_(reg2, reg2, at);
+      __ and_(reg1, reg1, at);
+      __ Daddu(reg1, reg1, reg2);
+
+      // Put count of ones in every 8 bits into those 8 bits.
+      __ li(at, m4);
+      __ dsrl(reg2, reg1, 4);
+      __ and_(reg2, reg2, at);
+      __ and_(reg1, reg1, at);
+      __ Daddu(reg1, reg1, reg2);
+
+      // Put count of ones in every 16 bits into those 16 bits.
+      __ li(at, m8);
+      __ dsrl(reg2, reg1, 8);
+      __ and_(reg2, reg2, at);
+      __ and_(reg1, reg1, at);
+      __ Daddu(reg1, reg1, reg2);
+
+      // Put count of ones in every 32 bits into those 32 bits.
+      __ li(at, m16);
+      __ dsrl(reg2, reg1, 16);
+      __ and_(reg2, reg2, at);
+      __ and_(reg1, reg1, at);
+      __ Daddu(reg1, reg1, reg2);
+
+      // Calculate total number of ones.
+      __ li(at, m32);
+      __ dsrl32(reg2, reg1, 0);
+      __ and_(reg2, reg2, at);
+      __ and_(reg1, reg1, at);
+      __ Daddu(i.OutputRegister(), reg1, reg2);
+    } break;
     case kMips64Shl:
       if (instr->InputAt(1)->IsRegister()) {
         __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
@@ -1065,6 +1216,10 @@
       __ cvt_s_w(i.OutputDoubleRegister(), scratch);
       break;
     }
+    case kMips64CvtSUw: {
+      __ Cvt_s_uw(i.OutputDoubleRegister(), i.InputRegister(0));
+      break;
+    }
     case kMips64CvtSL: {
       FPURegister scratch = kScratchDoubleReg;
       __ dmtc1(i.InputRegister(0), scratch);
@@ -1200,6 +1355,12 @@
       __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
       break;
     }
+    case kMips64TruncUwS: {
+      FPURegister scratch = kScratchDoubleReg;
+      // TODO(plind): Fix wrong param order of Trunc_uw_s() macro-asm function.
+      __ Trunc_uw_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
+      break;
+    }
     case kMips64TruncUlS: {
       FPURegister scratch = kScratchDoubleReg;
       Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
@@ -1648,27 +1809,15 @@
   AssembleArchJump(i.InputRpo(1));
 }
 
-
 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
   MipsOperandConverter i(this, instr);
   Register input = i.InputRegister(0);
   size_t const case_count = instr->InputCount() - 2;
-  Label here;
 
   __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
-  __ BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 7);
-  // Ensure that dd-ed labels use 8 byte aligned addresses.
-  __ Align(8);
-  __ bal(&here);
-  __ dsll(at, input, 3);  // Branch delay slot.
-  __ bind(&here);
-  __ daddu(at, at, ra);
-  __ ld(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
-  __ jr(at);
-  __ nop();  // Branch delay slot nop.
-  for (size_t index = 0; index < case_count; ++index) {
-    __ dd(GetLabel(i.InputRpo(index + 2)));
-  }
+  __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) {
+    return GetLabel(i.InputRpo(index + 2));
+  });
 }
 
 
@@ -1705,8 +1854,6 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    // TODO(titzer): cannot address target function == local #-1
-    __ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
     stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   }
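
kMips64Dctz and kMips64Dpopcnt above are the 64-bit counterparts of the MIPS32 sequences: 63 - dclz(x & -x) with 64 returned for a zero input, and the same mask ladder extended by one 32-bit folding step. A 64-bit popcount reference for cross-checking the mask constants, sketch only:

    #include <cstdint>

    uint64_t PopCount64(uint64_t x) {
      x = (x & 0x5555555555555555ull) + ((x >> 1) & 0x5555555555555555ull);
      x = (x & 0x3333333333333333ull) + ((x >> 2) & 0x3333333333333333ull);
      x = (x & 0x0f0f0f0f0f0f0f0full) + ((x >> 4) & 0x0f0f0f0f0f0f0f0full);
      x = (x & 0x00ff00ff00ff00ffull) + ((x >> 8) & 0x00ff00ff00ff00ffull);
      x = (x & 0x0000ffff0000ffffull) + ((x >> 16) & 0x0000ffff0000ffffull);
      x = (x & 0x00000000ffffffffull) + ((x >> 32) & 0x00000000ffffffffull);
      return x;
    }
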
 
diff --git a/src/compiler/mips64/instruction-codes-mips64.h b/src/compiler/mips64/instruction-codes-mips64.h
index 778c6ad..9e94c09 100644
--- a/src/compiler/mips64/instruction-codes-mips64.h
+++ b/src/compiler/mips64/instruction-codes-mips64.h
@@ -44,6 +44,10 @@
   V(Mips64Dext)                     \
   V(Mips64Dins)                     \
   V(Mips64Dclz)                     \
+  V(Mips64Ctz)                      \
+  V(Mips64Dctz)                     \
+  V(Mips64Popcnt)                   \
+  V(Mips64Dpopcnt)                  \
   V(Mips64Dshl)                     \
   V(Mips64Dshr)                     \
   V(Mips64Dsar)                     \
@@ -93,11 +97,13 @@
   V(Mips64TruncLS)                  \
   V(Mips64TruncLD)                  \
   V(Mips64TruncUwD)                 \
+  V(Mips64TruncUwS)                 \
   V(Mips64TruncUlS)                 \
   V(Mips64TruncUlD)                 \
   V(Mips64CvtDW)                    \
   V(Mips64CvtSL)                    \
   V(Mips64CvtSW)                    \
+  V(Mips64CvtSUw)                   \
   V(Mips64CvtSUl)                   \
   V(Mips64CvtDL)                    \
   V(Mips64CvtDUw)                   \
@@ -130,7 +136,6 @@
   V(Mips64StoreToStackSlot)         \
   V(Mips64StackClaim)
 
-
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
 // are encoded into the InstructionCode of the instruction and tell the
diff --git a/src/compiler/mips64/instruction-selector-mips64.cc b/src/compiler/mips64/instruction-selector-mips64.cc
index 1b12bd9..44a5470 100644
--- a/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/src/compiler/mips64/instruction-selector-mips64.cc
@@ -159,6 +159,7 @@
     case MachineRepresentation::kWord64:
       opcode = kMips64Ld;
       break;
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -241,6 +242,7 @@
       case MachineRepresentation::kWord64:
         opcode = kMips64Sd;
         break;
+      case MachineRepresentation::kSimd128:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -562,16 +564,36 @@
 }
 
 
-void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
 
 
-void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
 
 
-void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32Ctz(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64Ctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
 
 
-void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord64Ctz(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64Dctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64Popcnt, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord64Popcnt(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64Dpopcnt, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
 
 
 void InstructionSelector::VisitWord64Ror(Node* node) {
@@ -802,6 +824,16 @@
 }
 
 
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+  VisitRR(this, kMips64CvtSW, node);
+}
+
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+  VisitRR(this, kMips64CvtSUw, node);
+}
+
+
 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
   VisitRR(this, kMips64CvtDW, node);
 }
@@ -812,6 +844,16 @@
 }
 
 
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+  VisitRR(this, kMips64TruncWS, node);
+}
+
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+  VisitRR(this, kMips64TruncUwS, node);
+}
+
+
 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
   Mips64OperandGenerator g(this);
   Node* value = node->InputAt(0);
@@ -1307,6 +1349,7 @@
       break;
     case MachineRepresentation::kBit:
     case MachineRepresentation::kTagged:
+    case MachineRepresentation::kSimd128:
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -1356,6 +1399,7 @@
       break;
     case MachineRepresentation::kBit:
     case MachineRepresentation::kTagged:
+    case MachineRepresentation::kSimd128:
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -1846,7 +1890,11 @@
 // static
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
-  return MachineOperatorBuilder::kWord32ShiftIsSafe |
+  return MachineOperatorBuilder::kWord32Ctz |
+         MachineOperatorBuilder::kWord64Ctz |
+         MachineOperatorBuilder::kWord32Popcnt |
+         MachineOperatorBuilder::kWord64Popcnt |
+         MachineOperatorBuilder::kWord32ShiftIsSafe |
          MachineOperatorBuilder::kInt32DivIsSafe |
          MachineOperatorBuilder::kUint32DivIsSafe |
          MachineOperatorBuilder::kFloat64Min |
diff --git a/src/compiler/move-optimizer.cc b/src/compiler/move-optimizer.cc
index bde3f7f..477f139 100644
--- a/src/compiler/move-optimizer.cc
+++ b/src/compiler/move-optimizer.cc
@@ -10,14 +10,17 @@
 
 namespace {
 
-typedef std::pair<InstructionOperand, InstructionOperand> MoveKey;
+struct MoveKey {
+  InstructionOperand source;
+  InstructionOperand destination;
+};
 
 struct MoveKeyCompare {
   bool operator()(const MoveKey& a, const MoveKey& b) const {
-    if (a.first.EqualsCanonicalized(b.first)) {
-      return a.second.CompareCanonicalized(b.second);
+    if (a.source.EqualsCanonicalized(b.source)) {
+      return a.destination.CompareCanonicalized(b.destination);
     }
-    return a.first.CompareCanonicalized(b.first);
+    return a.source.CompareCanonicalized(b.source);
   }
 };
 
@@ -32,39 +35,6 @@
 typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet;
 
 
-bool GapsCanMoveOver(Instruction* instr, Zone* zone) {
-  if (instr->IsNop()) return true;
-  if (instr->ClobbersTemps() || instr->ClobbersRegisters() ||
-      instr->ClobbersDoubleRegisters()) {
-    return false;
-  }
-  if (instr->arch_opcode() != ArchOpcode::kArchNop) return false;
-
-  ZoneSet<InstructionOperand, OperandCompare> operands(zone);
-  for (size_t i = 0; i < instr->InputCount(); ++i) {
-    operands.insert(*instr->InputAt(i));
-  }
-  for (size_t i = 0; i < instr->OutputCount(); ++i) {
-    operands.insert(*instr->OutputAt(i));
-  }
-  for (size_t i = 0; i < instr->TempCount(); ++i) {
-    operands.insert(*instr->TempAt(i));
-  }
-  for (int i = Instruction::GapPosition::FIRST_GAP_POSITION;
-       i <= Instruction::GapPosition::LAST_GAP_POSITION; ++i) {
-    ParallelMove* moves = instr->parallel_moves()[i];
-    if (moves == nullptr) continue;
-    for (MoveOperands* move : *moves) {
-      if (operands.count(move->source()) > 0 ||
-          operands.count(move->destination()) > 0) {
-        return false;
-      }
-    }
-  }
-  return true;
-}
-
-
 int FindFirstNonEmptySlot(const Instruction* instr) {
   int i = Instruction::FIRST_GAP_POSITION;
   for (; i <= Instruction::LAST_GAP_POSITION; i++) {
@@ -85,11 +55,13 @@
 MoveOptimizer::MoveOptimizer(Zone* local_zone, InstructionSequence* code)
     : local_zone_(local_zone),
       code_(code),
-      to_finalize_(local_zone),
       local_vector_(local_zone) {}
 
 
 void MoveOptimizer::Run() {
+  for (Instruction* instruction : code()->instructions()) {
+    CompressGaps(instruction);
+  }
   for (InstructionBlock* block : code()->instruction_blocks()) {
     CompressBlock(block);
   }
@@ -111,13 +83,140 @@
     }
     OptimizeMerge(block);
   }
-  for (Instruction* gap : to_finalize_) {
+  for (Instruction* gap : code()->instructions()) {
     FinalizeMoves(gap);
   }
 }
 
+void MoveOptimizer::RemoveClobberedDestinations(Instruction* instruction) {
+  if (instruction->IsCall()) return;
+  ParallelMove* moves = instruction->parallel_moves()[0];
+  if (moves == nullptr) return;
 
-void MoveOptimizer::CompressMoves(ParallelMove* left, ParallelMove* right) {
+  DCHECK(instruction->parallel_moves()[1] == nullptr ||
+         instruction->parallel_moves()[1]->empty());
+
+  OperandSet outputs(local_zone());
+  OperandSet inputs(local_zone());
+
+  // Outputs and temps are treated together as potentially clobbering a
+  // destination operand.
+  for (size_t i = 0; i < instruction->OutputCount(); ++i) {
+    outputs.insert(*instruction->OutputAt(i));
+  }
+  for (size_t i = 0; i < instruction->TempCount(); ++i) {
+    outputs.insert(*instruction->TempAt(i));
+  }
+
+  // Input operands block elisions.
+  for (size_t i = 0; i < instruction->InputCount(); ++i) {
+    inputs.insert(*instruction->InputAt(i));
+  }
+
+  // Elide moves made redundant by the instruction.
+  for (MoveOperands* move : *moves) {
+    if (outputs.find(move->destination()) != outputs.end() &&
+        inputs.find(move->destination()) == inputs.end()) {
+      move->Eliminate();
+    }
+  }
+
+  // The ret instruction makes any assignment before it unnecessary, except for
+  // the one for its input.
+  if (instruction->opcode() == ArchOpcode::kArchRet) {
+    for (MoveOperands* move : *moves) {
+      if (inputs.find(move->destination()) == inputs.end()) {
+        move->Eliminate();
+      }
+    }
+  }
+}
+
+void MoveOptimizer::MigrateMoves(Instruction* to, Instruction* from) {
+  if (from->IsCall()) return;
+
+  ParallelMove* from_moves = from->parallel_moves()[0];
+  if (from_moves == nullptr || from_moves->empty()) return;
+
+  ZoneSet<InstructionOperand, OperandCompare> dst_cant_be(local_zone());
+  ZoneSet<InstructionOperand, OperandCompare> src_cant_be(local_zone());
+
+  // If an operand is an input to the instruction, we cannot move assignments
+  // where it appears on the LHS.
+  for (size_t i = 0; i < from->InputCount(); ++i) {
+    dst_cant_be.insert(*from->InputAt(i));
+  }
+  // If an operand is an output of the instruction, we cannot move assignments
+  // where it appears on the RHS, because we would lose its value before the
+  // instruction.
+  // Same for temp operands.
+  // The output can't appear on the LHS because we performed
+  // RemoveClobberedDestinations for the "from" instruction.
+  for (size_t i = 0; i < from->OutputCount(); ++i) {
+    src_cant_be.insert(*from->OutputAt(i));
+  }
+  for (size_t i = 0; i < from->TempCount(); ++i) {
+    src_cant_be.insert(*from->TempAt(i));
+  }
+  for (MoveOperands* move : *from_moves) {
+    if (move->IsRedundant()) continue;
+    // Assume dest has a value "V". If we have a "dest = y" move, then we can't
+    // move "z = dest", because z would become y rather than "V".
+    // We assume CompressMoves has happened before this, which means we don't
+    // have more than one assignment to dest.
+    src_cant_be.insert(move->destination());
+  }
+
+  ZoneSet<MoveKey, MoveKeyCompare> move_candidates(local_zone());
+  // Start with all the moves that don't have conflicting source or
+  // destination operands; these are eligible for being moved down.
+  for (MoveOperands* move : *from_moves) {
+    if (move->IsRedundant()) continue;
+    if (dst_cant_be.find(move->destination()) == dst_cant_be.end()) {
+      MoveKey key = {move->source(), move->destination()};
+      move_candidates.insert(key);
+    }
+  }
+  if (move_candidates.empty()) return;
+
+  // Stabilize the candidate set.
+  bool changed = false;
+  do {
+    changed = false;
+    for (auto iter = move_candidates.begin(); iter != move_candidates.end();) {
+      auto current = iter;
+      ++iter;
+      InstructionOperand src = current->source;
+      if (src_cant_be.find(src) != src_cant_be.end()) {
+        src_cant_be.insert(current->destination);
+        move_candidates.erase(current);
+        changed = true;
+      }
+    }
+  } while (changed);
+
+  ParallelMove to_move(local_zone());
+  for (MoveOperands* move : *from_moves) {
+    if (move->IsRedundant()) continue;
+    MoveKey key = {move->source(), move->destination()};
+    if (move_candidates.find(key) != move_candidates.end()) {
+      to_move.AddMove(move->source(), move->destination(), code_zone());
+      move->Eliminate();
+    }
+  }
+  if (to_move.empty()) return;
+
+  ParallelMove* dest =
+      to->GetOrCreateParallelMove(Instruction::GapPosition::START, code_zone());
+
+  CompressMoves(&to_move, dest);
+  DCHECK(dest->empty());
+  for (MoveOperands* m : to_move) {
+    dest->push_back(m);
+  }
+}
+
+void MoveOptimizer::CompressMoves(ParallelMove* left, MoveOpVector* right) {
   if (right == nullptr) return;
 
   MoveOpVector& eliminated = local_vector();
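
MigrateMoves keeps a gap move src->dst as a candidate for being pulled down past the previous instruction only if dst is not read by that instruction and src is not produced by it or redefined by another gap move; dropping one candidate can invalidate another that reads its destination, so the candidate set is shrunk to a fixed point. A sketch of that stabilization loop with plain std containers in place of ZoneSet and InstructionOperand:

    #include <set>
    #include <utility>
    #include <vector>

    using Operand = int;
    using Move = std::pair<Operand, Operand>;  // {source, destination}

    std::vector<Move> StabilizeCandidates(std::vector<Move> candidates,
                                          std::set<Operand> src_cant_be) {
      bool changed;
      do {
        changed = false;
        for (auto it = candidates.begin(); it != candidates.end();) {
          if (src_cant_be.count(it->first)) {
            // A move that cannot migrate keeps redefining its destination in
            // the earlier gap, so candidates reading that destination must
            // stay behind as well.
            src_cant_be.insert(it->second);
            it = candidates.erase(it);
            changed = true;
          } else {
            ++it;
          }
        }
      } while (changed);
      return candidates;
    }
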
@@ -147,54 +246,49 @@
   DCHECK(eliminated.empty());
 }
 
+void MoveOptimizer::CompressGaps(Instruction* instruction) {
+  int i = FindFirstNonEmptySlot(instruction);
+  bool has_moves = i <= Instruction::LAST_GAP_POSITION;
+  USE(has_moves);
 
-// Smash all consecutive moves into the left most move slot and accumulate them
-// as much as possible across instructions.
-void MoveOptimizer::CompressBlock(InstructionBlock* block) {
-  Instruction* prev_instr = nullptr;
-  for (int index = block->code_start(); index < block->code_end(); ++index) {
-    Instruction* instr = code()->instructions()[index];
-    int i = FindFirstNonEmptySlot(instr);
-    bool has_moves = i <= Instruction::LAST_GAP_POSITION;
-
-    if (i == Instruction::LAST_GAP_POSITION) {
-      std::swap(instr->parallel_moves()[Instruction::FIRST_GAP_POSITION],
-                instr->parallel_moves()[Instruction::LAST_GAP_POSITION]);
-    } else if (i == Instruction::FIRST_GAP_POSITION) {
-      CompressMoves(instr->parallel_moves()[Instruction::FIRST_GAP_POSITION],
-                    instr->parallel_moves()[Instruction::LAST_GAP_POSITION]);
-    }
-    // We either have no moves, or, after swapping or compressing, we have
-    // all the moves in the first gap position, and none in the second/end gap
-    // position.
-    ParallelMove* first =
-        instr->parallel_moves()[Instruction::FIRST_GAP_POSITION];
-    ParallelMove* last =
-        instr->parallel_moves()[Instruction::LAST_GAP_POSITION];
-    USE(last);
-
-    DCHECK(!has_moves ||
-           (first != nullptr && (last == nullptr || last->empty())));
-
-    if (prev_instr != nullptr) {
-      if (has_moves) {
-        // Smash first into prev_instr, killing left.
-        ParallelMove* pred_moves = prev_instr->parallel_moves()[0];
-        CompressMoves(pred_moves, first);
-      }
-      // Slide prev_instr down so we always know where to look for it.
-      std::swap(prev_instr->parallel_moves()[0], instr->parallel_moves()[0]);
-    }
-
-    prev_instr = instr->parallel_moves()[0] == nullptr ? nullptr : instr;
-    if (GapsCanMoveOver(instr, local_zone())) continue;
-    if (prev_instr != nullptr) {
-      to_finalize_.push_back(prev_instr);
-      prev_instr = nullptr;
-    }
+  if (i == Instruction::LAST_GAP_POSITION) {
+    std::swap(instruction->parallel_moves()[Instruction::FIRST_GAP_POSITION],
+              instruction->parallel_moves()[Instruction::LAST_GAP_POSITION]);
+  } else if (i == Instruction::FIRST_GAP_POSITION) {
+    CompressMoves(
+        instruction->parallel_moves()[Instruction::FIRST_GAP_POSITION],
+        instruction->parallel_moves()[Instruction::LAST_GAP_POSITION]);
   }
-  if (prev_instr != nullptr) {
-    to_finalize_.push_back(prev_instr);
+  // We either have no moves, or, after swapping or compressing, we have
+  // all the moves in the first gap position, and none in the second/end gap
+  // position.
+  ParallelMove* first =
+      instruction->parallel_moves()[Instruction::FIRST_GAP_POSITION];
+  ParallelMove* last =
+      instruction->parallel_moves()[Instruction::LAST_GAP_POSITION];
+  USE(first);
+  USE(last);
+
+  DCHECK(!has_moves ||
+         (first != nullptr && (last == nullptr || last->empty())));
+}
+
+void MoveOptimizer::CompressBlock(InstructionBlock* block) {
+  int first_instr_index = block->first_instruction_index();
+  int last_instr_index = block->last_instruction_index();
+
+  // Start by removing gap assignments where the output of the subsequent
+  // instruction appears on LHS, as long as they are not needed by its input.
+  Instruction* prev_instr = code()->instructions()[first_instr_index];
+  RemoveClobberedDestinations(prev_instr);
+
+  for (int index = first_instr_index + 1; index <= last_instr_index; ++index) {
+    Instruction* instr = code()->instructions()[index];
+    // Migrate eligible moves from instr into the gap of prev_instr.
+    MigrateMoves(instr, prev_instr);
+    // Remove gap assignments clobbered by instr's output.
+    RemoveClobberedDestinations(instr);
+    prev_instr = instr;
   }
 }
 
@@ -211,6 +305,12 @@
   // things that would prevent moving gap moves across them.
   for (RpoNumber& pred_index : block->predecessors()) {
     const InstructionBlock* pred = code()->InstructionBlockAt(pred_index);
+
+    // If the predecessor has more than one successor, we shouldn't attempt to
+    // move any of its gap moves down to this block (one of the successors),
+    // because their effect may be needed by the other successors.
+    if (pred->SuccessorCount() > 1) return;
+
     const Instruction* last_instr =
         code()->instructions()[pred->last_instruction_index()];
     if (last_instr->IsCall()) return;
@@ -246,21 +346,54 @@
       }
     }
   }
-  if (move_map.empty() || correct_counts != move_map.size()) return;
+  if (move_map.empty() || correct_counts == 0) return;
+
   // Find insertion point.
-  Instruction* instr = nullptr;
-  for (int i = block->first_instruction_index();
-       i <= block->last_instruction_index(); ++i) {
-    instr = code()->instructions()[i];
-    if (!GapsCanMoveOver(instr, local_zone()) || !instr->AreMovesRedundant())
-      break;
+  Instruction* instr = code()->instructions()[block->first_instruction_index()];
+
+  if (correct_counts != move_map.size()) {
+    // Moves that are unique to each predecessor won't be pushed to the common
+    // successor.
+    OperandSet conflicting_srcs(local_zone());
+    for (auto iter = move_map.begin(), end = move_map.end(); iter != end;) {
+      auto current = iter;
+      ++iter;
+      if (current->second != block->PredecessorCount()) {
+        InstructionOperand dest = current->first.destination;
+        // Not all the moves in all the gaps are the same. Maybe some are. If
+        // there are such moves, we could move them, but the destination of the
+        // moves staying behind can't appear as a source of a common move,
+        // because the move staying behind will clobber this destination.
+        conflicting_srcs.insert(dest);
+        move_map.erase(current);
+      }
+    }
+
+    bool changed = false;
+    do {
+      // If a common move can't be pushed to the common successor, then its
+      // destination also can't appear as source to any move being pushed.
+      changed = false;
+      for (auto iter = move_map.begin(), end = move_map.end(); iter != end;) {
+        auto current = iter;
+        ++iter;
+        DCHECK_EQ(block->PredecessorCount(), current->second);
+        if (conflicting_srcs.find(current->first.source) !=
+            conflicting_srcs.end()) {
+          conflicting_srcs.insert(current->first.destination);
+          move_map.erase(current);
+          changed = true;
+        }
+      }
+    } while (changed);
   }
+
+  if (move_map.empty()) return;
+
   DCHECK_NOT_NULL(instr);
   bool gap_initialized = true;
-  if (instr->parallel_moves()[0] == nullptr ||
-      instr->parallel_moves()[0]->empty()) {
-    to_finalize_.push_back(instr);
-  } else {
+  if (instr->parallel_moves()[0] != nullptr &&
+      !instr->parallel_moves()[0]->empty()) {
     // Will compress after insertion.
     gap_initialized = false;
     std::swap(instr->parallel_moves()[0], instr->parallel_moves()[1]);
@@ -275,12 +408,12 @@
       if (move->IsRedundant()) continue;
       MoveKey key = {move->source(), move->destination()};
       auto it = move_map.find(key);
-      USE(it);
-      DCHECK(it != move_map.end());
-      if (first_iteration) {
-        moves->AddMove(move->source(), move->destination());
+      if (it != move_map.end()) {
+        if (first_iteration) {
+          moves->AddMove(move->source(), move->destination());
+        }
+        move->Eliminate();
       }
-      move->Eliminate();
     }
     first_iteration = false;
   }
@@ -288,6 +421,7 @@
   if (!gap_initialized) {
     CompressMoves(instr->parallel_moves()[0], instr->parallel_moves()[1]);
   }
+  CompressBlock(block);
 }
 
 
@@ -316,8 +450,10 @@
   MoveOpVector& loads = local_vector();
   DCHECK(loads.empty());
 
+  ParallelMove* parallel_moves = instr->parallel_moves()[0];
+  if (parallel_moves == nullptr) return;
   // Find all the loads.
-  for (MoveOperands* move : *instr->parallel_moves()[0]) {
+  for (MoveOperands* move : *parallel_moves) {
     if (move->IsRedundant()) continue;
     if (move->source().IsConstant() || IsSlot(move->source())) {
       loads.push_back(move);
diff --git a/src/compiler/move-optimizer.h b/src/compiler/move-optimizer.h
index c9a3289..8e932a0 100644
--- a/src/compiler/move-optimizer.h
+++ b/src/compiler/move-optimizer.h
@@ -26,15 +26,30 @@
   Zone* code_zone() const { return code()->zone(); }
   MoveOpVector& local_vector() { return local_vector_; }
 
-  void CompressBlock(InstructionBlock* blocke);
-  void CompressMoves(ParallelMove* left, ParallelMove* right);
+  // Consolidate an instruction's gap moves into its first gap position.
+  void CompressGaps(Instruction* instr);
+
+  // Attempt to push down to the last instruction those moves that can be pushed.
+  void CompressBlock(InstructionBlock* block);
+
+  // Consolidate moves into the first gap.
+  void CompressMoves(ParallelMove* left, MoveOpVector* right);
+
+  // Push down those moves in the gap of from that do not change the
+  // semantics of the from instruction, nor the semantics of the moves
+  // that remain behind.
+  void MigrateMoves(Instruction* to, Instruction* from);
+
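+  // Remove moves from an instruction's gap whose destination is clobbered by
+  // that instruction's output.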
+  void RemoveClobberedDestinations(Instruction* instruction);
+
   const Instruction* LastInstruction(const InstructionBlock* block) const;
+
+  // Consolidate common moves appearing across all predecessors of a block.
   void OptimizeMerge(InstructionBlock* block);
   void FinalizeMoves(Instruction* instr);
 
   Zone* const local_zone_;
   InstructionSequence* const code_;
-  Instructions to_finalize_;
   MoveOpVector local_vector_;
 
   DISALLOW_COPY_AND_ASSIGN(MoveOptimizer);
diff --git a/src/compiler/node-properties.cc b/src/compiler/node-properties.cc
index cb6c3c4..ac9cc34 100644
--- a/src/compiler/node-properties.cc
+++ b/src/compiler/node-properties.cc
@@ -4,11 +4,12 @@
 
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph.h"
+#include "src/compiler/js-operator.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/operator-properties.h"
 #include "src/compiler/verifier.h"
-#include "src/types-inl.h"
+#include "src/handles-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -123,6 +124,7 @@
 
 // static
 bool NodeProperties::IsExceptionalCall(Node* node) {
+  if (node->op()->HasProperty(Operator::kNoThrow)) return false;
   for (Edge const edge : node->use_edges()) {
     if (!NodeProperties::IsControlEdge(edge)) continue;
     if (edge.from()->opcode() == IrOpcode::kIfException) return true;
@@ -334,6 +336,16 @@
     Node* node, MaybeHandle<Context> native_context) {
   while (true) {
     switch (node->opcode()) {
+      case IrOpcode::kJSLoadContext: {
+        ContextAccess const& access = ContextAccessOf(node->op());
+        if (access.index() != Context::NATIVE_CONTEXT_INDEX) {
+          return MaybeHandle<Context>();
+        }
+        // Skip over the intermediate contexts, we're only interested in the
+        // very last context in the context chain anyway.
+        node = NodeProperties::GetContextInput(node);
+        break;
+      }
       case IrOpcode::kJSCreateBlockContext:
       case IrOpcode::kJSCreateCatchContext:
       case IrOpcode::kJSCreateFunctionContext:
diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h
index a97fdfa..c78e15e 100644
--- a/src/compiler/opcodes.h
+++ b/src/compiler/opcodes.h
@@ -128,7 +128,6 @@
 #define JS_CONTEXT_OP_LIST(V) \
   V(JSLoadContext)            \
   V(JSStoreContext)           \
-  V(JSLoadDynamic)            \
   V(JSCreateFunctionContext)  \
   V(JSCreateCatchContext)     \
   V(JSCreateWithContext)      \
@@ -202,6 +201,7 @@
   V(StoreBuffer)                   \
   V(StoreElement)                  \
   V(ObjectIsNumber)                \
+  V(ObjectIsReceiver)              \
   V(ObjectIsSmi)
 
 // Opcodes for Machine-level operators.
@@ -227,6 +227,7 @@
   MACHINE_COMPARE_BINOP_LIST(V) \
   V(Load)                       \
   V(Store)                      \
+  V(StackSlot)                  \
   V(Word32And)                  \
   V(Word32Or)                   \
   V(Word32Xor)                  \
@@ -236,6 +237,7 @@
   V(Word32Ror)                  \
   V(Word32Clz)                  \
   V(Word32Ctz)                  \
+  V(Word32ReverseBits)          \
   V(Word32Popcnt)               \
   V(Word64Popcnt)               \
   V(Word64And)                  \
@@ -247,6 +249,7 @@
   V(Word64Ror)                  \
   V(Word64Clz)                  \
   V(Word64Ctz)                  \
+  V(Word64ReverseBits)          \
   V(Int32Add)                   \
   V(Int32AddWithOverflow)       \
   V(Int32Sub)                   \
@@ -270,6 +273,8 @@
   V(ChangeFloat32ToFloat64)     \
   V(ChangeFloat64ToInt32)       \
   V(ChangeFloat64ToUint32)      \
+  V(TruncateFloat32ToInt32)     \
+  V(TruncateFloat32ToUint32)    \
   V(TryTruncateFloat32ToInt64)  \
   V(TryTruncateFloat64ToInt64)  \
   V(TryTruncateFloat32ToUint64) \
@@ -281,8 +286,10 @@
   V(TruncateFloat64ToFloat32)   \
   V(TruncateFloat64ToInt32)     \
   V(TruncateInt64ToInt32)       \
+  V(RoundInt32ToFloat32)        \
   V(RoundInt64ToFloat32)        \
   V(RoundInt64ToFloat64)        \
+  V(RoundUint32ToFloat32)       \
   V(RoundUint64ToFloat32)       \
   V(RoundUint64ToFloat64)       \
   V(BitcastFloat32ToInt32)      \
@@ -321,6 +328,7 @@
   V(Float64InsertHighWord32)    \
   V(LoadStackPointer)           \
   V(LoadFramePointer)           \
+  V(LoadParentFramePointer)     \
   V(CheckedLoad)                \
   V(CheckedStore)
 
diff --git a/src/compiler/operator-properties.cc b/src/compiler/operator-properties.cc
index bd704a3..1ee31d5 100644
--- a/src/compiler/operator-properties.cc
+++ b/src/compiler/operator-properties.cc
@@ -55,7 +55,6 @@
     case IrOpcode::kJSCreateLiteralRegExp:
 
     // Context operations
-    case IrOpcode::kJSLoadDynamic:
     case IrOpcode::kJSCreateScriptContext:
 
     // Conversions
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index 4d6aacd..21c34fc 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -30,8 +30,8 @@
 #include "src/compiler/instruction-selector.h"
 #include "src/compiler/js-builtin-reducer.h"
 #include "src/compiler/js-call-reducer.h"
-#include "src/compiler/js-context-relaxation.h"
 #include "src/compiler/js-context-specialization.h"
+#include "src/compiler/js-create-lowering.h"
 #include "src/compiler/js-frame-specialization.h"
 #include "src/compiler/js-generic-lowering.h"
 #include "src/compiler/js-global-object-specialization.h"
@@ -276,11 +276,8 @@
         info()->isolate(), instruction_zone(), instruction_blocks);
   }
 
-  void InitializeRegisterAllocationData(const RegisterConfiguration* config,
-                                        CallDescriptor* descriptor,
-                                        const char* debug_name) {
+  void InitializeFrameData(CallDescriptor* descriptor) {
     DCHECK(frame_ == nullptr);
-    DCHECK(register_allocation_data_ == nullptr);
     int fixed_frame_size = 0;
     if (descriptor != nullptr) {
       fixed_frame_size = (descriptor->IsCFunctionCall())
@@ -289,6 +286,12 @@
                              : StandardFrameConstants::kFixedSlotCount;
     }
     frame_ = new (instruction_zone()) Frame(fixed_frame_size, descriptor);
+  }
+
+  void InitializeRegisterAllocationData(const RegisterConfiguration* config,
+                                        CallDescriptor* descriptor,
+                                        const char* debug_name) {
+    DCHECK(register_allocation_data_ == nullptr);
     register_allocation_data_ = new (register_allocation_zone())
         RegisterAllocationData(config, register_allocation_zone(), frame(),
                                sequence(), debug_name);
@@ -512,7 +515,7 @@
     if (data->info()->shared_info()->HasBytecodeArray()) {
       BytecodeGraphBuilder graph_builder(temp_zone, data->info(),
                                          data->jsgraph());
-      succeeded = graph_builder.CreateGraph(stack_check);
+      succeeded = graph_builder.CreateGraph();
     } else {
       AstGraphBuilderWithPositions graph_builder(
           temp_zone, data->info(), data->jsgraph(), data->loop_assignment(),
@@ -536,7 +539,7 @@
                                               data->common());
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                          data->common(), data->machine());
-    JSCallReducer call_reducer(data->jsgraph(),
+    JSCallReducer call_reducer(&graph_reducer, data->jsgraph(),
                                data->info()->is_deoptimization_enabled()
                                    ? JSCallReducer::kDeoptimizationEnabled
                                    : JSCallReducer::kNoFlags,
@@ -549,17 +552,19 @@
     JSFrameSpecialization frame_specialization(data->info()->osr_frame(),
                                                data->jsgraph());
     JSGlobalObjectSpecialization global_object_specialization(
-        &graph_reducer, data->jsgraph(),
-        data->info()->is_deoptimization_enabled()
-            ? JSGlobalObjectSpecialization::kDeoptimizationEnabled
-            : JSGlobalObjectSpecialization::kNoFlags,
-        data->native_context(), data->info()->dependencies());
+        &graph_reducer, data->jsgraph(), data->native_context(),
+        data->info()->dependencies());
+    JSNativeContextSpecialization::Flags flags =
+        JSNativeContextSpecialization::kNoFlags;
+    if (data->info()->is_bailout_on_uninitialized()) {
+      flags |= JSNativeContextSpecialization::kBailoutOnUninitialized;
+    }
+    if (data->info()->is_deoptimization_enabled()) {
+      flags |= JSNativeContextSpecialization::kDeoptimizationEnabled;
+    }
     JSNativeContextSpecialization native_context_specialization(
-        &graph_reducer, data->jsgraph(),
-        data->info()->is_deoptimization_enabled()
-            ? JSNativeContextSpecialization::kDeoptimizationEnabled
-            : JSNativeContextSpecialization::kNoFlags,
-        data->native_context(), data->info()->dependencies(), temp_zone);
+        &graph_reducer, data->jsgraph(), flags, data->native_context(),
+        data->info()->dependencies(), temp_zone);
     JSInliningHeuristic inlining(&graph_reducer,
                                  data->info()->is_inlining_enabled()
                                      ? JSInliningHeuristic::kGeneralInlining
@@ -570,7 +575,9 @@
     if (data->info()->is_frame_specializing()) {
       AddReducer(data, &graph_reducer, &frame_specialization);
     }
-    AddReducer(data, &graph_reducer, &global_object_specialization);
+    if (data->info()->is_deoptimization_enabled()) {
+      AddReducer(data, &graph_reducer, &global_object_specialization);
+    }
     AddReducer(data, &graph_reducer, &native_context_specialization);
     AddReducer(data, &graph_reducer, &context_specialization);
     AddReducer(data, &graph_reducer, &call_reducer);
@@ -610,6 +617,13 @@
                                               data->common());
     LoadElimination load_elimination(&graph_reducer);
     JSBuiltinReducer builtin_reducer(&graph_reducer, data->jsgraph());
+    MaybeHandle<LiteralsArray> literals_array =
+        data->info()->is_native_context_specializing()
+            ? handle(data->info()->closure()->literals(), data->isolate())
+            : MaybeHandle<LiteralsArray>();
+    JSCreateLowering create_lowering(
+        &graph_reducer, data->info()->dependencies(), data->jsgraph(),
+        literals_array, temp_zone);
     JSTypedLowering::Flags typed_lowering_flags = JSTypedLowering::kNoFlags;
     if (data->info()->is_deoptimization_enabled()) {
       typed_lowering_flags |= JSTypedLowering::kDeoptimizationEnabled;
@@ -629,6 +643,9 @@
                                          data->common(), data->machine());
     AddReducer(data, &graph_reducer, &dead_code_elimination);
     AddReducer(data, &graph_reducer, &builtin_reducer);
+    if (data->info()->is_deoptimization_enabled()) {
+      AddReducer(data, &graph_reducer, &create_lowering);
+    }
     AddReducer(data, &graph_reducer, &typed_lowering);
     AddReducer(data, &graph_reducer, &intrinsic_lowering);
     AddReducer(data, &graph_reducer, &load_elimination);
@@ -664,8 +681,11 @@
     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
     EscapeAnalysisReducer escape_reducer(&graph_reducer, data->jsgraph(),
                                          &escape_analysis, temp_zone);
+    escape_reducer.SetExistsVirtualAllocate(
+        escape_analysis.ExistsVirtualAllocate());
     AddReducer(data, &graph_reducer, &escape_reducer);
     graph_reducer.ReduceGraph();
+    escape_reducer.VerifyReplacement();
   }
 };
 
@@ -677,6 +697,13 @@
     SimplifiedLowering lowering(data->jsgraph(), temp_zone,
                                 data->source_positions());
     lowering.LowerAllNodes();
+
+    // TODO(bmeurer): See comment on SimplifiedLowering::abort_compilation_.
+    if (lowering.abort_compilation_) {
+      data->set_compilation_failed();
+      return;
+    }
+
     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                               data->common());
@@ -772,7 +799,6 @@
 
   void Run(PipelineData* data, Zone* temp_zone) {
     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
-    JSContextRelaxation context_relaxing;
     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                               data->common());
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
@@ -782,7 +808,6 @@
     SelectLowering select_lowering(data->jsgraph()->graph(),
                                    data->jsgraph()->common());
     TailCallOptimization tco(data->common(), data->graph());
-    AddReducer(data, &graph_reducer, &context_relaxing);
     AddReducer(data, &graph_reducer, &dead_code_elimination);
     AddReducer(data, &graph_reducer, &common_reducer);
     AddReducer(data, &graph_reducer, &generic_lowering);
@@ -813,7 +838,7 @@
   void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
     InstructionSelector selector(
         temp_zone, data->graph()->NodeCount(), linkage, data->sequence(),
-        data->schedule(), data->source_positions(),
+        data->schedule(), data->source_positions(), data->frame(),
         data->info()->is_source_positions_enabled()
             ? InstructionSelector::kAllSourcePositions
             : InstructionSelector::kCallSourcePositions);
@@ -979,9 +1004,10 @@
 struct JumpThreadingPhase {
   static const char* phase_name() { return "jump threading"; }
 
-  void Run(PipelineData* data, Zone* temp_zone) {
+  void Run(PipelineData* data, Zone* temp_zone, bool frame_at_start) {
     ZoneVector<RpoNumber> result(temp_zone);
-    if (JumpThreading::ComputeForwarding(temp_zone, result, data->sequence())) {
+    if (JumpThreading::ComputeForwarding(temp_zone, result, data->sequence(),
+                                         frame_at_start)) {
       JumpThreading::ApplyForwarding(result, data->sequence());
     }
   }
@@ -1053,13 +1079,6 @@
 
 
 Handle<Code> Pipeline::GenerateCode() {
-  // TODO(mstarzinger): This is just a temporary hack to make TurboFan work,
-  // the correct solution is to restore the context register after invoking
-  // builtins from full-codegen.
-  if (Context::IsJSBuiltin(isolate()->native_context(), info()->closure())) {
-    return Handle<Code>::null();
-  }
-
   ZonePool zone_pool;
   base::SmartPointer<PipelineStatistics> pipeline_statistics;
 
@@ -1073,13 +1092,14 @@
     if (json_file != nullptr) {
       OFStream json_of(json_file);
       Handle<Script> script = info()->script();
-      FunctionLiteral* function = info()->literal();
       base::SmartArrayPointer<char> function_name = info()->GetDebugName();
       int pos = info()->shared_info()->start_position();
       json_of << "{\"function\":\"" << function_name.get()
               << "\", \"sourcePosition\":" << pos << ", \"source\":\"";
-      if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+      if (info()->has_literal() && !script->IsUndefined() &&
+          !script->source()->IsUndefined()) {
         DisallowHeapAllocation no_allocation;
+        FunctionLiteral* function = info()->literal();
         int start = function->start_position();
         int len = function->end_position() - start;
         String::SubStringRange source(String::cast(script->source()), start,
@@ -1204,6 +1224,9 @@
   // Kill the Typer and thereby uninstall the decorator (if any).
   typer.Reset(nullptr);
 
+  // TODO(bmeurer): See comment on SimplifiedLowering::abort_compilation_.
+  if (data.compilation_failed()) return Handle<Code>::null();
+
   return ScheduleAndGenerateCode(
       Linkage::ComputeIncoming(data.instruction_zone(), info()));
 }
@@ -1212,10 +1235,9 @@
 Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
                                                CallDescriptor* call_descriptor,
                                                Graph* graph, Schedule* schedule,
-                                               Code::Kind kind,
+                                               Code::Flags flags,
                                                const char* debug_name) {
-  CompilationInfo info(debug_name, isolate, graph->zone());
-  info.set_output_code_kind(kind);
+  CompilationInfo info(debug_name, isolate, graph->zone(), flags);
 
   // Construct a pipeline for scheduling and code generation.
   ZonePool zone_pool;
@@ -1286,6 +1308,7 @@
   PipelineData data(&zone_pool, &info, sequence);
   Pipeline pipeline(&info);
   pipeline.data_ = &data;
+  pipeline.data_->InitializeFrameData(nullptr);
   pipeline.AllocateRegisters(config, nullptr, run_verifier);
   return !data.compilation_failed();
 }
@@ -1308,6 +1331,7 @@
 
   data->InitializeInstructionSequence();
 
+  data->InitializeFrameData(call_descriptor);
   // Select and schedule instructions covering the scheduled graph.
   Linkage linkage(call_descriptor);
   Run<InstructionSelectionPhase>(&linkage);
@@ -1329,6 +1353,7 @@
   BeginPhaseKind("register allocation");
 
   bool run_verifier = FLAG_turbo_verify_allocation;
+
   // Allocate registers.
   AllocateRegisters(
       RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
@@ -1339,10 +1364,16 @@
   }
 
   BeginPhaseKind("code generation");
-
+  // TODO(mtrofin): move this off to the register allocator.
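+  // Determine whether a frame is constructed at the start of the code; with
+  // frame elision for stubs it may not be, and jump threading needs to know.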
+  bool generate_frame_at_start =
+      !FLAG_turbo_frame_elision || !data_->info()->IsStub() ||
+      !data_->frame()->needs_frame() ||
+      data_->sequence()->instruction_blocks().front()->needs_frame() ||
+      linkage.GetIncomingDescriptor()->CalleeSavedFPRegisters() != 0 ||
+      linkage.GetIncomingDescriptor()->CalleeSavedRegisters() != 0;
   // Optimize jumps.
   if (FLAG_turbo_jt) {
-    Run<JumpThreadingPhase>();
+    Run<JumpThreadingPhase>(generate_frame_at_start);
   }
 
   // Generate final machine code.
@@ -1446,7 +1477,8 @@
     Run<MergeSplintersPhase>();
   }
 
-  if (FLAG_turbo_frame_elision) {
+  // We plan to enable frame elision only for stubs and bytecode handlers.
+  if (FLAG_turbo_frame_elision && info()->IsStub()) {
     Run<LocateSpillSlotsPhase>();
     Run<FrameElisionPhase>();
   }
@@ -1482,6 +1514,8 @@
   data->DeleteRegisterAllocationZone();
 }
 
+Isolate* Pipeline::isolate() const { return info()->isolate(); }
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/pipeline.h b/src/compiler/pipeline.h
index af94018..edb8191 100644
--- a/src/compiler/pipeline.h
+++ b/src/compiler/pipeline.h
@@ -7,11 +7,12 @@
 
 // Clients of this interface shouldn't depend on lots of compiler internals.
 // Do not include anything from src/compiler here!
-#include "src/compiler.h"
+#include "src/objects.h"
 
 namespace v8 {
 namespace internal {
 
+class CompilationInfo;
 class RegisterConfiguration;
 
 namespace compiler {
@@ -35,7 +36,7 @@
   static Handle<Code> GenerateCodeForCodeStub(Isolate* isolate,
                                               CallDescriptor* call_descriptor,
                                               Graph* graph, Schedule* schedule,
-                                              Code::Kind kind,
+                                              Code::Flags flags,
                                               const char* debug_name);
 
   // Run the pipeline on a machine graph and generate code. If {schedule} is
@@ -57,23 +58,27 @@
                                              Schedule* schedule = nullptr);
 
  private:
-  CompilationInfo* info_;
-  PipelineData* data_;
-
   // Helpers for executing pipeline phases.
   template <typename Phase>
   void Run();
   template <typename Phase, typename Arg0>
   void Run(Arg0 arg_0);
-
-  CompilationInfo* info() const { return info_; }
-  Isolate* isolate() { return info_->isolate(); }
+  template <typename Phase, typename Arg0, typename Arg1>
+  void Run(Arg0 arg_0, Arg1 arg_1);
 
   void BeginPhaseKind(const char* phase_kind);
   void RunPrintAndVerify(const char* phase, bool untyped = false);
   Handle<Code> ScheduleAndGenerateCode(CallDescriptor* call_descriptor);
   void AllocateRegisters(const RegisterConfiguration* config,
                          CallDescriptor* descriptor, bool run_verifier);
+
+  CompilationInfo* info() const { return info_; }
+  Isolate* isolate() const;
+
+  CompilationInfo* const info_;
+  PipelineData* data_;
+
+  DISALLOW_COPY_AND_ASSIGN(Pipeline);
 };
 
 }  // namespace compiler
diff --git a/src/compiler/ppc/code-generator-ppc.cc b/src/compiler/ppc/code-generator-ppc.cc
index 154cd64..7fc6dd9 100644
--- a/src/compiler/ppc/code-generator-ppc.cc
+++ b/src/compiler/ppc/code-generator-ppc.cc
@@ -167,6 +167,19 @@
       : OutOfLineCode(gen),
         object_(object),
         offset_(offset),
+        offset_immediate_(0),
+        value_(value),
+        scratch0_(scratch0),
+        scratch1_(scratch1),
+        mode_(mode) {}
+
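+  // Variant of the constructor above that takes an immediate offset instead
+  // of an offset register.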
+  OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
+                       Register value, Register scratch0, Register scratch1,
+                       RecordWriteMode mode)
+      : OutOfLineCode(gen),
+        object_(object),
+        offset_(no_reg),
+        offset_immediate_(offset),
         value_(value),
         scratch0_(scratch0),
         scratch1_(scratch1),
@@ -176,24 +189,39 @@
     if (mode_ > RecordWriteMode::kValueIsPointer) {
       __ JumpIfSmi(value_, exit());
     }
-    if (mode_ > RecordWriteMode::kValueIsMap) {
-      __ CheckPageFlag(value_, scratch0_,
-                       MemoryChunk::kPointersToHereAreInterestingMask, eq,
-                       exit());
-    }
+    __ CheckPageFlag(value_, scratch0_,
+                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
+                     exit());
+    RememberedSetAction const remembered_set_action =
+        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+                                             : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
-    // TODO(turbofan): Once we get frame elision working, we need to save
-    // and restore lr properly here if the frame was elided.
+    if (!frame()->needs_frame()) {
+      // We need to save and restore lr if the frame was elided.
+      __ mflr(scratch1_);
+      __ Push(scratch1_);
+    }
     RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
-                         EMIT_REMEMBERED_SET, save_fp_mode);
-    __ add(scratch1_, object_, offset_);
+                         remembered_set_action, save_fp_mode);
+    if (offset_.is(no_reg)) {
+      __ addi(scratch1_, object_, Operand(offset_immediate_));
+    } else {
+      DCHECK_EQ(0, offset_immediate_);
+      __ add(scratch1_, object_, offset_);
+    }
     __ CallStub(&stub);
+    if (!frame()->needs_frame()) {
+      // We need to save and restore lr if the frame was elided.
+      __ Pop(scratch1_);
+      __ mtlr(scratch1_);
+    }
   }
 
  private:
   Register const object_;
   Register const offset_;
+  int32_t const offset_immediate_;  // Valid if offset_.is(no_reg).
   Register const value_;
   Register const scratch0_;
   Register const scratch1_;
@@ -651,13 +679,7 @@
     frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
   }
   if (frame()->needs_frame()) {
-    if (FLAG_enable_embedded_constant_pool) {
-      __ LoadP(kConstantPoolRegister,
-               MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
-    }
-    __ LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
-    __ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-    __ mtlr(r0);
+    __ RestoreFrameStateForTailCall();
   }
   frame_access_state()->SetFrameAccessToSP();
 }
@@ -740,13 +762,6 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
-    case kArchLazyBailout: {
-      v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
-          masm());
-      EnsureSpaceForLazyDeopt();
-      RecordCallPosition(instr);
-      break;
-    }
     case kArchPrepareCallCFunction: {
       int const num_parameters = MiscField::decode(instr->opcode());
       __ PrepareCallCFunction(num_parameters, kScratchReg);
@@ -807,6 +822,13 @@
       __ mr(i.OutputRegister(), fp);
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
+    case kArchParentFramePointer:
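+      // Load the caller's frame pointer; if the current frame was elided, fp
+      // already points at the caller's frame.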
+      if (frame_access_state()->frame()->needs_frame()) {
+        __ LoadP(i.OutputRegister(), MemOperand(fp, 0));
+      } else {
+        __ mr(i.OutputRegister(), fp);
+      }
+      break;
     case kArchTruncateDoubleToI:
       // TODO(mbrandy): move slow call to stub out of line.
       __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
@@ -816,19 +838,38 @@
       RecordWriteMode mode =
           static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
       Register object = i.InputRegister(0);
-      Register offset = i.InputRegister(1);
       Register value = i.InputRegister(2);
       Register scratch0 = i.TempRegister(0);
       Register scratch1 = i.TempRegister(1);
-      auto ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
-                                                   scratch0, scratch1, mode);
-      __ StorePX(value, MemOperand(object, offset));
+      OutOfLineRecordWrite* ool;
+
+      AddressingMode addressing_mode =
+          AddressingModeField::decode(instr->opcode());
+      if (addressing_mode == kMode_MRI) {
+        int32_t offset = i.InputInt32(1);
+        ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
+                                                scratch0, scratch1, mode);
+        __ StoreP(value, MemOperand(object, offset));
+      } else {
+        DCHECK_EQ(kMode_MRR, addressing_mode);
+        Register offset(i.InputRegister(1));
+        ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
+                                                scratch0, scratch1, mode);
+        __ StorePX(value, MemOperand(object, offset));
+      }
       __ CheckPageFlag(object, scratch0,
                        MemoryChunk::kPointersFromHereAreInterestingMask, ne,
                        ool->entry());
       __ bind(ool->exit());
       break;
     }
+    case kArchStackSlot: {
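+      // Materialize the address of the requested stack slot, relative to
+      // either the frame or the stack pointer.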
+      FrameOffset offset =
+          frame_access_state()->GetFrameOffset(i.InputInt32(0));
+      __ addi(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
+              Operand(offset.offset()));
+      break;
+    }
     case kPPC_And:
       if (HasRegisterInput(instr, 1)) {
         __ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
@@ -1194,10 +1235,19 @@
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
 #endif
+    case kPPC_Int32ToFloat32:
+      __ ConvertIntToFloat(i.InputRegister(0), i.OutputDoubleRegister());
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
     case kPPC_Int32ToDouble:
       __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
+    case kPPC_Uint32ToFloat32:
+      __ ConvertUnsignedIntToFloat(i.InputRegister(0),
+                                   i.OutputDoubleRegister());
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
     case kPPC_Uint32ToDouble:
       __ ConvertUnsignedIntToDouble(i.InputRegister(0),
                                     i.OutputDoubleRegister());
@@ -1581,8 +1631,6 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    // TODO(titzer): cannot address target function == local #-1
-    __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
     stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   }
 
diff --git a/src/compiler/ppc/instruction-codes-ppc.h b/src/compiler/ppc/instruction-codes-ppc.h
index a3bf80e..877ebb5 100644
--- a/src/compiler/ppc/instruction-codes-ppc.h
+++ b/src/compiler/ppc/instruction-codes-ppc.h
@@ -82,7 +82,9 @@
   V(PPC_Int64ToDouble)             \
   V(PPC_Uint64ToFloat32)           \
   V(PPC_Uint64ToDouble)            \
+  V(PPC_Int32ToFloat32)            \
   V(PPC_Int32ToDouble)             \
+  V(PPC_Uint32ToFloat32)           \
   V(PPC_Uint32ToDouble)            \
   V(PPC_Float32ToDouble)           \
   V(PPC_DoubleToInt32)             \
@@ -114,7 +116,6 @@
   V(PPC_StoreFloat32)              \
   V(PPC_StoreDouble)
 
-
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
 // are encoded into the InstructionCode of the instruction and tell the
diff --git a/src/compiler/ppc/instruction-scheduler-ppc.cc b/src/compiler/ppc/instruction-scheduler-ppc.cc
index fc90cdd..fd1df6a 100644
--- a/src/compiler/ppc/instruction-scheduler-ppc.cc
+++ b/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -81,7 +81,9 @@
     case kPPC_Int64ToDouble:
     case kPPC_Uint64ToFloat32:
     case kPPC_Uint64ToDouble:
+    case kPPC_Int32ToFloat32:
     case kPPC_Int32ToDouble:
+    case kPPC_Uint32ToFloat32:
     case kPPC_Uint32ToDouble:
     case kPPC_Float32ToDouble:
     case kPPC_DoubleToInt32:
diff --git a/src/compiler/ppc/instruction-selector-ppc.cc b/src/compiler/ppc/instruction-selector-ppc.cc
index f6ebbdf..244e6f4 100644
--- a/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/src/compiler/ppc/instruction-selector-ppc.cc
@@ -200,6 +200,7 @@
 #else
     case MachineRepresentation::kWord64:  // Fall through.
 #endif
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -227,13 +228,25 @@
   WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
   MachineRepresentation rep = store_rep.representation();
 
-  // TODO(ppc): I guess this could be done in a better way.
   if (write_barrier_kind != kNoWriteBarrier) {
     DCHECK_EQ(MachineRepresentation::kTagged, rep);
+    AddressingMode addressing_mode;
     InstructionOperand inputs[3];
     size_t input_count = 0;
     inputs[input_count++] = g.UseUniqueRegister(base);
-    inputs[input_count++] = g.UseUniqueRegister(offset);
+    // OutOfLineRecordWrite uses the offset in an 'add' instruction as well as
+    // for the store itself, so we must check compatibility with both.
+    if (g.CanBeImmediate(offset, kInt16Imm)
+#if V8_TARGET_ARCH_PPC64
+        && g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)
+#endif
+            ) {
+      inputs[input_count++] = g.UseImmediate(offset);
+      addressing_mode = kMode_MRI;
+    } else {
+      inputs[input_count++] = g.UseUniqueRegister(offset);
+      addressing_mode = kMode_MRR;
+    }
     inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
                                 ? g.UseRegister(value)
                                 : g.UseUniqueRegister(value);
@@ -255,6 +268,7 @@
     InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
     size_t const temp_count = arraysize(temps);
     InstructionCode code = kArchStoreWithWriteBarrier;
+    code |= AddressingModeField::encode(addressing_mode);
     code |= MiscField::encode(static_cast<int>(record_write_mode));
     Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
   } else {
@@ -289,6 +303,7 @@
 #else
       case MachineRepresentation::kWord64:  // Fall through.
 #endif
+      case MachineRepresentation::kSimd128:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -340,6 +355,7 @@
 #if !V8_TARGET_ARCH_PPC64
     case MachineRepresentation::kWord64:  // Fall through.
 #endif
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -385,6 +401,7 @@
 #if !V8_TARGET_ARCH_PPC64
     case MachineRepresentation::kWord64:  // Fall through.
 #endif
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -825,6 +842,14 @@
 #endif
 
 
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
+#endif
+
+
 void InstructionSelector::VisitInt32Add(Node* node) {
   VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add, kInt16Imm);
 }
@@ -940,6 +965,16 @@
 }
 
 
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+  VisitRR(this, kPPC_Int32ToFloat32, node);
+}
+
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+  VisitRR(this, kPPC_Uint32ToFloat32, node);
+}
+
+
 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
   VisitRR(this, kPPC_Int32ToDouble, node);
 }
@@ -1010,6 +1045,16 @@
 }
 
 
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+  VisitRR(this, kPPC_DoubleToInt32, node);
+}
+
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+  VisitRR(this, kPPC_DoubleToUint32, node);
+}
+
+
 #if V8_TARGET_ARCH_PPC64
 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   // TODO(mbrandy): inspect input to see if nop is appropriate.
diff --git a/src/compiler/raw-machine-assembler.cc b/src/compiler/raw-machine-assembler.cc
index 4df2bde..0d4b8cb 100644
--- a/src/compiler/raw-machine-assembler.cc
+++ b/src/compiler/raw-machine-assembler.cc
@@ -63,7 +63,7 @@
 void RawMachineAssembler::Branch(Node* condition, RawMachineLabel* true_val,
                                  RawMachineLabel* false_val) {
   DCHECK(current_block_ != schedule()->end());
-  Node* branch = AddNode(common()->Branch(), condition);
+  Node* branch = MakeNode(common()->Branch(), 1, &condition);
   schedule()->AddBranch(CurrentBlock(), branch, Use(true_val), Use(false_val));
   current_block_ = nullptr;
 }
@@ -152,6 +152,19 @@
   return AddNode(common()->Call(desc), input_count, buffer);
 }
 
+Node* RawMachineAssembler::CallRuntime0(Runtime::FunctionId function,
+                                        Node* context) {
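+  // Build a call through the CEntry stub: code target, external reference to
+  // the runtime function, argument count, and context.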
+  CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
+      zone(), function, 0, Operator::kNoProperties, CallDescriptor::kNoFlags);
+  int return_count = static_cast<int>(descriptor->ReturnCount());
+
+  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+  Node* ref = AddNode(
+      common()->ExternalConstant(ExternalReference(function, isolate())));
+  Node* arity = Int32Constant(0);
+
+  return AddNode(common()->Call(descriptor), centry, ref, arity, context);
+}
 
 Node* RawMachineAssembler::CallRuntime1(Runtime::FunctionId function,
                                         Node* arg1, Node* context) {
@@ -183,6 +196,21 @@
                  context);
 }
 
+Node* RawMachineAssembler::CallRuntime3(Runtime::FunctionId function,
+                                        Node* arg1, Node* arg2, Node* arg3,
+                                        Node* context) {
+  CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
+      zone(), function, 3, Operator::kNoProperties, CallDescriptor::kNoFlags);
+  int return_count = static_cast<int>(descriptor->ReturnCount());
+
+  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+  Node* ref = AddNode(
+      common()->ExternalConstant(ExternalReference(function, isolate())));
+  Node* arity = Int32Constant(3);
+
+  return AddNode(common()->Call(descriptor), centry, arg1, arg2, arg3, ref,
+                 arity, context);
+}
 
 Node* RawMachineAssembler::CallRuntime4(Runtime::FunctionId function,
                                         Node* arg1, Node* arg2, Node* arg3,
@@ -266,6 +294,51 @@
   return tail_call;
 }
 
+Node* RawMachineAssembler::TailCallRuntime3(Runtime::FunctionId function,
+                                            Node* arg1, Node* arg2, Node* arg3,
+                                            Node* context) {
+  const int kArity = 3;
+  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+      zone(), function, kArity, Operator::kNoProperties,
+      CallDescriptor::kSupportsTailCalls);
+  int return_count = static_cast<int>(desc->ReturnCount());
+
+  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+  Node* ref = AddNode(
+      common()->ExternalConstant(ExternalReference(function, isolate())));
+  Node* arity = Int32Constant(kArity);
+
+  Node* nodes[] = {centry, arg1, arg2, arg3, ref, arity, context};
+  Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
+
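+  // A tail call terminates the current block: wire it into the end node and
+  // add it to the schedule as a block terminator.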
+  NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
+  schedule()->AddTailCall(CurrentBlock(), tail_call);
+  current_block_ = nullptr;
+  return tail_call;
+}
+
+Node* RawMachineAssembler::TailCallRuntime4(Runtime::FunctionId function,
+                                            Node* arg1, Node* arg2, Node* arg3,
+                                            Node* arg4, Node* context) {
+  const int kArity = 4;
+  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+      zone(), function, kArity, Operator::kNoProperties,
+      CallDescriptor::kSupportsTailCalls);
+  int return_count = static_cast<int>(desc->ReturnCount());
+
+  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+  Node* ref = AddNode(
+      common()->ExternalConstant(ExternalReference(function, isolate())));
+  Node* arity = Int32Constant(kArity);
+
+  Node* nodes[] = {centry, arg1, arg2, arg3, arg4, ref, arity, context};
+  Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
+
+  NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
+  schedule()->AddTailCall(CurrentBlock(), tail_call);
+  current_block_ = nullptr;
+  return tail_call;
+}
 
 Node* RawMachineAssembler::CallCFunction0(MachineType return_type,
                                           Node* function) {
@@ -354,9 +427,24 @@
   return current_block_;
 }
 
+Node* RawMachineAssembler::Phi(MachineRepresentation rep, int input_count,
+                               Node* const* inputs) {
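+  // Allocate room for one extra input and append the graph start node as the
+  // placeholder control input of the phi.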
+  Node** buffer = new (zone()->New(sizeof(Node*) * (input_count + 1)))
+      Node*[input_count + 1];
+  std::copy(inputs, inputs + input_count, buffer);
+  buffer[input_count] = graph()->start();
+  return AddNode(common()->Phi(rep, input_count), input_count + 1, buffer);
+}
+
+void RawMachineAssembler::AppendPhiInput(Node* phi, Node* new_input) {
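+  // Grow the phi by one value input, keeping the control input last, and
+  // switch to an operator with the new input count.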
+  const Operator* op = phi->op();
+  const Operator* new_op = common()->ResizeMergeOrPhi(op, phi->InputCount());
+  phi->InsertInput(zone(), phi->InputCount() - 1, new_input);
+  NodeProperties::ChangeOp(phi, new_op);
+}
 
 Node* RawMachineAssembler::AddNode(const Operator* op, int input_count,
-                                   Node** inputs) {
+                                   Node* const* inputs) {
   DCHECK_NOT_NULL(schedule_);
   DCHECK_NOT_NULL(current_block_);
   Node* node = MakeNode(op, input_count, inputs);
@@ -364,9 +452,8 @@
   return node;
 }
 
-
 Node* RawMachineAssembler::MakeNode(const Operator* op, int input_count,
-                                    Node** inputs) {
+                                    Node* const* inputs) {
   // The raw machine assembler nodes do not have effect and control inputs,
   // so we disable checking input counts here.
   return graph()->NewNodeUnchecked(op, input_count, inputs);
diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h
index 5c232ed..a0cb7a0 100644
--- a/src/compiler/raw-machine-assembler.h
+++ b/src/compiler/raw-machine-assembler.h
@@ -79,6 +79,9 @@
   Node* Int32Constant(int32_t value) {
     return AddNode(common()->Int32Constant(value));
   }
+  Node* StackSlot(MachineRepresentation rep) {
+    return AddNode(machine()->StackSlot(rep));
+  }
   Node* Int64Constant(int64_t value) {
     return AddNode(common()->Int64Constant(value));
   }
@@ -147,7 +150,7 @@
     return AddNode(machine()->WordEqual(), a, b);
   }
   Node* WordNotEqual(Node* a, Node* b) {
-    return WordBinaryNot(WordEqual(a, b));
+    return Word32BinaryNot(WordEqual(a, b));
   }
   Node* WordNot(Node* a) {
     if (machine()->Is32()) {
@@ -156,13 +159,6 @@
       return Word64Not(a);
     }
   }
-  Node* WordBinaryNot(Node* a) {
-    if (machine()->Is32()) {
-      return Word32BinaryNot(a);
-    } else {
-      return Word64BinaryNot(a);
-    }
-  }
 
   Node* Word32And(Node* a, Node* b) {
     return AddNode(machine()->Word32And(), a, b);
@@ -221,10 +217,9 @@
     return AddNode(machine()->Word64Equal(), a, b);
   }
   Node* Word64NotEqual(Node* a, Node* b) {
-    return Word64BinaryNot(Word64Equal(a, b));
+    return Word32BinaryNot(Word64Equal(a, b));
   }
   Node* Word64Not(Node* a) { return Word64Xor(a, Int64Constant(-1)); }
-  Node* Word64BinaryNot(Node* a) { return Word64Equal(a, Int64Constant(0)); }
 
   Node* Int32Add(Node* a, Node* b) {
     return AddNode(machine()->Int32Add(), a, b);
@@ -275,6 +270,10 @@
   Node* Int32GreaterThanOrEqual(Node* a, Node* b) {
     return Int32LessThanOrEqual(b, a);
   }
+  Node* Uint32GreaterThan(Node* a, Node* b) { return Uint32LessThan(b, a); }
+  Node* Uint32GreaterThanOrEqual(Node* a, Node* b) {
+    return Uint32LessThanOrEqual(b, a);
+  }
   Node* Int32Neg(Node* a) { return Int32Sub(Int32Constant(0), a); }
 
   Node* Int64Add(Node* a, Node* b) {
@@ -315,6 +314,10 @@
   Node* Int64GreaterThanOrEqual(Node* a, Node* b) {
     return Int64LessThanOrEqual(b, a);
   }
+  Node* Uint64GreaterThan(Node* a, Node* b) { return Uint64LessThan(b, a); }
+  Node* Uint64GreaterThanOrEqual(Node* a, Node* b) {
+    return Uint64LessThanOrEqual(b, a);
+  }
   Node* Uint64Div(Node* a, Node* b) {
     return AddNode(machine()->Uint64Div(), a, b);
   }
@@ -339,6 +342,19 @@
 
 #undef INTPTR_BINOP
 
+#define UINTPTR_BINOP(prefix, name)                    \
+  Node* UintPtr##name(Node* a, Node* b) {              \
+    return kPointerSize == 8 ? prefix##64##name(a, b)  \
+                             : prefix##32##name(a, b); \
+  }
+
+  UINTPTR_BINOP(Uint, LessThan);
+  UINTPTR_BINOP(Uint, LessThanOrEqual);
+  UINTPTR_BINOP(Uint, GreaterThanOrEqual);
+  UINTPTR_BINOP(Uint, GreaterThan);
+
+#undef UINTPTR_BINOP
+
   Node* Float32Add(Node* a, Node* b) {
     return AddNode(machine()->Float32Add(), a, b);
   }
@@ -363,7 +379,7 @@
     return AddNode(machine()->Float32Equal(), a, b);
   }
   Node* Float32NotEqual(Node* a, Node* b) {
-    return WordBinaryNot(Float32Equal(a, b));
+    return Word32BinaryNot(Float32Equal(a, b));
   }
   Node* Float32LessThan(Node* a, Node* b) {
     return AddNode(machine()->Float32LessThan(), a, b);
@@ -403,7 +419,7 @@
     return AddNode(machine()->Float64Equal(), a, b);
   }
   Node* Float64NotEqual(Node* a, Node* b) {
-    return WordBinaryNot(Float64Equal(a, b));
+    return Word32BinaryNot(Float64Equal(a, b));
   }
   Node* Float64LessThan(Node* a, Node* b) {
     return AddNode(machine()->Float64LessThan(), a, b);
@@ -432,10 +448,11 @@
   Node* ChangeFloat64ToUint32(Node* a) {
     return AddNode(machine()->ChangeFloat64ToUint32(), a);
   }
-  Node* TruncateFloat32ToInt64(Node* a) {
-    // TODO(ahaas): Remove this function as soon as it is not used anymore in
-    // WebAssembly.
-    return AddNode(machine()->TryTruncateFloat32ToInt64(), a);
+  Node* TruncateFloat32ToInt32(Node* a) {
+    return AddNode(machine()->TruncateFloat32ToInt32(), a);
+  }
+  Node* TruncateFloat32ToUint32(Node* a) {
+    return AddNode(machine()->TruncateFloat32ToUint32(), a);
   }
   Node* TryTruncateFloat32ToInt64(Node* a) {
     return AddNode(machine()->TryTruncateFloat32ToInt64(), a);
@@ -448,11 +465,6 @@
   Node* TryTruncateFloat64ToInt64(Node* a) {
     return AddNode(machine()->TryTruncateFloat64ToInt64(), a);
   }
-  Node* TruncateFloat32ToUint64(Node* a) {
-    // TODO(ahaas): Remove this function as soon as it is not used anymore in
-    // WebAssembly.
-    return AddNode(machine()->TryTruncateFloat32ToUint64(), a);
-  }
   Node* TryTruncateFloat32ToUint64(Node* a) {
     return AddNode(machine()->TryTruncateFloat32ToUint64(), a);
   }
@@ -479,12 +491,18 @@
   Node* TruncateInt64ToInt32(Node* a) {
     return AddNode(machine()->TruncateInt64ToInt32(), a);
   }
+  Node* RoundInt32ToFloat32(Node* a) {
+    return AddNode(machine()->RoundInt32ToFloat32(), a);
+  }
   Node* RoundInt64ToFloat32(Node* a) {
     return AddNode(machine()->RoundInt64ToFloat32(), a);
   }
   Node* RoundInt64ToFloat64(Node* a) {
     return AddNode(machine()->RoundInt64ToFloat64(), a);
   }
+  Node* RoundUint32ToFloat32(Node* a) {
+    return AddNode(machine()->RoundUint32ToFloat32(), a);
+  }
   Node* RoundUint64ToFloat32(Node* a) {
     return AddNode(machine()->RoundUint64ToFloat32(), a);
   }
@@ -548,6 +566,9 @@
   // Stack operations.
   Node* LoadStackPointer() { return AddNode(machine()->LoadStackPointer()); }
   Node* LoadFramePointer() { return AddNode(machine()->LoadFramePointer()); }
+  Node* LoadParentFramePointer() {
+    return AddNode(machine()->LoadParentFramePointer());
+  }
 
   // Parameters.
   Node* Parameter(size_t index);
@@ -568,11 +589,16 @@
   // Call a given call descriptor and the given arguments and frame-state.
   Node* CallNWithFrameState(CallDescriptor* desc, Node* function, Node** args,
                             Node* frame_state);
+  // Call to a runtime function with zero arguments.
+  Node* CallRuntime0(Runtime::FunctionId function, Node* context);
   // Call to a runtime function with one argument.
   Node* CallRuntime1(Runtime::FunctionId function, Node* arg0, Node* context);
   // Call to a runtime function with two arguments.
   Node* CallRuntime2(Runtime::FunctionId function, Node* arg1, Node* arg2,
                      Node* context);
+  // Call to a runtime function with three arguments.
+  Node* CallRuntime3(Runtime::FunctionId function, Node* arg1, Node* arg2,
+                     Node* arg3, Node* context);
   // Call to a runtime function with four arguments.
   Node* CallRuntime4(Runtime::FunctionId function, Node* arg1, Node* arg2,
                      Node* arg3, Node* arg4, Node* context);
@@ -602,7 +628,12 @@
   // Tail call to a runtime function with two arguments.
   Node* TailCallRuntime2(Runtime::FunctionId function, Node* arg1, Node* arg2,
                          Node* context);
-
+  // Tail call to a runtime function with three arguments.
+  Node* TailCallRuntime3(Runtime::FunctionId function, Node* arg1, Node* arg2,
+                         Node* arg3, Node* context);
+  // Tail call to a runtime function with four arguments.
+  Node* TailCallRuntime4(Runtime::FunctionId function, Node* arg1, Node* arg2,
+                         Node* arg3, Node* arg4, Node* context);
 
   // ===========================================================================
   // The following utility methods deal with control flow, hence might switch
@@ -622,24 +653,26 @@
 
   // Variables.
   Node* Phi(MachineRepresentation rep, Node* n1, Node* n2) {
-    return AddNode(common()->Phi(rep, 2), n1, n2);
+    return AddNode(common()->Phi(rep, 2), n1, n2, graph()->start());
   }
   Node* Phi(MachineRepresentation rep, Node* n1, Node* n2, Node* n3) {
-    return AddNode(common()->Phi(rep, 3), n1, n2, n3);
+    return AddNode(common()->Phi(rep, 3), n1, n2, n3, graph()->start());
   }
   Node* Phi(MachineRepresentation rep, Node* n1, Node* n2, Node* n3, Node* n4) {
-    return AddNode(common()->Phi(rep, 4), n1, n2, n3, n4);
+    return AddNode(common()->Phi(rep, 4), n1, n2, n3, n4, graph()->start());
   }
+  Node* Phi(MachineRepresentation rep, int input_count, Node* const* inputs);
+  void AppendPhiInput(Node* phi, Node* new_input);
 
   // ===========================================================================
   // The following generic node creation methods can be used for operators that
   // are not covered by the above utility methods. There should rarely be a need
   // to do that outside of testing though.
 
-  Node* AddNode(const Operator* op, int input_count, Node** inputs);
+  Node* AddNode(const Operator* op, int input_count, Node* const* inputs);
 
   Node* AddNode(const Operator* op) {
-    return AddNode(op, 0, static_cast<Node**>(nullptr));
+    return AddNode(op, 0, static_cast<Node* const*>(nullptr));
   }
 
   template <class... TArgs>
@@ -649,7 +682,7 @@
   }
 
  private:
-  Node* MakeNode(const Operator* op, int input_count, Node** inputs);
+  Node* MakeNode(const Operator* op, int input_count, Node* const* inputs);
   BasicBlock* Use(RawMachineLabel* label);
   BasicBlock* EnsureBlock(RawMachineLabel* label);
   BasicBlock* CurrentBlock();
diff --git a/src/compiler/register-allocator-verifier.cc b/src/compiler/register-allocator-verifier.cc
index 463795e..0b12e14 100644
--- a/src/compiler/register-allocator-verifier.cc
+++ b/src/compiler/register-allocator-verifier.cc
@@ -578,7 +578,26 @@
             CHECK_EQ(succ_vreg, pred_val.second->define_vreg);
           }
           if (pred_val.second->succ_vreg != kInvalidVreg) {
-            CHECK_EQ(succ_vreg, pred_val.second->succ_vreg);
+            if (succ_vreg != pred_val.second->succ_vreg) {
+              // When a block introduces two identical phis A and B, both of
+              // which are operands of other phis C and D, and the moves
+              // defining A or B were optimized so that they now appear in the
+              // block defining A and B, the back propagation gets confused
+              // when visiting upwards from C and D: the operand in the block
+              // defining A and B is attributed to C (or D, depending on which
+              // of them is visited first).
+              CHECK(IsPhi(pred_val.second->succ_vreg));
+              CHECK(IsPhi(succ_vreg));
+              const PhiData* current_phi = GetPhi(succ_vreg);
+              const PhiData* assigned_phi = GetPhi(pred_val.second->succ_vreg);
+              CHECK_EQ(current_phi->operands.size(),
+                       assigned_phi->operands.size());
+              CHECK_EQ(current_phi->definition_rpo,
+                       assigned_phi->definition_rpo);
+              for (size_t i = 0; i < current_phi->operands.size(); ++i) {
+                CHECK_EQ(current_phi->operands[i], assigned_phi->operands[i]);
+              }
+            }
           } else {
             pred_val.second->succ_vreg = succ_vreg;
             block_ids.insert(pred_rpo.ToSize());
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 232ad9f..02ba1f1 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -104,6 +104,8 @@
     case MachineRepresentation::kWord64:
     case MachineRepresentation::kFloat64:
       return 8;
+    case MachineRepresentation::kSimd128:
+      return 16;
     case MachineRepresentation::kNone:
       break;
   }
@@ -113,6 +115,165 @@
 
 }  // namespace
 
+class LiveRangeBound {
+ public:
+  explicit LiveRangeBound(LiveRange* range, bool skip)
+      : range_(range), start_(range->Start()), end_(range->End()), skip_(skip) {
+    DCHECK(!range->IsEmpty());
+  }
+
+  bool CanCover(LifetimePosition position) {
+    return start_ <= position && position < end_;
+  }
+
+  LiveRange* const range_;
+  const LifetimePosition start_;
+  const LifetimePosition end_;
+  const bool skip_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
+};
+
+
+struct FindResult {
+  LiveRange* cur_cover_;
+  LiveRange* pred_cover_;
+};
+
+
+class LiveRangeBoundArray {
+ public:
+  LiveRangeBoundArray() : length_(0), start_(nullptr) {}
+
+  bool ShouldInitialize() { return start_ == nullptr; }
+
+  void Initialize(Zone* zone, TopLevelLiveRange* range) {
+    length_ = range->GetChildCount();
+
+    start_ = zone->NewArray<LiveRangeBound>(length_);
+    LiveRangeBound* curr = start_;
+    // Normally, spilled ranges do not need connecting moves, because the spill
+    // location has been assigned at definition. For ranges spilled in deferred
+    // blocks, that is not the case, so we need to connect the spilled children.
+    for (LiveRange *i = range; i != nullptr; i = i->next(), ++curr) {
+      new (curr) LiveRangeBound(i, i->spilled());
+    }
+  }
+
+  LiveRangeBound* Find(const LifetimePosition position) const {
+    size_t left_index = 0;
+    size_t right_index = length_;
+    while (true) {
+      size_t current_index = left_index + (right_index - left_index) / 2;
+      DCHECK(right_index > current_index);
+      LiveRangeBound* bound = &start_[current_index];
+      if (bound->start_ <= position) {
+        if (position < bound->end_) return bound;
+        DCHECK(left_index < current_index);
+        left_index = current_index;
+      } else {
+        right_index = current_index;
+      }
+    }
+  }
+
+  LiveRangeBound* FindPred(const InstructionBlock* pred) {
+    LifetimePosition pred_end =
+        LifetimePosition::InstructionFromInstructionIndex(
+            pred->last_instruction_index());
+    return Find(pred_end);
+  }
+
+  LiveRangeBound* FindSucc(const InstructionBlock* succ) {
+    LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex(
+        succ->first_instruction_index());
+    return Find(succ_start);
+  }
+
+  bool FindConnectableSubranges(const InstructionBlock* block,
+                                const InstructionBlock* pred,
+                                FindResult* result) const {
+    LifetimePosition pred_end =
+        LifetimePosition::InstructionFromInstructionIndex(
+            pred->last_instruction_index());
+    LiveRangeBound* bound = Find(pred_end);
+    result->pred_cover_ = bound->range_;
+    LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex(
+        block->first_instruction_index());
+
+    if (bound->CanCover(cur_start)) {
+      // Both blocks are covered by the same range, so there is nothing to
+      // connect.
+      return false;
+    }
+    bound = Find(cur_start);
+    if (bound->skip_) {
+      return false;
+    }
+    result->cur_cover_ = bound->range_;
+    DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
+    return (result->cur_cover_ != result->pred_cover_);
+  }
+
+ private:
+  size_t length_;
+  LiveRangeBound* start_;
+
+  DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
+};
+
+
+class LiveRangeFinder {
+ public:
+  explicit LiveRangeFinder(const RegisterAllocationData* data, Zone* zone)
+      : data_(data),
+        bounds_length_(static_cast<int>(data_->live_ranges().size())),
+        bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)),
+        zone_(zone) {
+    for (int i = 0; i < bounds_length_; ++i) {
+      new (&bounds_[i]) LiveRangeBoundArray();
+    }
+  }
+
+  LiveRangeBoundArray* ArrayFor(int operand_index) {
+    DCHECK(operand_index < bounds_length_);
+    TopLevelLiveRange* range = data_->live_ranges()[operand_index];
+    DCHECK(range != nullptr && !range->IsEmpty());
+    LiveRangeBoundArray* array = &bounds_[operand_index];
+    if (array->ShouldInitialize()) {
+      array->Initialize(zone_, range);
+    }
+    return array;
+  }
+
+ private:
+  const RegisterAllocationData* const data_;
+  const int bounds_length_;
+  LiveRangeBoundArray* const bounds_;
+  Zone* const zone_;
+
+  DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
+};
+
+
+typedef std::pair<ParallelMove*, InstructionOperand> DelayedInsertionMapKey;
+
+
+struct DelayedInsertionMapCompare {
+  bool operator()(const DelayedInsertionMapKey& a,
+                  const DelayedInsertionMapKey& b) const {
+    if (a.first == b.first) {
+      return a.second.Compare(b.second);
+    }
+    return a.first < b.first;
+  }
+};
+
+
+typedef ZoneMap<DelayedInsertionMapKey, InstructionOperand,
+                DelayedInsertionMapCompare> DelayedInsertionMap;
+
 
 UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
                          void* hint, UsePositionHintType hint_type)
@@ -734,51 +895,13 @@
       gap_index, operand, spill_move_insertion_locations_);
 }
 
-
-bool TopLevelLiveRange::TryCommitSpillInDeferredBlock(
-    InstructionSequence* code, const InstructionOperand& spill_operand) {
-  if (!IsSpilledOnlyInDeferredBlocks()) return false;
-
-  TRACE("Live Range %d will be spilled only in deferred blocks.\n", vreg());
-  // If we have ranges that aren't spilled but require the operand on the stack,
-  // make sure we insert the spill.
-  for (const LiveRange* child = this; child != nullptr; child = child->next()) {
-    if (!child->spilled() &&
-        child->NextSlotPosition(child->Start()) != nullptr) {
-      Instruction* instr =
-          code->InstructionAt(child->Start().ToInstructionIndex());
-      // Insert spill at the end to let live range connections happen at START.
-      ParallelMove* move =
-          instr->GetOrCreateParallelMove(Instruction::END, code->zone());
-      InstructionOperand assigned = child->GetAssignedOperand();
-      if (TopLevel()->has_slot_use()) {
-        bool found = false;
-        for (MoveOperands* move_op : *move) {
-          if (move_op->IsEliminated()) continue;
-          if (move_op->source().Equals(assigned) &&
-              move_op->destination().Equals(spill_operand)) {
-            found = true;
-            break;
-          }
-        }
-        if (found) continue;
-      }
-
-      move->AddMove(assigned, spill_operand);
-    }
-  }
-
-  return true;
-}
-
-
 void TopLevelLiveRange::CommitSpillMoves(InstructionSequence* sequence,
                                          const InstructionOperand& op,
                                          bool might_be_duplicated) {
-  DCHECK_IMPLIES(op.IsConstant(), spill_move_insertion_locations() == nullptr);
+  DCHECK_IMPLIES(op.IsConstant(), GetSpillMoveInsertionLocations() == nullptr);
   Zone* zone = sequence->zone();
 
-  for (SpillMoveInsertionList* to_spill = spill_move_insertion_locations();
+  for (SpillMoveInsertionList* to_spill = GetSpillMoveInsertionLocations();
        to_spill != nullptr; to_spill = to_spill->next) {
     Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
     ParallelMove* move =
@@ -2321,12 +2444,15 @@
 
   const InstructionBlock* block = end_block;
   // Find header of outermost loop.
-  // TODO(titzer): fix redundancy below.
-  while (GetContainingLoop(code(), block) != nullptr &&
-         GetContainingLoop(code(), block)->rpo_number().ToInt() >
-             start_block->rpo_number().ToInt()) {
-    block = GetContainingLoop(code(), block);
-  }
+  do {
+    const InstructionBlock* loop = GetContainingLoop(code(), block);
+    if (loop == nullptr ||
+        loop->rpo_number().ToInt() <= start_block->rpo_number().ToInt()) {
+      // No more loops, or the loop starts before the lifetime start.
+      break;
+    }
+    block = loop;
+  } while (true);
 
   // We did not find any suitable outer loop. Split at the latest possible
   // position unless end_block is a loop header itself.
@@ -2965,7 +3091,7 @@
       }
     } else {
       TopLevelLiveRange::SpillMoveInsertionList* spills =
-          range->spill_move_insertion_locations();
+          range->GetSpillMoveInsertionLocations();
       DCHECK_NOT_NULL(spills);
       for (; spills != nullptr; spills = spills->next) {
         code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
@@ -3032,12 +3158,10 @@
       // connecting move when a successor child range is spilled - because the
       // spilled range picks up its value from the slot which was assigned at
       // definition. For ranges that are determined to spill only in deferred
-      // blocks, we let ConnectLiveRanges and ResolveControlFlow insert such
-      // moves between ranges. Because of how the ranges are split around
-      // deferred blocks, this amounts to spilling and filling inside such
-      // blocks.
-      if (!top_range->TryCommitSpillInDeferredBlock(data()->code(),
-                                                    spill_operand)) {
+      // blocks, we let ConnectLiveRanges and ResolveControlFlow find the
+      // blocks where a spill operand is expected, and then finalize by
+      // inserting the spills in the deferred blocks that dominate those
+      // locations.
+      if (!top_range->IsSpilledOnlyInDeferredBlocks()) {
         // Spill at definition if the range isn't spilled only in deferred
         // blocks.
         top_range->CommitSpillMoves(
@@ -3188,171 +3312,6 @@
 }
 
 
-namespace {
-
-class LiveRangeBound {
- public:
-  explicit LiveRangeBound(const LiveRange* range, bool skip)
-      : range_(range), start_(range->Start()), end_(range->End()), skip_(skip) {
-    DCHECK(!range->IsEmpty());
-  }
-
-  bool CanCover(LifetimePosition position) {
-    return start_ <= position && position < end_;
-  }
-
-  const LiveRange* const range_;
-  const LifetimePosition start_;
-  const LifetimePosition end_;
-  const bool skip_;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
-};
-
-
-struct FindResult {
-  const LiveRange* cur_cover_;
-  const LiveRange* pred_cover_;
-};
-
-
-class LiveRangeBoundArray {
- public:
-  LiveRangeBoundArray() : length_(0), start_(nullptr) {}
-
-  bool ShouldInitialize() { return start_ == nullptr; }
-
-  void Initialize(Zone* zone, const TopLevelLiveRange* const range) {
-    length_ = range->GetChildCount();
-
-    start_ = zone->NewArray<LiveRangeBound>(length_);
-    LiveRangeBound* curr = start_;
-    // Normally, spilled ranges do not need connecting moves, because the spill
-    // location has been assigned at definition. For ranges spilled in deferred
-    // blocks, that is not the case, so we need to connect the spilled children.
-    bool spilled_in_blocks = range->IsSpilledOnlyInDeferredBlocks();
-    for (const LiveRange *i = range; i != nullptr; i = i->next(), ++curr) {
-      new (curr) LiveRangeBound(i, !spilled_in_blocks && i->spilled());
-    }
-  }
-
-  LiveRangeBound* Find(const LifetimePosition position) const {
-    size_t left_index = 0;
-    size_t right_index = length_;
-    while (true) {
-      size_t current_index = left_index + (right_index - left_index) / 2;
-      DCHECK(right_index > current_index);
-      LiveRangeBound* bound = &start_[current_index];
-      if (bound->start_ <= position) {
-        if (position < bound->end_) return bound;
-        DCHECK(left_index < current_index);
-        left_index = current_index;
-      } else {
-        right_index = current_index;
-      }
-    }
-  }
-
-  LiveRangeBound* FindPred(const InstructionBlock* pred) {
-    LifetimePosition pred_end =
-        LifetimePosition::InstructionFromInstructionIndex(
-            pred->last_instruction_index());
-    return Find(pred_end);
-  }
-
-  LiveRangeBound* FindSucc(const InstructionBlock* succ) {
-    LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex(
-        succ->first_instruction_index());
-    return Find(succ_start);
-  }
-
-  bool FindConnectableSubranges(const InstructionBlock* block,
-                                const InstructionBlock* pred,
-                                FindResult* result) const {
-    LifetimePosition pred_end =
-        LifetimePosition::InstructionFromInstructionIndex(
-            pred->last_instruction_index());
-    LiveRangeBound* bound = Find(pred_end);
-    result->pred_cover_ = bound->range_;
-    LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex(
-        block->first_instruction_index());
-
-    if (bound->CanCover(cur_start)) {
-      // Both blocks are covered by the same range, so there is nothing to
-      // connect.
-      return false;
-    }
-    bound = Find(cur_start);
-    if (bound->skip_) {
-      return false;
-    }
-    result->cur_cover_ = bound->range_;
-    DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
-    return (result->cur_cover_ != result->pred_cover_);
-  }
-
- private:
-  size_t length_;
-  LiveRangeBound* start_;
-
-  DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
-};
-
-
-class LiveRangeFinder {
- public:
-  explicit LiveRangeFinder(const RegisterAllocationData* data, Zone* zone)
-      : data_(data),
-        bounds_length_(static_cast<int>(data_->live_ranges().size())),
-        bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)),
-        zone_(zone) {
-    for (int i = 0; i < bounds_length_; ++i) {
-      new (&bounds_[i]) LiveRangeBoundArray();
-    }
-  }
-
-  LiveRangeBoundArray* ArrayFor(int operand_index) {
-    DCHECK(operand_index < bounds_length_);
-    TopLevelLiveRange* range = data_->live_ranges()[operand_index];
-    DCHECK(range != nullptr && !range->IsEmpty());
-    LiveRangeBoundArray* array = &bounds_[operand_index];
-    if (array->ShouldInitialize()) {
-      array->Initialize(zone_, range);
-    }
-    return array;
-  }
-
- private:
-  const RegisterAllocationData* const data_;
-  const int bounds_length_;
-  LiveRangeBoundArray* const bounds_;
-  Zone* const zone_;
-
-  DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
-};
-
-
-typedef std::pair<ParallelMove*, InstructionOperand> DelayedInsertionMapKey;
-
-
-struct DelayedInsertionMapCompare {
-  bool operator()(const DelayedInsertionMapKey& a,
-                  const DelayedInsertionMapKey& b) const {
-    if (a.first == b.first) {
-      return a.second.Compare(b.second);
-    }
-    return a.first < b.first;
-  }
-};
-
-
-typedef ZoneMap<DelayedInsertionMapKey, InstructionOperand,
-                DelayedInsertionMapCompare> DelayedInsertionMap;
-
-}  // namespace
-
-
 LiveRangeConnector::LiveRangeConnector(RegisterAllocationData* data)
     : data_(data) {}
 
@@ -3383,6 +3342,41 @@
         InstructionOperand pred_op = result.pred_cover_->GetAssignedOperand();
         InstructionOperand cur_op = result.cur_cover_->GetAssignedOperand();
         if (pred_op.Equals(cur_op)) continue;
+        if (!pred_op.IsAnyRegister() && cur_op.IsAnyRegister()) {
+          // We're doing a reload.
+          // We don't need to, if:
+          // 1) there's no register use in this block, and
+          // 2) the range ends before the block does, and
+          // 3) we don't have a successor, or the successor is spilled.
+          LifetimePosition block_start =
+              LifetimePosition::GapFromInstructionIndex(block->code_start());
+          LifetimePosition block_end =
+              LifetimePosition::GapFromInstructionIndex(block->code_end());
+          const LiveRange* current = result.cur_cover_;
+          const LiveRange* successor = current->next();
+          if (current->End() < block_end &&
+              (successor == nullptr || successor->spilled())) {
+            // Verify point 1: no register use. We can go to the end of the
+            // range, since it's all within the block.
+
+            bool uses_reg = false;
+            for (const UsePosition* use = current->NextUsePosition(block_start);
+                 use != nullptr; use = use->next()) {
+              if (use->operand()->IsAnyRegister()) {
+                uses_reg = true;
+                break;
+              }
+            }
+            if (!uses_reg) continue;
+          }
+          if (current->TopLevel()->IsSpilledOnlyInDeferredBlocks() &&
+              pred_block->IsDeferred()) {
+            // The spill location should be defined in pred_block, so add
+            // pred_block to the list of blocks requiring a spill operand.
+            current->TopLevel()->GetListOfBlocksRequiringSpillOperands()->Add(
+                pred_block->rpo_number().ToInt());
+          }
+        }
         int move_loc = ResolveControlFlow(block, cur_op, pred_block, pred_op);
         USE(move_loc);
         DCHECK_IMPLIES(
@@ -3393,6 +3387,16 @@
       iterator.Advance();
     }
   }
+
+  // At this stage, we collected blocks needing a spill operand from
+  // ConnectRanges and from ResolveControlFlow. Time to commit the spills for
+  // deferred blocks.
+  for (TopLevelLiveRange* top : data()->live_ranges()) {
+    if (top == nullptr || top->IsEmpty() ||
+        !top->IsSpilledOnlyInDeferredBlocks())
+      continue;
+    CommitSpillsInDeferredBlocks(top, finder.ArrayFor(top->vreg()), local_zone);
+  }
 }
 
 
@@ -3430,7 +3434,7 @@
       LifetimePosition pos = second_range->Start();
       // Add gap move if the two live ranges touch and there is no block
       // boundary.
-      if (!connect_spilled && second_range->spilled()) continue;
+      if (second_range->spilled()) continue;
       if (first_range->End() != pos) continue;
       if (data()->IsBlockBoundary(pos) &&
           !CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) {
@@ -3442,6 +3446,16 @@
       bool delay_insertion = false;
       Instruction::GapPosition gap_pos;
       int gap_index = pos.ToInstructionIndex();
+      if (connect_spilled && !prev_operand.IsAnyRegister() &&
+          cur_operand.IsAnyRegister()) {
+        const InstructionBlock* block = code()->GetInstructionBlock(gap_index);
+        DCHECK(block->IsDeferred());
+        // Performing a reload in this block, meaning the spill operand must
+        // be defined here.
+        top_range->GetListOfBlocksRequiringSpillOperands()->Add(
+            block->rpo_number().ToInt());
+      }
+
       if (pos.IsGapPosition()) {
         gap_pos = pos.IsStart() ? Instruction::START : Instruction::END;
       } else {
@@ -3452,7 +3466,7 @@
         }
         gap_pos = delay_insertion ? Instruction::END : Instruction::START;
       }
-      // Fills or spills for spilled in deferred blocks ranges must happen
+      // Reloads or spills for ranges spilled in deferred blocks must happen
       // only in deferred blocks.
       DCHECK_IMPLIES(
           connect_spilled &&
@@ -3503,6 +3517,73 @@
 }
 
 
+void LiveRangeConnector::CommitSpillsInDeferredBlocks(
+    TopLevelLiveRange* range, LiveRangeBoundArray* array, Zone* temp_zone) {
+  DCHECK(range->IsSpilledOnlyInDeferredBlocks());
+  DCHECK(!range->spilled());
+
+  InstructionSequence* code = data()->code();
+  InstructionOperand spill_operand = range->GetSpillRangeOperand();
+
+  TRACE("Live Range %d will be spilled only in deferred blocks.\n",
+        range->vreg());
+  // If we have ranges that aren't spilled but require the operand on the stack,
+  // make sure we insert the spill.
+  for (const LiveRange* child = range; child != nullptr;
+       child = child->next()) {
+    for (const UsePosition* pos = child->first_pos(); pos != nullptr;
+         pos = pos->next()) {
+      if (pos->type() != UsePositionType::kRequiresSlot && !child->spilled())
+        continue;
+      range->AddBlockRequiringSpillOperand(
+          code->GetInstructionBlock(pos->pos().ToInstructionIndex())
+              ->rpo_number());
+    }
+  }
+
+  ZoneQueue<int> worklist(temp_zone);
+
+  for (BitVector::Iterator iterator(
+           range->GetListOfBlocksRequiringSpillOperands());
+       !iterator.Done(); iterator.Advance()) {
+    worklist.push(iterator.Current());
+  }
+
+  // Seek the deferred blocks that dominate locations requiring spill operands,
+  // and spill there. We only need to spill at the start of such blocks.
+  BitVector done_blocks(
+      range->GetListOfBlocksRequiringSpillOperands()->length(), temp_zone);
+  while (!worklist.empty()) {
+    int block_id = worklist.front();
+    worklist.pop();
+    if (done_blocks.Contains(block_id)) continue;
+    done_blocks.Add(block_id);
+    const InstructionBlock* spill_block =
+        code->InstructionBlockAt(RpoNumber::FromInt(block_id));
+
+    for (const RpoNumber& pred : spill_block->predecessors()) {
+      const InstructionBlock* pred_block = code->InstructionBlockAt(pred);
+
+      if (pred_block->IsDeferred()) {
+        worklist.push(pred_block->rpo_number().ToInt());
+      } else {
+        LifetimePosition pred_end =
+            LifetimePosition::InstructionFromInstructionIndex(
+                pred_block->last_instruction_index());
+
+        LiveRangeBound* bound = array->Find(pred_end);
+
+        InstructionOperand pred_op = bound->range_->GetAssignedOperand();
+
+        data()->AddGapMove(spill_block->first_instruction_index(),
+                           Instruction::GapPosition::START, pred_op,
+                           spill_operand);
+      }
+    }
+  }
+}
+
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
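
The CommitSpillsInDeferredBlocks pass above walks predecessor edges of every block that needs the spill operand: deferred predecessors go back onto the worklist, while reaching a non-deferred predecessor means the value has to be spilled at the start of the deferred block currently being visited. A minimal, self-contained sketch of that traversal, using plain structs and ints instead of V8's InstructionBlock, RpoNumber and BitVector (all names here are hypothetical), could look like this:

#include <cstdio>
#include <queue>
#include <set>
#include <vector>

struct Block {
  int id;
  bool deferred;
  std::vector<int> predecessors;  // ids of predecessor blocks
};

// For every block initially requiring the spill operand, walk up through
// deferred predecessors and collect the deferred blocks whose start must
// receive a spill move (those with at least one non-deferred predecessor).
std::set<int> FindSpillInsertionBlocks(const std::vector<Block>& blocks,
                                       const std::vector<int>& requiring) {
  std::queue<int> worklist;
  std::set<int> done;
  std::set<int> insertion_points;
  for (int id : requiring) worklist.push(id);
  while (!worklist.empty()) {
    int id = worklist.front();
    worklist.pop();
    if (!done.insert(id).second) continue;  // already processed
    for (int pred : blocks[id].predecessors) {
      if (blocks[pred].deferred) {
        worklist.push(pred);          // keep searching inside the deferred region
      } else {
        insertion_points.insert(id);  // spill at the start of this block
      }
    }
  }
  return insertion_points;
}

int main() {
  // 0 (hot) -> 1 (deferred) -> 2 (deferred, needs the value on the stack)
  std::vector<Block> blocks = {{0, false, {}}, {1, true, {0}}, {2, true, {1}}};
  for (int b : FindSpillInsertionBlocks(blocks, {2}))
    std::printf("spill at start of block %d\n", b);  // prints block 1
}

The real pass additionally looks up the predecessor's assigned operand through LiveRangeBoundArray and emits the gap move; the sketch only identifies where those moves would go.
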
diff --git a/src/compiler/register-allocator.h b/src/compiler/register-allocator.h
index b96a43c..38fad05 100644
--- a/src/compiler/register-allocator.h
+++ b/src/compiler/register-allocator.h
@@ -579,14 +579,17 @@
   // and instead let the LiveRangeConnector perform the spills within the
   // deferred blocks. If so, we insert here spills for non-spilled ranges
   // with slot use positions.
-  void MarkSpilledInDeferredBlock() {
+  void TreatAsSpilledInDeferredBlock(Zone* zone, int total_block_count) {
     spill_start_index_ = -1;
     spilled_in_deferred_blocks_ = true;
     spill_move_insertion_locations_ = nullptr;
+    list_of_blocks_requiring_spill_operands_ =
+        new (zone) BitVector(total_block_count, zone);
   }
 
-  bool TryCommitSpillInDeferredBlock(InstructionSequence* code,
-                                     const InstructionOperand& spill_operand);
+  void CommitSpillInDeferredBlocks(RegisterAllocationData* data,
+                                   const InstructionOperand& spill_operand,
+                                   BitVector* necessary_spill_points);
 
   TopLevelLiveRange* splintered_from() const { return splintered_from_; }
   bool IsSplinter() const { return splintered_from_ != nullptr; }
@@ -617,7 +620,8 @@
 
   struct SpillMoveInsertionList;
 
-  SpillMoveInsertionList* spill_move_insertion_locations() const {
+  SpillMoveInsertionList* GetSpillMoveInsertionLocations() const {
+    DCHECK(!IsSpilledOnlyInDeferredBlocks());
     return spill_move_insertion_locations_;
   }
   TopLevelLiveRange* splinter() const { return splinter_; }
@@ -634,6 +638,16 @@
   void MarkHasPreassignedSlot() { has_preassigned_slot_ = true; }
   bool has_preassigned_slot() const { return has_preassigned_slot_; }
 
+  void AddBlockRequiringSpillOperand(RpoNumber block_id) {
+    DCHECK(IsSpilledOnlyInDeferredBlocks());
+    GetListOfBlocksRequiringSpillOperands()->Add(block_id.ToInt());
+  }
+
+  BitVector* GetListOfBlocksRequiringSpillOperands() const {
+    DCHECK(IsSpilledOnlyInDeferredBlocks());
+    return list_of_blocks_requiring_spill_operands_;
+  }
+
  private:
   void SetSplinteredFrom(TopLevelLiveRange* splinter_parent);
 
@@ -650,7 +664,12 @@
     InstructionOperand* spill_operand_;
     SpillRange* spill_range_;
   };
-  SpillMoveInsertionList* spill_move_insertion_locations_;
+
+  union {
+    SpillMoveInsertionList* spill_move_insertion_locations_;
+    BitVector* list_of_blocks_requiring_spill_operands_;
+  };
+
   // TODO(mtrofin): generalize spilling after definition, currently specialized
   // just for spill in a single deferred block.
   bool spilled_in_deferred_blocks_;
@@ -1125,6 +1144,7 @@
 };
 
 
+class LiveRangeBoundArray;
 // Insert moves of the form
 //
 //          Operand(child_(k+1)) = Operand(child_k)
@@ -1157,6 +1177,10 @@
                          const InstructionBlock* pred,
                          const InstructionOperand& pred_op);
 
+  void CommitSpillsInDeferredBlocks(TopLevelLiveRange* range,
+                                    LiveRangeBoundArray* array,
+                                    Zone* temp_zone);
+
   RegisterAllocationData* const data_;
 
   DISALLOW_COPY_AND_ASSIGN(LiveRangeConnector);
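
The anonymous union introduced above lets one pointer-sized field play two mutually exclusive roles, discriminated by spilled_in_deferred_blocks_ and guarded by DCHECKs in the accessors. A stripped-down sketch of that pattern, with assert in place of DCHECK, invented names, and std::vector<bool> standing in for the zone-allocated BitVector:

#include <cassert>
#include <vector>

struct SpillMoveList {
  int gap_index;
  SpillMoveList* next;
};
using BitSet = std::vector<bool>;  // stand-in for a zone-allocated BitVector

class RangeSpillState {
 public:
  // Default mode: record the gap indices where spill moves will be inserted.
  void RecordSpillMove(SpillMoveList* node) {
    assert(!spilled_in_deferred_blocks_);
    node->next = spill_moves_;
    spill_moves_ = node;
  }
  SpillMoveList* spill_moves() const {
    assert(!spilled_in_deferred_blocks_);
    return spill_moves_;
  }

  // Deferred mode: the same storage now tracks blocks needing a spill operand.
  void TreatAsSpilledInDeferredBlocks(int block_count) {
    spilled_in_deferred_blocks_ = true;
    blocks_requiring_spill_ = new BitSet(block_count, false);
  }
  BitSet* blocks_requiring_spill() const {
    assert(spilled_in_deferred_blocks_);
    return blocks_requiring_spill_;
  }

 private:
  union {
    SpillMoveList* spill_moves_ = nullptr;
    BitSet* blocks_requiring_spill_;
  };
  bool spilled_in_deferred_blocks_ = false;
};

int main() {
  RangeSpillState state;
  state.TreatAsSpilledInDeferredBlocks(8);
  (*state.blocks_requiring_spill())[3] = true;  // block 3 needs the operand
}

Reusing the storage is safe because the two modes never coexist for a single live range; the flag makes the active interpretation explicit and the asserts catch accidental cross-mode access.
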
diff --git a/src/compiler/representation-change.cc b/src/compiler/representation-change.cc
index 5dab60f..2f7720b 100644
--- a/src/compiler/representation-change.cc
+++ b/src/compiler/representation-change.cc
@@ -97,7 +97,6 @@
 
 namespace {
 
-// TODO(titzer): should Word64 also be implicitly convertable to others?
 bool IsWord(MachineRepresentation rep) {
   return rep == MachineRepresentation::kWord8 ||
          rep == MachineRepresentation::kWord16 ||
@@ -146,6 +145,9 @@
       return GetWord32RepresentationFor(node, output_rep, output_type);
     case MachineRepresentation::kWord64:
       return GetWord64RepresentationFor(node, output_rep, output_type);
+    case MachineRepresentation::kSimd128:  // Fall through.
+      // TODO(bbudge) Handle conversions between tagged and untagged.
+      break;
     case MachineRepresentation::kNone:
       return node;
   }
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
index 653fea8..ed7fe9d 100644
--- a/src/compiler/simplified-lowering.cc
+++ b/src/compiler/simplified-lowering.cc
@@ -142,6 +142,7 @@
     return UseInfo::TruncatingWord32();
     case MachineRepresentation::kBit:
       return UseInfo::Bool();
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       break;
   }
@@ -199,6 +200,9 @@
     case MachineRepresentation::kFloat64:
       return r2 == MachineRepresentation::kFloat64 ||
              r2 == MachineRepresentation::kTagged;
+    case MachineRepresentation::kSimd128:
+      return r2 == MachineRepresentation::kSimd128 ||
+             r2 == MachineRepresentation::kTagged;
     case MachineRepresentation::kTagged:
       return r2 == MachineRepresentation::kTagged;
   }
@@ -1189,10 +1193,18 @@
                   NodeOutputInfo(access.machine_type().representation(),
                                  NodeProperties::GetType(node));
             } else {
+              if (access.machine_type().representation() !=
+                  MachineRepresentation::kFloat64) {
+                // TODO(bmeurer): See comment on abort_compilation_.
+                if (lower()) lowering->abort_compilation_ = true;
+              }
               output_info = NodeOutputInfo::Float64();
             }
           }
         } else {
+          // TODO(bmeurer): See comment on abort_compilation_.
+          if (lower()) lowering->abort_compilation_ = true;
+
           // If undefined is not truncated away, we need to have the tagged
           // representation.
           output_info = NodeOutputInfo::AnyTagged();
@@ -1237,13 +1249,16 @@
       case IrOpcode::kObjectIsNumber: {
         ProcessInput(node, 0, UseInfo::AnyTagged());
         SetOutput(node, NodeOutputInfo::Bool());
-        if (lower()) lowering->DoObjectIsNumber(node);
+        break;
+      }
+      case IrOpcode::kObjectIsReceiver: {
+        ProcessInput(node, 0, UseInfo::AnyTagged());
+        SetOutput(node, NodeOutputInfo::Bool());
         break;
       }
       case IrOpcode::kObjectIsSmi: {
         ProcessInput(node, 0, UseInfo::AnyTagged());
         SetOutput(node, NodeOutputInfo::Bool());
-        if (lower()) lowering->DoObjectIsSmi(node);
         break;
       }
 
@@ -1388,6 +1403,7 @@
       case IrOpcode::kFloat64RoundDown:
       case IrOpcode::kFloat64RoundTruncate:
       case IrOpcode::kFloat64RoundTiesAway:
+      case IrOpcode::kFloat64RoundUp:
         return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Float64());
       case IrOpcode::kFloat64Equal:
       case IrOpcode::kFloat64LessThan:
@@ -1402,6 +1418,7 @@
                           NodeOutputInfo::Float64());
       case IrOpcode::kLoadStackPointer:
       case IrOpcode::kLoadFramePointer:
+      case IrOpcode::kLoadParentFramePointer:
         return VisitLeaf(node, NodeOutputInfo::Pointer());
       case IrOpcode::kStateValues:
         VisitStateValues(node);
@@ -1579,42 +1596,6 @@
 }
 
 
-void SimplifiedLowering::DoObjectIsNumber(Node* node) {
-  Node* input = NodeProperties::GetValueInput(node, 0);
-  // TODO(bmeurer): Optimize somewhat based on input type.
-  Node* check =
-      graph()->NewNode(machine()->WordEqual(),
-                       graph()->NewNode(machine()->WordAnd(), input,
-                                        jsgraph()->IntPtrConstant(kSmiTagMask)),
-                       jsgraph()->IntPtrConstant(kSmiTag));
-  Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* vtrue = jsgraph()->Int32Constant(1);
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* vfalse = graph()->NewNode(
-      machine()->WordEqual(),
-      graph()->NewNode(
-          machine()->Load(MachineType::AnyTagged()), input,
-          jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
-          graph()->start(), if_false),
-      jsgraph()->HeapConstant(isolate()->factory()->heap_number_map()));
-  Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  node->ReplaceInput(0, vtrue);
-  node->AppendInput(graph()->zone(), vfalse);
-  node->AppendInput(graph()->zone(), control);
-  NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
-}
-
-
-void SimplifiedLowering::DoObjectIsSmi(Node* node) {
-  node->ReplaceInput(0,
-                     graph()->NewNode(machine()->WordAnd(), node->InputAt(0),
-                                      jsgraph()->IntPtrConstant(kSmiTagMask)));
-  node->AppendInput(graph()->zone(), jsgraph()->IntPtrConstant(kSmiTag));
-  NodeProperties::ChangeOp(node, machine()->WordEqual());
-}
-
-
 Node* SimplifiedLowering::StringComparison(Node* node) {
   Operator::Properties properties = node->op()->properties();
   Callable callable = CodeFactory::StringCompare(isolate());
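
The deleted DoObjectIsSmi lowering tested V8's pointer-tagging invariant directly: a tagged word is a Smi when its low bit is clear (kSmiTag is 0, kSmiTagMask is 1), and a heap object pointer otherwise. A tiny standalone illustration of that bit test, using the 31-bit Smi encoding of 32-bit builds (64-bit builds shift the payload by 32 instead); this is illustrative code, not V8's:

#include <cstdint>
#include <cstdio>

constexpr uintptr_t kSmiTag = 0;      // Smis have a clear low bit ...
constexpr uintptr_t kSmiTagMask = 1;  // ... heap object pointers have it set.

bool IsSmi(uintptr_t tagged) { return (tagged & kSmiTagMask) == kSmiTag; }

int main() {
  uintptr_t smi_42 = static_cast<uintptr_t>(42) << 1;  // payload shifted past the tag bit
  uintptr_t heap_ptr = 0x1000 | 1;                     // aligned address with the tag bit set
  std::printf("%d %d\n", IsSmi(smi_42), IsSmi(heap_ptr));  // prints "1 0"
}

The diff removes this expansion from simplified lowering; the sketch only documents the tag arithmetic the old code relied on.
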
diff --git a/src/compiler/simplified-lowering.h b/src/compiler/simplified-lowering.h
index f9410f8..358bd97 100644
--- a/src/compiler/simplified-lowering.h
+++ b/src/compiler/simplified-lowering.h
@@ -36,13 +36,16 @@
   void DoLoadBuffer(Node* node, MachineRepresentation rep,
                     RepresentationChanger* changer);
   void DoStoreBuffer(Node* node);
-  void DoObjectIsNumber(Node* node);
-  void DoObjectIsSmi(Node* node);
   void DoShift(Node* node, Operator const* op, Type* rhs_type);
   void DoStringEqual(Node* node);
   void DoStringLessThan(Node* node);
   void DoStringLessThanOrEqual(Node* node);
 
+  // TODO(bmeurer): This is a gigantic hack to support the gigantic LoadBuffer
+  // typing hack to support the gigantic "asm.js should be fast without proper
+  // verifier"-hack, ... Kill this! Soon! Really soon! I'm serious!
+  bool abort_compilation_ = false;
+
  private:
   JSGraph* const jsgraph_;
   Zone* const zone_;
diff --git a/src/compiler/simplified-operator.cc b/src/compiler/simplified-operator.cc
index 1eaa287..c7abe9c 100644
--- a/src/compiler/simplified-operator.cc
+++ b/src/compiler/simplified-operator.cc
@@ -7,7 +7,7 @@
 #include "src/base/lazy-instance.h"
 #include "src/compiler/opcodes.h"
 #include "src/compiler/operator.h"
-#include "src/types-inl.h"
+#include "src/types.h"
 
 namespace v8 {
 namespace internal {
@@ -187,6 +187,7 @@
   V(ChangeBoolToBit, Operator::kNoProperties, 1)         \
   V(ChangeBitToBool, Operator::kNoProperties, 1)         \
   V(ObjectIsNumber, Operator::kNoProperties, 1)          \
+  V(ObjectIsReceiver, Operator::kNoProperties, 1)        \
   V(ObjectIsSmi, Operator::kNoProperties, 1)
 
 #define NO_THROW_OP_LIST(V)                 \
@@ -253,7 +254,6 @@
 
 
 const Operator* SimplifiedOperatorBuilder::ReferenceEqual(Type* type) {
-  // TODO(titzer): What about the type parameter?
   return new (zone()) Operator(IrOpcode::kReferenceEqual,
                                Operator::kCommutative | Operator::kPure,
                                "ReferenceEqual", 2, 0, 0, 1, 0, 0);
diff --git a/src/compiler/simplified-operator.h b/src/compiler/simplified-operator.h
index 3821a6d..2ed4b5f 100644
--- a/src/compiler/simplified-operator.h
+++ b/src/compiler/simplified-operator.h
@@ -15,10 +15,7 @@
 namespace internal {
 
 // Forward declarations.
-template <class>
-class TypeImpl;
-struct ZoneTypeConfig;
-typedef TypeImpl<ZoneTypeConfig> Type;
+class Type;
 class Zone;
 
 
@@ -168,6 +165,7 @@
   const Operator* ChangeBitToBool();
 
   const Operator* ObjectIsNumber();
+  const Operator* ObjectIsReceiver();
   const Operator* ObjectIsSmi();
 
   const Operator* Allocate(PretenureFlag pretenure = NOT_TENURED);
diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc
index c1f816d..9679513 100644
--- a/src/compiler/typer.cc
+++ b/src/compiler/typer.cc
@@ -29,10 +29,8 @@
   Typer* const typer_;
 };
 
-
 Typer::Typer(Isolate* isolate, Graph* graph, Flags flags,
-             CompilationDependencies* dependencies,
-             Type::FunctionType* function_type)
+             CompilationDependencies* dependencies, FunctionType* function_type)
     : isolate_(isolate),
       graph_(graph),
       flags_(flags),
@@ -243,11 +241,14 @@
   static Type* NumberToInt32(Type*, Typer*);
   static Type* NumberToUint32(Type*, Typer*);
 
-  static Type* JSAddRanger(Type::RangeType*, Type::RangeType*, Typer*);
-  static Type* JSSubtractRanger(Type::RangeType*, Type::RangeType*, Typer*);
-  static Type* JSMultiplyRanger(Type::RangeType*, Type::RangeType*, Typer*);
-  static Type* JSDivideRanger(Type::RangeType*, Type::RangeType*, Typer*);
-  static Type* JSModulusRanger(Type::RangeType*, Type::RangeType*, Typer*);
+  static Type* ObjectIsNumber(Type*, Typer*);
+  static Type* ObjectIsReceiver(Type*, Typer*);
+  static Type* ObjectIsSmi(Type*, Typer*);
+
+  static Type* JSAddRanger(RangeType*, RangeType*, Typer*);
+  static Type* JSSubtractRanger(RangeType*, RangeType*, Typer*);
+  static Type* JSDivideRanger(RangeType*, RangeType*, Typer*);
+  static Type* JSModulusRanger(RangeType*, RangeType*, Typer*);
 
   static ComparisonOutcome JSCompareTyper(Type*, Type*, Typer*);
 
@@ -508,14 +509,36 @@
 }
 
 
+// Type checks.
+
+
+Type* Typer::Visitor::ObjectIsNumber(Type* type, Typer* t) {
+  if (type->Is(Type::Number())) return t->singleton_true_;
+  if (!type->Maybe(Type::Number())) return t->singleton_false_;
+  return Type::Boolean();
+}
+
+
+Type* Typer::Visitor::ObjectIsReceiver(Type* type, Typer* t) {
+  if (type->Is(Type::Receiver())) return t->singleton_true_;
+  if (!type->Maybe(Type::Receiver())) return t->singleton_false_;
+  return Type::Boolean();
+}
+
+
+Type* Typer::Visitor::ObjectIsSmi(Type* type, Typer* t) {
+  if (type->Is(Type::TaggedSigned())) return t->singleton_true_;
+  if (type->Is(Type::TaggedPointer())) return t->singleton_false_;
+  return Type::Boolean();
+}
+
+
 // -----------------------------------------------------------------------------
 
 
 // Control operators.
 
-
-Type* Typer::Visitor::TypeStart(Node* node) { return Type::Internal(zone()); }
-
+Type* Typer::Visitor::TypeStart(Node* node) { return Type::Internal(); }
 
 Type* Typer::Visitor::TypeIfException(Node* node) { return Type::Any(); }
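
Each new ObjectIs* helper is a three-valued check: definitely true when the static type is contained in the queried type, definitely false when the two cannot overlap, and Boolean otherwise. The same shape against a toy type lattice (everything here is hypothetical; std::optional stands in for the singleton_true_/singleton_false_/Boolean triple):

#include <cstdio>
#include <optional>

enum class Lattice { kNone, kNumber, kReceiver, kAny };

bool Is(Lattice t, Lattice target) {     // t is a subtype of target
  return t == Lattice::kNone || t == target || target == Lattice::kAny;
}
bool Maybe(Lattice t, Lattice target) {  // t and target can overlap
  return t != Lattice::kNone &&
         (t == target || t == Lattice::kAny || target == Lattice::kAny);
}

// nullopt means "cannot decide statically"; the typer would answer Boolean.
std::optional<bool> ObjectIsNumber(Lattice t) {
  if (Is(t, Lattice::kNumber)) return true;
  if (!Maybe(t, Lattice::kNumber)) return false;
  return std::nullopt;
}

int main() {
  std::printf("%d\n", *ObjectIsNumber(Lattice::kNumber));          // 1
  std::printf("%d\n", *ObjectIsNumber(Lattice::kReceiver));        // 0
  std::printf("%d\n", ObjectIsNumber(Lattice::kAny).has_value());  // 0
}
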
 
@@ -524,7 +547,7 @@
 
 
 Type* Typer::Visitor::TypeParameter(Node* node) {
-  if (Type::FunctionType* function_type = typer_->function_type()) {
+  if (FunctionType* function_type = typer_->function_type()) {
     int const index = ParameterIndexOf(node->op());
     if (index >= 0 && index < function_type->Arity()) {
       return function_type->Parameter(index);
@@ -578,7 +601,7 @@
 
 
 Type* Typer::Visitor::TypeExternalConstant(Node* node) {
-  return Type::Internal(zone());
+  return Type::Internal();
 }
 
 
@@ -627,22 +650,15 @@
 
 Type* Typer::Visitor::TypeFrameState(Node* node) {
   // TODO(rossberg): Ideally FrameState wouldn't have a value output.
-  return Type::Internal(zone());
+  return Type::Internal();
 }
 
+Type* Typer::Visitor::TypeStateValues(Node* node) { return Type::Internal(); }
 
-Type* Typer::Visitor::TypeStateValues(Node* node) {
-  return Type::Internal(zone());
-}
-
-
-Type* Typer::Visitor::TypeObjectState(Node* node) {
-  return Type::Internal(zone());
-}
-
+Type* Typer::Visitor::TypeObjectState(Node* node) { return Type::Internal(); }
 
 Type* Typer::Visitor::TypeTypedStateValues(Node* node) {
-  return Type::Internal(zone());
+  return Type::Internal();
 }
 
 
@@ -650,7 +666,12 @@
 
 
 Type* Typer::Visitor::TypeProjection(Node* node) {
-  // TODO(titzer): use the output type of the input to determine the bounds.
+  Type* const type = Operand(node, 0);
+  if (type->Is(Type::None())) return Type::None();
+  int const index = static_cast<int>(ProjectionIndexOf(node->op()));
+  if (type->IsTuple() && index < type->AsTuple()->Arity()) {
+    return type->AsTuple()->Element(index);
+  }
   return Type::Any();
 }
 
@@ -950,9 +971,7 @@
   return x == 0 ? 0 : x;  // -0 -> 0
 }
 
-
-Type* Typer::Visitor::JSAddRanger(Type::RangeType* lhs, Type::RangeType* rhs,
-                                  Typer* t) {
+Type* Typer::Visitor::JSAddRanger(RangeType* lhs, RangeType* rhs, Typer* t) {
   double results[4];
   results[0] = lhs->Min() + rhs->Min();
   results[1] = lhs->Min() + rhs->Max();
@@ -998,9 +1017,8 @@
   return Type::Number();
 }
 
-
-Type* Typer::Visitor::JSSubtractRanger(Type::RangeType* lhs,
-                                       Type::RangeType* rhs, Typer* t) {
+Type* Typer::Visitor::JSSubtractRanger(RangeType* lhs, RangeType* rhs,
+                                       Typer* t) {
   double results[4];
   results[0] = lhs->Min() - rhs->Min();
   results[1] = lhs->Min() - rhs->Max();
@@ -1037,41 +1055,38 @@
 }
 
 
-Type* Typer::Visitor::JSMultiplyRanger(Type::RangeType* lhs,
-                                       Type::RangeType* rhs, Typer* t) {
-  double results[4];
-  double lmin = lhs->Min();
-  double lmax = lhs->Max();
-  double rmin = rhs->Min();
-  double rmax = rhs->Max();
-  results[0] = lmin * rmin;
-  results[1] = lmin * rmax;
-  results[2] = lmax * rmin;
-  results[3] = lmax * rmax;
-  // If the result may be nan, we give up on calculating a precise type, because
-  // the discontinuity makes it too complicated.  Note that even if none of the
-  // "results" above is nan, the actual result may still be, so we have to do a
-  // different check:
-  bool maybe_nan = (lhs->Maybe(t->cache_.kSingletonZero) &&
-                    (rmin == -V8_INFINITY || rmax == +V8_INFINITY)) ||
-                   (rhs->Maybe(t->cache_.kSingletonZero) &&
-                    (lmin == -V8_INFINITY || lmax == +V8_INFINITY));
-  if (maybe_nan) return t->cache_.kIntegerOrMinusZeroOrNaN;  // Giving up.
-  bool maybe_minuszero = (lhs->Maybe(t->cache_.kSingletonZero) && rmin < 0) ||
-                         (rhs->Maybe(t->cache_.kSingletonZero) && lmin < 0);
-  Type* range =
-      Type::Range(array_min(results, 4), array_max(results, 4), t->zone());
-  return maybe_minuszero ? Type::Union(range, Type::MinusZero(), t->zone())
-                         : range;
-}
-
-
 Type* Typer::Visitor::JSMultiplyTyper(Type* lhs, Type* rhs, Typer* t) {
   lhs = Rangify(ToNumber(lhs, t), t);
   rhs = Rangify(ToNumber(rhs, t), t);
   if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
   if (lhs->IsRange() && rhs->IsRange()) {
-    return JSMultiplyRanger(lhs->AsRange(), rhs->AsRange(), t);
+    double results[4];
+    double lmin = lhs->AsRange()->Min();
+    double lmax = lhs->AsRange()->Max();
+    double rmin = rhs->AsRange()->Min();
+    double rmax = rhs->AsRange()->Max();
+    results[0] = lmin * rmin;
+    results[1] = lmin * rmax;
+    results[2] = lmax * rmin;
+    results[3] = lmax * rmax;
+    // If the result may be nan, we give up on calculating a precise type,
+    // because the discontinuity makes it too complicated. Note that even if
+    // none of the "results" above is nan, the actual result may still be, so
+    // we have to do a different check:
+    bool maybe_nan = (lhs->Maybe(t->cache_.kSingletonZero) &&
+                      (rmin == -V8_INFINITY || rmax == +V8_INFINITY)) ||
+                     (rhs->Maybe(t->cache_.kSingletonZero) &&
+                      (lmin == -V8_INFINITY || lmax == +V8_INFINITY));
+    if (maybe_nan) return t->cache_.kIntegerOrMinusZeroOrNaN;  // Giving up.
+    bool maybe_minuszero = (lhs->Maybe(t->cache_.kSingletonZero) && rmin < 0) ||
+                           (rhs->Maybe(t->cache_.kSingletonZero) && lmin < 0);
+    Type* range =
+        Type::Range(array_min(results, 4), array_max(results, 4), t->zone());
+    return maybe_minuszero ? Type::Union(range, Type::MinusZero(), t->zone())
+                           : range;
   }
   return Type::Number();
 }
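
The NaN caveat in the comment benefits from a concrete case: with lhs = [-1, 1] (which contains zero) and rhs = [1, +inf], none of the four corner products is NaN (they are -1, -inf, 1 and +inf), yet 0 * +inf = NaN is reachable, which is exactly the situation the kSingletonZero-with-infinity test guards against. A standalone check of that arithmetic:

#include <cmath>
#include <cstdio>

int main() {
  // Hypothetical ranges: lhs = [-1, 1] (contains zero), rhs = [1, +inf].
  double corners[] = {-1.0 * 1.0, -1.0 * INFINITY, 1.0 * 1.0, 1.0 * INFINITY};
  bool any_corner_nan = false;
  for (double c : corners) any_corner_nan = any_corner_nan || std::isnan(c);
  std::printf("corner product NaN: %d\n", any_corner_nan);             // 0
  std::printf("0 * +inf is NaN:    %d\n", std::isnan(0.0 * INFINITY)); // 1
}
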
@@ -1090,9 +1105,8 @@
   return maybe_nan ? Type::Number() : Type::OrderedNumber();
 }
 
-
-Type* Typer::Visitor::JSModulusRanger(Type::RangeType* lhs,
-                                      Type::RangeType* rhs, Typer* t) {
+Type* Typer::Visitor::JSModulusRanger(RangeType* lhs, RangeType* rhs,
+                                      Typer* t) {
   double lmin = lhs->Min();
   double lmax = lhs->Max();
   double rmin = rhs->Min();
@@ -1286,8 +1300,8 @@
     } else if (receiver->IsClass() &&
                receiver->AsClass()->Map()->IsJSFunctionMap()) {
       Handle<Map> map = receiver->AsClass()->Map();
-      return map->has_non_instance_prototype() ? Type::Primitive(zone())
-                                               : Type::Receiver(zone());
+      return map->has_non_instance_prototype() ? Type::Primitive()
+                                               : Type::Receiver();
     }
   }
   return Type::Any();
@@ -1335,8 +1349,8 @@
     // Only weaken if there is range involved; we should converge quickly
     // for all other types (the exception is a union of many constants,
     // but we currently do not increase the number of constants in unions).
-    Type::RangeType* previous = previous_integer->GetRange();
-    Type::RangeType* current = current_integer->GetRange();
+    Type* previous = previous_integer->GetRange();
+    Type* current = current_integer->GetRange();
     if (current == nullptr || previous == nullptr) {
       return current_type;
     }
@@ -1397,19 +1411,12 @@
 
 
 Type* Typer::Visitor::TypeJSDeleteProperty(Node* node) {
-  return Type::Boolean(zone());
+  return Type::Boolean();
 }
 
+Type* Typer::Visitor::TypeJSHasProperty(Node* node) { return Type::Boolean(); }
 
-Type* Typer::Visitor::TypeJSHasProperty(Node* node) {
-  return Type::Boolean(zone());
-}
-
-
-Type* Typer::Visitor::TypeJSInstanceOf(Node* node) {
-  return Type::Boolean(zone());
-}
-
+Type* Typer::Visitor::TypeJSInstanceOf(Node* node) { return Type::Boolean(); }
 
 // JS context operators.
 
@@ -1430,9 +1437,6 @@
 }
 
 
-Type* Typer::Visitor::TypeJSLoadDynamic(Node* node) { return Type::Any(); }
-
-
 Type* Typer::Visitor::WrapContextTypeForInput(Node* node) {
   Type* outer = TypeOrNone(NodeProperties::GetContextInput(node));
   if (outer->Is(Type::None())) {
@@ -1525,8 +1529,14 @@
         case kMathClz32:
           return t->cache_.kZeroToThirtyTwo;
         // String functions.
+        case kStringCharCodeAt:
+          return Type::Union(Type::Range(0, kMaxUInt16, t->zone()), Type::NaN(),
+                             t->zone());
         case kStringCharAt:
+        case kStringConcat:
         case kStringFromCharCode:
+        case kStringToLowerCase:
+        case kStringToUpperCase:
           return Type::String();
         // Array functions.
         case kArrayIndexOf:
@@ -1550,15 +1560,15 @@
 
 Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
   switch (CallRuntimeParametersOf(node->op()).id()) {
+    case Runtime::kInlineIsJSReceiver:
+      return TypeUnaryOp(node, ObjectIsReceiver);
     case Runtime::kInlineIsSmi:
+      return TypeUnaryOp(node, ObjectIsSmi);
     case Runtime::kInlineIsArray:
     case Runtime::kInlineIsDate:
     case Runtime::kInlineIsTypedArray:
-    case Runtime::kInlineIsMinusZero:
-    case Runtime::kInlineIsFunction:
     case Runtime::kInlineIsRegExp:
-    case Runtime::kInlineIsJSReceiver:
-      return Type::Boolean(zone());
+      return Type::Boolean();
     case Runtime::kInlineDoubleLo:
     case Runtime::kInlineDoubleHi:
       return Type::Signed32();
@@ -1576,6 +1586,7 @@
     case Runtime::kInlineRegExpConstructResult:
       return Type::OtherObject();
     case Runtime::kInlineSubString:
+    case Runtime::kInlineStringCharFromCode:
       return Type::String();
     case Runtime::kInlineToInteger:
       return TypeUnaryOp(node, ToInteger);
@@ -1613,15 +1624,16 @@
 
 
 Type* Typer::Visitor::TypeJSForInPrepare(Node* node) {
-  // TODO(bmeurer): Return a tuple type here.
-  return Type::Any();
+  STATIC_ASSERT(Map::EnumLengthBits::kMax <= FixedArray::kMaxLength);
+  Factory* const f = isolate()->factory();
+  Type* const cache_type = Type::Union(
+      typer_->cache_.kSmi, Type::Class(f->meta_map(), zone()), zone());
+  Type* const cache_array = Type::Class(f->fixed_array_map(), zone());
+  Type* const cache_length = typer_->cache_.kFixedArrayLengthType;
+  return Type::Tuple(cache_type, cache_array, cache_length, zone());
 }
 
-
-Type* Typer::Visitor::TypeJSForInDone(Node* node) {
-  return Type::Boolean(zone());
-}
-
+Type* Typer::Visitor::TypeJSForInDone(Node* node) { return Type::Boolean(); }
 
 Type* Typer::Visitor::TypeJSForInStep(Node* node) {
   STATIC_ASSERT(Map::EnumLengthBits::kMax <= FixedArray::kMaxLength);
@@ -1643,82 +1655,57 @@
 
 // Simplified operators.
 
-
-Type* Typer::Visitor::TypeBooleanNot(Node* node) {
-  return Type::Boolean(zone());
-}
-
+Type* Typer::Visitor::TypeBooleanNot(Node* node) { return Type::Boolean(); }
 
 Type* Typer::Visitor::TypeBooleanToNumber(Node* node) {
   return TypeUnaryOp(node, ToNumber);
 }
 
+Type* Typer::Visitor::TypeNumberEqual(Node* node) { return Type::Boolean(); }
 
-Type* Typer::Visitor::TypeNumberEqual(Node* node) {
-  return Type::Boolean(zone());
-}
-
-
-Type* Typer::Visitor::TypeNumberLessThan(Node* node) {
-  return Type::Boolean(zone());
-}
-
+Type* Typer::Visitor::TypeNumberLessThan(Node* node) { return Type::Boolean(); }
 
 Type* Typer::Visitor::TypeNumberLessThanOrEqual(Node* node) {
-  return Type::Boolean(zone());
+  return Type::Boolean();
 }
 
+Type* Typer::Visitor::TypeNumberAdd(Node* node) { return Type::Number(); }
 
-Type* Typer::Visitor::TypeNumberAdd(Node* node) { return Type::Number(zone()); }
+Type* Typer::Visitor::TypeNumberSubtract(Node* node) { return Type::Number(); }
 
+Type* Typer::Visitor::TypeNumberMultiply(Node* node) { return Type::Number(); }
 
-Type* Typer::Visitor::TypeNumberSubtract(Node* node) {
-  return Type::Number(zone());
-}
+Type* Typer::Visitor::TypeNumberDivide(Node* node) { return Type::Number(); }
 
-
-Type* Typer::Visitor::TypeNumberMultiply(Node* node) {
-  return Type::Number(zone());
-}
-
-
-Type* Typer::Visitor::TypeNumberDivide(Node* node) {
-  return Type::Number(zone());
-}
-
-
-Type* Typer::Visitor::TypeNumberModulus(Node* node) {
-  return Type::Number(zone());
-}
-
+Type* Typer::Visitor::TypeNumberModulus(Node* node) { return Type::Number(); }
 
 Type* Typer::Visitor::TypeNumberBitwiseOr(Node* node) {
-  return Type::Signed32(zone());
+  return Type::Signed32();
 }
 
 
 Type* Typer::Visitor::TypeNumberBitwiseXor(Node* node) {
-  return Type::Signed32(zone());
+  return Type::Signed32();
 }
 
 
 Type* Typer::Visitor::TypeNumberBitwiseAnd(Node* node) {
-  return Type::Signed32(zone());
+  return Type::Signed32();
 }
 
 
 Type* Typer::Visitor::TypeNumberShiftLeft(Node* node) {
-  return Type::Signed32(zone());
+  return Type::Signed32();
 }
 
 
 Type* Typer::Visitor::TypeNumberShiftRight(Node* node) {
-  return Type::Signed32(zone());
+  return Type::Signed32();
 }
 
 
 Type* Typer::Visitor::TypeNumberShiftRightLogical(Node* node) {
-  return Type::Unsigned32(zone());
+  return Type::Unsigned32();
 }
 
 
@@ -1733,7 +1720,7 @@
 
 
 Type* Typer::Visitor::TypeNumberIsHoleNaN(Node* node) {
-  return Type::Boolean(zone());
+  return Type::Boolean();
 }
 
 
@@ -1755,19 +1742,12 @@
   return TypeBinaryOp(node, ReferenceEqualTyper);
 }
 
+Type* Typer::Visitor::TypeStringEqual(Node* node) { return Type::Boolean(); }
 
-Type* Typer::Visitor::TypeStringEqual(Node* node) {
-  return Type::Boolean(zone());
-}
-
-
-Type* Typer::Visitor::TypeStringLessThan(Node* node) {
-  return Type::Boolean(zone());
-}
-
+Type* Typer::Visitor::TypeStringLessThan(Node* node) { return Type::Boolean(); }
 
 Type* Typer::Visitor::TypeStringLessThanOrEqual(Node* node) {
-  return Type::Boolean(zone());
+  return Type::Boolean();
 }
 
 
@@ -1931,20 +1911,17 @@
 
 
 Type* Typer::Visitor::TypeObjectIsNumber(Node* node) {
-  Type* arg = Operand(node, 0);
-  if (arg->Is(Type::None())) return Type::None();
-  if (arg->Is(Type::Number())) return typer_->singleton_true_;
-  if (!arg->Maybe(Type::Number())) return typer_->singleton_false_;
-  return Type::Boolean();
+  return TypeUnaryOp(node, ObjectIsNumber);
+}
+
+
+Type* Typer::Visitor::TypeObjectIsReceiver(Node* node) {
+  return TypeUnaryOp(node, ObjectIsReceiver);
 }
 
 
 Type* Typer::Visitor::TypeObjectIsSmi(Node* node) {
-  Type* arg = Operand(node, 0);
-  if (arg->Is(Type::None())) return Type::None();
-  if (arg->Is(Type::TaggedSigned())) return typer_->singleton_true_;
-  if (arg->Is(Type::TaggedPointer())) return typer_->singleton_false_;
-  return Type::Boolean();
+  return TypeUnaryOp(node, ObjectIsSmi);
 }
 
 
@@ -1952,6 +1929,7 @@
 
 Type* Typer::Visitor::TypeLoad(Node* node) { return Type::Any(); }
 
+Type* Typer::Visitor::TypeStackSlot(Node* node) { return Type::Any(); }
 
 Type* Typer::Visitor::TypeStore(Node* node) {
   UNREACHABLE();
@@ -1989,6 +1967,11 @@
 Type* Typer::Visitor::TypeWord32Ctz(Node* node) { return Type::Integral32(); }
 
 
+Type* Typer::Visitor::TypeWord32ReverseBits(Node* node) {
+  return Type::Integral32();
+}
+
+
 Type* Typer::Visitor::TypeWord32Popcnt(Node* node) {
   return Type::Integral32();
 }
@@ -2021,6 +2004,11 @@
 Type* Typer::Visitor::TypeWord64Ctz(Node* node) { return Type::Internal(); }
 
 
+Type* Typer::Visitor::TypeWord64ReverseBits(Node* node) {
+  return Type::Internal();
+}
+
+
 Type* Typer::Visitor::TypeWord64Popcnt(Node* node) { return Type::Internal(); }
 
 
@@ -2145,6 +2133,17 @@
 }
 
 
+Type* Typer::Visitor::TypeTruncateFloat32ToInt32(Node* node) {
+  return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
+}
+
+
+Type* Typer::Visitor::TypeTruncateFloat32ToUint32(Node* node) {
+  return Type::Intersect(Type::Unsigned32(), Type::UntaggedIntegral32(),
+                         zone());
+}
+
+
 Type* Typer::Visitor::TypeTryTruncateFloat32ToInt64(Node* node) {
   return Type::Internal();
 }
@@ -2200,6 +2199,11 @@
 }
 
 
+Type* Typer::Visitor::TypeRoundInt32ToFloat32(Node* node) {
+  return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
+}
+
+
 Type* Typer::Visitor::TypeRoundInt64ToFloat32(Node* node) {
   return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
 }
@@ -2210,6 +2214,11 @@
 }
 
 
+Type* Typer::Visitor::TypeRoundUint32ToFloat32(Node* node) {
+  return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
+}
+
+
 Type* Typer::Visitor::TypeRoundUint64ToFloat32(Node* node) {
   return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
 }
@@ -2406,6 +2415,9 @@
   return Type::Internal();
 }
 
+Type* Typer::Visitor::TypeLoadParentFramePointer(Node* node) {
+  return Type::Internal();
+}
 
 Type* Typer::Visitor::TypeCheckedLoad(Node* node) { return Type::Any(); }
 
diff --git a/src/compiler/typer.h b/src/compiler/typer.h
index 4177026..0982b28 100644
--- a/src/compiler/typer.h
+++ b/src/compiler/typer.h
@@ -30,7 +30,7 @@
 
   Typer(Isolate* isolate, Graph* graph, Flags flags = kNoFlags,
         CompilationDependencies* dependencies = nullptr,
-        Type::FunctionType* function_type = nullptr);
+        FunctionType* function_type = nullptr);
   ~Typer();
 
   void Run();
@@ -46,13 +46,13 @@
   Isolate* isolate() const { return isolate_; }
   Flags flags() const { return flags_; }
   CompilationDependencies* dependencies() const { return dependencies_; }
-  Type::FunctionType* function_type() const { return function_type_; }
+  FunctionType* function_type() const { return function_type_; }
 
   Isolate* const isolate_;
   Graph* const graph_;
   Flags const flags_;
   CompilationDependencies* const dependencies_;
-  Type::FunctionType* function_type_;
+  FunctionType* function_type_;
   Decorator* decorator_;
   TypeCache const& cache_;
 
diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc
index 1a3ef8e..99480ca 100644
--- a/src/compiler/verifier.cc
+++ b/src/compiler/verifier.cc
@@ -22,7 +22,6 @@
 #include "src/compiler/schedule.h"
 #include "src/compiler/simplified-operator.h"
 #include "src/ostreams.h"
-#include "src/types-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -428,13 +427,20 @@
       }
       break;
     }
-    case IrOpcode::kFrameState:
+    case IrOpcode::kFrameState: {
       // TODO(jarin): what are the constraints on these?
       CHECK_EQ(5, value_count);
       CHECK_EQ(0, control_count);
       CHECK_EQ(0, effect_count);
       CHECK_EQ(6, input_count);
+      for (int i = 0; i < 3; ++i) {
+        CHECK(NodeProperties::GetValueInput(node, i)->opcode() ==
+                  IrOpcode::kStateValues ||
+              NodeProperties::GetValueInput(node, i)->opcode() ==
+                  IrOpcode::kTypedStateValues);
+      }
       break;
+    }
     case IrOpcode::kStateValues:
     case IrOpcode::kObjectState:
     case IrOpcode::kTypedStateValues:
@@ -553,7 +559,6 @@
       break;
 
     case IrOpcode::kJSLoadContext:
-    case IrOpcode::kJSLoadDynamic:
       // Type can be anything.
       CheckUpperIs(node, Type::Any());
       break;
@@ -707,6 +712,7 @@
       break;
     }
     case IrOpcode::kObjectIsNumber:
+    case IrOpcode::kObjectIsReceiver:
     case IrOpcode::kObjectIsSmi:
       CheckValueInputIs(node, 0, Type::Any());
       CheckUpperIs(node, Type::Boolean());
@@ -824,6 +830,7 @@
     // -----------------------
     case IrOpcode::kLoad:
     case IrOpcode::kStore:
+    case IrOpcode::kStackSlot:
     case IrOpcode::kWord32And:
     case IrOpcode::kWord32Or:
     case IrOpcode::kWord32Xor:
@@ -834,6 +841,7 @@
     case IrOpcode::kWord32Equal:
     case IrOpcode::kWord32Clz:
     case IrOpcode::kWord32Ctz:
+    case IrOpcode::kWord32ReverseBits:
     case IrOpcode::kWord32Popcnt:
     case IrOpcode::kWord64And:
     case IrOpcode::kWord64Or:
@@ -845,6 +853,7 @@
     case IrOpcode::kWord64Clz:
     case IrOpcode::kWord64Popcnt:
     case IrOpcode::kWord64Ctz:
+    case IrOpcode::kWord64ReverseBits:
     case IrOpcode::kWord64Equal:
     case IrOpcode::kInt32Add:
     case IrOpcode::kInt32AddWithOverflow:
@@ -907,8 +916,10 @@
     case IrOpcode::kFloat64LessThan:
     case IrOpcode::kFloat64LessThanOrEqual:
     case IrOpcode::kTruncateInt64ToInt32:
+    case IrOpcode::kRoundInt32ToFloat32:
     case IrOpcode::kRoundInt64ToFloat32:
     case IrOpcode::kRoundInt64ToFloat64:
+    case IrOpcode::kRoundUint32ToFloat32:
     case IrOpcode::kRoundUint64ToFloat64:
     case IrOpcode::kRoundUint64ToFloat32:
     case IrOpcode::kTruncateFloat64ToFloat32:
@@ -924,6 +935,8 @@
     case IrOpcode::kChangeFloat32ToFloat64:
     case IrOpcode::kChangeFloat64ToInt32:
     case IrOpcode::kChangeFloat64ToUint32:
+    case IrOpcode::kTruncateFloat32ToInt32:
+    case IrOpcode::kTruncateFloat32ToUint32:
     case IrOpcode::kTryTruncateFloat32ToInt64:
     case IrOpcode::kTryTruncateFloat64ToInt64:
     case IrOpcode::kTryTruncateFloat32ToUint64:
@@ -934,6 +947,7 @@
     case IrOpcode::kFloat64InsertHighWord32:
     case IrOpcode::kLoadStackPointer:
     case IrOpcode::kLoadFramePointer:
+    case IrOpcode::kLoadParentFramePointer:
     case IrOpcode::kCheckedLoad:
     case IrOpcode::kCheckedStore:
       // TODO(rossberg): Check.
diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc
index 17065d6..9c3858d 100644
--- a/src/compiler/wasm-compiler.cc
+++ b/src/compiler/wasm-compiler.cc
@@ -15,6 +15,7 @@
 #include "src/compiler/graph.h"
 #include "src/compiler/graph-visualizer.h"
 #include "src/compiler/instruction-selector.h"
+#include "src/compiler/int64-lowering.h"
 #include "src/compiler/js-generic-lowering.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/js-operator.h"
@@ -29,6 +30,9 @@
 
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
+#include "src/factory.h"
+#include "src/log-inl.h"
+#include "src/profiler/cpu-profiler.h"
 
 #include "src/wasm/ast-decoder.h"
 #include "src/wasm/wasm-module.h"
@@ -105,6 +109,9 @@
   // Make the current control path trap to unreachable.
   void Unreachable() { ConnectTrap(kTrapUnreachable); }
 
+  // Always trap with the given reason.
+  void TrapAlways(TrapReason reason) { ConnectTrap(reason); }
+
   // Add a check that traps if {node} is equal to {val}.
   Node* TrapIfEq32(TrapReason reason, Node* node, int32_t val) {
     Int32Matcher m(node);
@@ -165,6 +172,28 @@
     *effect_ptr = before;
   }
 
+  Node* GetTrapValue(wasm::FunctionSig* sig) {
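+    // Return a dummy value of the function's return type (0xdeadbeef bit
+    // pattern) so the graph on the trapping path stays well-typed.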
+    if (sig->return_count() > 0) {
+      switch (sig->GetReturn()) {
+        case wasm::kAstI32:
+          return jsgraph()->Int32Constant(0xdeadbeef);
+        case wasm::kAstI64:
+          return jsgraph()->Int64Constant(0xdeadbeefdeadbeef);
+        case wasm::kAstF32:
+          return jsgraph()->Float32Constant(bit_cast<float>(0xdeadbeef));
+        case wasm::kAstF64:
+          return jsgraph()->Float64Constant(
+              bit_cast<double>(0xdeadbeefdeadbeef));
+        default:
+          UNREACHABLE();
+          return nullptr;
+      }
+    } else {
+      return jsgraph()->Int32Constant(0xdeadbeef);
+    }
+  }
+
  private:
   WasmGraphBuilder* builder_;
   JSGraph* jsgraph_;
@@ -197,7 +226,7 @@
     *effect_ptr = effects_[reason] =
         graph()->NewNode(common()->EffectPhi(1), *effect_ptr, *control_ptr);
 
-    if (module && !module->context.is_null()) {
+    if (module && !module->instance->context.is_null()) {
       // Use the module context to call the runtime to throw an exception.
       Runtime::FunctionId f = Runtime::kThrow;
       const Runtime::Function* fun = Runtime::FunctionForId(f);
@@ -210,7 +239,7 @@
           jsgraph()->ExternalConstant(
               ExternalReference(f, jsgraph()->isolate())),  // ref
           jsgraph()->Int32Constant(fun->nargs),             // arity
-          jsgraph()->Constant(module->context),             // context
+          jsgraph()->Constant(module->instance->context),   // context
           *effect_ptr,
           *control_ptr};
 
@@ -227,29 +256,7 @@
       end = thrw;
     } else {
       // End the control flow with returning 0xdeadbeef
-      Node* ret_value;
-      if (builder_->GetFunctionSignature()->return_count() > 0) {
-        switch (builder_->GetFunctionSignature()->GetReturn()) {
-          case wasm::kAstI32:
-            ret_value = jsgraph()->Int32Constant(0xdeadbeef);
-            break;
-          case wasm::kAstI64:
-            ret_value = jsgraph()->Int64Constant(0xdeadbeefdeadbeef);
-            break;
-          case wasm::kAstF32:
-            ret_value = jsgraph()->Float32Constant(bit_cast<float>(0xdeadbeef));
-            break;
-          case wasm::kAstF64:
-            ret_value = jsgraph()->Float64Constant(
-                bit_cast<double>(0xdeadbeefdeadbeef));
-            break;
-          default:
-            UNREACHABLE();
-            ret_value = nullptr;
-        }
-      } else {
-        ret_value = jsgraph()->Int32Constant(0xdeadbeef);
-      }
+      Node* ret_value = GetTrapValue(builder_->GetFunctionSignature());
       end = graph()->NewNode(jsgraph()->common()->Return(), ret_value,
                              *effect_ptr, *control_ptr);
     }
@@ -475,6 +482,9 @@
       op = m->Uint32LessThanOrEqual();
       std::swap(left, right);
       break;
+    case wasm::kExprI64And:
+      op = m->Word64And();
+      break;
 #if WASM_64
     // Opcodes only supported on 64-bit platforms.
     // TODO(titzer): query the machine operator builder here instead of #ifdef.
@@ -525,9 +535,6 @@
       op = m->Uint64Mod();
       return graph()->NewNode(op, left, right,
                               trap_->ZeroCheck64(kTrapRemByZero, right));
-    case wasm::kExprI64And:
-      op = m->Word64And();
-      break;
     case wasm::kExprI64Ior:
       op = m->Word64Or();
       break;
@@ -696,14 +703,10 @@
       op = m->ChangeUint32ToFloat64();
       break;
     case wasm::kExprF32SConvertI32:
-      op = m->ChangeInt32ToFloat64();  // TODO(titzer): two conversions
-      input = graph()->NewNode(op, input);
-      op = m->TruncateFloat64ToFloat32();
+      op = m->RoundInt32ToFloat32();
       break;
     case wasm::kExprF32UConvertI32:
-      op = m->ChangeUint32ToFloat64();
-      input = graph()->NewNode(op, input);
-      op = m->TruncateFloat64ToFloat32();
+      op = m->RoundUint32ToFloat32();
       break;
     case wasm::kExprI32SConvertF32:
       return BuildI32SConvertF32(input);
@@ -725,6 +728,10 @@
       if (m->Word32Ctz().IsSupported()) {
         op = m->Word32Ctz().op();
         break;
+      } else if (m->Word32ReverseBits().IsSupported()) {
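+        // No ctz instruction, but reverse-bits is available:
+        // ctz(x) == clz(reverse_bits(x)).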
+        Node* reversed = graph()->NewNode(m->Word32ReverseBits().op(), input);
+        Node* result = graph()->NewNode(m->Word32Clz(), reversed);
+        return result;
       } else {
         return BuildI32Ctz(input);
       }
@@ -738,84 +745,53 @@
       }
     }
     case wasm::kExprF32Floor: {
-      if (m->Float32RoundDown().IsSupported()) {
-        op = m->Float32RoundDown().op();
-        break;
-      } else {
-        op = UnsupportedOpcode(opcode);
-        break;
-      }
+      if (!m->Float32RoundDown().IsSupported()) return BuildF32Floor(input);
+      op = m->Float32RoundDown().op();
+      break;
     }
     case wasm::kExprF32Ceil: {
-      if (m->Float32RoundUp().IsSupported()) {
-        op = m->Float32RoundUp().op();
-        break;
-      } else {
-        op = UnsupportedOpcode(opcode);
-        break;
-      }
+      if (!m->Float32RoundUp().IsSupported()) return BuildF32Ceil(input);
+      op = m->Float32RoundUp().op();
+      break;
     }
     case wasm::kExprF32Trunc: {
-      if (m->Float32RoundTruncate().IsSupported()) {
-        op = m->Float32RoundTruncate().op();
-        break;
-      } else {
-        op = UnsupportedOpcode(opcode);
-        break;
-      }
+      if (!m->Float32RoundTruncate().IsSupported()) return BuildF32Trunc(input);
+      op = m->Float32RoundTruncate().op();
+      break;
     }
     case wasm::kExprF32NearestInt: {
-      if (m->Float32RoundTiesEven().IsSupported()) {
-        op = m->Float32RoundTiesEven().op();
-        break;
-      } else {
-        op = UnsupportedOpcode(opcode);
-        break;
-      }
+      if (!m->Float32RoundTiesEven().IsSupported())
+        return BuildF32NearestInt(input);
+      op = m->Float32RoundTiesEven().op();
+      break;
     }
     case wasm::kExprF64Floor: {
-      if (m->Float64RoundDown().IsSupported()) {
-        op = m->Float64RoundDown().op();
-        break;
-      } else {
-        op = UnsupportedOpcode(opcode);
-        break;
-      }
+      if (!m->Float64RoundDown().IsSupported()) return BuildF64Floor(input);
+      op = m->Float64RoundDown().op();
+      break;
     }
     case wasm::kExprF64Ceil: {
-      if (m->Float64RoundUp().IsSupported()) {
-        op = m->Float64RoundUp().op();
-        break;
-      } else {
-        op = UnsupportedOpcode(opcode);
-        break;
-      }
+      if (!m->Float64RoundUp().IsSupported()) return BuildF64Ceil(input);
+      op = m->Float64RoundUp().op();
+      break;
     }
     case wasm::kExprF64Trunc: {
-      if (m->Float64RoundTruncate().IsSupported()) {
-        op = m->Float64RoundTruncate().op();
-        break;
-      } else {
-        op = UnsupportedOpcode(opcode);
-        break;
-      }
+      if (!m->Float64RoundTruncate().IsSupported()) return BuildF64Trunc(input);
+      op = m->Float64RoundTruncate().op();
+      break;
     }
     case wasm::kExprF64NearestInt: {
-      if (m->Float64RoundTiesEven().IsSupported()) {
-        op = m->Float64RoundTiesEven().op();
-        break;
-      } else {
-        op = UnsupportedOpcode(opcode);
-        break;
-      }
+      if (!m->Float64RoundTiesEven().IsSupported())
+        return BuildF64NearestInt(input);
+      op = m->Float64RoundTiesEven().op();
+      break;
     }
-
-#if WASM_64
-    // Opcodes only supported on 64-bit platforms.
-    // TODO(titzer): query the machine operator builder here instead of #ifdef.
     case wasm::kExprI32ConvertI64:
       op = m->TruncateInt64ToInt32();
       break;
+#if WASM_64
+    // Opcodes only supported on 64-bit platforms.
+    // TODO(titzer): query the machine operator builder here instead of #ifdef.
     case wasm::kExprI64SConvertI32:
       op = m->ChangeInt32ToInt64();
       break;
@@ -883,6 +859,10 @@
       if (m->Word64Ctz().IsSupported()) {
         op = m->Word64Ctz().op();
         break;
+      } else if (m->Word64ReverseBits().IsSupported()) {
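+        // Same identity as in the 32-bit case: ctz(x) == clz(reverse_bits(x)).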
+        Node* reversed = graph()->NewNode(m->Word64ReverseBits().op(), input);
+        Node* result = graph()->NewNode(m->Word64Clz(), reversed);
+        return result;
       } else {
         return BuildI64Ctz(input);
       }
@@ -1061,8 +1041,12 @@
 
   return left_le_right.Phi(
       wasm::kAstF32, left,
-      right_lt_left.Phi(wasm::kAstF32, right,
-                        left_is_not_nan.Phi(wasm::kAstF32, right, left)));
+      right_lt_left.Phi(
+          wasm::kAstF32, right,
+          left_is_not_nan.Phi(
+              wasm::kAstF32,
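+              // Multiply the NaN operand by 1.0 so that a signaling NaN is
+              // quieted before it is returned.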
+              Binop(wasm::kExprF32Mul, right, Float32Constant(1.0)),
+              Binop(wasm::kExprF32Mul, left, Float32Constant(1.0)))));
 }
 
 
@@ -1078,8 +1062,12 @@
 
   return left_ge_right.Phi(
       wasm::kAstF32, left,
-      right_gt_left.Phi(wasm::kAstF32, right,
-                        left_is_not_nan.Phi(wasm::kAstF32, right, left)));
+      right_gt_left.Phi(
+          wasm::kAstF32, right,
+          left_is_not_nan.Phi(
+              wasm::kAstF32,
+              Binop(wasm::kExprF32Mul, right, Float32Constant(1.0)),
+              Binop(wasm::kExprF32Mul, left, Float32Constant(1.0)))));
 }
 
 
@@ -1095,8 +1083,12 @@
 
   return left_le_right.Phi(
       wasm::kAstF64, left,
-      right_lt_left.Phi(wasm::kAstF64, right,
-                        left_is_not_nan.Phi(wasm::kAstF64, right, left)));
+      right_lt_left.Phi(
+          wasm::kAstF64, right,
+          left_is_not_nan.Phi(
+              wasm::kAstF64,
+              Binop(wasm::kExprF64Mul, right, Float64Constant(1.0)),
+              Binop(wasm::kExprF64Mul, left, Float64Constant(1.0)))));
 }
 
 
@@ -1112,8 +1104,12 @@
 
   return left_ge_right.Phi(
       wasm::kAstF64, left,
-      right_gt_left.Phi(wasm::kAstF64, right,
-                        left_is_not_nan.Phi(wasm::kAstF64, right, left)));
+      right_gt_left.Phi(
+          wasm::kAstF64, right,
+          left_is_not_nan.Phi(
+              wasm::kAstF64,
+              Binop(wasm::kExprF64Mul, right, Float64Constant(1.0)),
+              Binop(wasm::kExprF64Mul, left, Float64Constant(1.0)))));
 }
 
 
@@ -1121,14 +1117,12 @@
   MachineOperatorBuilder* m = jsgraph()->machine();
   // Truncation of the input value is needed for the overflow check later.
   Node* trunc = Unop(wasm::kExprF32Trunc, input);
-  // TODO(titzer): two conversions
-  Node* f64_trunc = graph()->NewNode(m->ChangeFloat32ToFloat64(), trunc);
-  Node* result = graph()->NewNode(m->ChangeFloat64ToInt32(), f64_trunc);
+  Node* result = graph()->NewNode(m->TruncateFloat32ToInt32(), trunc);
 
   // Convert the result back to f64. If we end up at a different value than the
   // truncated input value, then there has been an overflow and we trap.
-  Node* check = Unop(wasm::kExprF64SConvertI32, result);
-  Node* overflow = Binop(wasm::kExprF64Ne, f64_trunc, check);
+  Node* check = Unop(wasm::kExprF32SConvertI32, result);
+  Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
   trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
 
   return result;
@@ -1137,6 +1131,10 @@
 
 Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input) {
   MachineOperatorBuilder* m = jsgraph()->machine();
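+  // asm.js uses JavaScript ToInt32 semantics: the conversion never traps,
+  // even for NaN or out-of-range inputs.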
+  if (module_ && module_->asm_js) {
+    return graph()->NewNode(
+        m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
+  }
   // Truncation of the input value is needed for the overflow check later.
   Node* trunc = Unop(wasm::kExprF64Trunc, input);
   Node* result = graph()->NewNode(m->ChangeFloat64ToInt32(), trunc);
@@ -1155,14 +1153,12 @@
   MachineOperatorBuilder* m = jsgraph()->machine();
   // Truncation of the input value is needed for the overflow check later.
   Node* trunc = Unop(wasm::kExprF32Trunc, input);
-  // TODO(titzer): two conversions
-  Node* f64_trunc = graph()->NewNode(m->ChangeFloat32ToFloat64(), trunc);
-  Node* result = graph()->NewNode(m->ChangeFloat64ToUint32(), f64_trunc);
+  Node* result = graph()->NewNode(m->TruncateFloat32ToUint32(), trunc);
 
-  // Convert the result back to f64. If we end up at a different value than the
+  // Convert the result back to f32. If we end up at a different value than the
   // truncated input value, then there has been an overflow and we trap.
-  Node* check = Unop(wasm::kExprF64UConvertI32, result);
-  Node* overflow = Binop(wasm::kExprF64Ne, f64_trunc, check);
+  Node* check = Unop(wasm::kExprF32UConvertI32, result);
+  Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
   trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
 
   return result;
@@ -1171,6 +1167,10 @@
 
 Node* WasmGraphBuilder::BuildI32UConvertF64(Node* input) {
   MachineOperatorBuilder* m = jsgraph()->machine();
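+  // As above: asm.js conversions use JavaScript semantics and never trap.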
+  if (module_ && module_->asm_js) {
+    return graph()->NewNode(
+        m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
+  }
   // Truncation of the input value is needed for the overflow check later.
   Node* trunc = Unop(wasm::kExprF64Trunc, input);
   Node* result = graph()->NewNode(m->ChangeFloat64ToUint32(), trunc);
@@ -1360,6 +1360,117 @@
   return result;
 }
 
+Node* WasmGraphBuilder::BuildF32Trunc(Node* input) {
+  MachineType type = MachineType::Float32();
+  ExternalReference ref =
+      ExternalReference::f32_trunc_wrapper_function(jsgraph()->isolate());
+  return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildF32Floor(Node* input) {
+  MachineType type = MachineType::Float32();
+  ExternalReference ref =
+      ExternalReference::f32_floor_wrapper_function(jsgraph()->isolate());
+  return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildF32Ceil(Node* input) {
+  MachineType type = MachineType::Float32();
+  ExternalReference ref =
+      ExternalReference::f32_ceil_wrapper_function(jsgraph()->isolate());
+  return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildF32NearestInt(Node* input) {
+  MachineType type = MachineType::Float32();
+  ExternalReference ref =
+      ExternalReference::f32_nearest_int_wrapper_function(jsgraph()->isolate());
+  return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildF64Trunc(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_trunc_wrapper_function(jsgraph()->isolate());
+  return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildF64Floor(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_floor_wrapper_function(jsgraph()->isolate());
+  return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildF64Ceil(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_ceil_wrapper_function(jsgraph()->isolate());
+  return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildF64NearestInt(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_nearest_int_wrapper_function(jsgraph()->isolate());
+  return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildRoundingInstruction(Node* input,
+                                                 ExternalReference ref,
+                                                 MachineType type) {
+  // We implement the rounding by calling a C function that computes the
+  // result for us. The input is passed through a stack slot rather than as a
+  // floating-point parameter: we reserve a slot on the stack, store the input
+  // value in it, pass a pointer to the slot to the C function, and after the
+  // call we read the result back from the same stack slot.
+
+  Node* stack_slot_param =
+      graph()->NewNode(jsgraph()->machine()->StackSlot(type.representation()));
+
+  const Operator* store_op = jsgraph()->machine()->Store(
+      StoreRepresentation(type.representation(), kNoWriteBarrier));
+  *effect_ =
+      graph()->NewNode(store_op, stack_slot_param, jsgraph()->Int32Constant(0),
+                       input, *effect_, *control_);
+
+  Signature<MachineType>::Builder sig_builder(jsgraph()->zone(), 0, 1);
+  sig_builder.AddParam(MachineType::Pointer());
+  Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
+
+  Node* args[] = {function, stack_slot_param};
+
+  BuildCCall(sig_builder.Build(), args);
+
+  const Operator* load_op = jsgraph()->machine()->Load(type);
+
+  Node* load =
+      graph()->NewNode(load_op, stack_slot_param, jsgraph()->Int32Constant(0),
+                       *effect_, *control_);
+  *effect_ = load;
+  return load;
+}
+
+Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node** args) {
+  const size_t params = sig->parameter_count();
+  const size_t extra = 2;  // effect and control inputs.
+  const size_t count = 1 + params + extra;
+
+  // Reallocate the buffer to make space for extra inputs.
+  args = Realloc(args, count);
+
+  // Add effect and control inputs.
+  args[params + 1] = *effect_;
+  args[params + 2] = *control_;
+
+  CallDescriptor* desc =
+      Linkage::GetSimplifiedCDescriptor(jsgraph()->zone(), sig);
+
+  const Operator* op = jsgraph()->common()->Call(desc);
+  Node* call = graph()->NewNode(op, static_cast<int>(count), args);
+  *effect_ = call;
+  return call;
+}
 
 Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args) {
   const size_t params = sig->parameter_count();
@@ -1373,8 +1484,9 @@
   args[params + 1] = *effect_;
   args[params + 2] = *control_;
 
-  const Operator* op = jsgraph()->common()->Call(
-      module_->GetWasmCallDescriptor(jsgraph()->zone(), sig));
+  CallDescriptor* descriptor =
+      wasm::ModuleEnv::GetWasmCallDescriptor(jsgraph()->zone(), sig);
+  const Operator* op = jsgraph()->common()->Call(descriptor);
   Node* call = graph()->NewNode(op, static_cast<int>(count), args);
 
   *effect_ = call;
@@ -1392,23 +1504,38 @@
   return BuildWasmCall(sig, args);
 }
 
+Node* WasmGraphBuilder::CallImport(uint32_t index, Node** args) {
+  DCHECK_NULL(args[0]);
+
+  // Add code object as constant.
+  args[0] = Constant(module_->GetImportCode(index));
+  wasm::FunctionSig* sig = module_->GetImportSignature(index);
+
+  return BuildWasmCall(sig, args);
+}
 
 Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args) {
   DCHECK_NOT_NULL(args[0]);
+  DCHECK(module_ && module_->instance);
 
   MachineOperatorBuilder* machine = jsgraph()->machine();
 
   // Compute the code object by loading it from the function table.
   Node* key = args[0];
-  Node* table = FunctionTable();
 
   // Bounds check the index.
   int table_size = static_cast<int>(module_->FunctionTableSize());
-  {
+  if (table_size > 0) {
+    // Bounds check against the table size.
     Node* size = Int32Constant(static_cast<int>(table_size));
     Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
     trap_->AddTrapIfFalse(kTrapFuncInvalid, in_bounds);
+  } else {
+    // No function table. Generate a trap and return a constant.
+    trap_->AddTrapIfFalse(kTrapFuncInvalid, Int32Constant(0));
+    return trap_->GetTrapValue(module_->GetSignature(index));
   }
+  Node* table = FunctionTable();
 
   // Load signature from the table and check.
   // The table is a FixedArray; signatures are encoded as SMIs.
@@ -1546,7 +1673,8 @@
   args[pos++] = *control_;
 
   // Call the WASM code.
-  CallDescriptor* desc = module_->GetWasmCallDescriptor(jsgraph()->zone(), sig);
+  CallDescriptor* desc =
+      wasm::ModuleEnv::GetWasmCallDescriptor(jsgraph()->zone(), sig);
   Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), count, args);
   Node* jsval =
       ToJS(call, context,
@@ -1631,18 +1759,23 @@
 
 
 Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
+  DCHECK(module_ && module_->instance);
   if (offset == 0) {
-    if (!mem_buffer_)
-      mem_buffer_ = jsgraph()->IntPtrConstant(module_->mem_start);
+    if (!mem_buffer_) {
+      mem_buffer_ = jsgraph()->IntPtrConstant(
+          reinterpret_cast<uintptr_t>(module_->instance->mem_start));
+    }
     return mem_buffer_;
   } else {
-    return jsgraph()->IntPtrConstant(module_->mem_start + offset);
+    return jsgraph()->IntPtrConstant(
+        reinterpret_cast<uintptr_t>(module_->instance->mem_start + offset));
   }
 }
 
 
 Node* WasmGraphBuilder::MemSize(uint32_t offset) {
-  int32_t size = static_cast<int>(module_->mem_end - module_->mem_start);
+  DCHECK(module_ && module_->instance);
+  uint32_t size = static_cast<uint32_t>(module_->instance->mem_size);
   if (offset == 0) {
     if (!mem_size_) mem_size_ = jsgraph()->Int32Constant(size);
     return mem_size_;
@@ -1653,18 +1786,21 @@
 
 
 Node* WasmGraphBuilder::FunctionTable() {
+  DCHECK(module_ && module_->instance &&
+         !module_->instance->function_table.is_null());
   if (!function_table_) {
-    DCHECK(!module_->function_table.is_null());
-    function_table_ = jsgraph()->Constant(module_->function_table);
+    function_table_ = jsgraph()->Constant(module_->instance->function_table);
   }
   return function_table_;
 }
 
 
 Node* WasmGraphBuilder::LoadGlobal(uint32_t index) {
+  DCHECK(module_ && module_->instance && module_->instance->globals_start);
   MachineType mem_type = module_->GetGlobalType(index);
   Node* addr = jsgraph()->IntPtrConstant(
-      module_->globals_area + module_->module->globals->at(index).offset);
+      reinterpret_cast<uintptr_t>(module_->instance->globals_start +
+                                  module_->module->globals->at(index).offset));
   const Operator* op = jsgraph()->machine()->Load(mem_type);
   Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), *effect_,
                                 *control_);
@@ -1674,9 +1810,11 @@
 
 
 Node* WasmGraphBuilder::StoreGlobal(uint32_t index, Node* val) {
+  DCHECK(module_ && module_->instance && module_->instance->globals_start);
   MachineType mem_type = module_->GetGlobalType(index);
   Node* addr = jsgraph()->IntPtrConstant(
-      module_->globals_area + module_->module->globals->at(index).offset);
+      reinterpret_cast<uintptr_t>(module_->instance->globals_start +
+                                  module_->module->globals->at(index).offset));
   const Operator* op = jsgraph()->machine()->Store(
       StoreRepresentation(mem_type.representation(), kNoWriteBarrier));
   Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), val,
@@ -1689,12 +1827,11 @@
 void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
                                       uint32_t offset) {
   // TODO(turbofan): fold bounds checks for constant indexes.
-  CHECK_GE(module_->mem_end, module_->mem_start);
-  ptrdiff_t size = module_->mem_end - module_->mem_start;
+  DCHECK(module_ && module_->instance);
+  size_t size = module_->instance->mem_size;
   byte memsize = wasm::WasmOpcodes::MemSize(memtype);
   Node* cond;
-  if (static_cast<ptrdiff_t>(offset) >= size ||
-      static_cast<ptrdiff_t>(offset + memsize) > size) {
+  if (offset >= size || (static_cast<uint64_t>(offset) + memsize) > size) {
     // The access will always throw.
     cond = jsgraph()->Int32Constant(0);
   } else {
@@ -1782,6 +1919,35 @@
 
 Graph* WasmGraphBuilder::graph() { return jsgraph()->graph(); }
 
+void WasmGraphBuilder::Int64LoweringForTesting() {
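+  // On 32-bit targets, i64 values and operations are lowered to pairs of i32
+  // operations before instruction selection.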
+  if (kPointerSize == 4) {
+    Int64Lowering r(jsgraph()->graph(), jsgraph()->machine(),
+                    jsgraph()->common(), jsgraph()->zone(),
+                    function_signature_);
+    r.LowerGraph();
+  }
+}
+
+static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
+                                      CompilationInfo* info,
+                                      const char* message, uint32_t index,
+                                      const char* func_name) {
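+  // Report the generated code to the logger and CPU profiler so that wasm
+  // code shows up with a readable name in profiles and code logs.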
+  Isolate* isolate = info->isolate();
+  if (isolate->logger()->is_logging_code_events() ||
+      isolate->cpu_profiler()->is_profiling()) {
+    ScopedVector<char> buffer(128);
+    SNPrintF(buffer, "%s#%d:%s", message, index, func_name);
+    Handle<String> name_str =
+        isolate->factory()->NewStringFromAsciiChecked(buffer.start());
+    Handle<String> script_str =
+        isolate->factory()->NewStringFromAsciiChecked("(WASM)");
+    Handle<Code> code = info->code();
+    Handle<SharedFunctionInfo> shared =
+        isolate->factory()->NewSharedFunctionInfo(name_str, code, false);
+    PROFILE(isolate,
+            CodeCreateEvent(tag, *code, *shared, info, *script_str, 0, 0));
+  }
+}
 
 Handle<JSFunction> CompileJSToWasmWrapper(
     Isolate* isolate, wasm::ModuleEnv* module, Handle<String> name,
@@ -1849,38 +2015,42 @@
         module->GetFunctionSignature(index)->parameter_count());
     CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
         &zone, false, params + 1, CallDescriptor::kNoFlags);
-    CompilationInfo info("js-to-wasm", isolate, &zone);
     // TODO(titzer): this is technically a WASM wrapper, not a wasm function.
-    info.set_output_code_kind(Code::WASM_FUNCTION);
+    Code::Flags flags = Code::ComputeFlags(Code::WASM_FUNCTION);
+    bool debugging =
+#if DEBUG
+        true;
+#else
+        FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
+#endif
+    const char* func_name = "js-to-wasm";
+
+    static unsigned id = 0;
+    Vector<char> buffer;
+    if (debugging) {
+      buffer = Vector<char>::New(128);
+      SNPrintF(buffer, "js-to-wasm#%d", id);
+      func_name = buffer.start();
+    }
+
+    CompilationInfo info(func_name, isolate, &zone, flags);
     Handle<Code> code =
         Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
-
-#ifdef ENABLE_DISASSEMBLER
-    // Disassemble the wrapper code for debugging.
-    if (!code.is_null() && FLAG_print_opt_code) {
-      Vector<char> buffer;
-      const char* name = "";
-      if (func->name_offset > 0) {
-        const byte* ptr = module->module->module_start + func->name_offset;
-        name = reinterpret_cast<const char*>(ptr);
-      }
-      SNPrintF(buffer, "JS->WASM function wrapper #%d:%s", index, name);
-      OFStream os(stdout);
-      code->Disassemble(buffer.start(), os);
+    if (debugging) {
+      buffer.Dispose();
     }
-#endif
+
+    RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, "js-to-wasm", index,
+                              module->module->GetName(func->name_offset));
     // Set the JSFunction's machine code.
     function->set_code(*code);
   }
   return function;
 }
 
-
 Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
                                     Handle<JSFunction> function,
-                                    uint32_t index) {
-  wasm::WasmFunction* func = &module->module->functions->at(index);
-
+                                    wasm::FunctionSig* sig, const char* name) {
   //----------------------------------------------------------------------------
   // Create the Graph
   //----------------------------------------------------------------------------
@@ -1894,11 +2064,11 @@
   Node* control = nullptr;
   Node* effect = nullptr;
 
-  WasmGraphBuilder builder(&zone, &jsgraph, func->sig);
+  WasmGraphBuilder builder(&zone, &jsgraph, sig);
   builder.set_control_ptr(&control);
   builder.set_effect_ptr(&effect);
   builder.set_module(module);
-  builder.BuildWasmToJSWrapper(function, func->sig);
+  builder.BuildWasmToJSWrapper(function, sig);
 
   Handle<Code> code = Handle<Code>::null();
   {
@@ -1923,26 +2093,33 @@
     }
 
     // Schedule and compile to machine code.
-    CallDescriptor* incoming = module->GetWasmCallDescriptor(&zone, func->sig);
-    CompilationInfo info("wasm-to-js", isolate, &zone);
+    CallDescriptor* incoming =
+        wasm::ModuleEnv::GetWasmCallDescriptor(&zone, sig);
     // TODO(titzer): this is technically a WASM wrapper, not a wasm function.
-    info.set_output_code_kind(Code::WASM_FUNCTION);
-    code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
-
-#ifdef ENABLE_DISASSEMBLER
-    // Disassemble the wrapper code for debugging.
-    if (!code.is_null() && FLAG_print_opt_code) {
-      Vector<char> buffer;
-      const char* name = "";
-      if (func->name_offset > 0) {
-        const byte* ptr = module->module->module_start + func->name_offset;
-        name = reinterpret_cast<const char*>(ptr);
-      }
-      SNPrintF(buffer, "WASM->JS function wrapper #%d:%s", index, name);
-      OFStream os(stdout);
-      code->Disassemble(buffer.start(), os);
-    }
+    Code::Flags flags = Code::ComputeFlags(Code::WASM_FUNCTION);
+    bool debugging =
+#if DEBUG
+        true;
+#else
+        FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
 #endif
+    const char* func_name = "wasm-to-js";
+    static unsigned id = 0;
+    Vector<char> buffer;
+    if (debugging) {
+      buffer = Vector<char>::New(128);
+      SNPrintF(buffer, "wasm-to-js#%d", id);
+      func_name = buffer.start();
+    }
+
+    CompilationInfo info(func_name, isolate, &zone, flags);
+    code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+    if (debugging) {
+      buffer.Dispose();
+    }
+
+    RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, "wasm-to-js", 0,
+                              name);
   }
   return code;
 }
@@ -1951,25 +2128,21 @@
 // Helper function to compile a single function.
 Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
                                  wasm::ModuleEnv* module_env,
-                                 const wasm::WasmFunction& function,
-                                 int index) {
+                                 const wasm::WasmFunction& function) {
   if (FLAG_trace_wasm_compiler || FLAG_trace_wasm_decode_time) {
-    // TODO(titzer): clean me up a bit.
     OFStream os(stdout);
-    os << "Compiling WASM function #" << index << ":";
-    if (function.name_offset > 0) {
-      os << module_env->module->GetName(function.name_offset);
-    }
+    os << "Compiling WASM function "
+       << wasm::WasmFunctionName(&function, module_env);
     os << std::endl;
   }
   // Initialize the function environment for decoding.
   wasm::FunctionEnv env;
   env.module = module_env;
   env.sig = function.sig;
-  env.local_int32_count = function.local_int32_count;
-  env.local_int64_count = function.local_int64_count;
-  env.local_float32_count = function.local_float32_count;
-  env.local_float64_count = function.local_float64_count;
+  env.local_i32_count = function.local_i32_count;
+  env.local_i64_count = function.local_i64_count;
+  env.local_f32_count = function.local_f32_count;
+  env.local_f64_count = function.local_f64_count;
   env.SumLocals();
 
   // Create a TF graph during decoding.
@@ -1993,35 +2166,49 @@
       os << "Compilation failed: " << result << std::endl;
     }
     // Add the function as another context for the exception
-    Vector<char> buffer;
-    SNPrintF(buffer, "Compiling WASM function #%d:%s failed:", index,
+    ScopedVector<char> buffer(128);
+    SNPrintF(buffer, "Compiling WASM function #%d:%s failed:",
+             function.func_index,
              module_env->module->GetName(function.name_offset));
     thrower.Failed(buffer.start(), result);
     return Handle<Code>::null();
   }
 
   // Run the compiler pipeline to generate machine code.
-  CallDescriptor* descriptor = const_cast<CallDescriptor*>(
-      module_env->GetWasmCallDescriptor(&zone, function.sig));
-  CompilationInfo info("wasm", isolate, &zone);
-  info.set_output_code_kind(Code::WASM_FUNCTION);
+  CallDescriptor* descriptor =
+      wasm::ModuleEnv::GetWasmCallDescriptor(&zone, function.sig);
+  if (kPointerSize == 4) {
+    descriptor = module_env->GetI32WasmCallDescriptor(&zone, descriptor);
+  }
+  Code::Flags flags = Code::ComputeFlags(Code::WASM_FUNCTION);
+  // Add flags here if a meaningful name is helpful for debugging.
+  bool debugging =
+#if DEBUG
+      true;
+#else
+      FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
+#endif
+  const char* func_name = "wasm";
+  Vector<char> buffer;
+  if (debugging) {
+    buffer = Vector<char>::New(128);
+    SNPrintF(buffer, "WASM_function_#%d:%s", function.func_index,
+             module_env->module->GetName(function.name_offset));
+    func_name = buffer.start();
+  }
+  CompilationInfo info(func_name, isolate, &zone, flags);
+
   Handle<Code> code =
       Pipeline::GenerateCodeForTesting(&info, descriptor, &graph);
-
-#ifdef ENABLE_DISASSEMBLER
-  // Disassemble the code for debugging.
-  if (!code.is_null() && FLAG_print_opt_code) {
-    Vector<char> buffer;
-    const char* name = "";
-    if (function.name_offset > 0) {
-      const byte* ptr = module_env->module->module_start + function.name_offset;
-      name = reinterpret_cast<const char*>(ptr);
-    }
-    SNPrintF(buffer, "WASM function #%d:%s", index, name);
-    OFStream os(stdout);
-    code->Disassemble(buffer.start(), os);
+  if (debugging) {
+    buffer.Dispose();
   }
-#endif
+  if (!code.is_null()) {
+    RecordFunctionCompilation(
+        Logger::FUNCTION_TAG, &info, "WASM_function", function.func_index,
+        module_env->module->GetName(function.name_offset));
+  }
+
   return code;
 }
 
diff --git a/src/compiler/wasm-compiler.h b/src/compiler/wasm-compiler.h
index 1a17a83..2e86b56 100644
--- a/src/compiler/wasm-compiler.h
+++ b/src/compiler/wasm-compiler.h
@@ -35,12 +35,12 @@
 // Compiles a single function, producing a code object.
 Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
                                  wasm::ModuleEnv* module_env,
-                                 const wasm::WasmFunction& function, int index);
+                                 const wasm::WasmFunction& function);
 
 // Wraps a JS function, producing a code object that can be called from WASM.
 Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
                                     Handle<JSFunction> function,
-                                    uint32_t index);
+                                    wasm::FunctionSig* sig, const char* name);
 
 // Wraps a given wasm code object, producing a JSFunction that can be called
 // from JavaScript.
@@ -100,6 +100,7 @@
   Node* Unreachable();
 
   Node* CallDirect(uint32_t index, Node** args);
+  Node* CallImport(uint32_t index, Node** args);
   Node* CallIndirect(uint32_t index, Node** args);
   void BuildJSToWasmWrapper(Handle<Code> wasm_code, wasm::FunctionSig* sig);
   void BuildWasmToJSWrapper(Handle<JSFunction> function,
@@ -132,6 +133,8 @@
 
   wasm::FunctionSig* GetFunctionSignature() { return function_signature_; }
 
+  void Int64LoweringForTesting();
+
  private:
   static const int kDefaultBufferSize = 16;
   friend class WasmTrapHelper;
@@ -159,6 +162,7 @@
   Node* MemBuffer(uint32_t offset);
   void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset);
 
+  Node* BuildCCall(MachineSignature* sig, Node** args);
   Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args);
   Node* BuildF32Neg(Node* input);
   Node* BuildF64Neg(Node* input);
@@ -176,6 +180,16 @@
   Node* BuildI32Popcnt(Node* input);
   Node* BuildI64Ctz(Node* input);
   Node* BuildI64Popcnt(Node* input);
+  Node* BuildRoundingInstruction(Node* input, ExternalReference ref,
+                                 MachineType type);
+  Node* BuildF32Trunc(Node* input);
+  Node* BuildF32Floor(Node* input);
+  Node* BuildF32Ceil(Node* input);
+  Node* BuildF32NearestInt(Node* input);
+  Node* BuildF64Trunc(Node* input);
+  Node* BuildF64Floor(Node* input);
+  Node* BuildF64Ceil(Node* input);
+  Node* BuildF64NearestInt(Node* input);
 
   Node** Realloc(Node** buffer, size_t count) {
     Node** buf = Buffer(count);
diff --git a/src/compiler/wasm-linkage.cc b/src/compiler/wasm-linkage.cc
index 92363dd..3176fd3 100644
--- a/src/compiler/wasm-linkage.cc
+++ b/src/compiler/wasm-linkage.cc
@@ -58,7 +58,7 @@
 // ===========================================================================
 // == ia32 ===================================================================
 // ===========================================================================
-#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi, edi
+#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi
 #define GP_RETURN_REGISTERS eax, edx
 #define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
 #define FP_RETURN_REGISTERS xmm1, xmm2
@@ -76,7 +76,7 @@
 // ===========================================================================
 // == x87 ====================================================================
 // ===========================================================================
-#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi, edi
+#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi
 #define GP_RETURN_REGISTERS eax, edx
 #define FP_RETURN_REGISTERS stX_0
 
@@ -191,15 +191,7 @@
 };
 }  // namespace
 
-
-// General code uses the above configuration data.
-CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
-                                                 FunctionSig* fsig) {
-  MachineSignature::Builder msig(zone, fsig->return_count(),
-                                 fsig->parameter_count());
-  LocationSignature::Builder locations(zone, fsig->return_count(),
-                                       fsig->parameter_count());
-
+static Allocator GetReturnRegisters() {
 #ifdef GP_RETURN_REGISTERS
   static const Register kGPReturnRegisters[] = {GP_RETURN_REGISTERS};
   static const int kGPReturnRegistersCount =
@@ -221,14 +213,10 @@
   Allocator rets(kGPReturnRegisters, kGPReturnRegistersCount,
                  kFPReturnRegisters, kFPReturnRegistersCount);
 
-  // Add return location(s).
-  const int return_count = static_cast<int>(locations.return_count_);
-  for (int i = 0; i < return_count; i++) {
-    LocalType ret = fsig->GetReturn(i);
-    msig.AddReturn(MachineTypeFor(ret));
-    locations.AddReturn(rets.Next(ret));
-  }
+  return rets;
+}
 
+static Allocator GetParameterRegisters() {
 #ifdef GP_PARAM_REGISTERS
   static const Register kGPParamRegisters[] = {GP_PARAM_REGISTERS};
   static const int kGPParamRegistersCount =
@@ -250,6 +238,29 @@
   Allocator params(kGPParamRegisters, kGPParamRegistersCount, kFPParamRegisters,
                    kFPParamRegistersCount);
 
+  return params;
+}
+
+// General code uses the above configuration data.
+CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
+                                                 FunctionSig* fsig) {
+  MachineSignature::Builder msig(zone, fsig->return_count(),
+                                 fsig->parameter_count());
+  LocationSignature::Builder locations(zone, fsig->return_count(),
+                                       fsig->parameter_count());
+
+  Allocator rets = GetReturnRegisters();
+
+  // Add return location(s).
+  const int return_count = static_cast<int>(locations.return_count_);
+  for (int i = 0; i < return_count; i++) {
+    LocalType ret = fsig->GetReturn(i);
+    msig.AddReturn(MachineTypeFor(ret));
+    locations.AddReturn(rets.Next(ret));
+  }
+
+  Allocator params = GetParameterRegisters();
+
   // Add register and/or stack parameter(s).
   const int parameter_count = static_cast<int>(fsig->parameter_count());
   for (int i = 0; i < parameter_count; i++) {
@@ -264,6 +275,7 @@
   // The target for WASM calls is always a code object.
   MachineType target_type = MachineType::AnyTagged();
   LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+
   return new (zone) CallDescriptor(       // --
       CallDescriptor::kCallCodeObject,    // kind
       target_type,                        // target MachineType
@@ -275,8 +287,82 @@
       kCalleeSaveRegisters,               // callee-saved registers
       kCalleeSaveFPRegisters,             // callee-saved fp regs
       CallDescriptor::kUseNativeStack,    // flags
-      "c-call");
+      "wasm-call");
 }
+
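+// Lowers a wasm call descriptor for 32-bit platforms by splitting each i64
+// parameter and return value into a pair of i32 values.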
+CallDescriptor* ModuleEnv::GetI32WasmCallDescriptor(
+    Zone* zone, CallDescriptor* descriptor) {
+  const MachineSignature* signature = descriptor->GetMachineSignature();
+  size_t parameter_count = signature->parameter_count();
+  size_t return_count = signature->return_count();
+  for (size_t i = 0; i < signature->parameter_count(); i++) {
+    if (signature->GetParam(i) == MachineType::Int64()) {
+      // For each int64 input we get two int32 inputs.
+      parameter_count++;
+    }
+  }
+  for (size_t i = 0; i < signature->return_count(); i++) {
+    if (signature->GetReturn(i) == MachineType::Int64()) {
+      // For each int64 return we get two int32 returns.
+      return_count++;
+    }
+  }
+  if (parameter_count == signature->parameter_count() &&
+      return_count == signature->return_count()) {
+    // If there is no int64 parameter or return value, we can just return the
+    // original descriptor.
+    return descriptor;
+  }
+
+  MachineSignature::Builder msig(zone, return_count, parameter_count);
+  LocationSignature::Builder locations(zone, return_count, parameter_count);
+
+  Allocator rets = GetReturnRegisters();
+
+  for (size_t i = 0; i < signature->return_count(); i++) {
+    if (signature->GetReturn(i) == MachineType::Int64()) {
+      // For each int64 return we get two int32 returns.
+      msig.AddReturn(MachineType::Int32());
+      msig.AddReturn(MachineType::Int32());
+      locations.AddReturn(rets.Next(MachineRepresentation::kWord32));
+      locations.AddReturn(rets.Next(MachineRepresentation::kWord32));
+    } else {
+      msig.AddReturn(signature->GetReturn(i));
+      locations.AddReturn(rets.Next(signature->GetReturn(i).representation()));
+    }
+  }
+
+  Allocator params = GetParameterRegisters();
+
+  for (size_t i = 0; i < signature->parameter_count(); i++) {
+    if (signature->GetParam(i) == MachineType::Int64()) {
+      // For each int64 input we get two int32 inputs.
+      msig.AddParam(MachineType::Int32());
+      msig.AddParam(MachineType::Int32());
+      locations.AddParam(params.Next(MachineRepresentation::kWord32));
+      locations.AddParam(params.Next(MachineRepresentation::kWord32));
+    } else {
+      msig.AddParam(signature->GetParam(i));
+      locations.AddParam(params.Next(signature->GetParam(i).representation()));
+    }
+  }
+
+  return new (zone) CallDescriptor(          // --
+      descriptor->kind(),                    // kind
+      descriptor->GetInputType(0),           // target MachineType
+      descriptor->GetInputLocation(0),       // target location
+      msig.Build(),                          // machine_sig
+      locations.Build(),                     // location_sig
+      params.stack_offset,                   // stack_parameter_count
+      descriptor->properties(),              // properties
+      descriptor->CalleeSavedRegisters(),    // callee-saved registers
+      descriptor->CalleeSavedFPRegisters(),  // callee-saved fp regs
+      descriptor->flags(),                   // flags
+      descriptor->debug_name());
+}
+
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
index be406fb..510c0c6 100644
--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -209,15 +209,16 @@
     if (mode_ > RecordWriteMode::kValueIsPointer) {
       __ JumpIfSmi(value_, exit());
     }
-    if (mode_ > RecordWriteMode::kValueIsMap) {
-      __ CheckPageFlag(value_, scratch0_,
-                       MemoryChunk::kPointersToHereAreInterestingMask, zero,
-                       exit());
-    }
+    __ CheckPageFlag(value_, scratch0_,
+                     MemoryChunk::kPointersToHereAreInterestingMask, zero,
+                     exit());
+    RememberedSetAction const remembered_set_action =
+        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+                                             : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
     RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
-                         EMIT_REMEMBERED_SET, save_fp_mode);
+                         remembered_set_action, save_fp_mode);
     __ leap(scratch1_, operand_);
     __ CallStub(&stub);
   }
@@ -261,6 +262,32 @@
     }                                                          \
   } while (0)
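+// ASSEMBLE_COMPARE (below) additionally allows the left operand to be a
+// memory operand, so compares and tests can read directly from memory
+// without a separate load.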
 
+#define ASSEMBLE_COMPARE(asm_instr)                                   \
+  do {                                                                \
+    if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
+      size_t index = 0;                                               \
+      Operand left = i.MemoryOperand(&index);                         \
+      if (HasImmediateInput(instr, index)) {                          \
+        __ asm_instr(left, i.InputImmediate(index));                  \
+      } else {                                                        \
+        __ asm_instr(left, i.InputRegister(index));                   \
+      }                                                               \
+    } else {                                                          \
+      if (HasImmediateInput(instr, 1)) {                              \
+        if (instr->InputAt(0)->IsRegister()) {                        \
+          __ asm_instr(i.InputRegister(0), i.InputImmediate(1));      \
+        } else {                                                      \
+          __ asm_instr(i.InputOperand(0), i.InputImmediate(1));       \
+        }                                                             \
+      } else {                                                        \
+        if (instr->InputAt(1)->IsRegister()) {                        \
+          __ asm_instr(i.InputRegister(0), i.InputRegister(1));       \
+        } else {                                                      \
+          __ asm_instr(i.InputRegister(0), i.InputOperand(1));        \
+        }                                                             \
+      }                                                               \
+    }                                                                 \
+  } while (0)
 
 #define ASSEMBLE_MULT(asm_instr)                              \
   do {                                                        \
@@ -654,11 +681,6 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
-    case kArchLazyBailout: {
-      EnsureSpaceForLazyDeopt();
-      RecordCallPosition(instr);
-      break;
-    }
     case kArchPrepareCallCFunction: {
       // Frame alignment requires using FP-relative frame addressing.
       frame_access_state()->SetFrameAccessToFP();
@@ -712,6 +734,13 @@
     case kArchFramePointer:
       __ movq(i.OutputRegister(), rbp);
       break;
+    case kArchParentFramePointer:
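+      // With a frame, the parent's frame pointer is the saved fp at [rbp];
+      // without one, rbp still holds the parent's frame pointer.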
+      if (frame_access_state()->frame()->needs_frame()) {
+        __ movq(i.OutputRegister(), Operand(rbp, 0));
+      } else {
+        __ movq(i.OutputRegister(), rbp);
+      }
+      break;
     case kArchTruncateDoubleToI: {
       auto result = i.OutputRegister();
       auto input = i.InputDoubleRegister(0);
@@ -740,6 +769,18 @@
       __ bind(ool->exit());
       break;
     }
+    case kArchStackSlot: {
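+      // Materialize the address of the stack slot as an fp- or sp-relative
+      // lea.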
+      FrameOffset offset =
+          frame_access_state()->GetFrameOffset(i.InputInt32(0));
+      Register base;
+      if (offset.from_stack_pointer()) {
+        base = rsp;
+      } else {
+        base = rbp;
+      }
+      __ leaq(i.OutputRegister(), Operand(base, offset.offset()));
+      break;
+    }
     case kX64Add32:
       ASSEMBLE_BINOP(addl);
       break;
@@ -759,16 +800,16 @@
       ASSEMBLE_BINOP(andq);
       break;
     case kX64Cmp32:
-      ASSEMBLE_BINOP(cmpl);
+      ASSEMBLE_COMPARE(cmpl);
       break;
     case kX64Cmp:
-      ASSEMBLE_BINOP(cmpq);
+      ASSEMBLE_COMPARE(cmpq);
       break;
     case kX64Test32:
-      ASSEMBLE_BINOP(testl);
+      ASSEMBLE_COMPARE(testl);
       break;
     case kX64Test:
-      ASSEMBLE_BINOP(testq);
+      ASSEMBLE_COMPARE(testq);
       break;
     case kX64Imul32:
       ASSEMBLE_MULT(imull);
@@ -947,6 +988,22 @@
       __ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
       break;
     }
+    case kSSEFloat32ToInt32:
+      if (instr->InputAt(0)->IsDoubleRegister()) {
+        __ Cvttss2si(i.OutputRegister(), i.InputDoubleRegister(0));
+      } else {
+        __ Cvttss2si(i.OutputRegister(), i.InputOperand(0));
+      }
+      break;
+    case kSSEFloat32ToUint32: {
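+      // Convert via the 64-bit signed conversion; every value in uint32 range
+      // is representable that way, and the upper bits of the result must be
+      // zero.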
+      if (instr->InputAt(0)->IsDoubleRegister()) {
+        __ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
+      } else {
+        __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
+      }
+      __ AssertZeroExtended(i.OutputRegister());
+      break;
+    }
     case kSSEFloat64Cmp:
       ASSEMBLE_SSE_BINOP(Ucomisd);
       break;
@@ -1197,6 +1254,13 @@
         __ Cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
       }
       break;
+    case kSSEInt32ToFloat32:
+      if (instr->InputAt(0)->IsRegister()) {
+        __ Cvtlsi2ss(i.OutputDoubleRegister(), i.InputRegister(0));
+      } else {
+        __ Cvtlsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
+      }
+      break;
     case kSSEInt64ToFloat32:
       if (instr->InputAt(0)->IsRegister()) {
         __ Cvtqsi2ss(i.OutputDoubleRegister(), i.InputRegister(0));
@@ -1237,6 +1301,14 @@
       }
       __ Cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
       break;
+    case kSSEUint32ToFloat32:
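+      // Zero-extend the uint32 input, then use the 64-bit signed conversion;
+      // every uint32 value fits in a non-negative int64.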
+      if (instr->InputAt(0)->IsRegister()) {
+        __ movl(kScratchRegister, i.InputRegister(0));
+      } else {
+        __ movl(kScratchRegister, i.InputOperand(0));
+      }
+      __ Cvtqsi2ss(i.OutputDoubleRegister(), kScratchRegister);
+      break;
     case kSSEFloat64ExtractLowWord32:
       if (instr->InputAt(0)->IsDoubleStackSlot()) {
         __ movl(i.OutputRegister(), i.InputOperand(0));
@@ -1828,8 +1900,6 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    // TODO(titzer): cannot address target function == local #-1
-    __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
     stack_shrink_slots -=
         static_cast<int>(OsrHelper(info()).UnoptimizedFrameSlots());
   }
diff --git a/src/compiler/x64/instruction-codes-x64.h b/src/compiler/x64/instruction-codes-x64.h
index 8e8e765..6d5e77c 100644
--- a/src/compiler/x64/instruction-codes-x64.h
+++ b/src/compiler/x64/instruction-codes-x64.h
@@ -63,6 +63,8 @@
   V(SSEFloat32Max)                 \
   V(SSEFloat32Min)                 \
   V(SSEFloat32ToFloat64)           \
+  V(SSEFloat32ToInt32)             \
+  V(SSEFloat32ToUint32)            \
   V(SSEFloat32Round)               \
   V(SSEFloat64Cmp)                 \
   V(SSEFloat64Add)                 \
@@ -84,11 +86,13 @@
   V(SSEFloat32ToUint64)            \
   V(SSEFloat64ToUint64)            \
   V(SSEInt32ToFloat64)             \
+  V(SSEInt32ToFloat32)             \
   V(SSEInt64ToFloat32)             \
   V(SSEInt64ToFloat64)             \
   V(SSEUint64ToFloat32)            \
   V(SSEUint64ToFloat64)            \
   V(SSEUint32ToFloat64)            \
+  V(SSEUint32ToFloat32)            \
   V(SSEFloat64ExtractLowWord32)    \
   V(SSEFloat64ExtractHighWord32)   \
   V(SSEFloat64InsertLowWord32)     \
diff --git a/src/compiler/x64/instruction-scheduler-x64.cc b/src/compiler/x64/instruction-scheduler-x64.cc
index f8537c8..1f10b51 100644
--- a/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/src/compiler/x64/instruction-scheduler-x64.cc
@@ -79,6 +79,8 @@
     case kSSEFloat64Max:
     case kSSEFloat64Min:
     case kSSEFloat64ToFloat32:
+    case kSSEFloat32ToInt32:
+    case kSSEFloat32ToUint32:
     case kSSEFloat64ToInt32:
     case kSSEFloat64ToUint32:
     case kSSEFloat64ToInt64:
@@ -86,11 +88,13 @@
     case kSSEFloat64ToUint64:
     case kSSEFloat32ToUint64:
     case kSSEInt32ToFloat64:
+    case kSSEInt32ToFloat32:
     case kSSEInt64ToFloat32:
     case kSSEInt64ToFloat64:
     case kSSEUint64ToFloat32:
     case kSSEUint64ToFloat64:
     case kSSEUint32ToFloat64:
+    case kSSEUint32ToFloat32:
     case kSSEFloat64ExtractLowWord32:
     case kSSEFloat64ExtractHighWord32:
     case kSSEFloat64InsertLowWord32:
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index c47a42e..d3a2a8e 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -133,6 +133,7 @@
     case MachineRepresentation::kWord64:
       opcode = kX64Movq;
       break;
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -219,6 +220,7 @@
       case MachineRepresentation::kWord64:
         opcode = kX64Movq;
         break;
+      case MachineRepresentation::kSimd128:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -264,8 +266,9 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedLoadFloat64;
       break;
-    case MachineRepresentation::kBit:
-    case MachineRepresentation::kTagged:
+    case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kTagged:   // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -316,8 +319,9 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedStoreFloat64;
       break;
-    case MachineRepresentation::kBit:
-    case MachineRepresentation::kTagged:
+    case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kTagged:   // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -622,6 +626,12 @@
 }
 
 
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
+
+
 void InstructionSelector::VisitWord32Popcnt(Node* node) {
   X64OperandGenerator g(this);
   Emit(kX64Popcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -734,10 +744,11 @@
   if (selector->IsLive(left) && !selector->IsLive(right)) {
     std::swap(left, right);
   }
+  InstructionOperand temps[] = {g.TempRegister(rax)};
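+  // The multiply writes the low half of the product to rax, so rax is
+  // reserved as a clobbered temp.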
   // TODO(turbofan): We use UseUniqueRegister here to improve register
   // allocation.
   selector->Emit(opcode, g.DefineAsFixed(node, rdx), g.UseFixed(left, rax),
-                 g.UseUniqueRegister(right));
+                 g.UseUniqueRegister(right), arraysize(temps), temps);
 }
 
 
@@ -752,9 +763,10 @@
 
 void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
   X64OperandGenerator g(selector);
-  selector->Emit(opcode, g.DefineAsFixed(node, rdx),
-                 g.UseFixed(node->InputAt(0), rax),
-                 g.UseUniqueRegister(node->InputAt(1)));
+  InstructionOperand temps[] = {g.TempRegister(rax)};
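+  // The divide instruction leaves the quotient in rax; only the remainder in
+  // rdx is the result, so rax is reserved as a clobbered temp.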
+  selector->Emit(
+      opcode, g.DefineAsFixed(node, rdx), g.UseFixed(node->InputAt(0), rax),
+      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
 }
 
 }  // namespace
@@ -857,6 +869,18 @@
 }
 
 
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat32ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat32ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
   X64OperandGenerator g(this);
   InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
@@ -1046,6 +1070,12 @@
 }
 
 
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEInt32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
   X64OperandGenerator g(this);
   Emit(kSSEInt64ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -1058,6 +1088,12 @@
 }
 
 
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEUint32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
   X64OperandGenerator g(this);
   InstructionOperand temps[] = {g.TempRegister()};
@@ -1303,6 +1339,48 @@
 
 namespace {
 
+void VisitCompareWithMemoryOperand(InstructionSelector* selector,
+                                   InstructionCode opcode, Node* left,
+                                   InstructionOperand right,
+                                   FlagsContinuation* cont) {
+  DCHECK(left->opcode() == IrOpcode::kLoad);
+  X64OperandGenerator g(selector);
+  size_t input_count = 0;
+  InstructionOperand inputs[6];
+  AddressingMode addressing_mode =
+      g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
+  opcode |= AddressingModeField::encode(addressing_mode);
+  opcode = cont->Encode(opcode);
+  inputs[input_count++] = right;
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+    selector->Emit(opcode, 0, nullptr, input_count, inputs);
+  } else {
+    DCHECK(cont->IsSet());
+    InstructionOperand output = g.DefineAsRegister(cont->result());
+    selector->Emit(opcode, 1, &output, input_count, inputs);
+  }
+}
+
+// Determines if {input} of {node} can be replaced by a memory operand.
+bool CanUseMemoryOperand(InstructionSelector* selector, InstructionCode opcode,
+                         Node* node, Node* input) {
+  if (input->opcode() != IrOpcode::kLoad || !selector->CanCover(node, input)) {
+    return false;
+  }
+  MachineRepresentation rep =
+      LoadRepresentationOf(input->op()).representation();
+  if (rep == MachineRepresentation::kWord64 ||
+      rep == MachineRepresentation::kTagged) {
+    return opcode == kX64Cmp || opcode == kX64Test;
+  } else if (rep == MachineRepresentation::kWord32) {
+    return opcode == kX64Cmp32 || opcode == kX64Test32;
+  }
+  return false;
+}
+
 // Shared routine for multiple compare operations.
 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                   InstructionOperand left, InstructionOperand right,
@@ -1330,26 +1408,41 @@
   VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
 }
 
-
 // Shared routine for multiple word compare operations.
 void VisitWordCompare(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
   X64OperandGenerator g(selector);
-  Node* const left = node->InputAt(0);
-  Node* const right = node->InputAt(1);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
 
-  // Match immediates on left or right side of comparison.
-  if (g.CanBeImmediate(right)) {
-    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
-  } else if (g.CanBeImmediate(left)) {
+  // If one of the two inputs is an immediate, make sure it's on the right.
+  if (!g.CanBeImmediate(right) && g.CanBeImmediate(left)) {
     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
-    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
-  } else {
-    VisitCompare(selector, opcode, left, right, cont,
-                 node->op()->HasProperty(Operator::kCommutative));
+    std::swap(left, right);
   }
-}
 
+  // Match immediates on right side of comparison.
+  if (g.CanBeImmediate(right)) {
+    if (CanUseMemoryOperand(selector, opcode, node, left)) {
+      return VisitCompareWithMemoryOperand(selector, opcode, left,
+                                           g.UseImmediate(right), cont);
+    }
+    return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
+                        cont);
+  }
+
+  if (g.CanBeBetterLeftOperand(right)) {
+    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+    std::swap(left, right);
+  }
+
+  if (CanUseMemoryOperand(selector, opcode, node, left)) {
+    return VisitCompareWithMemoryOperand(selector, opcode, left,
+                                         g.UseRegister(right), cont);
+  }
+  return VisitCompare(selector, opcode, left, right, cont,
+                      node->op()->HasProperty(Operator::kCommutative));
+}
 
 // Shared routine for 64-bit word comparison operations.
 void VisitWord64Compare(InstructionSelector* selector, Node* node,
diff --git a/src/compiler/x87/code-generator-x87.cc b/src/compiler/x87/code-generator-x87.cc
index a7b7246..1575570 100644
--- a/src/compiler/x87/code-generator-x87.cc
+++ b/src/compiler/x87/code-generator-x87.cc
@@ -9,6 +9,7 @@
 #include "src/compiler/gap-resolver.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/osr.h"
+#include "src/frames.h"
 #include "src/x87/assembler-x87.h"
 #include "src/x87/frames-x87.h"
 #include "src/x87/macro-assembler-x87.h"
@@ -50,7 +51,7 @@
 
   Operand ToMaterializableOperand(int materializable_offset) {
     FrameOffset offset = frame_access_state()->GetFrameOffset(
-        Frame::FPOffsetToSlot(materializable_offset));
+        FPOffsetToFrameSlot(materializable_offset));
     return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
   }
 
@@ -245,15 +246,16 @@
     if (mode_ > RecordWriteMode::kValueIsPointer) {
       __ JumpIfSmi(value_, exit());
     }
-    if (mode_ > RecordWriteMode::kValueIsMap) {
-      __ CheckPageFlag(value_, scratch0_,
-                       MemoryChunk::kPointersToHereAreInterestingMask, zero,
-                       exit());
-    }
+    __ CheckPageFlag(value_, scratch0_,
+                     MemoryChunk::kPointersToHereAreInterestingMask, zero,
+                     exit());
+    RememberedSetAction const remembered_set_action =
+        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+                                             : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
     RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
-                         EMIT_REMEMBERED_SET, save_fp_mode);
+                         remembered_set_action, save_fp_mode);
     __ lea(scratch1_, operand_);
     __ CallStub(&stub);
   }
@@ -462,14 +464,6 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
-    case kArchLazyBailout: {
-      EnsureSpaceForLazyDeopt();
-      RecordCallPosition(instr);
-      // Lazy Bailout entry, need to re-initialize FPU state.
-      __ fninit();
-      __ fld1();
-      break;
-    }
     case kArchPrepareCallCFunction: {
       // Frame alignment requires using FP-relative frame addressing.
       frame_access_state()->SetFrameAccessToFP();
@@ -559,6 +553,13 @@
     case kArchStackPointer:
       __ mov(i.OutputRegister(), esp);
       break;
+    case kArchParentFramePointer:
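+      // The caller's frame pointer is saved at [ebp] when this code sets up
+      // its own frame; otherwise ebp already is the parent frame pointer.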
+      if (frame_access_state()->frame()->needs_frame()) {
+        __ mov(i.OutputRegister(), Operand(ebp, 0));
+      } else {
+        __ mov(i.OutputRegister(), ebp);
+      }
+      break;
     case kArchTruncateDoubleToI: {
       if (!instr->InputAt(0)->IsDoubleRegister()) {
         __ fld_d(i.InputOperand(0));
@@ -587,6 +588,18 @@
       __ bind(ool->exit());
       break;
     }
+    case kArchStackSlot: {
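+      // Materialize the address of the requested frame slot in the output
+      // register.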
+      FrameOffset offset =
+          frame_access_state()->GetFrameOffset(i.InputInt32(0));
+      Register base;
+      if (offset.from_stack_pointer()) {
+        base = esp;
+      } else {
+        base = ebp;
+      }
+      __ lea(i.OutputRegister(), Operand(base, offset.offset()));
+      break;
+    }
     case kX87Add:
       if (HasImmediateInput(instr, 1)) {
         __ add(i.InputOperand(0), i.InputImmediate(1));
@@ -602,17 +615,37 @@
       }
       break;
     case kX87Cmp:
-      if (HasImmediateInput(instr, 1)) {
-        __ cmp(i.InputOperand(0), i.InputImmediate(1));
+      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
+        size_t index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        if (HasImmediateInput(instr, index)) {
+          __ cmp(operand, i.InputImmediate(index));
+        } else {
+          __ cmp(operand, i.InputRegister(index));
+        }
       } else {
-        __ cmp(i.InputRegister(0), i.InputOperand(1));
+        if (HasImmediateInput(instr, 1)) {
+          __ cmp(i.InputOperand(0), i.InputImmediate(1));
+        } else {
+          __ cmp(i.InputRegister(0), i.InputOperand(1));
+        }
       }
       break;
     case kX87Test:
-      if (HasImmediateInput(instr, 1)) {
-        __ test(i.InputOperand(0), i.InputImmediate(1));
+      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
+        size_t index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        if (HasImmediateInput(instr, index)) {
+          __ test(operand, i.InputImmediate(index));
+        } else {
+          __ test(i.InputRegister(index), operand);
+        }
       } else {
-        __ test(i.InputRegister(0), i.InputOperand(1));
+        if (HasImmediateInput(instr, 1)) {
+          __ test(i.InputOperand(0), i.InputImmediate(1));
+        } else {
+          __ test(i.InputRegister(0), i.InputOperand(1));
+        }
       }
       break;
     case kX87Imul:
@@ -1062,6 +1095,66 @@
       __ lea(esp, Operand(esp, kDoubleSize));
       break;
     }
+    case kX87Int32ToFloat32: {
+      InstructionOperand* input = instr->InputAt(0);
+      DCHECK(input->IsRegister() || input->IsStackSlot());
+      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+        __ VerifyX87StackDepth(1);
+      }
+      __ fstp(0);
+      if (input->IsRegister()) {
+        Register input_reg = i.InputRegister(0);
+        __ push(input_reg);
+        __ fild_s(Operand(esp, 0));
+        __ pop(input_reg);
+      } else {
+        __ fild_s(i.InputOperand(0));
+      }
+      break;
+    }
+    case kX87Uint32ToFloat32: {
+      InstructionOperand* input = instr->InputAt(0);
+      DCHECK(input->IsRegister() || input->IsStackSlot());
+      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+        __ VerifyX87StackDepth(1);
+      }
+      __ fstp(0);
+      Label msb_set_src;
+      Label jmp_return;
+      // Put the input integer into eax (temporarily).
+      __ push(eax);
+      if (input->IsRegister())
+        __ mov(eax, i.InputRegister(0));
+      else
+        __ mov(eax, i.InputOperand(0));
+
+      __ test(eax, eax);
+      __ j(sign, &msb_set_src, Label::kNear);
+      __ push(eax);
+      __ fild_s(Operand(esp, 0));
+      __ pop(eax);
+
+      __ jmp(&jmp_return, Label::kNear);
+      __ bind(&msb_set_src);
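+      // The MSB is set, so fild_s would treat the value as negative. Convert
+      // (value >> 1) | (value & 1) instead and double the result afterwards.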
+      // Need another temp register.
+      __ push(ebx);
+      __ mov(ebx, eax);
+      __ shr(eax, 1);
+      // Recover the least significant bit to avoid rounding errors.
+      __ and_(ebx, Immediate(1));
+      __ or_(eax, ebx);
+      __ push(eax);
+      __ fild_s(Operand(esp, 0));
+      __ pop(eax);
+      __ fld(0);
+      __ faddp();
+      // Restore ebx.
+      __ pop(ebx);
+      __ bind(&jmp_return);
+      // Restore eax.
+      __ pop(eax);
+      break;
+    }
     case kX87Int32ToFloat64: {
       InstructionOperand* input = instr->InputAt(0);
       DCHECK(input->IsRegister() || input->IsStackSlot());
@@ -1104,6 +1197,36 @@
       __ LoadUint32NoSSE2(i.InputRegister(0));
       break;
     }
+    case kX87Float32ToInt32: {
+      if (!instr->InputAt(0)->IsDoubleRegister()) {
+        __ fld_s(i.InputOperand(0));
+      }
+      __ TruncateX87TOSToI(i.OutputRegister(0));
+      if (!instr->InputAt(0)->IsDoubleRegister()) {
+        __ fstp(0);
+      }
+      break;
+    }
+    case kX87Float32ToUint32: {
+      if (!instr->InputAt(0)->IsDoubleRegister()) {
+        __ fld_s(i.InputOperand(0));
+      }
+      Label success;
+      __ TruncateX87TOSToI(i.OutputRegister(0));
+      __ test(i.OutputRegister(0), i.OutputRegister(0));
+      __ j(positive, &success);
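+      // The truncation came back negative, so the input is at least 2^31:
+      // bias the input down by 2^31 (add INT32_MIN), truncate again, and set
+      // the sign bit of the result to undo the bias.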
+      __ push(Immediate(INT32_MIN));
+      __ fild_s(Operand(esp, 0));
+      __ lea(esp, Operand(esp, kPointerSize));
+      __ faddp();
+      __ TruncateX87TOSToI(i.OutputRegister(0));
+      __ or_(i.OutputRegister(0), Immediate(0x80000000));
+      __ bind(&success);
+      if (!instr->InputAt(0)->IsDoubleRegister()) {
+        __ fstp(0);
+      }
+      break;
+    }
     case kX87Float64ToInt32: {
       if (!instr->InputAt(0)->IsDoubleRegister()) {
         __ fld_d(i.InputOperand(0));
@@ -1817,8 +1940,6 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    // TODO(titzer): cannot address target function == local #-1
-    __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
     stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   }
 
diff --git a/src/compiler/x87/instruction-codes-x87.h b/src/compiler/x87/instruction-codes-x87.h
index b498d9c..e5d0912 100644
--- a/src/compiler/x87/instruction-codes-x87.h
+++ b/src/compiler/x87/instruction-codes-x87.h
@@ -53,10 +53,14 @@
   V(X87Float64Max)                 \
   V(X87Float64Min)                 \
   V(X87Float64Abs)                 \
+  V(X87Int32ToFloat32)             \
+  V(X87Uint32ToFloat32)            \
   V(X87Int32ToFloat64)             \
   V(X87Float32ToFloat64)           \
   V(X87Uint32ToFloat64)            \
   V(X87Float64ToInt32)             \
+  V(X87Float32ToInt32)             \
+  V(X87Float32ToUint32)            \
   V(X87Float64ToFloat32)           \
   V(X87Float64ToUint32)            \
   V(X87Float64ExtractHighWord32)   \
@@ -84,7 +88,6 @@
   V(X87Poke)                       \
   V(X87StackCheck)
 
-
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
 // are encoded into the InstructionCode of the instruction and tell the
diff --git a/src/compiler/x87/instruction-selector-x87.cc b/src/compiler/x87/instruction-selector-x87.cc
index cff4aaf..079d5d2 100644
--- a/src/compiler/x87/instruction-selector-x87.cc
+++ b/src/compiler/x87/instruction-selector-x87.cc
@@ -151,7 +151,8 @@
     case MachineRepresentation::kWord32:
       opcode = kX87Movl;
       break;
-    case MachineRepresentation::kWord64:  // Fall through.
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -236,7 +237,8 @@
       case MachineRepresentation::kWord32:
         opcode = kX87Movl;
         break;
-      case MachineRepresentation::kWord64:  // Fall through.
+      case MachineRepresentation::kWord64:   // Fall through.
+      case MachineRepresentation::kSimd128:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -288,9 +290,10 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedLoadFloat64;
       break;
-    case MachineRepresentation::kBit:     // Fall through.
-    case MachineRepresentation::kTagged:  // Fall through.
-    case MachineRepresentation::kWord64:  // Fall through.
+    case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kTagged:   // Fall through.
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -334,9 +337,10 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedStoreFloat64;
       break;
-    case MachineRepresentation::kBit:     // Fall through.
-    case MachineRepresentation::kTagged:  // Fall through.
-    case MachineRepresentation::kWord64:  // Fall through.
+    case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kTagged:   // Fall through.
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -469,9 +473,10 @@
 void VisitMulHigh(InstructionSelector* selector, Node* node,
                   ArchOpcode opcode) {
   X87OperandGenerator g(selector);
-  selector->Emit(opcode, g.DefineAsFixed(node, edx),
-                 g.UseFixed(node->InputAt(0), eax),
-                 g.UseUniqueRegister(node->InputAt(1)));
+  InstructionOperand temps[] = {g.TempRegister(eax)};
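+  // The multiply writes the low half of the product to eax, so eax is
+  // reserved as a clobbered temp.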
+  selector->Emit(
+      opcode, g.DefineAsFixed(node, edx), g.UseFixed(node->InputAt(0), eax),
+      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
 }
 
 
@@ -549,6 +554,9 @@
 void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
 
 
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+
+
 void InstructionSelector::VisitWord32Popcnt(Node* node) {
   X87OperandGenerator g(this);
   Emit(kX87Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -655,6 +663,20 @@
 }
 
 
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+  X87OperandGenerator g(this);
+  Emit(kX87Int32ToFloat32, g.DefineAsFixed(node, stX_0),
+       g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+  X87OperandGenerator g(this);
+  Emit(kX87Uint32ToFloat32, g.DefineAsFixed(node, stX_0),
+       g.Use(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
   X87OperandGenerator g(this);
   Emit(kX87Int32ToFloat64, g.DefineAsFixed(node, stX_0),
@@ -669,6 +691,18 @@
 }
 
 
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+  X87OperandGenerator g(this);
+  Emit(kX87Float32ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+  X87OperandGenerator g(this);
+  Emit(kX87Float32ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
   X87OperandGenerator g(this);
   Emit(kX87Float64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -959,6 +993,46 @@
 
 namespace {
 
+void VisitCompareWithMemoryOperand(InstructionSelector* selector,
+                                   InstructionCode opcode, Node* left,
+                                   InstructionOperand right,
+                                   FlagsContinuation* cont) {
+  DCHECK(left->opcode() == IrOpcode::kLoad);
+  X87OperandGenerator g(selector);
+  size_t input_count = 0;
+  InstructionOperand inputs[6];
+  AddressingMode addressing_mode =
+      g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
+  opcode |= AddressingModeField::encode(addressing_mode);
+  opcode = cont->Encode(opcode);
+  inputs[input_count++] = right;
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+    selector->Emit(opcode, 0, nullptr, input_count, inputs);
+  } else {
+    DCHECK(cont->IsSet());
+    InstructionOperand output = g.DefineAsRegister(cont->result());
+    selector->Emit(opcode, 1, &output, input_count, inputs);
+  }
+}
+
+// Determines if {input} of {node} can be replaced by a memory operand.
+bool CanUseMemoryOperand(InstructionSelector* selector, InstructionCode opcode,
+                         Node* node, Node* input) {
+  if (input->opcode() != IrOpcode::kLoad || !selector->CanCover(node, input)) {
+    return false;
+  }
+  MachineRepresentation load_representation =
+      LoadRepresentationOf(input->op()).representation();
+  if (load_representation == MachineRepresentation::kWord32 ||
+      load_representation == MachineRepresentation::kTagged) {
+    return opcode == kX87Cmp || opcode == kX87Test;
+  }
+  return false;
+}
+
 // Shared routine for multiple compare operations.
 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                   InstructionOperand left, InstructionOperand right,
@@ -1020,26 +1094,41 @@
   }
 }
 
-
 // Shared routine for multiple word compare operations.
 void VisitWordCompare(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
   X87OperandGenerator g(selector);
-  Node* const left = node->InputAt(0);
-  Node* const right = node->InputAt(1);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
 
-  // Match immediates on left or right side of comparison.
-  if (g.CanBeImmediate(right)) {
-    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
-  } else if (g.CanBeImmediate(left)) {
+  // If one of the two inputs is an immediate, make sure it's on the right.
+  if (!g.CanBeImmediate(right) && g.CanBeImmediate(left)) {
     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
-    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
-  } else {
-    VisitCompare(selector, opcode, left, right, cont,
-                 node->op()->HasProperty(Operator::kCommutative));
+    std::swap(left, right);
   }
-}
 
+  // Match immediates on right side of comparison.
+  if (g.CanBeImmediate(right)) {
+    if (CanUseMemoryOperand(selector, opcode, node, left)) {
+      return VisitCompareWithMemoryOperand(selector, opcode, left,
+                                           g.UseImmediate(right), cont);
+    }
+    return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
+                        cont);
+  }
+
+  if (g.CanBeBetterLeftOperand(right)) {
+    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+    std::swap(left, right);
+  }
+
+  if (CanUseMemoryOperand(selector, opcode, node, left)) {
+    return VisitCompareWithMemoryOperand(selector, opcode, left,
+                                         g.UseRegister(right), cont);
+  }
+  return VisitCompare(selector, opcode, left, right, cont,
+                      node->op()->HasProperty(Operator::kCommutative));
+}
 
 void VisitWordCompare(InstructionSelector* selector, Node* node,
                       FlagsContinuation* cont) {