Upgrade V8 to 5.1.281.57  DO NOT MERGE

FPIIM-449

Change-Id: Id981b686b4d587ac31697662eb98bb34be42ad90
(cherry picked from commit 3b9bc31999c9787eb726ecdbfd5796bfdec32a18)
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
index bdf4c47..a0b5022 100644
--- a/src/compiler/arm/code-generator-arm.cc
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -54,6 +54,7 @@
   SBit OutputSBit() const {
     switch (instr_->flags_mode()) {
       case kFlags_branch:
+      case kFlags_deoptimize:
       case kFlags_set:
         return SetCC;
       case kFlags_none:
@@ -149,8 +150,11 @@
   MemOperand ToMemOperand(InstructionOperand* op) const {
     DCHECK_NOT_NULL(op);
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    FrameOffset offset = frame_access_state()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index());
+    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
+  }
+
+  MemOperand SlotToMemOperand(int slot) const {
+    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
     return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
   }
 };
@@ -164,7 +168,9 @@
       : OutOfLineCode(gen), result_(result) {}
 
   void Generate() final {
-    __ vmov(result_, std::numeric_limits<float>::quiet_NaN());
+    // Compute sqrtf(-1.0f), which results in a quiet single-precision NaN.
+    __ vmov(result_, -1.0f);
+    __ vsqrt(result_, result_);
   }
 
  private:
@@ -178,7 +184,9 @@
       : OutOfLineCode(gen), result_(result) {}
 
   void Generate() final {
-    __ vmov(result_, std::numeric_limits<double>::quiet_NaN(), kScratchReg);
+    // Compute sqrt(-1.0), which results in a quiet double-precision NaN.
+    __ vmov(result_, -1.0);
+    __ vsqrt(result_, result_);
   }
 
  private:
@@ -222,7 +230,8 @@
         value_(value),
         scratch0_(scratch0),
         scratch1_(scratch1),
-        mode_(mode) {}
+        mode_(mode),
+        must_save_lr_(!gen->frame_access_state()->has_frame()) {}
 
   void Generate() final {
     if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -236,7 +245,7 @@
                                              : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
-    if (!frame()->needs_frame()) {
+    if (must_save_lr_) {
       // We need to save and restore lr if the frame was elided.
       __ Push(lr);
     }
@@ -249,7 +258,7 @@
       __ add(scratch1_, object_, Operand(index_));
     }
     __ CallStub(&stub);
-    if (!frame()->needs_frame()) {
+    if (must_save_lr_) {
       __ Pop(lr);
     }
   }
@@ -262,6 +271,7 @@
   Register const scratch0_;
   Register const scratch1_;
   RecordWriteMode const mode_;
+  bool must_save_lr_;
 };
 
 
@@ -378,6 +388,11 @@
     DCHECK_EQ(LeaveCC, i.OutputSBit());           \
   } while (0)
 
+void CodeGenerator::AssembleDeconstructFrame() {
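+  // LeaveFrame restores sp from fp and pops the saved fp and lr, undoing
+  // the frame set up in AssemblePrologue.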
+  __ LeaveFrame(StackFrame::MANUAL);
+}
+
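+// On ARM there is a single stack pointer, so nothing needs to be set up
+// here; the arm64 backend uses this hook to choose between csp and jssp.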
+void CodeGenerator::AssembleSetupStackPointer() {}
 
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
@@ -394,7 +409,7 @@
     __ sub(sp, sp, Operand(-sp_slot_delta * kPointerSize));
     frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
   }
-  if (frame()->needs_frame()) {
+  if (frame_access_state()->has_frame()) {
     if (FLAG_enable_embedded_constant_pool) {
       __ ldr(cp, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
     }
@@ -404,14 +419,39 @@
   frame_access_state()->SetFrameAccessToSP();
 }
 
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+                                                     Register scratch1,
+                                                     Register scratch2,
+                                                     Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Label done;
+
+  // Check if the current frame is an arguments adaptor frame.
+  __ ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ b(ne, &done);
+
+  // Load the arguments count from the current arguments adaptor frame
+  // (note that it does not include the receiver).
+  Register caller_args_count_reg = scratch1;
+  __ ldr(caller_args_count_reg,
+         MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+
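+  // PrepareForTailCall removes the current frame and the caller's arguments,
+  // leaving the callee's arguments where a direct call would expect them
+  // (a summary of the macro-assembler helper, not its exact mechanics).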
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
+  __ bind(&done);
+}
 
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   ArmOperandConverter i(this, instr);
 
-  masm()->MaybeCheckConstPool();
-
-  switch (ArchOpcodeField::decode(instr->opcode())) {
+  __ MaybeCheckConstPool();
+  InstructionCode opcode = instr->opcode();
+  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+  switch (arch_opcode) {
     case kArchCallCodeObject: {
       EnsureSpaceForLazyDeopt();
       if (instr->InputAt(0)->IsImmediate()) {
@@ -427,9 +467,15 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       if (instr->InputAt(0)->IsImmediate()) {
         __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
                 RelocInfo::CODE_TARGET);
@@ -458,6 +504,7 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallJSFunctionFromJSFunction:
     case kArchTailCallJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
@@ -468,6 +515,11 @@
       }
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Jump(ip);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -535,7 +587,7 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArchParentFramePointer:
-      if (frame_access_state()->frame()->needs_frame()) {
+      if (frame_access_state()->has_frame()) {
         __ ldr(i.OutputRegister(), MemOperand(fp, 0));
       } else {
         __ mov(i.OutputRegister(), fp);
@@ -742,6 +794,67 @@
       __ teq(i.InputRegister(0), i.InputOperand2(1));
       DCHECK_EQ(SetCC, i.OutputSBit());
       break;
+    case kArmAddPair:
+      // i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
+      __ add(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2),
+             SBit::SetCC);
+      __ adc(i.OutputRegister(1), i.InputRegister(1),
+             Operand(i.InputRegister(3)));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmSubPair:
+      // i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
+      __ sub(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2),
+             SBit::SetCC);
+      __ sbc(i.OutputRegister(1), i.InputRegister(1),
+             Operand(i.InputRegister(3)));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmMulPair:
+      // i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
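+      //
+      // umull computes the full 64-bit product of the two low words; the
+      // cross products (left low * right high and right low * left high)
+      // only contribute to the high word and are accumulated with mla. The
+      // high * high term lies entirely beyond 64 bits and is dropped.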
+      __ umull(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+               i.InputRegister(2));
+      __ mla(i.OutputRegister(1), i.InputRegister(0), i.InputRegister(3),
+             i.OutputRegister(1));
+      __ mla(i.OutputRegister(1), i.InputRegister(2), i.InputRegister(1),
+             i.OutputRegister(1));
+      break;
+    case kArmLslPair:
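+      // For a register shift amount the helper takes kScratchReg as a temp,
+      // presumably to form the cross-word term when the shift can reach 32
+      // or more.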
+      if (instr->InputAt(2)->IsImmediate()) {
+        __ LslPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+                   i.InputRegister(1), i.InputInt32(2));
+      } else {
+        __ LslPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+                   i.InputRegister(1), kScratchReg, i.InputRegister(2));
+      }
+      break;
+    case kArmLsrPair:
+      if (instr->InputAt(2)->IsImmediate()) {
+        __ LsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+                   i.InputRegister(1), i.InputInt32(2));
+      } else {
+        __ LsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+                   i.InputRegister(1), kScratchReg, i.InputRegister(2));
+      }
+      break;
+    case kArmAsrPair:
+      if (instr->InputAt(2)->IsImmediate()) {
+        __ AsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+                   i.InputRegister(1), i.InputInt32(2));
+      } else {
+        __ AsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+                   i.InputRegister(1), kScratchReg, i.InputRegister(2));
+      }
+      break;
     case kArmVcmpF32:
       if (instr->InputAt(1)->IsDoubleRegister()) {
         __ VFPCompareAndSetFlags(i.InputFloat32Register(0),
@@ -1155,29 +1268,32 @@
     int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
+  // TODO(turbofan): We should be able to generate better code by sharing the
+  // actual final call site and just bl'ing to it here, similar to what we do
+  // in the lithium backend.
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+  __ CheckConstPool(false, false);
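+  // Check whether the constant pool is due and, if so, emit it here between
+  // deoptimization entries rather than at an arbitrary later point.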
 }
 
 
 void CodeGenerator::AssemblePrologue() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-  if (descriptor->IsCFunctionCall()) {
-    if (FLAG_enable_embedded_constant_pool) {
-      __ Push(lr, fp, pp);
-      // Adjust FP to point to saved FP.
-      __ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+  if (frame_access_state()->has_frame()) {
+    if (descriptor->IsCFunctionCall()) {
+      if (FLAG_enable_embedded_constant_pool) {
+        __ Push(lr, fp, pp);
+        // Adjust FP to point to saved FP.
+        __ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+      } else {
+        __ Push(lr, fp);
+        __ mov(fp, sp);
+      }
+    } else if (descriptor->IsJSFunctionCall()) {
+      __ Prologue(this->info()->GeneratePreagedPrologue());
     } else {
-      __ Push(lr, fp);
-      __ mov(fp, sp);
+      __ StubPrologue(info()->GetOutputStackFrameType());
     }
-  } else if (descriptor->IsJSFunctionCall()) {
-    __ Prologue(this->info()->GeneratePreagedPrologue());
-  } else if (frame()->needs_frame()) {
-    __ StubPrologue();
-  } else {
-    frame()->SetElidedFrameSizeInSlots(0);
   }
-  frame_access_state()->SetFrameAccessToDefault();
 
   int stack_shrink_slots = frame()->GetSpillSlotCount();
   if (info()->is_osr()) {
@@ -1247,15 +1363,15 @@
   }
 
   if (descriptor->IsCFunctionCall()) {
-    __ LeaveFrame(StackFrame::MANUAL);
-  } else if (frame()->needs_frame()) {
+    AssembleDeconstructFrame();
+  } else if (frame_access_state()->has_frame()) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ b(&return_label_);
       return;
     } else {
       __ bind(&return_label_);
-      __ LeaveFrame(StackFrame::MANUAL);
+      AssembleDeconstructFrame();
     }
   }
   __ Ret(pop_count);
@@ -1311,9 +1427,9 @@
         case Constant::kHeapObject: {
           Handle<HeapObject> src_object = src.ToHeapObject();
           Heap::RootListIndex index;
-          int offset;
-          if (IsMaterializableFromFrame(src_object, &offset)) {
-            __ ldr(dst, MemOperand(fp, offset));
+          int slot;
+          if (IsMaterializableFromFrame(src_object, &slot)) {
+            __ ldr(dst, g.SlotToMemOperand(slot));
           } else if (IsMaterializableFromRoot(src_object, &index)) {
             __ LoadRoot(dst, index);
           } else {
diff --git a/src/compiler/arm/instruction-codes-arm.h b/src/compiler/arm/instruction-codes-arm.h
index 50fa555..5e6f5c9 100644
--- a/src/compiler/arm/instruction-codes-arm.h
+++ b/src/compiler/arm/instruction-codes-arm.h
@@ -46,6 +46,12 @@
   V(ArmUxtab)                      \
   V(ArmRbit)                       \
   V(ArmUxtah)                      \
+  V(ArmAddPair)                    \
+  V(ArmSubPair)                    \
+  V(ArmMulPair)                    \
+  V(ArmLslPair)                    \
+  V(ArmLsrPair)                    \
+  V(ArmAsrPair)                    \
   V(ArmVcmpF32)                    \
   V(ArmVaddF32)                    \
   V(ArmVsubF32)                    \
diff --git a/src/compiler/arm/instruction-scheduler-arm.cc b/src/compiler/arm/instruction-scheduler-arm.cc
index d950e8c..466765e 100644
--- a/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/src/compiler/arm/instruction-scheduler-arm.cc
@@ -48,6 +48,12 @@
     case kArmUxtab:
     case kArmUxtah:
     case kArmRbit:
+    case kArmAddPair:
+    case kArmSubPair:
+    case kArmMulPair:
+    case kArmLslPair:
+    case kArmLsrPair:
+    case kArmAsrPair:
     case kArmVcmpF32:
     case kArmVaddF32:
     case kArmVsubF32:
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index 14b30b1..76d9e3c 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -237,8 +237,13 @@
   DCHECK_GE(arraysize(outputs), output_count);
   DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
 
-  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
-                 inputs);
+  opcode = cont->Encode(opcode);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
 }
 
 
@@ -369,9 +374,7 @@
       inputs[input_count++] = g.UseUniqueRegister(index);
       addressing_mode = kMode_Offset_RR;
     }
-    inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
-                                ? g.UseRegister(value)
-                                : g.UseUniqueRegister(value);
+    inputs[input_count++] = g.UseUniqueRegister(value);
     RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
     switch (write_barrier_kind) {
       case kNoWriteBarrier:
@@ -691,8 +694,13 @@
   DCHECK_GE(arraysize(outputs), output_count);
   DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
 
-  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
-                 inputs);
+  opcode = cont->Encode(opcode);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
 }
 
 
@@ -759,6 +767,120 @@
   VisitShift(this, node, TryMatchASR);
 }
 
+void InstructionSelector::VisitInt32PairAdd(Node* node) {
+  ArmOperandGenerator g(this);
+
+  // We use UseUniqueRegister here to avoid register sharing with the output
+  // registers.
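+  // Only the high-word inputs need to be unique: kArmAddPair writes the low
+  // output word before it reads the high-word inputs, so those inputs must
+  // not alias the low output register.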
+  InstructionOperand inputs[] = {
+      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsRegister(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  Emit(kArmAddPair, 2, outputs, 4, inputs);
+}
+
+void InstructionSelector::VisitInt32PairSub(Node* node) {
+  ArmOperandGenerator g(this);
+
+  // We use UseUniqueRegister here to avoid register sharing with the output
+  // registers.
+  InstructionOperand inputs[] = {
+      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsRegister(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  Emit(kArmSubPair, 2, outputs, 4, inputs);
+}
+
+void InstructionSelector::VisitInt32PairMul(Node* node) {
+  ArmOperandGenerator g(this);
+  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+                                 g.UseUniqueRegister(node->InputAt(1)),
+                                 g.UseUniqueRegister(node->InputAt(2)),
+                                 g.UseUniqueRegister(node->InputAt(3))};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsRegister(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  Emit(kArmMulPair, 2, outputs, 4, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShl(Node* node) {
+  ArmOperandGenerator g(this);
+  // We use g.UseUniqueRegister here for InputAt(0) to guarantee that there is
+  // no register aliasing with the output registers.
+  Int32Matcher m(node->InputAt(2));
+  InstructionOperand shift_operand;
+  if (m.HasValue()) {
+    shift_operand = g.UseImmediate(m.node());
+  } else {
+    shift_operand = g.UseUniqueRegister(m.node());
+  }
+
+  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+                                 g.UseRegister(node->InputAt(1)),
+                                 shift_operand};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsRegister(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  Emit(kArmLslPair, 2, outputs, 3, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShr(Node* node) {
+  ArmOperandGenerator g(this);
+  // We use g.UseUniqueRegister here for InputAt(1) and InputAt(2) to
+  // guarantee that there is no register aliasing with the output registers.
+  Int32Matcher m(node->InputAt(2));
+  InstructionOperand shift_operand;
+  if (m.HasValue()) {
+    shift_operand = g.UseImmediate(m.node());
+  } else {
+    shift_operand = g.UseUniqueRegister(m.node());
+  }
+
+  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
+                                 g.UseUniqueRegister(node->InputAt(1)),
+                                 shift_operand};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsRegister(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  Emit(kArmLsrPair, 2, outputs, 3, inputs);
+}
+
+void InstructionSelector::VisitWord32PairSar(Node* node) {
+  ArmOperandGenerator g(this);
+  // We use g.UseUniqueRegister here for InputAt(1) and InputAt(2) to
+  // guarantee that there is no register aliasing with the output registers.
+  Int32Matcher m(node->InputAt(2));
+  InstructionOperand shift_operand;
+  if (m.HasValue()) {
+    shift_operand = g.UseImmediate(m.node());
+  } else {
+    shift_operand = g.UseUniqueRegister(m.node());
+  }
+
+  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
+                                 g.UseUniqueRegister(node->InputAt(1)),
+                                 shift_operand};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsRegister(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  Emit(kArmAsrPair, 2, outputs, 3, inputs);
+}
 
 void InstructionSelector::VisitWord32Ror(Node* node) {
   VisitShift(this, node, TryMatchROR);
@@ -1013,7 +1135,9 @@
   VisitRR(this, kArmVcvtU32F64, node);
 }
 
-
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+  VisitRR(this, kArmVcvtU32F64, node);
+}
 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
   VisitRR(this, kArmVcvtF32F64, node);
 }
@@ -1272,6 +1396,7 @@
 
 bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
 
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
 
 namespace {
 
@@ -1284,6 +1409,9 @@
   if (cont->IsBranch()) {
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -1357,8 +1485,7 @@
   if (cont->IsBranch()) {
     inputs[input_count++] = g.Label(cont->true_block());
     inputs[input_count++] = g.Label(cont->false_block());
-  } else {
-    DCHECK(cont->IsSet());
+  } else if (cont->IsSet()) {
     outputs[output_count++] = g.DefineAsRegister(cont->result());
   }
 
@@ -1366,8 +1493,13 @@
   DCHECK_GE(arraysize(inputs), input_count);
   DCHECK_GE(arraysize(outputs), output_count);
 
-  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
-                 inputs);
+  opcode = cont->Encode(opcode);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
 }
 
 
@@ -1482,7 +1614,11 @@
   if (cont->IsBranch()) {
     selector->Emit(opcode, g.NoOutput(), value_operand, value_operand,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand, value_operand,
+                             cont->frame_state());
   } else {
+    DCHECK(cont->IsSet());
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
                    value_operand);
   }
@@ -1490,13 +1626,23 @@
 
 }  // namespace
 
-
 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                       BasicBlock* fbranch) {
   FlagsContinuation cont(kNotEqual, tbranch, fbranch);
   VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
 }
 
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
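+  // Deoptimize when the condition value is non-zero; VisitDeoptimizeUnless
+  // below handles the inverted case with kEqual.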
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
 
 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   ArmOperandGenerator g(this);
@@ -1527,7 +1673,7 @@
 
 
 void InstructionSelector::VisitWord32Equal(Node* const node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int32BinopMatcher m(node);
   if (m.right().Is(0)) {
     return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
@@ -1537,32 +1683,34 @@
 
 
 void InstructionSelector::VisitInt32LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
   }
   FlagsContinuation cont;
@@ -1572,7 +1720,7 @@
 
 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kArmSub, kArmRsb, &cont);
   }
   FlagsContinuation cont;
@@ -1581,37 +1729,39 @@
 
 
 void InstructionSelector::VisitFloat32Equal(Node* node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThan(Node* node) {
-  FlagsContinuation cont(kFloatLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kFloatLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64Equal(Node* node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThan(Node* node) {
-  FlagsContinuation cont(kFloatLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kFloatLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index e45c677..456e7e7 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -207,13 +207,13 @@
   MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
     DCHECK_NOT_NULL(op);
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    FrameOffset offset = frame_access_state()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index());
+    return SlotToMemOperand(AllocatedOperand::cast(op)->index(), masm);
+  }
+
+  MemOperand SlotToMemOperand(int slot, MacroAssembler* masm) const {
+    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
     if (offset.from_frame_pointer()) {
-      int from_sp =
-          offset.offset() +
-          ((frame()->GetSpToFpSlotCount() + frame_access_state()->sp_delta()) *
-           kPointerSize);
+      int from_sp = offset.offset() + frame_access_state()->GetSPToFPOffset();
       // Convert FP-offsets to SP-offsets if it results in better code.
       if (Assembler::IsImmLSUnscaled(from_sp) ||
           Assembler::IsImmLSScaled(from_sp, LSDoubleWord)) {
@@ -279,7 +279,8 @@
         value_(value),
         scratch0_(scratch0),
         scratch1_(scratch1),
-        mode_(mode) {}
+        mode_(mode),
+        must_save_lr_(!gen->frame_access_state()->has_frame()) {}
 
   void Generate() final {
     if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -293,7 +294,7 @@
                                              : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
-    if (!frame()->needs_frame()) {
+    if (must_save_lr_) {
       // We need to save and restore lr if the frame was elided.
       __ Push(lr);
     }
@@ -301,7 +302,7 @@
                          remembered_set_action, save_fp_mode);
     __ Add(scratch1_, object_, index_);
     __ CallStub(&stub);
-    if (!frame()->needs_frame()) {
+    if (must_save_lr_) {
       __ Pop(lr);
     }
   }
@@ -313,6 +314,7 @@
   Register const scratch0_;
   Register const scratch1_;
   RecordWriteMode const mode_;
+  bool must_save_lr_;
 };
 
 
@@ -466,6 +468,15 @@
     }                                                                       \
   } while (0)
 
+void CodeGenerator::AssembleDeconstructFrame() {
+  const CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->IsCFunctionCall() || descriptor->UseNativeStack()) {
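+    // C and native-stack frames unwind through csp; JS frames through jssp.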
+    __ Mov(csp, fp);
+  } else {
+    __ Mov(jssp, fp);
+  }
+  __ Pop(fp, lr);
+}
 
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
@@ -482,13 +493,37 @@
     __ Claim(-sp_slot_delta);
     frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
   }
-  if (frame()->needs_frame()) {
+  if (frame_access_state()->has_frame()) {
     __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
     __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   }
   frame_access_state()->SetFrameAccessToSP();
 }
 
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+                                                     Register scratch1,
+                                                     Register scratch2,
+                                                     Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Label done;
+
+  // Check if the current frame is an arguments adaptor frame.
+  __ Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ Cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ B(ne, &done);
+
+  // Load the arguments count from the current arguments adaptor frame
+  // (note that it does not include the receiver).
+  Register caller_args_count_reg = scratch1;
+  __ Ldr(caller_args_count_reg,
+         MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
+  __ bind(&done);
+}
 
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
@@ -506,21 +541,31 @@
         __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
         __ Call(target);
       }
+      RecordCallPosition(instr);
       // TODO(titzer): this is ugly. JSSP should be a caller-save register
       // in this case, but it is not possible to express in the register
       // allocator.
-      CallDescriptor::Flags flags =
-          static_cast<CallDescriptor::Flags>(MiscField::decode(opcode));
+      CallDescriptor::Flags flags(MiscField::decode(opcode));
       if (flags & CallDescriptor::kRestoreJSSP) {
-        __ mov(jssp, csp);
+        __ Ldr(jssp, MemOperand(csp));
+        __ Mov(csp, jssp);
+      }
+      if (flags & CallDescriptor::kRestoreCSP) {
+        __ Mov(csp, jssp);
+        __ AssertCspAligned();
       }
       frame_access_state()->ClearSPDelta();
-      RecordCallPosition(instr);
       break;
     }
+    case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       if (instr->InputAt(0)->IsImmediate()) {
         __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
                 RelocInfo::CODE_TARGET);
@@ -545,18 +590,23 @@
       }
       __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Call(x10);
+      RecordCallPosition(instr);
       // TODO(titzer): this is ugly. JSSP should be a caller-save register
       // in this case, but it is not possible to express in the register
       // allocator.
-      CallDescriptor::Flags flags =
-          static_cast<CallDescriptor::Flags>(MiscField::decode(opcode));
+      CallDescriptor::Flags flags(MiscField::decode(opcode));
       if (flags & CallDescriptor::kRestoreJSSP) {
-        __ mov(jssp, csp);
+        __ Ldr(jssp, MemOperand(csp));
+        __ Mov(csp, jssp);
+      }
+      if (flags & CallDescriptor::kRestoreCSP) {
+        __ Mov(csp, jssp);
+        __ AssertCspAligned();
       }
       frame_access_state()->ClearSPDelta();
-      RecordCallPosition(instr);
       break;
     }
+    case kArchTailCallJSFunctionFromJSFunction:
     case kArchTailCallJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
@@ -569,6 +619,11 @@
       }
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Jump(x10);
       frame_access_state()->ClearSPDelta();
@@ -628,7 +683,7 @@
       __ mov(i.OutputRegister(), fp);
       break;
     case kArchParentFramePointer:
-      if (frame_access_state()->frame()->needs_frame()) {
+      if (frame_access_state()->has_frame()) {
         __ ldr(i.OutputRegister(), MemOperand(fp, 0));
       } else {
         __ mov(i.OutputRegister(), fp);
@@ -931,22 +986,46 @@
       // Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
       break;
     case kArm64ClaimCSP: {
-      int count = i.InputInt32(0);
+      int count = RoundUp(i.InputInt32(0), 2);
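+      // csp must stay 16-byte aligned, so claim an even number of slots.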
       Register prev = __ StackPointer();
-      __ SetStackPointer(csp);
-      __ Claim(count);
-      __ SetStackPointer(prev);
-      frame_access_state()->IncreaseSPDelta(count);
+      if (prev.Is(jssp)) {
+        // TODO(titzer): make this a macro-assembler method.
+        // Align the CSP and store the previous JSSP on the stack.
+        UseScratchRegisterScope scope(masm());
+        Register tmp = scope.AcquireX();
+
+        int sp_alignment = __ ActivationFrameAlignment();
+        __ Sub(tmp, jssp, kPointerSize);
+        __ And(tmp, tmp, Operand(~static_cast<uint64_t>(sp_alignment - 1)));
+        __ Mov(csp, tmp);
+        __ Str(jssp, MemOperand(csp));
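+        // The call sequence reloads jssp from this slot when the
+        // CallDescriptor::kRestoreJSSP flag is set.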
+        if (count > 0) {
+          __ SetStackPointer(csp);
+          __ Claim(count);
+          __ SetStackPointer(prev);
+        }
+      } else {
+        __ AssertCspAligned();
+        if (count > 0) {
+          __ Claim(count);
+          frame_access_state()->IncreaseSPDelta(count);
+        }
+      }
       break;
     }
     case kArm64ClaimJSSP: {
       int count = i.InputInt32(0);
       if (csp.Is(__ StackPointer())) {
-        // No JSP is set up. Compute it from the CSP.
-        int even = RoundUp(count, 2);
-        __ Sub(jssp, csp, count * kPointerSize);
-        __ Sub(csp, csp, even * kPointerSize);  // Must always be aligned.
-        frame_access_state()->IncreaseSPDelta(even);
+        // No JSSP is set up. Compute it from the CSP.
+        __ AssertCspAligned();
+        if (count > 0) {
+          int even = RoundUp(count, 2);
+          __ Sub(jssp, csp, count * kPointerSize);
+          __ Sub(csp, csp, even * kPointerSize);  // Must always be aligned.
+          frame_access_state()->IncreaseSPDelta(even);
+        } else {
+          __ Mov(jssp, csp);
+        }
       } else {
         // JSSP is the current stack pointer, just use regular Claim().
         __ Claim(count);
@@ -1424,34 +1503,38 @@
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
 }
 
+void CodeGenerator::AssembleSetupStackPointer() {
+  const CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->UseNativeStack() || descriptor->IsCFunctionCall()) {
+    __ SetStackPointer(csp);
+  } else {
+    __ SetStackPointer(jssp);
+  }
+}
 
 void CodeGenerator::AssemblePrologue() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-  if (descriptor->IsCFunctionCall()) {
-    __ SetStackPointer(csp);
-    __ Push(lr, fp);
-    __ Mov(fp, csp);
-  } else if (descriptor->IsJSFunctionCall()) {
-    __ SetStackPointer(jssp);
-    __ Prologue(this->info()->GeneratePreagedPrologue());
-  } else if (frame()->needs_frame()) {
-    if (descriptor->UseNativeStack()) {
-      __ SetStackPointer(csp);
-    } else {
-      __ SetStackPointer(jssp);
-    }
-    __ StubPrologue();
-  } else {
-    if (descriptor->UseNativeStack()) {
-      __ SetStackPointer(csp);
-    } else {
-      __ SetStackPointer(jssp);
-    }
-    frame()->SetElidedFrameSizeInSlots(0);
+  if (descriptor->UseNativeStack()) {
+    __ AssertCspAligned();
   }
-  frame_access_state()->SetFrameAccessToDefault();
 
   int stack_shrink_slots = frame()->GetSpillSlotCount();
+  if (frame_access_state()->has_frame()) {
+    if (descriptor->IsJSFunctionCall()) {
+      DCHECK(!descriptor->UseNativeStack());
+      __ Prologue(this->info()->GeneratePreagedPrologue());
+    } else {
+      if (descriptor->IsCFunctionCall()) {
+        __ Push(lr, fp);
+        __ Mov(fp, masm_.StackPointer());
+        __ Claim(stack_shrink_slots);
+      } else {
+        __ StubPrologue(info()->GetOutputStackFrameType(),
+                        frame()->GetTotalFrameSlotCount());
+      }
+    }
+  }
+
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
     __ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1465,15 +1548,9 @@
     stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   }
 
-  // If frame()->needs_frame() is false, then
-  // frame()->AlignSavedCalleeRegisterSlots() is guaranteed to return 0.
-  if (csp.Is(masm()->StackPointer()) && frame()->needs_frame()) {
-    // The system stack pointer requires 16-byte alignment at function call
-    // boundaries.
-
-    stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
+  if (descriptor->IsJSFunctionCall()) {
+    __ Claim(stack_shrink_slots);
   }
-  __ Claim(stack_shrink_slots);
 
   // Save FP registers.
   CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
@@ -1518,27 +1595,27 @@
 
   int pop_count = static_cast<int>(descriptor->StackParameterCount());
   if (descriptor->IsCFunctionCall()) {
-    __ Mov(csp, fp);
-    __ Pop(fp, lr);
-  } else if (frame()->needs_frame()) {
+    AssembleDeconstructFrame();
+  } else if (frame_access_state()->has_frame()) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ B(&return_label_);
       return;
     } else {
       __ Bind(&return_label_);
+      AssembleDeconstructFrame();
       if (descriptor->UseNativeStack()) {
-        __ Mov(csp, fp);
         pop_count += (pop_count & 1);  // align
-      } else {
-        __ Mov(jssp, fp);
       }
-      __ Pop(fp, lr);
     }
   } else if (descriptor->UseNativeStack()) {
     pop_count += (pop_count & 1);  // align
   }
   __ Drop(pop_count);
+
+  if (descriptor->UseNativeStack()) {
+    __ AssertCspAligned();
+  }
   __ Ret();
 }
 
@@ -1576,9 +1653,9 @@
       if (src.type() == Constant::kHeapObject) {
         Handle<HeapObject> src_object = src.ToHeapObject();
         Heap::RootListIndex index;
-        int offset;
-        if (IsMaterializableFromFrame(src_object, &offset)) {
-          __ Ldr(dst, MemOperand(fp, offset));
+        int slot;
+        if (IsMaterializableFromFrame(src_object, &slot)) {
+          __ Ldr(dst, g.SlotToMemOperand(slot, masm()));
         } else if (IsMaterializableFromRoot(src_object, &index)) {
           __ LoadRoot(dst, index);
         } else {
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index 26a2896..d90deae 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -289,8 +289,13 @@
   DCHECK_GE(arraysize(inputs), input_count);
   DCHECK_GE(arraysize(outputs), output_count);
 
-  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
-                 inputs);
+  opcode = cont->Encode(opcode);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
 }
 
 
@@ -413,9 +418,7 @@
       inputs[input_count++] = g.UseUniqueRegister(index);
       addressing_mode = kMode_MRR;
     }
-    inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
-                                ? g.UseRegister(value)
-                                : g.UseUniqueRegister(value);
+    inputs[input_count++] = g.UseUniqueRegister(value);
     RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
     switch (write_barrier_kind) {
       case kNoWriteBarrier:
@@ -1283,6 +1286,9 @@
   VisitRR(this, kArm64Float64ToUint32, node);
 }
 
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+  VisitRR(this, kArm64Float64ToUint32, node);
+}
 
 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
   Arm64OperandGenerator g(this);
@@ -1628,20 +1634,20 @@
     Node* node) {
   Arm64OperandGenerator g(this);
 
+  bool from_native_stack = linkage()->GetIncomingDescriptor()->UseNativeStack();
   bool to_native_stack = descriptor->UseNativeStack();
 
+  bool always_claim = to_native_stack != from_native_stack;
+
   int claim_count = static_cast<int>(arguments->size());
   int slot = claim_count - 1;
-  if (to_native_stack) {
-    // Native stack must always be aligned to 16 (2 words).
-    claim_count = RoundUp(claim_count, 2);
-  }
-  // TODO(titzer): claim and poke probably take small immediates.
   // Bump the stack pointer(s).
-  if (claim_count > 0) {
+  if (claim_count > 0 || always_claim) {
+    // TODO(titzer): claim and poke probably take small immediates.
     // TODO(titzer): it would be better to bump the csp here only
     //                and emit paired stores with increment for non c frames.
     ArchOpcode claim = to_native_stack ? kArm64ClaimCSP : kArm64ClaimJSSP;
+    // Claim(0) isn't a nop if there is a mismatch between CSP and JSSP.
     Emit(claim, g.NoOutput(), g.TempImmediate(claim_count));
   }
 
@@ -1662,6 +1668,7 @@
 
 bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
 
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
 
 namespace {
 
@@ -1674,6 +1681,9 @@
   if (cont->IsBranch()) {
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -1789,85 +1799,72 @@
   }
 }
 
-}  // namespace
-
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
-                                      BasicBlock* fbranch) {
-  OperandGenerator g(this);
-  Node* user = branch;
-  Node* value = branch->InputAt(0);
-
-  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
-
-  // Try to combine with comparisons against 0 by simply inverting the branch.
-  while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
-    Int32BinopMatcher m(value);
-    if (m.right().Is(0)) {
-      user = value;
-      value = m.left().node();
-      cont.Negate();
-    } else {
-      break;
-    }
-  }
-
-  // Try to combine the branch with a comparison.
-  if (CanCover(user, value)) {
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+                          Node* value, FlagsContinuation* cont) {
+  Arm64OperandGenerator g(selector);
+  while (selector->CanCover(user, value)) {
     switch (value->opcode()) {
-      case IrOpcode::kWord32Equal:
-        cont.OverwriteAndNegateIfEqual(kEqual);
-        return VisitWord32Compare(this, value, &cont);
+      case IrOpcode::kWord32Equal: {
+        Int32BinopMatcher m(value);
+        if (m.right().Is(0)) {
+          user = value;
+          value = m.left().node();
+          cont->Negate();
+          continue;
+        }
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitWord32Compare(selector, value, cont);
+      }
       case IrOpcode::kInt32LessThan:
-        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
-        return VisitWord32Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWord32Compare(selector, value, cont);
       case IrOpcode::kInt32LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
-        return VisitWord32Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWord32Compare(selector, value, cont);
       case IrOpcode::kUint32LessThan:
-        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
-        return VisitWord32Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWord32Compare(selector, value, cont);
       case IrOpcode::kUint32LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
-        return VisitWord32Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWord32Compare(selector, value, cont);
       case IrOpcode::kWord64Equal:
-        cont.OverwriteAndNegateIfEqual(kEqual);
-        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
                                 kArithmeticImm);
       case IrOpcode::kInt64LessThan:
-        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
-        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
                                 kArithmeticImm);
       case IrOpcode::kInt64LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
-        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
                                 kArithmeticImm);
       case IrOpcode::kUint64LessThan:
-        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
-        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
                                 kArithmeticImm);
       case IrOpcode::kUint64LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
-        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
                                 kArithmeticImm);
       case IrOpcode::kFloat32Equal:
-        cont.OverwriteAndNegateIfEqual(kEqual);
-        return VisitFloat32Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitFloat32Compare(selector, value, cont);
       case IrOpcode::kFloat32LessThan:
-        cont.OverwriteAndNegateIfEqual(kFloatLessThan);
-        return VisitFloat32Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kFloatLessThan);
+        return VisitFloat32Compare(selector, value, cont);
       case IrOpcode::kFloat32LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
-        return VisitFloat32Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
+        return VisitFloat32Compare(selector, value, cont);
       case IrOpcode::kFloat64Equal:
-        cont.OverwriteAndNegateIfEqual(kEqual);
-        return VisitFloat64Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitFloat64Compare(selector, value, cont);
       case IrOpcode::kFloat64LessThan:
-        cont.OverwriteAndNegateIfEqual(kFloatLessThan);
-        return VisitFloat64Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kFloatLessThan);
+        return VisitFloat64Compare(selector, value, cont);
       case IrOpcode::kFloat64LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
-        return VisitFloat64Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
+        return VisitFloat64Compare(selector, value, cont);
       case IrOpcode::kProjection:
         // Check if this is the overflow output projection of an
         // <Operation>WithOverflow node.
@@ -1879,24 +1876,24 @@
           // *AFTER* this branch).
           Node* const node = value->InputAt(0);
           Node* const result = NodeProperties::FindProjection(node, 0);
-          if (result == nullptr || IsDefined(result)) {
+          if (result == nullptr || selector->IsDefined(result)) {
             switch (node->opcode()) {
               case IrOpcode::kInt32AddWithOverflow:
-                cont.OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
-                                                     kArithmeticImm, &cont);
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop<Int32BinopMatcher>(
+                    selector, node, kArm64Add32, kArithmeticImm, cont);
               case IrOpcode::kInt32SubWithOverflow:
-                cont.OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
-                                                     kArithmeticImm, &cont);
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop<Int32BinopMatcher>(
+                    selector, node, kArm64Sub32, kArithmeticImm, cont);
               case IrOpcode::kInt64AddWithOverflow:
-                cont.OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add,
-                                                     kArithmeticImm, &cont);
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop<Int64BinopMatcher>(selector, node, kArm64Add,
+                                                     kArithmeticImm, cont);
               case IrOpcode::kInt64SubWithOverflow:
-                cont.OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub,
-                                                     kArithmeticImm, &cont);
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop<Int64BinopMatcher>(selector, node, kArm64Sub,
+                                                     kArithmeticImm, cont);
               default:
                 break;
             }
@@ -1904,55 +1901,84 @@
         }
         break;
       case IrOpcode::kInt32Add:
-        return VisitWordCompare(this, value, kArm64Cmn32, &cont, true,
+        return VisitWordCompare(selector, value, kArm64Cmn32, cont, true,
                                 kArithmeticImm);
       case IrOpcode::kInt32Sub:
-        return VisitWord32Compare(this, value, &cont);
+        return VisitWord32Compare(selector, value, cont);
       case IrOpcode::kWord32And: {
         Int32BinopMatcher m(value);
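+        // The tbz/tbnz fast path below can only branch, so it is restricted
+        // to branch continuations; deoptimize and set continuations fall
+        // through to the tst-based comparison.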
-        if (m.right().HasValue() &&
+        if (cont->IsBranch() && m.right().HasValue() &&
             (base::bits::CountPopulation32(m.right().Value()) == 1)) {
           // If the mask has only one bit set, we can use tbz/tbnz.
-          DCHECK((cont.condition() == kEqual) ||
-                 (cont.condition() == kNotEqual));
-          Emit(cont.Encode(kArm64TestAndBranch32), g.NoOutput(),
-               g.UseRegister(m.left().node()),
-               g.TempImmediate(
-                   base::bits::CountTrailingZeros32(m.right().Value())),
-               g.Label(cont.true_block()), g.Label(cont.false_block()));
+          DCHECK((cont->condition() == kEqual) ||
+                 (cont->condition() == kNotEqual));
+          selector->Emit(
+              cont->Encode(kArm64TestAndBranch32), g.NoOutput(),
+              g.UseRegister(m.left().node()),
+              g.TempImmediate(
+                  base::bits::CountTrailingZeros32(m.right().Value())),
+              g.Label(cont->true_block()), g.Label(cont->false_block()));
           return;
         }
-        return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
+        return VisitWordCompare(selector, value, kArm64Tst32, cont, true,
                                 kLogical32Imm);
       }
       case IrOpcode::kWord64And: {
         Int64BinopMatcher m(value);
-        if (m.right().HasValue() &&
+        if (cont->IsBranch() && m.right().HasValue() &&
             (base::bits::CountPopulation64(m.right().Value()) == 1)) {
           // If the mask has only one bit set, we can use tbz/tbnz.
-          DCHECK((cont.condition() == kEqual) ||
-                 (cont.condition() == kNotEqual));
-          Emit(cont.Encode(kArm64TestAndBranch), g.NoOutput(),
-               g.UseRegister(m.left().node()),
-               g.TempImmediate(
-                   base::bits::CountTrailingZeros64(m.right().Value())),
-               g.Label(cont.true_block()), g.Label(cont.false_block()));
+          DCHECK((cont->condition() == kEqual) ||
+                 (cont->condition() == kNotEqual));
+          selector->Emit(
+              cont->Encode(kArm64TestAndBranch), g.NoOutput(),
+              g.UseRegister(m.left().node()),
+              g.TempImmediate(
+                  base::bits::CountTrailingZeros64(m.right().Value())),
+              g.Label(cont->true_block()), g.Label(cont->false_block()));
           return;
         }
-        return VisitWordCompare(this, value, kArm64Tst, &cont, true,
+        return VisitWordCompare(selector, value, kArm64Tst, cont, true,
                                 kLogical64Imm);
       }
       default:
         break;
     }
+    break;
   }
 
   // Branch could not be combined with a compare, compare against 0 and branch.
-  Emit(cont.Encode(kArm64CompareAndBranch32), g.NoOutput(),
-       g.UseRegister(value), g.Label(cont.true_block()),
-       g.Label(cont.false_block()));
+  if (cont->IsBranch()) {
+    selector->Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
+                   g.UseRegister(value), g.Label(cont->true_block()),
+                   g.Label(cont->false_block()));
+  } else {
+    DCHECK(cont->IsDeoptimize());
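+    // "tst value, value" sets the Z flag exactly when {value} is zero, so the
+    // encoded condition decides whether the eager bailout is taken.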
+    selector->EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
+                             g.UseRegister(value), g.UseRegister(value),
+                             cont->frame_state());
+  }
 }
 
+}  // namespace
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+                                      BasicBlock* fbranch) {
+  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
+}
+
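+// DeoptimizeIf bails out when its condition is true, DeoptimizeUnless when it
+// is false; with the condition compared against zero below, this corresponds
+// to kNotEqual and kEqual, respectively.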
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
 
 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   Arm64OperandGenerator g(this);
@@ -1984,7 +2010,7 @@
 
 void InstructionSelector::VisitWord32Equal(Node* const node) {
   Node* const user = node;
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int32BinopMatcher m(user);
   if (m.right().Is(0)) {
     Node* const value = m.left().node();
@@ -2018,32 +2044,34 @@
 
 
 void InstructionSelector::VisitInt32LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitWord64Equal(Node* const node) {
   Node* const user = node;
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int64BinopMatcher m(user);
   if (m.right().Is(0)) {
     Node* const value = m.left().node();
@@ -2064,7 +2092,7 @@
 
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
                                          kArithmeticImm, &cont);
   }
@@ -2075,7 +2103,7 @@
 
 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
                                          kArithmeticImm, &cont);
   }
@@ -2086,7 +2114,7 @@
 
 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm,
                                          &cont);
   }
@@ -2097,7 +2125,7 @@
 
 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm,
                                          &cont);
   }
@@ -2107,61 +2135,65 @@
 
 
 void InstructionSelector::VisitInt64LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
 }
 
 
 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
 }
 
 
 void InstructionSelector::VisitUint64LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
 }
 
 
 void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
 }
 
 
 void InstructionSelector::VisitFloat32Equal(Node* node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThan(Node* node) {
-  FlagsContinuation cont(kFloatLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kFloatLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64Equal(Node* node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThan(Node* node) {
-  FlagsContinuation cont(kFloatLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kFloatLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
diff --git a/src/compiler/ast-graph-builder.cc b/src/compiler/ast-graph-builder.cc
index abcf828..89bb619 100644
--- a/src/compiler/ast-graph-builder.cc
+++ b/src/compiler/ast-graph-builder.cc
@@ -616,12 +616,6 @@
     NewNode(javascript()->CallRuntime(Runtime::kTraceEnter));
   }
 
-  // Visit illegal re-declaration and bail out if it exists.
-  if (scope->HasIllegalRedeclaration()) {
-    VisitForEffect(scope->GetIllegalRedeclaration());
-    return;
-  }
-
   // Visit declarations within the function scope.
   VisitDeclarations(scope->declarations());
 
@@ -646,7 +640,7 @@
   }
 
   NonLiveFrameStateSlotReplacer replacer(
-      &state_values_cache_, jsgraph()->UndefinedConstant(),
+      &state_values_cache_, jsgraph()->OptimizedOutConstant(),
       liveness_analyzer()->local_count(), local_zone());
   Variable* arguments = info()->scope()->arguments();
   if (arguments != nullptr && arguments->IsStackAllocated()) {
@@ -1448,9 +1442,11 @@
   }
   try_control.EndTry();
 
-  // Clear message object as we enter the catch block.
-  Node* the_hole = jsgraph()->TheHoleConstant();
-  NewNode(javascript()->StoreMessage(), the_hole);
+  // If requested, clear message object as we enter the catch block.
+  if (stmt->clear_pending_message()) {
+    Node* the_hole = jsgraph()->TheHoleConstant();
+    NewNode(javascript()->StoreMessage(), the_hole);
+  }
 
   // Create a catch scope that binds the exception.
   Node* exception = try_control.GetExceptionNode();
@@ -1644,8 +1640,7 @@
     }
   }
 
-  // Set both the prototype and constructor to have fast properties, and also
-  // freeze them in strong mode.
+  // Set both the prototype and constructor to have fast properties.
   prototype = environment()->Pop();
   literal = environment()->Pop();
   const Operator* op =
@@ -1725,7 +1720,7 @@
   // Create node to deep-copy the literal boilerplate.
   const Operator* op = javascript()->CreateLiteralObject(
       expr->constant_properties(), expr->ComputeFlags(true),
-      expr->literal_index());
+      expr->literal_index(), expr->properties_count());
   Node* literal = NewNode(op, closure);
   PrepareFrameState(literal, expr->CreateLiteralId(),
                     OutputFrameStateCombine::Push());
@@ -1900,13 +1895,6 @@
     }
   }
 
-  // Transform literals that contain functions to fast properties.
-  literal = environment()->Top();  // Reload from operand stack.
-  if (expr->has_function()) {
-    const Operator* op = javascript()->CallRuntime(Runtime::kToFastProperties);
-    NewNode(op, literal);
-  }
-
   ast_context()->ProduceValue(environment()->Pop());
 }
 
@@ -1928,7 +1916,7 @@
   // Create node to deep-copy the literal boilerplate.
   const Operator* op = javascript()->CreateLiteralArray(
       expr->constant_elements(), expr->ComputeFlags(true),
-      expr->literal_index());
+      expr->literal_index(), expr->values()->length());
   Node* literal = NewNode(op, closure);
   PrepareFrameState(literal, expr->CreateLiteralId(),
                     OutputFrameStateCombine::Push());
@@ -2576,22 +2564,12 @@
     return VisitCallJSRuntime(expr);
   }
 
-  const Runtime::Function* function = expr->function();
-
-  // TODO(mstarzinger): This bailout is a gigantic hack, the owner is ashamed.
-  if (function->function_id == Runtime::kInlineGeneratorNext ||
-      function->function_id == Runtime::kInlineGeneratorReturn ||
-      function->function_id == Runtime::kInlineGeneratorThrow) {
-    ast_context()->ProduceValue(jsgraph()->TheHoleConstant());
-    return SetStackOverflow();
-  }
-
   // Evaluate all arguments to the runtime call.
   ZoneList<Expression*>* args = expr->arguments();
   VisitForValues(args);
 
   // Create node to perform the runtime call.
-  Runtime::FunctionId functionId = function->function_id;
+  Runtime::FunctionId functionId = expr->function()->function_id;
   const Operator* call = javascript()->CallRuntime(functionId, args->length());
   FrameStateBeforeAndAfter states(this, expr->CallId());
   Node* value = ProcessArguments(call, args->length());
@@ -2704,11 +2682,9 @@
   }
 
   // Convert old value into a number.
-  if (!is_strong(language_mode())) {
-    old_value = NewNode(javascript()->ToNumber(), old_value);
-    PrepareFrameState(old_value, expr->ToNumberId(),
-                      OutputFrameStateCombine::Push());
-  }
+  old_value = NewNode(javascript()->ToNumber(), old_value);
+  PrepareFrameState(old_value, expr->ToNumberId(),
+                    OutputFrameStateCombine::Push());
 
   // Create a proper eager frame state for the stores.
   environment()->Push(old_value);
@@ -2731,10 +2707,8 @@
     FrameStateBeforeAndAfter states(this, BailoutId::None());
     value = BuildBinaryOp(old_value, jsgraph()->OneConstant(),
                           expr->binary_op(), expr->CountBinOpFeedbackId());
-    // This should never deoptimize outside strong mode because otherwise we
-    // have converted to number before.
-    states.AddToNode(value, is_strong(language_mode()) ? expr->ToNumberId()
-                                                       : BailoutId::None(),
+    // This should never deoptimize because we already converted to number.
+    states.AddToNode(value, BailoutId::None(),
                      OutputFrameStateCombine::Ignore());
   }
 
@@ -2821,8 +2795,57 @@
   }
 }
 
+void AstGraphBuilder::VisitLiteralCompareNil(CompareOperation* expr,
+                                             Expression* sub_expr,
+                                             Node* nil_value) {
+  const Operator* op = nullptr;
+  switch (expr->op()) {
+    case Token::EQ:
+      op = javascript()->Equal();
+      break;
+    case Token::EQ_STRICT:
+      op = javascript()->StrictEqual();
+      break;
+    default:
+      UNREACHABLE();
+  }
+  VisitForValue(sub_expr);
+  FrameStateBeforeAndAfter states(this, sub_expr->id());
+  Node* value_to_compare = environment()->Pop();
+  Node* value = NewNode(op, value_to_compare, nil_value);
+  states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+  return ast_context()->ProduceValue(value);
+}
+
+void AstGraphBuilder::VisitLiteralCompareTypeof(CompareOperation* expr,
+                                                Expression* sub_expr,
+                                                Handle<String> check) {
+  VisitTypeofExpression(sub_expr);
+  FrameStateBeforeAndAfter states(this, sub_expr->id());
+  Node* typeof_arg = NewNode(javascript()->TypeOf(), environment()->Pop());
+  Node* value = NewNode(javascript()->StrictEqual(), typeof_arg,
+                        jsgraph()->Constant(check));
+  states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+  return ast_context()->ProduceValue(value);
+}
 
 void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+  // Check for a few fast cases. The AST visiting behavior must be in sync
+  // with the full codegen: We don't push both left and right values onto
+  // the expression stack when one side is a special-case literal.
+  Expression* sub_expr = nullptr;
+  Handle<String> check;
+  if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
+    return VisitLiteralCompareTypeof(expr, sub_expr, check);
+  }
+  if (expr->IsLiteralCompareUndefined(&sub_expr)) {
+    return VisitLiteralCompareNil(expr, sub_expr,
+                                  jsgraph()->UndefinedConstant());
+  }
+  if (expr->IsLiteralCompareNull(&sub_expr)) {
+    return VisitLiteralCompareNil(expr, sub_expr, jsgraph()->NullConstant());
+  }
+
   const Operator* op;
   switch (expr->op()) {
     case Token::EQ:
@@ -2850,6 +2873,7 @@
       op = javascript()->GreaterThanOrEqual();
       break;
     case Token::INSTANCEOF:
+      DCHECK(!FLAG_harmony_instanceof);
       op = javascript()->InstanceOf();
       break;
     case Token::IN:
@@ -2984,23 +3008,25 @@
   ast_context()->ProduceValue(value);
 }
 
-
-void AstGraphBuilder::VisitTypeof(UnaryOperation* expr) {
-  Node* operand;
-  if (expr->expression()->IsVariableProxy()) {
+void AstGraphBuilder::VisitTypeofExpression(Expression* expr) {
+  if (expr->IsVariableProxy()) {
     // Typeof does not throw a reference error on global variables, hence we
     // perform a non-contextual load in case the operand is a variable proxy.
-    VariableProxy* proxy = expr->expression()->AsVariableProxy();
+    VariableProxy* proxy = expr->AsVariableProxy();
     VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
     FrameStateBeforeAndAfter states(this, BeforeId(proxy));
-    operand =
-        BuildVariableLoad(proxy->var(), expr->expression()->id(), states, pair,
+    Node* load =
+        BuildVariableLoad(proxy->var(), expr->id(), states, pair,
                           OutputFrameStateCombine::Push(), INSIDE_TYPEOF);
+    environment()->Push(load);
   } else {
-    VisitForValue(expr->expression());
-    operand = environment()->Pop();
+    VisitForValue(expr);
   }
-  Node* value = NewNode(javascript()->TypeOf(), operand);
+}
+
+void AstGraphBuilder::VisitTypeof(UnaryOperation* expr) {
+  VisitTypeofExpression(expr->expression());
+  Node* value = NewNode(javascript()->TypeOf(), environment()->Pop());
   ast_context()->ProduceValue(value);
 }
 
@@ -3052,7 +3078,7 @@
 
 
 LanguageMode AstGraphBuilder::language_mode() const {
-  return info()->language_mode();
+  return current_scope()->language_mode();
 }
 
 
diff --git a/src/compiler/ast-graph-builder.h b/src/compiler/ast-graph-builder.h
index 6cff237..e206db0 100644
--- a/src/compiler/ast-graph-builder.h
+++ b/src/compiler/ast-graph-builder.h
@@ -419,11 +419,20 @@
   void VisitTypeof(UnaryOperation* expr);
   void VisitNot(UnaryOperation* expr);
 
+  // Dispatched from VisitTypeof and VisitLiteralCompareTypeof.
+  void VisitTypeofExpression(Expression* expr);
+
   // Dispatched from VisitBinaryOperation.
   void VisitComma(BinaryOperation* expr);
   void VisitLogicalExpression(BinaryOperation* expr);
   void VisitArithmeticExpression(BinaryOperation* expr);
 
+  // Dispatched from VisitCompareOperation.
+  void VisitLiteralCompareNil(CompareOperation* expr, Expression* sub_expr,
+                              Node* nil_value);
+  void VisitLiteralCompareTypeof(CompareOperation* expr, Expression* sub_expr,
+                                 Handle<String> check);
+
   // Dispatched from VisitForInStatement.
   void VisitForInAssignment(Expression* expr, Node* value,
                             const VectorSlotPair& feedback,
diff --git a/src/compiler/branch-elimination.cc b/src/compiler/branch-elimination.cc
index bc56e73..427612c 100644
--- a/src/compiler/branch-elimination.cc
+++ b/src/compiler/branch-elimination.cc
@@ -15,11 +15,11 @@
 BranchElimination::BranchElimination(Editor* editor, JSGraph* js_graph,
                                      Zone* zone)
     : AdvancedReducer(editor),
+      jsgraph_(js_graph),
       node_conditions_(zone, js_graph->graph()->NodeCount()),
       zone_(zone),
       dead_(js_graph->graph()->NewNode(js_graph->common()->Dead())) {}
 
-
 BranchElimination::~BranchElimination() {}
 
 
@@ -27,6 +27,9 @@
   switch (node->opcode()) {
     case IrOpcode::kDead:
       return NoChange();
+    case IrOpcode::kDeoptimizeIf:
+    case IrOpcode::kDeoptimizeUnless:
+      return ReduceDeoptimizeConditional(node);
     case IrOpcode::kMerge:
       return ReduceMerge(node);
     case IrOpcode::kLoop:
@@ -76,6 +79,41 @@
   return TakeConditionsFromFirstControl(node);
 }
 
+Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
+  DCHECK(node->opcode() == IrOpcode::kDeoptimizeIf ||
+         node->opcode() == IrOpcode::kDeoptimizeUnless);
+  bool condition_is_true = node->opcode() == IrOpcode::kDeoptimizeUnless;
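+  // {condition_is_true} is the value the condition has on the surviving
+  // control path: DeoptimizeUnless continues when the condition is true,
+  // DeoptimizeIf when it is false.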
+  Node* condition = NodeProperties::GetValueInput(node, 0);
+  Node* frame_state = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  ControlPathConditions const* conditions = node_conditions_.Get(control);
+  // If we do not know anything about the predecessor, do not propagate just
+  // yet because we will have to recompute anyway once we compute the
+  // predecessor.
+  if (conditions == nullptr) {
+    DCHECK_NULL(node_conditions_.Get(node));
+    return NoChange();
+  }
+  Maybe<bool> condition_value = conditions->LookupCondition(condition);
+  if (condition_value.IsJust()) {
+    // If we know the condition, the deoptimization check can be resolved
+    // statically.
+    if (condition_is_true == condition_value.FromJust()) {
+      // We don't need to update the conditions here, because we're replacing
+      // with the {control} node that already contains the right information.
+      return Replace(control);
+    } else {
+      control = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+                                 frame_state, effect, control);
+      // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+      NodeProperties::MergeControlToEnd(graph(), common(), control);
+      Revisit(graph()->end());
+      return Replace(dead());
+    }
+  }
+  return UpdateConditions(
+      node, conditions->AddCondition(zone_, condition, condition_is_true));
+}
 
 Reduction BranchElimination::ReduceIf(Node* node, bool is_true_branch) {
   // Add the condition to the list arriving from the input branch.
@@ -264,6 +302,12 @@
   return false;
 }
 
+Graph* BranchElimination::graph() const { return jsgraph()->graph(); }
+
+CommonOperatorBuilder* BranchElimination::common() const {
+  return jsgraph()->common();
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/branch-elimination.h b/src/compiler/branch-elimination.h
index a7ac926..7abeeca 100644
--- a/src/compiler/branch-elimination.h
+++ b/src/compiler/branch-elimination.h
@@ -11,6 +11,8 @@
 namespace internal {
 namespace compiler {
 
+// Forward declarations.
+class CommonOperatorBuilder;
 class JSGraph;
 
 
@@ -73,6 +75,7 @@
   };
 
   Reduction ReduceBranch(Node* node);
+  Reduction ReduceDeoptimizeConditional(Node* node);
   Reduction ReduceIf(Node* node, bool is_true_branch);
   Reduction ReduceLoop(Node* node);
   Reduction ReduceMerge(Node* node);
@@ -84,7 +87,11 @@
                              const ControlPathConditions* conditions);
 
   Node* dead() const { return dead_; }
+  Graph* graph() const;
+  JSGraph* jsgraph() const { return jsgraph_; }
+  CommonOperatorBuilder* common() const;
 
+  JSGraph* const jsgraph_;
   PathConditionsForControlNodes node_conditions_;
   Zone* zone_;
   Node* dead_;
diff --git a/src/compiler/bytecode-graph-builder.cc b/src/compiler/bytecode-graph-builder.cc
index e28c19d..2249cbc 100644
--- a/src/compiler/bytecode-graph-builder.cc
+++ b/src/compiler/bytecode-graph-builder.cc
@@ -444,7 +444,7 @@
       bytecode_array_(handle(info->shared_info()->bytecode_array())),
       exception_handler_table_(
           handle(HandlerTable::cast(bytecode_array()->handler_table()))),
-      feedback_vector_(info->feedback_vector()),
+      feedback_vector_(handle(info->shared_info()->feedback_vector())),
       frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
           FrameStateType::kInterpretedFunction,
           bytecode_array()->parameter_count(),
@@ -570,17 +570,11 @@
   environment()->BindAccumulator(node);
 }
 
-void BytecodeGraphBuilder::VisitLdaSmi8() {
+void BytecodeGraphBuilder::VisitLdaSmi() {
   Node* node = jsgraph()->Constant(bytecode_iterator().GetImmediateOperand(0));
   environment()->BindAccumulator(node);
 }
 
-void BytecodeGraphBuilder::VisitLdaConstantWide() {
-  Node* node =
-      jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
-  environment()->BindAccumulator(node);
-}
-
 void BytecodeGraphBuilder::VisitLdaConstant() {
   Node* node =
       jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
@@ -629,8 +623,6 @@
   environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), value);
 }
 
-void BytecodeGraphBuilder::VisitMovWide() { VisitMov(); }
-
 void BytecodeGraphBuilder::BuildLoadGlobal(
     TypeofMode typeof_mode) {
   FrameStateBeforeAndAfter states(this);
@@ -652,14 +644,6 @@
   BuildLoadGlobal(TypeofMode::INSIDE_TYPEOF);
 }
 
-void BytecodeGraphBuilder::VisitLdaGlobalWide() {
-  BuildLoadGlobal(TypeofMode::NOT_INSIDE_TYPEOF);
-}
-
-void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofWide() {
-  BuildLoadGlobal(TypeofMode::INSIDE_TYPEOF);
-}
-
 void BytecodeGraphBuilder::BuildStoreGlobal(LanguageMode language_mode) {
   FrameStateBeforeAndAfter states(this);
   Handle<Name> name =
@@ -681,14 +665,6 @@
   BuildStoreGlobal(LanguageMode::STRICT);
 }
 
-void BytecodeGraphBuilder::VisitStaGlobalSloppyWide() {
-  BuildStoreGlobal(LanguageMode::SLOPPY);
-}
-
-void BytecodeGraphBuilder::VisitStaGlobalStrictWide() {
-  BuildStoreGlobal(LanguageMode::STRICT);
-}
-
 void BytecodeGraphBuilder::VisitLdaContextSlot() {
   // TODO(mythria): LoadContextSlots are unrolled by the required depth when
   // generating bytecode. Hence the value of depth is always 0. Update this
@@ -704,8 +680,6 @@
   environment()->BindAccumulator(node);
 }
 
-void BytecodeGraphBuilder::VisitLdaContextSlotWide() { VisitLdaContextSlot(); }
-
 void BytecodeGraphBuilder::VisitStaContextSlot() {
   // TODO(mythria): LoadContextSlots are unrolled by the required depth when
   // generating bytecode. Hence the value of depth is always 0. Update this
@@ -718,8 +692,6 @@
   NewNode(op, context, value);
 }
 
-void BytecodeGraphBuilder::VisitStaContextSlotWide() { VisitStaContextSlot(); }
-
 void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) {
   FrameStateBeforeAndAfter states(this);
   Node* name =
@@ -752,12 +724,6 @@
   environment()->BindAccumulator(store, &states);
 }
 
-void BytecodeGraphBuilder::VisitLdaLookupSlotWide() { VisitLdaLookupSlot(); }
-
-void BytecodeGraphBuilder::VisitLdaLookupSlotInsideTypeofWide() {
-  VisitLdaLookupSlotInsideTypeof();
-}
-
 void BytecodeGraphBuilder::VisitStaLookupSlotSloppy() {
   BuildStaLookupSlot(LanguageMode::SLOPPY);
 }
@@ -766,14 +732,6 @@
   BuildStaLookupSlot(LanguageMode::STRICT);
 }
 
-void BytecodeGraphBuilder::VisitStaLookupSlotSloppyWide() {
-  VisitStaLookupSlotSloppy();
-}
-
-void BytecodeGraphBuilder::VisitStaLookupSlotStrictWide() {
-  VisitStaLookupSlotStrict();
-}
-
 void BytecodeGraphBuilder::BuildNamedLoad() {
   FrameStateBeforeAndAfter states(this);
   Node* object =
@@ -790,8 +748,6 @@
 
 void BytecodeGraphBuilder::VisitLoadIC() { BuildNamedLoad(); }
 
-void BytecodeGraphBuilder::VisitLoadICWide() { BuildNamedLoad(); }
-
 void BytecodeGraphBuilder::BuildKeyedLoad() {
   FrameStateBeforeAndAfter states(this);
   Node* key = environment()->LookupAccumulator();
@@ -807,8 +763,6 @@
 
 void BytecodeGraphBuilder::VisitKeyedLoadIC() { BuildKeyedLoad(); }
 
-void BytecodeGraphBuilder::VisitKeyedLoadICWide() { BuildKeyedLoad(); }
-
 void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode) {
   FrameStateBeforeAndAfter states(this);
   Node* value = environment()->LookupAccumulator();
@@ -832,14 +786,6 @@
   BuildNamedStore(LanguageMode::STRICT);
 }
 
-void BytecodeGraphBuilder::VisitStoreICSloppyWide() {
-  BuildNamedStore(LanguageMode::SLOPPY);
-}
-
-void BytecodeGraphBuilder::VisitStoreICStrictWide() {
-  BuildNamedStore(LanguageMode::STRICT);
-}
-
 void BytecodeGraphBuilder::BuildKeyedStore(LanguageMode language_mode) {
   FrameStateBeforeAndAfter states(this);
   Node* value = environment()->LookupAccumulator();
@@ -863,14 +809,6 @@
   BuildKeyedStore(LanguageMode::STRICT);
 }
 
-void BytecodeGraphBuilder::VisitKeyedStoreICSloppyWide() {
-  BuildKeyedStore(LanguageMode::SLOPPY);
-}
-
-void BytecodeGraphBuilder::VisitKeyedStoreICStrictWide() {
-  BuildKeyedStore(LanguageMode::STRICT);
-}
-
 void BytecodeGraphBuilder::VisitPushContext() {
   Node* new_context = environment()->LookupAccumulator();
   environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0),
@@ -888,14 +826,12 @@
   Handle<SharedFunctionInfo> shared_info = Handle<SharedFunctionInfo>::cast(
       bytecode_iterator().GetConstantForIndexOperand(0));
   PretenureFlag tenured =
-      bytecode_iterator().GetImmediateOperand(1) ? TENURED : NOT_TENURED;
+      bytecode_iterator().GetFlagOperand(1) ? TENURED : NOT_TENURED;
   const Operator* op = javascript()->CreateClosure(shared_info, tenured);
   Node* closure = NewNode(op);
   environment()->BindAccumulator(closure);
 }
 
-void BytecodeGraphBuilder::VisitCreateClosureWide() { VisitCreateClosure(); }
-
 void BytecodeGraphBuilder::BuildCreateArguments(CreateArgumentsType type) {
   FrameStateBeforeAndAfter states(this);
   const Operator* op = javascript()->CreateArguments(type);
@@ -921,61 +857,39 @@
   environment()->BindAccumulator(literal, &states);
 }
 
-void BytecodeGraphBuilder::BuildCreateRegExpLiteral() {
+void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
   Handle<String> constant_pattern =
       Handle<String>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
   int literal_index = bytecode_iterator().GetIndexOperand(1);
-  int literal_flags = bytecode_iterator().GetImmediateOperand(2);
+  int literal_flags = bytecode_iterator().GetFlagOperand(2);
   const Operator* op = javascript()->CreateLiteralRegExp(
       constant_pattern, literal_flags, literal_index);
   BuildCreateLiteral(op);
 }
 
-void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
-  BuildCreateRegExpLiteral();
-}
-
-void BytecodeGraphBuilder::VisitCreateRegExpLiteralWide() {
-  BuildCreateRegExpLiteral();
-}
-
-void BytecodeGraphBuilder::BuildCreateArrayLiteral() {
+void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
   Handle<FixedArray> constant_elements = Handle<FixedArray>::cast(
       bytecode_iterator().GetConstantForIndexOperand(0));
   int literal_index = bytecode_iterator().GetIndexOperand(1);
-  int literal_flags = bytecode_iterator().GetImmediateOperand(2);
+  int literal_flags = bytecode_iterator().GetFlagOperand(2);
+  int number_of_elements = constant_elements->length();
   const Operator* op = javascript()->CreateLiteralArray(
-      constant_elements, literal_flags, literal_index);
-  BuildCreateLiteral(op);
-}
-
-void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
-  BuildCreateArrayLiteral();
-}
-
-void BytecodeGraphBuilder::VisitCreateArrayLiteralWide() {
-  BuildCreateArrayLiteral();
-}
-
-void BytecodeGraphBuilder::BuildCreateObjectLiteral() {
-  Handle<FixedArray> constant_properties = Handle<FixedArray>::cast(
-      bytecode_iterator().GetConstantForIndexOperand(0));
-  int literal_index = bytecode_iterator().GetIndexOperand(1);
-  int literal_flags = bytecode_iterator().GetImmediateOperand(2);
-  const Operator* op = javascript()->CreateLiteralObject(
-      constant_properties, literal_flags, literal_index);
+      constant_elements, literal_flags, literal_index, number_of_elements);
   BuildCreateLiteral(op);
 }
 
 void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
-  BuildCreateObjectLiteral();
+  Handle<FixedArray> constant_properties = Handle<FixedArray>::cast(
+      bytecode_iterator().GetConstantForIndexOperand(0));
+  int literal_index = bytecode_iterator().GetIndexOperand(1);
+  int literal_flags = bytecode_iterator().GetFlagOperand(2);
+  // TODO(mstarzinger): Thread through number of properties.
+  int number_of_properties = constant_properties->length() / 2;
+  const Operator* op = javascript()->CreateLiteralObject(
+      constant_properties, literal_flags, literal_index, number_of_properties);
+  BuildCreateLiteral(op);
 }
 
-void BytecodeGraphBuilder::VisitCreateObjectLiteralWide() {
-  BuildCreateObjectLiteral();
-}
-
-
 Node* BytecodeGraphBuilder::ProcessCallArguments(const Operator* call_op,
                                                  Node* callee,
                                                  interpreter::Register receiver,
@@ -1013,17 +927,15 @@
 
 void BytecodeGraphBuilder::VisitCall() { BuildCall(TailCallMode::kDisallow); }
 
-void BytecodeGraphBuilder::VisitCallWide() {
-  BuildCall(TailCallMode::kDisallow);
+void BytecodeGraphBuilder::VisitTailCall() {
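+  // Tail calls are only performed when the isolate-wide flag is enabled;
+  // otherwise the TailCall bytecode degrades to a regular call.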
+  TailCallMode tail_call_mode =
+      bytecode_array_->GetIsolate()->is_tail_call_elimination_enabled()
+          ? TailCallMode::kAllow
+          : TailCallMode::kDisallow;
+  BuildCall(tail_call_mode);
 }
 
-void BytecodeGraphBuilder::VisitTailCall() { BuildCall(TailCallMode::kAllow); }
-
-void BytecodeGraphBuilder::VisitTailCallWide() {
-  BuildCall(TailCallMode::kAllow);
-}
-
-void BytecodeGraphBuilder::BuildCallJSRuntime() {
+void BytecodeGraphBuilder::VisitCallJSRuntime() {
   FrameStateBeforeAndAfter states(this);
   Node* callee =
       BuildLoadNativeContextField(bytecode_iterator().GetIndexOperand(0));
@@ -1036,10 +948,6 @@
   environment()->BindAccumulator(value, &states);
 }
 
-void BytecodeGraphBuilder::VisitCallJSRuntime() { BuildCallJSRuntime(); }
-
-void BytecodeGraphBuilder::VisitCallJSRuntimeWide() { BuildCallJSRuntime(); }
-
 Node* BytecodeGraphBuilder::ProcessCallRuntimeArguments(
     const Operator* call_runtime_op, interpreter::Register first_arg,
     size_t arity) {
@@ -1053,10 +961,10 @@
   return value;
 }
 
-void BytecodeGraphBuilder::BuildCallRuntime() {
+void BytecodeGraphBuilder::VisitCallRuntime() {
   FrameStateBeforeAndAfter states(this);
-  Runtime::FunctionId functionId =
-      static_cast<Runtime::FunctionId>(bytecode_iterator().GetIndexOperand(0));
+  Runtime::FunctionId functionId = static_cast<Runtime::FunctionId>(
+      bytecode_iterator().GetRuntimeIdOperand(0));
   interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
   size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
 
@@ -1066,14 +974,10 @@
   environment()->BindAccumulator(value, &states);
 }
 
-void BytecodeGraphBuilder::VisitCallRuntime() { BuildCallRuntime(); }
-
-void BytecodeGraphBuilder::VisitCallRuntimeWide() { BuildCallRuntime(); }
-
-void BytecodeGraphBuilder::BuildCallRuntimeForPair() {
+void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
   FrameStateBeforeAndAfter states(this);
-  Runtime::FunctionId functionId =
-      static_cast<Runtime::FunctionId>(bytecode_iterator().GetIndexOperand(0));
+  Runtime::FunctionId functionId = static_cast<Runtime::FunctionId>(
+      bytecode_iterator().GetRuntimeIdOperand(0));
   interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
   size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
   interpreter::Register first_return =
@@ -1085,12 +989,18 @@
   environment()->BindRegistersToProjections(first_return, return_pair, &states);
 }
 
-void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
-  BuildCallRuntimeForPair();
-}
+void BytecodeGraphBuilder::VisitInvokeIntrinsic() {
+  FrameStateBeforeAndAfter states(this);
+  Runtime::FunctionId functionId = static_cast<Runtime::FunctionId>(
+      bytecode_iterator().GetRuntimeIdOperand(0));
+  interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
+  size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
 
-void BytecodeGraphBuilder::VisitCallRuntimeForPairWide() {
-  BuildCallRuntimeForPair();
+  // Create node to perform the runtime call. Turbofan will take care of the
+  // lowering.
+  const Operator* call = javascript()->CallRuntime(functionId, arg_count);
+  Node* value = ProcessCallRuntimeArguments(call, first_arg, arg_count);
+  environment()->BindAccumulator(value, &states);
 }
 
 Node* BytecodeGraphBuilder::ProcessCallNewArguments(
@@ -1108,7 +1018,7 @@
   return value;
 }
 
-void BytecodeGraphBuilder::BuildCallConstruct() {
+void BytecodeGraphBuilder::VisitNew() {
   FrameStateBeforeAndAfter states(this);
   interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
   interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
@@ -1124,10 +1034,6 @@
   environment()->BindAccumulator(value, &states);
 }
 
-void BytecodeGraphBuilder::VisitNew() { BuildCallConstruct(); }
-
-void BytecodeGraphBuilder::VisitNewWide() { BuildCallConstruct(); }
-
 void BytecodeGraphBuilder::BuildThrow() {
   FrameStateBeforeAndAfter states(this);
   Node* value = environment()->LookupAccumulator();
@@ -1282,10 +1188,6 @@
   BuildCompareOp(javascript()->StrictEqual());
 }
 
-void BytecodeGraphBuilder::VisitTestNotEqualStrict() {
-  BuildCompareOp(javascript()->StrictNotEqual());
-}
-
 void BytecodeGraphBuilder::VisitTestLessThan() {
   BuildCompareOp(javascript()->LessThan());
 }
@@ -1307,6 +1209,7 @@
 }
 
 void BytecodeGraphBuilder::VisitTestInstanceOf() {
+  DCHECK(!FLAG_harmony_instanceof);
   BuildCompareOp(javascript()->InstanceOf());
 }
 
@@ -1332,7 +1235,6 @@
 
 void BytecodeGraphBuilder::VisitJumpConstant() { BuildJump(); }
 
-void BytecodeGraphBuilder::VisitJumpConstantWide() { BuildJump(); }
 
 void BytecodeGraphBuilder::VisitJumpIfTrue() {
   BuildJumpIfEqual(jsgraph()->TrueConstant());
@@ -1342,10 +1244,6 @@
   BuildJumpIfEqual(jsgraph()->TrueConstant());
 }
 
-void BytecodeGraphBuilder::VisitJumpIfTrueConstantWide() {
-  BuildJumpIfEqual(jsgraph()->TrueConstant());
-}
-
 void BytecodeGraphBuilder::VisitJumpIfFalse() {
   BuildJumpIfEqual(jsgraph()->FalseConstant());
 }
@@ -1354,10 +1252,6 @@
   BuildJumpIfEqual(jsgraph()->FalseConstant());
 }
 
-void BytecodeGraphBuilder::VisitJumpIfFalseConstantWide() {
-  BuildJumpIfEqual(jsgraph()->FalseConstant());
-}
-
 void BytecodeGraphBuilder::VisitJumpIfToBooleanTrue() {
   BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
 }
@@ -1366,10 +1260,6 @@
   BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
 }
 
-void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstantWide() {
-  BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
-}
-
 void BytecodeGraphBuilder::VisitJumpIfToBooleanFalse() {
   BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
 }
@@ -1378,20 +1268,12 @@
   BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
 }
 
-void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstantWide() {
-  BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
-}
-
 void BytecodeGraphBuilder::VisitJumpIfNotHole() { BuildJumpIfNotHole(); }
 
 void BytecodeGraphBuilder::VisitJumpIfNotHoleConstant() {
   BuildJumpIfNotHole();
 }
 
-void BytecodeGraphBuilder::VisitJumpIfNotHoleConstantWide() {
-  BuildJumpIfNotHole();
-}
-
 void BytecodeGraphBuilder::VisitJumpIfNull() {
   BuildJumpIfEqual(jsgraph()->NullConstant());
 }
@@ -1400,10 +1282,6 @@
   BuildJumpIfEqual(jsgraph()->NullConstant());
 }
 
-void BytecodeGraphBuilder::VisitJumpIfNullConstantWide() {
-  BuildJumpIfEqual(jsgraph()->NullConstant());
-}
-
 void BytecodeGraphBuilder::VisitJumpIfUndefined() {
   BuildJumpIfEqual(jsgraph()->UndefinedConstant());
 }
@@ -1412,10 +1290,6 @@
   BuildJumpIfEqual(jsgraph()->UndefinedConstant());
 }
 
-void BytecodeGraphBuilder::VisitJumpIfUndefinedConstantWide() {
-  BuildJumpIfEqual(jsgraph()->UndefinedConstant());
-}
-
 void BytecodeGraphBuilder::VisitStackCheck() {
   FrameStateBeforeAndAfter states(this);
   Node* node = NewNode(javascript()->StackCheck());
@@ -1451,8 +1325,6 @@
 
 void BytecodeGraphBuilder::VisitForInPrepare() { BuildForInPrepare(); }
 
-void BytecodeGraphBuilder::VisitForInPrepareWide() { BuildForInPrepare(); }
-
 void BytecodeGraphBuilder::VisitForInDone() {
   FrameStateBeforeAndAfter states(this);
   Node* index =
@@ -1482,8 +1354,6 @@
 
 void BytecodeGraphBuilder::VisitForInNext() { BuildForInNext(); }
 
-void BytecodeGraphBuilder::VisitForInNextWide() { BuildForInNext(); }
-
 void BytecodeGraphBuilder::VisitForInStep() {
   FrameStateBeforeAndAfter states(this);
   Node* index =
@@ -1492,6 +1362,21 @@
   environment()->BindAccumulator(index, &states);
 }
 
+void BytecodeGraphBuilder::VisitWide() {
+  // Consumed by the BytecodeArrayIterator.
+  UNREACHABLE();
+}
+
+void BytecodeGraphBuilder::VisitExtraWide() {
+  // Consumed by the BytecodeArrayIterator.
+  UNREACHABLE();
+}
+
+void BytecodeGraphBuilder::VisitIllegal() {
+  // Never present in valid bytecode.
+  UNREACHABLE();
+}
+
 void BytecodeGraphBuilder::SwitchToMergeEnvironment(int current_offset) {
   if (merge_environments_[current_offset] != nullptr) {
     if (environment() != nullptr) {
diff --git a/src/compiler/bytecode-graph-builder.h b/src/compiler/bytecode-graph-builder.h
index 2fa5967..c842c24 100644
--- a/src/compiler/bytecode-graph-builder.h
+++ b/src/compiler/bytecode-graph-builder.h
@@ -111,9 +111,6 @@
                                     size_t arity);
 
   void BuildCreateLiteral(const Operator* op);
-  void BuildCreateRegExpLiteral();
-  void BuildCreateArrayLiteral();
-  void BuildCreateObjectLiteral();
   void BuildCreateArguments(CreateArgumentsType type);
   void BuildLoadGlobal(TypeofMode typeof_mode);
   void BuildStoreGlobal(LanguageMode language_mode);
@@ -124,10 +121,6 @@
   void BuildLdaLookupSlot(TypeofMode typeof_mode);
   void BuildStaLookupSlot(LanguageMode language_mode);
   void BuildCall(TailCallMode tail_call_mode);
-  void BuildCallJSRuntime();
-  void BuildCallRuntime();
-  void BuildCallRuntimeForPair();
-  void BuildCallConstruct();
   void BuildThrow();
   void BuildBinaryOp(const Operator* op);
   void BuildCompareOp(const Operator* op);
@@ -135,6 +128,7 @@
   void BuildCastOperator(const Operator* op);
   void BuildForInPrepare();
   void BuildForInNext();
+  void BuildInvokeIntrinsic();
 
   // Control flow plumbing.
   void BuildJump();
diff --git a/src/compiler/c-linkage.cc b/src/compiler/c-linkage.cc
index 783d9d6..b38e529 100644
--- a/src/compiler/c-linkage.cc
+++ b/src/compiler/c-linkage.cc
@@ -123,6 +123,26 @@
       d20.bit() | d21.bit() | d22.bit() | d23.bit() | d24.bit() | d25.bit() | \
       d26.bit() | d27.bit() | d28.bit() | d29.bit() | d30.bit() | d31.bit()
 
+#elif V8_TARGET_ARCH_S390X
+// ===========================================================================
+// == s390x ==================================================================
+// ===========================================================================
+#define PARAM_REGISTERS r2, r3, r4, r5, r6
+#define CALLEE_SAVE_REGISTERS \
+  r6.bit() | r7.bit() | r8.bit() | r9.bit() | r10.bit() | ip.bit() | r13.bit()
+#define CALLEE_SAVE_FP_REGISTERS                                        \
+  d8.bit() | d9.bit() | d10.bit() | d11.bit() | d12.bit() | d13.bit() | \
+      d14.bit() | d15.bit()
+
+#elif V8_TARGET_ARCH_S390
+// ===========================================================================
+// == s390 ===================================================================
+// ===========================================================================
+#define PARAM_REGISTERS r2, r3, r4, r5, r6
+#define CALLEE_SAVE_REGISTERS \
+  r6.bit() | r7.bit() | r8.bit() | r9.bit() | r10.bit() | ip.bit() | r13.bit()
+#define CALLEE_SAVE_FP_REGISTERS (d4.bit() | d6.bit())
+
 #else
 // ===========================================================================
 // == unknown ================================================================
@@ -210,6 +230,11 @@
   // The target for C calls is always an address (i.e. machine pointer).
   MachineType target_type = MachineType::Pointer();
   LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
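+  // C calls always run on the native stack; the root register is only
+  // initialized when the caller explicitly asks for it.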
+  CallDescriptor::Flags flags = CallDescriptor::kUseNativeStack;
+  if (set_initialize_root_flag) {
+    flags |= CallDescriptor::kInitializeRootRegister;
+  }
+
   return new (zone) CallDescriptor(  // --
       CallDescriptor::kCallAddress,  // kind
       target_type,                   // target MachineType
@@ -220,10 +245,7 @@
       Operator::kNoProperties,       // properties
       kCalleeSaveRegisters,          // callee-saved registers
       kCalleeSaveFPRegisters,        // callee-saved fp regs
-      set_initialize_root_flag ?     // flags
-          CallDescriptor::kInitializeRootRegister
-                               : CallDescriptor::kNoFlags,
-      "c-call");
+      flags, "c-call");
 }
 
 }  // namespace compiler
diff --git a/src/compiler/change-lowering.cc b/src/compiler/change-lowering.cc
index e217f37..907b36a 100644
--- a/src/compiler/change-lowering.cc
+++ b/src/compiler/change-lowering.cc
@@ -55,6 +55,8 @@
       return ObjectIsSmi(node);
     case IrOpcode::kObjectIsNumber:
       return ObjectIsNumber(node);
+    case IrOpcode::kObjectIsUndetectable:
+      return ObjectIsUndetectable(node);
     default:
       return NoChange();
   }
@@ -603,6 +605,13 @@
       graph()->start(), control);
 }
 
+Node* ChangeLowering::LoadMapBitField(Node* map) {
+  return graph()->NewNode(
+      machine()->Load(MachineType::Uint8()), map,
+      jsgraph()->IntPtrConstant(Map::kBitFieldOffset - kHeapObjectTag),
+      graph()->start(), graph()->start());
+}
+
 Node* ChangeLowering::LoadMapInstanceType(Node* map) {
   return graph()->NewNode(
       machine()->Load(MachineType::Uint8()), map,
@@ -650,6 +659,31 @@
   return Changed(node);
 }
 
+Reduction ChangeLowering::ObjectIsUndetectable(Node* node) {
+  Node* input = NodeProperties::GetValueInput(node, 0);
+  // TODO(bmeurer): Optimize somewhat based on input type.
+  Node* check = IsSmi(input);
+  Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* vtrue = jsgraph()->Int32Constant(0);
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
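+  // For heap objects, test Map::kIsUndetectable in the map's bit field; the
+  // two Word32Equal-against-zero nodes normalize the masked bit into a 0/1
+  // value that is 1 exactly for undetectable maps.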
+  Node* vfalse = graph()->NewNode(
+      machine()->Word32Equal(),
+      graph()->NewNode(
+          machine()->Word32Equal(),
+          graph()->NewNode(machine()->Word32And(),
+                           jsgraph()->Uint32Constant(1 << Map::kIsUndetectable),
+                           LoadMapBitField(LoadHeapObjectMap(input, if_false))),
+          jsgraph()->Int32Constant(0)),
+      jsgraph()->Int32Constant(0));
+  Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  node->ReplaceInput(0, vtrue);
+  node->AppendInput(graph()->zone(), vfalse);
+  node->AppendInput(graph()->zone(), control);
+  NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
+  return Changed(node);
+}
+
 Reduction ChangeLowering::ObjectIsSmi(Node* node) {
   node->ReplaceInput(0,
                      graph()->NewNode(machine()->WordAnd(), node->InputAt(0),
diff --git a/src/compiler/change-lowering.h b/src/compiler/change-lowering.h
index defadd9..7e5078b 100644
--- a/src/compiler/change-lowering.h
+++ b/src/compiler/change-lowering.h
@@ -58,11 +58,13 @@
 
   Node* IsSmi(Node* value);
   Node* LoadHeapObjectMap(Node* object, Node* control);
+  Node* LoadMapBitField(Node* map);
   Node* LoadMapInstanceType(Node* map);
 
   Reduction ObjectIsNumber(Node* node);
   Reduction ObjectIsReceiver(Node* node);
   Reduction ObjectIsSmi(Node* node);
+  Reduction ObjectIsUndetectable(Node* node);
 
   Node* ComputeIndex(const ElementAccess& access, Node* const key);
   Graph* graph() const;
diff --git a/src/compiler/code-generator-impl.h b/src/compiler/code-generator-impl.h
index 7295948..7de32c5 100644
--- a/src/compiler/code-generator-impl.h
+++ b/src/compiler/code-generator-impl.h
@@ -139,6 +139,19 @@
   Instruction* instr_;
 };
 
+// Eager deoptimization exit.
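+// Records a label for a conditional deoptimization; the out-of-line call to
+// the Deoptimizer is emitted behind this label once all blocks have been
+// assembled.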
+class DeoptimizationExit : public ZoneObject {
+ public:
+  explicit DeoptimizationExit(int deoptimization_id)
+      : deoptimization_id_(deoptimization_id) {}
+
+  int deoptimization_id() const { return deoptimization_id_; }
+  Label* label() { return &label_; }
+
+ private:
+  int const deoptimization_id_;
+  Label label_;
+};
 
 // Generator for out-of-line code that is emitted after the main code is done.
 class OutOfLineCode : public ZoneObject {
diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc
index 712cfe0..086da56 100644
--- a/src/compiler/code-generator.cc
+++ b/src/compiler/code-generator.cc
@@ -31,7 +31,6 @@
   size_t const target_count_;
 };
 
-
 CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
                              InstructionSequence* code, CompilationInfo* info)
     : frame_access_state_(new (code->zone()) FrameAccessState(frame)),
@@ -45,6 +44,7 @@
       resolver_(this),
       safepoints_(code->zone()),
       handlers_(code->zone()),
+      deoptimization_exits_(code->zone()),
       deoptimization_states_(code->zone()),
       deoptimization_literals_(code->zone()),
       inlined_function_count_(0),
@@ -56,12 +56,8 @@
   for (int i = 0; i < code->InstructionBlockCount(); ++i) {
     new (&labels_[i]) Label;
   }
-  if (code->ContainsCall()) {
-    frame->MarkNeedsFrame();
-  }
 }
 
-
 Handle<Code> CodeGenerator::GenerateCode() {
   CompilationInfo* info = this->info();
 
@@ -80,14 +76,11 @@
   }
   // Architecture-specific, linkage-specific prologue.
   info->set_prologue_offset(masm()->pc_offset());
-  AssemblePrologue();
-  if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) {
-    masm()->InitializeRootRegister();
-  }
 
   // Define deoptimization literals for all inlined functions.
   DCHECK_EQ(0u, deoptimization_literals_.size());
-  for (auto& inlined : info->inlined_functions()) {
+  for (const CompilationInfo::InlinedFunctionHolder& inlined :
+       info->inlined_functions()) {
     if (!inlined.shared_info.is_identical_to(info->shared_info())) {
       DefineDeoptimizationLiteral(inlined.shared_info);
     }
@@ -96,15 +89,19 @@
 
   // Define deoptimization literals for all unoptimized code objects of inlined
   // functions. This ensures unoptimized code is kept alive by optimized code.
-  for (auto& inlined : info->inlined_functions()) {
+  for (const CompilationInfo::InlinedFunctionHolder& inlined :
+       info->inlined_functions()) {
     if (!inlined.shared_info.is_identical_to(info->shared_info())) {
       DefineDeoptimizationLiteral(inlined.inlined_code_object_root);
     }
   }
 
+  // Finish the frame: align it and set up the stack pointer.
+  frame()->AlignFrame(kFrameAlignmentInBytes);
+  AssembleSetupStackPointer();
   // Assemble all non-deferred blocks, followed by deferred ones.
   for (int deferred = 0; deferred < 2; ++deferred) {
-    for (auto const block : code()->instruction_blocks()) {
+    for (const InstructionBlock* block : code()->instruction_blocks()) {
       if (block->IsDeferred() == (deferred == 0)) {
         continue;
       }
@@ -141,9 +138,26 @@
         SNPrintF(buffer, " --");
         masm()->RecordComment(buffer_start);
       }
+
+      frame_access_state()->MarkHasFrame(block->needs_frame());
+
       masm()->bind(GetLabel(current_block_));
-      for (int i = block->code_start(); i < block->code_end(); ++i) {
-        AssembleInstruction(code()->InstructionAt(i));
+      if (block->must_construct_frame()) {
+        AssemblePrologue();
+        // We need to set up the root register after we assemble the prologue,
+        // to avoid clobbering callee-saved registers in the case of C linkage
+        // that uses the roots.
+        // TODO(mtrofin): investigate how we can avoid doing this repeatedly.
+        if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) {
+          masm()->InitializeRootRegister();
+        }
+      }
+
+      if (FLAG_enable_embedded_constant_pool && !block->needs_frame()) {
+        ConstantPoolUnavailableScope constant_pool_unavailable(masm());
+        AssembleBlock(block);
+      } else {
+        AssembleBlock(block);
       }
     }
   }
@@ -158,6 +172,12 @@
     }
   }
 
+  // Assemble all eager deoptimization exits.
+  for (DeoptimizationExit* exit : deoptimization_exits_) {
+    masm()->bind(exit->label());
+    AssembleDeoptimizerCall(exit->deoptimization_id(), Deoptimizer::EAGER);
+  }
+
   // Ensure there is space for lazy deoptimization in the code.
   if (info->ShouldEnsureSpaceForLazyDeopt()) {
     int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
@@ -211,7 +231,8 @@
 
   // Emit a code line info recording stop event.
   void* line_info = recorder->DetachJITHandlerData();
-  LOG_CODE_EVENT(isolate(), CodeEndLinePosInfoRecordEvent(*result, line_info));
+  LOG_CODE_EVENT(isolate(), CodeEndLinePosInfoRecordEvent(
+                                AbstractCode::cast(*result), line_info));
 
   return result;
 }
@@ -232,7 +253,7 @@
       safepoints()->DefineSafepoint(masm(), kind, arguments, deopt_mode);
   int stackSlotToSpillSlotDelta =
       frame()->GetTotalFrameSlotCount() - frame()->GetSpillSlotCount();
-  for (auto& operand : references->reference_operands()) {
+  for (const InstructionOperand& operand : references->reference_operands()) {
     if (operand.IsStackSlot()) {
       int index = LocationOperand::cast(operand).index();
       DCHECK(index >= 0);
@@ -250,16 +271,15 @@
   }
 }
 
-
 bool CodeGenerator::IsMaterializableFromFrame(Handle<HeapObject> object,
-                                              int* offset_return) {
+                                              int* slot_return) {
   if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
     if (info()->has_context() && object.is_identical_to(info()->context()) &&
         !info()->is_osr()) {
-      *offset_return = StandardFrameConstants::kContextOffset;
+      *slot_return = Frame::kContextSlot;
       return true;
     } else if (object.is_identical_to(info()->closure())) {
-      *offset_return = JavaScriptFrameConstants::kFunctionOffset;
+      *slot_return = Frame::kJSFunctionSlot;
       return true;
     }
   }
@@ -282,43 +302,82 @@
   return false;
 }
 
+void CodeGenerator::AssembleBlock(const InstructionBlock* block) {
+  for (int i = block->code_start(); i < block->code_end(); ++i) {
+    Instruction* instr = code()->InstructionAt(i);
+    AssembleInstruction(instr, block);
+  }
+}
 
-void CodeGenerator::AssembleInstruction(Instruction* instr) {
+void CodeGenerator::AssembleInstruction(Instruction* instr,
+                                        const InstructionBlock* block) {
   AssembleGaps(instr);
+  DCHECK_IMPLIES(
+      block->must_deconstruct_frame(),
+      instr != code()->InstructionAt(block->last_instruction_index()) ||
+          instr->IsRet() || instr->IsJump());
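+  // Blocks marked must_deconstruct_frame tear the frame down before their
+  // final jump; a final return instead tears it down in AssembleReturn.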
+  if (instr->IsJump() && block->must_deconstruct_frame()) {
+    AssembleDeconstructFrame();
+  }
   AssembleSourcePosition(instr);
   // Assemble architecture-specific code for the instruction.
   AssembleArchInstruction(instr);
 
   FlagsMode mode = FlagsModeField::decode(instr->opcode());
   FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
-  if (mode == kFlags_branch) {
-    // Assemble a branch after this instruction.
-    InstructionOperandConverter i(this, instr);
-    RpoNumber true_rpo = i.InputRpo(instr->InputCount() - 2);
-    RpoNumber false_rpo = i.InputRpo(instr->InputCount() - 1);
+  switch (mode) {
+    case kFlags_branch: {
+      // Assemble a branch after this instruction.
+      InstructionOperandConverter i(this, instr);
+      RpoNumber true_rpo = i.InputRpo(instr->InputCount() - 2);
+      RpoNumber false_rpo = i.InputRpo(instr->InputCount() - 1);
 
-    if (true_rpo == false_rpo) {
-      // redundant branch.
-      if (!IsNextInAssemblyOrder(true_rpo)) {
-        AssembleArchJump(true_rpo);
+      if (true_rpo == false_rpo) {
+        // redundant branch.
+        if (!IsNextInAssemblyOrder(true_rpo)) {
+          AssembleArchJump(true_rpo);
+        }
+        return;
       }
-      return;
+      if (IsNextInAssemblyOrder(true_rpo)) {
+        // true block is next, can fall through if condition negated.
+        std::swap(true_rpo, false_rpo);
+        condition = NegateFlagsCondition(condition);
+      }
+      BranchInfo branch;
+      branch.condition = condition;
+      branch.true_label = GetLabel(true_rpo);
+      branch.false_label = GetLabel(false_rpo);
+      branch.fallthru = IsNextInAssemblyOrder(false_rpo);
+      // Assemble architecture-specific branch.
+      AssembleArchBranch(instr, &branch);
+      break;
     }
-    if (IsNextInAssemblyOrder(true_rpo)) {
-      // true block is next, can fall through if condition negated.
-      std::swap(true_rpo, false_rpo);
-      condition = NegateFlagsCondition(condition);
+    case kFlags_deoptimize: {
+      // Assemble a conditional eager deoptimization after this instruction.
+      InstructionOperandConverter i(this, instr);
+      size_t frame_state_offset = MiscField::decode(instr->opcode());
+      DeoptimizationExit* const exit =
+          AddDeoptimizationExit(instr, frame_state_offset);
+      Label continue_label;
+      BranchInfo branch;
+      branch.condition = condition;
+      branch.true_label = exit->label();
+      branch.false_label = &continue_label;
+      branch.fallthru = true;
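+      // Branch to the out-of-line deoptimization exit if the condition holds,
+      // and fall through to {continue_label} otherwise.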
+      // Assemble architecture-specific branch.
+      AssembleArchBranch(instr, &branch);
+      masm()->bind(&continue_label);
+      break;
     }
-    BranchInfo branch;
-    branch.condition = condition;
-    branch.true_label = GetLabel(true_rpo);
-    branch.false_label = GetLabel(false_rpo);
-    branch.fallthru = IsNextInAssemblyOrder(false_rpo);
-    // Assemble architecture-specific branch.
-    AssembleArchBranch(instr, &branch);
-  } else if (mode == kFlags_set) {
-    // Assemble a boolean materialization after this instruction.
-    AssembleArchBoolean(instr, condition);
+    case kFlags_set: {
+      // Assemble a boolean materialization after this instruction.
+      AssembleArchBoolean(instr, condition);
+      break;
+    }
+    case kFlags_none: {
+      break;
+    }
   }
 }
 
@@ -595,6 +654,9 @@
           shared_info_id,
           static_cast<unsigned int>(descriptor->parameters_count()));
       break;
+    case FrameStateType::kTailCallerFunction:
+      translation->BeginTailCallerFrame(shared_info_id);
+      break;
     case FrameStateType::kConstructStub:
       translation->BeginConstructStubFrame(
           shared_info_id,
@@ -714,15 +776,22 @@
   last_lazy_deopt_pc_ = masm()->pc_offset();
 }
 
+DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
+    Instruction* instr, size_t frame_state_offset) {
+  int const deoptimization_id = BuildTranslation(
+      instr, -1, frame_state_offset, OutputFrameStateCombine::Ignore());
+  DeoptimizationExit* const exit =
+      new (zone()) DeoptimizationExit(deoptimization_id);
+  deoptimization_exits_.push_back(exit);
+  return exit;
+}
 
 int CodeGenerator::TailCallFrameStackSlotDelta(int stack_param_delta) {
-  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-  int spill_slots = frame()->GetSpillSlotCount();
-  bool has_frame = descriptor->IsJSFunctionCall() || spill_slots > 0;
   // Leave the PC on the stack on platforms whose ABI keeps it there.
   int pc_slots = V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0;
-  int sp_slot_delta =
-      has_frame ? (frame()->GetTotalFrameSlotCount() - pc_slots) : 0;
+  int sp_slot_delta = frame_access_state()->has_frame()
+                          ? (frame()->GetTotalFrameSlotCount() - pc_slots)
+                          : 0;
   // Discard only slots that won't be used by new parameters.
   sp_slot_delta += stack_param_delta;
   return sp_slot_delta;
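
A worked example with illustrative values: on a target that keeps the return address on the stack (pc_slots == 1), a frame with GetTotalFrameSlotCount() == 5 and a tail call passing two fewer stack parameters (stack_param_delta == -2) yields sp_slot_delta = (5 - 1) + (-2) = 2, so the SP moves by two pointer-sized slots; without a frame, only the parameter delta remains.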
diff --git a/src/compiler/code-generator.h b/src/compiler/code-generator.h
index 70bf81f..b82181c 100644
--- a/src/compiler/code-generator.h
+++ b/src/compiler/code-generator.h
@@ -16,6 +16,7 @@
 namespace compiler {
 
 // Forward declarations.
+class DeoptimizationExit;
 class FrameAccessState;
 class Linkage;
 class OutOfLineCode;
@@ -76,15 +77,18 @@
 
   // Check if a heap object can be materialized by loading from the frame, which
   // is usually way cheaper than materializing the actual heap object constant.
-  bool IsMaterializableFromFrame(Handle<HeapObject> object, int* offset_return);
+  bool IsMaterializableFromFrame(Handle<HeapObject> object, int* slot_return);
   // Check if a heap object can be materialized by loading from a heap root,
   // which is cheaper on some platforms than materializing the actual heap
   // object constant.
   bool IsMaterializableFromRoot(Handle<HeapObject> object,
                                 Heap::RootListIndex* index_return);
 
+  // Assemble instructions for the specified block.
+  void AssembleBlock(const InstructionBlock* block);
+
   // Assemble code for the specified instruction.
-  void AssembleInstruction(Instruction* instr);
+  void AssembleInstruction(Instruction* instr, const InstructionBlock* block);
   void AssembleSourcePosition(Instruction* instr);
   void AssembleGaps(Instruction* instr);
 
@@ -105,6 +109,9 @@
   // Generates an architecture-specific, descriptor-specific prologue
   // to set up a stack frame.
   void AssemblePrologue();
+
+  void AssembleSetupStackPointer();
+
   // Generates an architecture-specific, descriptor-specific return sequence
   // to tear down a stack frame.
   void AssembleReturn();
@@ -112,9 +119,15 @@
   // Generates code to deconstruct the caller's frame, including arguments.
   void AssembleDeconstructActivationRecord(int stack_param_delta);
 
+  void AssembleDeconstructFrame();
+
   // Generates code to manipulate the stack in preparation for a tail call.
   void AssemblePrepareTailCall(int stack_param_delta);
 
+  // Generates code to pop the current frame if it is an arguments adaptor
+  // frame.
+  void AssemblePopArgumentsAdaptorFrame(Register args_reg, Register scratch1,
+                                        Register scratch2, Register scratch3);
+
   // ===========================================================================
   // ============== Architecture-specific gap resolver methods. ================
   // ===========================================================================
@@ -144,10 +157,10 @@
   void RecordCallPosition(Instruction* instr);
   void PopulateDeoptimizationData(Handle<Code> code);
   int DefineDeoptimizationLiteral(Handle<Object> literal);
-  FrameStateDescriptor* GetFrameStateDescriptor(
-      Instruction* instr, size_t frame_access_state_offset);
+  FrameStateDescriptor* GetFrameStateDescriptor(Instruction* instr,
+                                                size_t frame_state_offset);
   int BuildTranslation(Instruction* instr, int pc_offset,
-                       size_t frame_access_state_offset,
+                       size_t frame_state_offset,
                        OutputFrameStateCombine state_combine);
   void BuildTranslationForFrameStateDescriptor(
       FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
@@ -165,6 +178,9 @@
   void EnsureSpaceForLazyDeopt();
   void MarkLazyDeoptSite();
 
+  DeoptimizationExit* AddDeoptimizationExit(Instruction* instr,
+                                            size_t frame_state_offset);
+
   // Converts the delta in the number of stack parameters passed from a tail
   // caller to the callee into the distance (in pointers) the SP must be
   // adjusted, taking frame elision and other relevant factors into
@@ -210,6 +226,7 @@
   GapResolver resolver_;
   SafepointTableBuilder safepoints_;
   ZoneVector<HandlerInfo> handlers_;
+  ZoneDeque<DeoptimizationExit*> deoptimization_exits_;
   ZoneDeque<DeoptimizationState*> deoptimization_states_;
   ZoneDeque<Handle<Object>> deoptimization_literals_;
   size_t inlined_function_count_;
diff --git a/src/compiler/code-stub-assembler.cc b/src/compiler/code-stub-assembler.cc
index 45f47d3..bbb4d63 100644
--- a/src/compiler/code-stub-assembler.cc
+++ b/src/compiler/code-stub-assembler.cc
@@ -28,12 +28,29 @@
                                      const CallInterfaceDescriptor& descriptor,
                                      Code::Flags flags, const char* name,
                                      size_t result_size)
-    : raw_assembler_(new RawMachineAssembler(
-          isolate, new (zone) Graph(zone),
+    : CodeStubAssembler(
+          isolate, zone,
           Linkage::GetStubCallDescriptor(
               isolate, zone, descriptor, descriptor.GetStackParameterCount(),
               CallDescriptor::kNoFlags, Operator::kNoProperties,
-              MachineType::AnyTagged(), result_size))),
+              MachineType::AnyTagged(), result_size),
+          flags, name) {}
+
+CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
+                                     int parameter_count, Code::Flags flags,
+                                     const char* name)
+    : CodeStubAssembler(isolate, zone, Linkage::GetJSCallDescriptor(
+                                           zone, false, parameter_count,
+                                           CallDescriptor::kNoFlags),
+                        flags, name) {}
+
+CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
+                                     CallDescriptor* call_descriptor,
+                                     Code::Flags flags, const char* name)
+    : raw_assembler_(new RawMachineAssembler(
+          isolate, new (zone) Graph(zone), call_descriptor,
+          MachineType::PointerRepresentation(),
+          InstructionSelector::SupportedMachineOperatorFlags())),
       flags_(flags),
       name_(name),
       code_generated_(false),
@@ -72,6 +89,9 @@
   return raw_assembler_->NumberConstant(value);
 }
 
+Node* CodeStubAssembler::SmiConstant(Smi* value) {
+  return IntPtrConstant(bit_cast<intptr_t>(value));
+}
 
 Node* CodeStubAssembler::HeapConstant(Handle<HeapObject> object) {
   return raw_assembler_->HeapConstant(object);
@@ -86,11 +106,30 @@
   return raw_assembler_->ExternalConstant(address);
 }
 
+Node* CodeStubAssembler::Float64Constant(double value) {
+  return raw_assembler_->Float64Constant(value);
+}
+
+Node* CodeStubAssembler::BooleanMapConstant() {
+  return HeapConstant(isolate()->factory()->boolean_map());
+}
+
+Node* CodeStubAssembler::HeapNumberMapConstant() {
+  return HeapConstant(isolate()->factory()->heap_number_map());
+}
+
+Node* CodeStubAssembler::NullConstant() {
+  return LoadRoot(Heap::kNullValueRootIndex);
+}
+
+Node* CodeStubAssembler::UndefinedConstant() {
+  return LoadRoot(Heap::kUndefinedValueRootIndex);
+}
+
 Node* CodeStubAssembler::Parameter(int value) {
   return raw_assembler_->Parameter(value);
 }
 
-
 void CodeStubAssembler::Return(Node* value) {
   return raw_assembler_->Return(value);
 }
@@ -112,19 +151,253 @@
 }
 
 Node* CodeStubAssembler::SmiShiftBitsConstant() {
-  return Int32Constant(kSmiShiftSize + kSmiTagSize);
+  return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
 }
 
+Node* CodeStubAssembler::Float64Round(Node* x) {
+  Node* one = Float64Constant(1.0);
+  Node* one_half = Float64Constant(0.5);
+
+  Variable var_x(this, MachineRepresentation::kFloat64);
+  Label return_x(this);
+
+  // Round up {x} towards Infinity.
+  var_x.Bind(Float64Ceil(x));
+
+  GotoIf(Float64LessThanOrEqual(Float64Sub(var_x.value(), one_half), x),
+         &return_x);
+  var_x.Bind(Float64Sub(var_x.value(), one));
+  Goto(&return_x);
+
+  Bind(&return_x);
+  return var_x.value();
+}
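
Float64Round thus implements round-half-towards-+Infinity in terms of Float64Ceil. A host-side C++ sketch of the same scheme, assuming IEEE-754 doubles and a correct ceil:

#include <cmath>

// Round half toward +Infinity: round up first, then step back down by one
// when the rounded-up value overshot x by more than half a unit.
double RoundHalfUp(double x) {
  double r = std::ceil(x);
  if (r - 0.5 <= x) return r;  // x lies within [r - 0.5, r]: keep r
  return r - 1.0;              // otherwise x was closer to r - 1
}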
+
+Node* CodeStubAssembler::Float64Ceil(Node* x) {
+  if (raw_assembler_->machine()->Float64RoundUp().IsSupported()) {
+    return raw_assembler_->Float64RoundUp(x);
+  }
+
+  Node* one = Float64Constant(1.0);
+  Node* zero = Float64Constant(0.0);
+  Node* two_52 = Float64Constant(4503599627370496.0E0);
+  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
+
+  Variable var_x(this, MachineRepresentation::kFloat64);
+  Label return_x(this), return_minus_x(this);
+  var_x.Bind(x);
+
+  // Check if {x} is greater than zero.
+  Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
+  Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
+         &if_xnotgreaterthanzero);
+
+  Bind(&if_xgreaterthanzero);
+  {
+    // Just return {x} unless it's in the range ]0,2^52[.
+    GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
+
+    // Round positive {x} towards Infinity.
+    var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
+    GotoUnless(Float64LessThan(var_x.value(), x), &return_x);
+    var_x.Bind(Float64Add(var_x.value(), one));
+    Goto(&return_x);
+  }
+
+  Bind(&if_xnotgreaterthanzero);
+  {
+    // Just return {x} unless it's in the range ]-2^52,0[.
+    GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
+    GotoUnless(Float64LessThan(x, zero), &return_x);
+
+    // Round negated {x} towards Infinity and return the result negated.
+    Node* minus_x = Float64Neg(x);
+    var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
+    GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
+    var_x.Bind(Float64Sub(var_x.value(), one));
+    Goto(&return_minus_x);
+  }
+
+  Bind(&return_minus_x);
+  var_x.Bind(Float64Neg(var_x.value()));
+  Goto(&return_x);
+
+  Bind(&return_x);
+  return var_x.value();
+}
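
The fallback path uses the classic 2^52 trick: for |x| < 2^52, (2^52 + x) - 2^52 leaves the integer nearest to x, since doubles of that magnitude carry no fractional bits. A minimal host-side sketch of the positive branch, assuming round-to-nearest IEEE-754 arithmetic:

// Valid for 0 < x < 2^52.
double CeilPositive(double x) {
  const double kTwoPow52 = 4503599627370496.0;   // 2^52
  double rounded = (kTwoPow52 + x) - kTwoPow52;  // integer nearest to x
  if (rounded < x) rounded += 1.0;               // rounded down: bump up
  return rounded;
}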
+
+Node* CodeStubAssembler::Float64Floor(Node* x) {
+  if (raw_assembler_->machine()->Float64RoundDown().IsSupported()) {
+    return raw_assembler_->Float64RoundDown(x);
+  }
+
+  Node* one = Float64Constant(1.0);
+  Node* zero = Float64Constant(0.0);
+  Node* two_52 = Float64Constant(4503599627370496.0E0);
+  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
+
+  Variable var_x(this, MachineRepresentation::kFloat64);
+  Label return_x(this), return_minus_x(this);
+  var_x.Bind(x);
+
+  // Check if {x} is greater than zero.
+  Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
+  Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
+         &if_xnotgreaterthanzero);
+
+  Bind(&if_xgreaterthanzero);
+  {
+    // Just return {x} unless it's in the range ]0,2^52[.
+    GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
+
+    // Round positive {x} towards -Infinity.
+    var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
+    GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
+    var_x.Bind(Float64Sub(var_x.value(), one));
+    Goto(&return_x);
+  }
+
+  Bind(&if_xnotgreaterthanzero);
+  {
+    // Just return {x} unless it's in the range ]-2^52,0[.
+    GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
+    GotoUnless(Float64LessThan(x, zero), &return_x);
+
+    // Round negated {x} towards -Infinity and return the result negated.
+    Node* minus_x = Float64Neg(x);
+    var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
+    GotoUnless(Float64LessThan(var_x.value(), minus_x), &return_minus_x);
+    var_x.Bind(Float64Add(var_x.value(), one));
+    Goto(&return_minus_x);
+  }
+
+  Bind(&return_minus_x);
+  var_x.Bind(Float64Neg(var_x.value()));
+  Goto(&return_x);
+
+  Bind(&return_x);
+  return var_x.value();
+}
+
+Node* CodeStubAssembler::Float64Trunc(Node* x) {
+  if (raw_assembler_->machine()->Float64RoundTruncate().IsSupported()) {
+    return raw_assembler_->Float64RoundTruncate(x);
+  }
+
+  Node* one = Float64Constant(1.0);
+  Node* zero = Float64Constant(0.0);
+  Node* two_52 = Float64Constant(4503599627370496.0E0);
+  Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
+
+  Variable var_x(this, MachineRepresentation::kFloat64);
+  Label return_x(this), return_minus_x(this);
+  var_x.Bind(x);
+
+  // Check if {x} is greater than 0.
+  Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
+  Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
+         &if_xnotgreaterthanzero);
+
+  Bind(&if_xgreaterthanzero);
+  {
+    if (raw_assembler_->machine()->Float64RoundDown().IsSupported()) {
+      var_x.Bind(raw_assembler_->Float64RoundDown(x));
+    } else {
+      // Just return {x} unless it's in the range ]0,2^52[.
+      GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
+
+      // Round positive {x} towards -Infinity.
+      var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
+      GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
+      var_x.Bind(Float64Sub(var_x.value(), one));
+    }
+    Goto(&return_x);
+  }
+
+  Bind(&if_xnotgreaterthanzero);
+  {
+    if (raw_assembler_->machine()->Float64RoundUp().IsSupported()) {
+      var_x.Bind(raw_assembler_->Float64RoundUp(x));
+      Goto(&return_x);
+    } else {
+      // Just return {x} unless it's in the range ]-2^52,0[.
+      GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
+      GotoUnless(Float64LessThan(x, zero), &return_x);
+
+      // Round negated {x} towards -Infinity and return the result negated.
+      Node* minus_x = Float64Neg(x);
+      var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
+      GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
+      var_x.Bind(Float64Sub(var_x.value(), one));
+      Goto(&return_minus_x);
+    }
+  }
+
+  Bind(&return_minus_x);
+  var_x.Bind(Float64Neg(var_x.value()));
+  Goto(&return_x);
+
+  Bind(&return_x);
+  return var_x.value();
+}
 
 Node* CodeStubAssembler::SmiTag(Node* value) {
   return raw_assembler_->WordShl(value, SmiShiftBitsConstant());
 }
 
-
 Node* CodeStubAssembler::SmiUntag(Node* value) {
   return raw_assembler_->WordSar(value, SmiShiftBitsConstant());
 }
 
+Node* CodeStubAssembler::SmiToWord32(Node* value) {
+  Node* result = raw_assembler_->WordSar(value, SmiShiftBitsConstant());
+  if (raw_assembler_->machine()->Is64()) {
+    result = raw_assembler_->TruncateInt64ToInt32(result);
+  }
+  return result;
+}
+
+Node* CodeStubAssembler::SmiToFloat64(Node* value) {
+  return ChangeInt32ToFloat64(SmiUntag(value));
+}
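
These helpers rely on the smi encoding: a smi is the payload shifted left by kSmiShiftSize + kSmiTagSize with a zero tag bit. A sketch for a 64-bit target, where that shift is 31 + 1 == 32 and the payload occupies the upper word; because tagging is a plain shift, tagged values add, subtract, and compare directly, which is why the Smi operations below simply delegate to their IntPtr counterparts:

#include <cstdint>

int64_t SmiTag64(int32_t value) {
  // Shift in the unsigned domain so negative payloads stay well defined.
  return static_cast<int64_t>(static_cast<uint64_t>(value) << 32);
}

int32_t SmiUntag64(int64_t smi) {
  return static_cast<int32_t>(smi >> 32);  // arithmetic shift assumed
}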
+
+Node* CodeStubAssembler::SmiAdd(Node* a, Node* b) { return IntPtrAdd(a, b); }
+
+Node* CodeStubAssembler::SmiAddWithOverflow(Node* a, Node* b) {
+  return IntPtrAddWithOverflow(a, b);
+}
+
+Node* CodeStubAssembler::SmiSub(Node* a, Node* b) { return IntPtrSub(a, b); }
+
+Node* CodeStubAssembler::SmiSubWithOverflow(Node* a, Node* b) {
+  return IntPtrSubWithOverflow(a, b);
+}
+
+Node* CodeStubAssembler::SmiEqual(Node* a, Node* b) { return WordEqual(a, b); }
+
+Node* CodeStubAssembler::SmiLessThan(Node* a, Node* b) {
+  return IntPtrLessThan(a, b);
+}
+
+Node* CodeStubAssembler::SmiLessThanOrEqual(Node* a, Node* b) {
+  return IntPtrLessThanOrEqual(a, b);
+}
+
+Node* CodeStubAssembler::SmiMin(Node* a, Node* b) {
+  // TODO(bmeurer): Consider using Select once available.
+  Variable min(this, MachineRepresentation::kTagged);
+  Label if_a(this), if_b(this), join(this);
+  BranchIfSmiLessThan(a, b, &if_a, &if_b);
+  Bind(&if_a);
+  min.Bind(a);
+  Goto(&join);
+  Bind(&if_b);
+  min.Bind(b);
+  Goto(&join);
+  Bind(&join);
+  return min.value();
+}
+
 #define DEFINE_CODE_STUB_ASSEMBER_BINARY_OP(name)   \
   Node* CodeStubAssembler::name(Node* a, Node* b) { \
     return raw_assembler_->name(a, b);              \
@@ -132,56 +405,129 @@
 CODE_STUB_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_STUB_ASSEMBER_BINARY_OP)
 #undef DEFINE_CODE_STUB_ASSEMBER_BINARY_OP
 
-Node* CodeStubAssembler::ChangeInt32ToInt64(Node* value) {
-  return raw_assembler_->ChangeInt32ToInt64(value);
+Node* CodeStubAssembler::WordShl(Node* value, int shift) {
+  return raw_assembler_->WordShl(value, IntPtrConstant(shift));
 }
 
-Node* CodeStubAssembler::WordShl(Node* value, int shift) {
-  return raw_assembler_->WordShl(value, Int32Constant(shift));
-}
+#define DEFINE_CODE_STUB_ASSEMBER_UNARY_OP(name) \
+  Node* CodeStubAssembler::name(Node* a) { return raw_assembler_->name(a); }
+CODE_STUB_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_STUB_ASSEMBER_UNARY_OP)
+#undef DEFINE_CODE_STUB_ASSEMBER_UNARY_OP
 
 Node* CodeStubAssembler::WordIsSmi(Node* a) {
-  return WordEqual(raw_assembler_->WordAnd(a, Int32Constant(kSmiTagMask)),
-                   Int32Constant(0));
+  return WordEqual(raw_assembler_->WordAnd(a, IntPtrConstant(kSmiTagMask)),
+                   IntPtrConstant(0));
 }
 
-Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset) {
-  return raw_assembler_->Load(MachineType::AnyTagged(), buffer,
-                              IntPtrConstant(offset));
+Node* CodeStubAssembler::WordIsPositiveSmi(Node* a) {
+  return WordEqual(
+      raw_assembler_->WordAnd(a, IntPtrConstant(kSmiTagMask | kSmiSignMask)),
+      IntPtrConstant(0));
 }
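
Both predicates reduce to one mask-and-compare against the tag bits. Host-side equivalent, assuming kSmiTagMask == 1 and kSmiSignMask == the word's sign bit:

#include <cstdint>

bool WordIsSmiSketch(intptr_t word) { return (word & 1) == 0; }

bool WordIsPositiveSmiSketch(intptr_t word) {
  const uintptr_t kSignBit = uintptr_t{1} << (sizeof(uintptr_t) * 8 - 1);
  // Tag bit clear and sign bit clear, tested in a single compare.
  return (static_cast<uintptr_t>(word) & (kSignBit | 1)) == 0;
}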
 
-Node* CodeStubAssembler::LoadObjectField(Node* object, int offset) {
-  return raw_assembler_->Load(MachineType::AnyTagged(), object,
+Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
+                                          MachineType rep) {
+  return raw_assembler_->Load(rep, buffer, IntPtrConstant(offset));
+}
+
+Node* CodeStubAssembler::LoadObjectField(Node* object, int offset,
+                                         MachineType rep) {
+  return raw_assembler_->Load(rep, object,
                               IntPtrConstant(offset - kHeapObjectTag));
 }
 
+Node* CodeStubAssembler::LoadHeapNumberValue(Node* object) {
+  return Load(MachineType::Float64(), object,
+              IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::StoreHeapNumberValue(Node* object, Node* value) {
+  return StoreNoWriteBarrier(
+      MachineRepresentation::kFloat64, object,
+      IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag), value);
+}
+
+Node* CodeStubAssembler::TruncateHeapNumberValueToWord32(Node* object) {
+  Node* value = LoadHeapNumberValue(object);
+  return raw_assembler_->TruncateFloat64ToInt32(TruncationMode::kJavaScript,
+                                                value);
+}
+
+Node* CodeStubAssembler::LoadMapBitField(Node* map) {
+  return Load(MachineType::Uint8(), map,
+              IntPtrConstant(Map::kBitFieldOffset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadMapBitField2(Node* map) {
+  return Load(MachineType::Uint8(), map,
+              IntPtrConstant(Map::kBitField2Offset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadMapBitField3(Node* map) {
+  return Load(MachineType::Uint32(), map,
+              IntPtrConstant(Map::kBitField3Offset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadMapInstanceType(Node* map) {
+  return Load(MachineType::Uint8(), map,
+              IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadMapDescriptors(Node* map) {
+  return LoadObjectField(map, Map::kDescriptorsOffset);
+}
+
+Node* CodeStubAssembler::LoadNameHash(Node* name) {
+  return Load(MachineType::Uint32(), name,
+              IntPtrConstant(Name::kHashFieldOffset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadFixedArrayElementInt32Index(
+    Node* object, Node* int32_index, int additional_offset) {
+  Node* header_size = IntPtrConstant(additional_offset +
+                                     FixedArray::kHeaderSize - kHeapObjectTag);
+  Node* scaled_index = WordShl(int32_index, IntPtrConstant(kPointerSizeLog2));
+  Node* offset = IntPtrAdd(scaled_index, header_size);
+  return Load(MachineType::AnyTagged(), object, offset);
+}
+
 Node* CodeStubAssembler::LoadFixedArrayElementSmiIndex(Node* object,
                                                        Node* smi_index,
                                                        int additional_offset) {
-  Node* header_size = raw_assembler_->Int32Constant(
-      additional_offset + FixedArray::kHeaderSize - kHeapObjectTag);
+  int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
+  Node* header_size = IntPtrConstant(additional_offset +
+                                     FixedArray::kHeaderSize - kHeapObjectTag);
   Node* scaled_index =
-      (kSmiShiftSize == 0)
-          ? raw_assembler_->Word32Shl(
-                smi_index, Int32Constant(kPointerSizeLog2 - kSmiTagSize))
-          : raw_assembler_->Word32Shl(SmiUntag(smi_index),
-                                      Int32Constant(kPointerSizeLog2));
-  Node* offset = raw_assembler_->Int32Add(scaled_index, header_size);
-  return raw_assembler_->Load(MachineType::AnyTagged(), object, offset);
+      (kSmiShiftBits > kPointerSizeLog2)
+          ? WordSar(smi_index, IntPtrConstant(kSmiShiftBits - kPointerSizeLog2))
+          : WordShl(smi_index,
+                    IntPtrConstant(kPointerSizeLog2 - kSmiShiftBits));
+  Node* offset = IntPtrAdd(scaled_index, header_size);
+  return Load(MachineType::AnyTagged(), object, offset);
 }
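
Worked example of the new scaling: on a 64-bit target kSmiShiftBits == 32 and kPointerSizeLog2 == 3, so a tagged index (payload << 32) turns into the byte offset payload * 8 with a single arithmetic shift right by 32 - 3 == 29; on a 32-bit target kSmiShiftBits == 1 and kPointerSizeLog2 == 2, so the same offset is a shift left by 2 - 1 == 1. In neither case is the smi fully untagged first.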
 
 Node* CodeStubAssembler::LoadFixedArrayElementConstantIndex(Node* object,
                                                             int index) {
-  Node* offset = raw_assembler_->Int32Constant(
-      FixedArray::kHeaderSize - kHeapObjectTag + index * kPointerSize);
+  Node* offset = IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag +
+                                index * kPointerSize);
   return raw_assembler_->Load(MachineType::AnyTagged(), object, offset);
 }
 
+Node* CodeStubAssembler::StoreFixedArrayElementNoWriteBarrier(Node* object,
+                                                              Node* index,
+                                                              Node* value) {
+  Node* offset =
+      IntPtrAdd(WordShl(index, IntPtrConstant(kPointerSizeLog2)),
+                IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag));
+  return StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset,
+                             value);
+}
+
 Node* CodeStubAssembler::LoadRoot(Heap::RootListIndex root_index) {
   if (isolate()->heap()->RootCanBeTreatedAsConstant(root_index)) {
     Handle<Object> root = isolate()->heap()->root_handle(root_index);
     if (root->IsSmi()) {
-      return Int32Constant(Handle<Smi>::cast(root)->value());
+      return SmiConstant(Smi::cast(*root));
     } else {
       return HeapConstant(Handle<HeapObject>::cast(root));
     }
@@ -197,6 +543,135 @@
   return nullptr;
 }
 
+Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
+                                              AllocationFlags flags,
+                                              Node* top_address,
+                                              Node* limit_address) {
+  Node* top = Load(MachineType::Pointer(), top_address);
+  Node* limit = Load(MachineType::Pointer(), limit_address);
+
+  // If there's not enough space, call the runtime.
+  RawMachineLabel runtime_call(RawMachineLabel::kDeferred), no_runtime_call,
+      merge_runtime;
+  raw_assembler_->Branch(
+      raw_assembler_->IntPtrLessThan(IntPtrSub(limit, top), size_in_bytes),
+      &runtime_call, &no_runtime_call);
+
+  raw_assembler_->Bind(&runtime_call);
+  // AllocateInTargetSpace does not use the context.
+  Node* context = IntPtrConstant(0);
+  Node* runtime_flags = SmiTag(Int32Constant(
+      AllocateDoubleAlignFlag::encode(false) |
+      AllocateTargetSpace::encode(flags & kPretenured
+                                      ? AllocationSpace::OLD_SPACE
+                                      : AllocationSpace::NEW_SPACE)));
+  Node* runtime_result = CallRuntime(Runtime::kAllocateInTargetSpace, context,
+                                     SmiTag(size_in_bytes), runtime_flags);
+  raw_assembler_->Goto(&merge_runtime);
+
+  // When there is enough space, return `top' and bump it up.
+  raw_assembler_->Bind(&no_runtime_call);
+  Node* no_runtime_result = top;
+  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
+                      IntPtrAdd(top, size_in_bytes));
+  no_runtime_result =
+      IntPtrAdd(no_runtime_result, IntPtrConstant(kHeapObjectTag));
+  raw_assembler_->Goto(&merge_runtime);
+
+  raw_assembler_->Bind(&merge_runtime);
+  return raw_assembler_->Phi(MachineType::PointerRepresentation(),
+                             runtime_result, no_runtime_result);
+}
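
The fast path above is a standard bump-pointer allocation. A host-side sketch with the runtime fallback elided (kHeapObjectTag == 1; other names illustrative):

#include <cstddef>

char* BumpAllocate(char** top, char* limit, size_t size_in_bytes) {
  if (static_cast<size_t>(limit - *top) < size_in_bytes) {
    return nullptr;  // not enough space: the stub calls into the runtime
  }
  char* result = *top;
  *top += size_in_bytes;  // bump the allocation top
  return result + 1;      // tag the address as a heap object pointer
}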
+
+Node* CodeStubAssembler::AllocateRawAligned(Node* size_in_bytes,
+                                            AllocationFlags flags,
+                                            Node* top_address,
+                                            Node* limit_address) {
+  Node* top = Load(MachineType::Pointer(), top_address);
+  Node* limit = Load(MachineType::Pointer(), limit_address);
+  Node* adjusted_size = size_in_bytes;
+  if (flags & kDoubleAlignment) {
+    // TODO(epertoso): Simd128 alignment.
+    RawMachineLabel aligned, not_aligned, merge;
+    raw_assembler_->Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)),
+                           &not_aligned, &aligned);
+
+    raw_assembler_->Bind(&not_aligned);
+    Node* not_aligned_size =
+        IntPtrAdd(size_in_bytes, IntPtrConstant(kPointerSize));
+    raw_assembler_->Goto(&merge);
+
+    raw_assembler_->Bind(&aligned);
+    raw_assembler_->Goto(&merge);
+
+    raw_assembler_->Bind(&merge);
+    adjusted_size = raw_assembler_->Phi(MachineType::PointerRepresentation(),
+                                        not_aligned_size, adjusted_size);
+  }
+
+  Node* address = AllocateRawUnaligned(adjusted_size, kNone, top, limit);
+
+  RawMachineLabel needs_filler, doesnt_need_filler, merge_address;
+  raw_assembler_->Branch(
+      raw_assembler_->IntPtrEqual(adjusted_size, size_in_bytes),
+      &doesnt_need_filler, &needs_filler);
+
+  raw_assembler_->Bind(&needs_filler);
+  // Store a filler and increase the address by kPointerSize.
+  // TODO(epertoso): this code assumes that we only align to kDoubleSize. Change
+  // it when Simd128 alignment is supported.
+  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top,
+                      LoadRoot(Heap::kOnePointerFillerMapRootIndex));
+  Node* address_with_filler = IntPtrAdd(address, IntPtrConstant(kPointerSize));
+  raw_assembler_->Goto(&merge_address);
+
+  raw_assembler_->Bind(&doesnt_need_filler);
+  Node* address_without_filler = address;
+  raw_assembler_->Goto(&merge_address);
+
+  raw_assembler_->Bind(&merge_address);
+  address = raw_assembler_->Phi(MachineType::PointerRepresentation(),
+                                address_with_filler, address_without_filler);
+  // Update the top.
+  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
+                      IntPtrAdd(top, adjusted_size));
+  return address;
+}
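
The alignment fix-up requests one extra pointer-sized slot whenever the allocation top is misaligned and plants a one-pointer filler there, so the object proper starts double-aligned. Sketch for a 32-bit heap, where kPointerSize == 4 and doubles need 8-byte alignment:

#include <cstdint>

uintptr_t DoubleAlignedStart(uintptr_t top) {
  // A one-pointer filler occupies [top, top + 4) when top is misaligned.
  return (top & 7) != 0 ? top + 4 : top;
}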
+
+Node* CodeStubAssembler::Allocate(int size_in_bytes, AllocationFlags flags) {
+  bool const new_space = !(flags & kPretenured);
+  Node* top_address = ExternalConstant(
+      new_space
+          ? ExternalReference::new_space_allocation_top_address(isolate())
+          : ExternalReference::old_space_allocation_top_address(isolate()));
+  Node* limit_address = ExternalConstant(
+      new_space
+          ? ExternalReference::new_space_allocation_limit_address(isolate())
+          : ExternalReference::old_space_allocation_limit_address(isolate()));
+
+#ifdef V8_HOST_ARCH_32_BIT
+  if (flags & kDoubleAlignment) {
+    return AllocateRawAligned(IntPtrConstant(size_in_bytes), flags, top_address,
+                              limit_address);
+  }
+#endif
+
+  return AllocateRawUnaligned(IntPtrConstant(size_in_bytes), flags, top_address,
+                              limit_address);
+}
+
+Node* CodeStubAssembler::AllocateHeapNumber() {
+  Node* result = Allocate(HeapNumber::kSize, kNone);
+  StoreMapNoWriteBarrier(result, HeapNumberMapConstant());
+  return result;
+}
+
+Node* CodeStubAssembler::AllocateHeapNumberWithValue(Node* value) {
+  Node* result = AllocateHeapNumber();
+  StoreHeapNumberValue(result, value);
+  return result;
+}
+
 Node* CodeStubAssembler::Load(MachineType rep, Node* base) {
   return raw_assembler_->Load(rep, base);
 }
@@ -230,6 +705,232 @@
   return raw_assembler_->Projection(index, value);
 }
 
+Node* CodeStubAssembler::LoadMap(Node* object) {
+  return LoadObjectField(object, HeapObject::kMapOffset);
+}
+
+Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) {
+  return StoreNoWriteBarrier(
+      MachineRepresentation::kTagged, object,
+      IntPtrConstant(HeapNumber::kMapOffset - kHeapObjectTag), map);
+}
+
+Node* CodeStubAssembler::LoadInstanceType(Node* object) {
+  return LoadMapInstanceType(LoadMap(object));
+}
+
+Node* CodeStubAssembler::LoadElements(Node* object) {
+  return LoadObjectField(object, JSObject::kElementsOffset);
+}
+
+Node* CodeStubAssembler::LoadFixedArrayBaseLength(Node* array) {
+  return LoadObjectField(array, FixedArrayBase::kLengthOffset);
+}
+
+Node* CodeStubAssembler::BitFieldDecode(Node* word32, uint32_t shift,
+                                        uint32_t mask) {
+  return raw_assembler_->Word32Shr(
+      raw_assembler_->Word32And(word32, raw_assembler_->Int32Constant(mask)),
+      raw_assembler_->Int32Constant(shift));
+}
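
BitFieldDecode is the usual mask-then-shift extraction. Host-side equivalent:

#include <cstdint>

uint32_t BitFieldDecodeSketch(uint32_t word32, uint32_t shift, uint32_t mask) {
  return (word32 & mask) >> shift;  // e.g. mask 0xF0, shift 4: 0xAB -> 0xA
}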
+
+Node* CodeStubAssembler::ChangeFloat64ToTagged(Node* value) {
+  Node* value32 = raw_assembler_->TruncateFloat64ToInt32(
+      TruncationMode::kRoundToZero, value);
+  Node* value64 = ChangeInt32ToFloat64(value32);
+
+  Label if_valueisint32(this), if_valueisheapnumber(this), if_join(this);
+
+  Label if_valueisequal(this), if_valueisnotequal(this);
+  Branch(Float64Equal(value, value64), &if_valueisequal, &if_valueisnotequal);
+  Bind(&if_valueisequal);
+  {
+    Label if_valueiszero(this), if_valueisnotzero(this);
+    Branch(Float64Equal(value, Float64Constant(0.0)), &if_valueiszero,
+           &if_valueisnotzero);
+
+    Bind(&if_valueiszero);
+    BranchIfInt32LessThan(raw_assembler_->Float64ExtractHighWord32(value),
+                          Int32Constant(0), &if_valueisheapnumber,
+                          &if_valueisint32);
+
+    Bind(&if_valueisnotzero);
+    Goto(&if_valueisint32);
+  }
+  Bind(&if_valueisnotequal);
+  Goto(&if_valueisheapnumber);
+
+  Variable var_result(this, MachineRepresentation::kTagged);
+  Bind(&if_valueisint32);
+  {
+    if (raw_assembler_->machine()->Is64()) {
+      Node* result = SmiTag(ChangeInt32ToInt64(value32));
+      var_result.Bind(result);
+      Goto(&if_join);
+    } else {
+      Node* pair = Int32AddWithOverflow(value32, value32);
+      Node* overflow = Projection(1, pair);
+      Label if_overflow(this, Label::kDeferred), if_notoverflow(this);
+      Branch(overflow, &if_overflow, &if_notoverflow);
+      Bind(&if_overflow);
+      Goto(&if_valueisheapnumber);
+      Bind(&if_notoverflow);
+      {
+        Node* result = Projection(0, pair);
+        var_result.Bind(result);
+        Goto(&if_join);
+      }
+    }
+  }
+  Bind(&if_valueisheapnumber);
+  {
+    Node* result = AllocateHeapNumberWithValue(value);
+    var_result.Bind(result);
+    Goto(&if_join);
+  }
+  Bind(&if_join);
+  return var_result.value();
+}
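
The smi-eligibility test above requires that the double round-trips through int32 unchanged, and it singles out -0.0, which compares equal to 0.0 but must be boxed as a HeapNumber because its sign bit lives in the high word. A host-side sketch of the predicate for a 64-bit target, where every int32 fits in a smi:

#include <cmath>
#include <cstdint>

bool IsSmiRepresentable(double value) {
  if (!(value >= INT32_MIN && value <= INT32_MAX)) return false;  // NaN too
  int32_t as_int = static_cast<int32_t>(value);
  if (static_cast<double>(as_int) != value) return false;  // has a fraction
  return !(value == 0.0 && std::signbit(value));           // reject -0.0
}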
+
+Node* CodeStubAssembler::ChangeInt32ToTagged(Node* value) {
+  if (raw_assembler_->machine()->Is64()) {
+    return SmiTag(ChangeInt32ToInt64(value));
+  }
+  Variable var_result(this, MachineRepresentation::kTagged);
+  Node* pair = Int32AddWithOverflow(value, value);
+  Node* overflow = Projection(1, pair);
+  Label if_overflow(this, Label::kDeferred), if_notoverflow(this),
+      if_join(this);
+  Branch(overflow, &if_overflow, &if_notoverflow);
+  Bind(&if_overflow);
+  {
+    Node* value64 = ChangeInt32ToFloat64(value);
+    Node* result = AllocateHeapNumberWithValue(value64);
+    var_result.Bind(result);
+  }
+  Goto(&if_join);
+  Bind(&if_notoverflow);
+  {
+    Node* result = Projection(0, pair);
+    var_result.Bind(result);
+  }
+  Goto(&if_join);
+  Bind(&if_join);
+  return var_result.value();
+}
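
On 32-bit targets tagging is value << 1, which the graph computes as value + value so that the addition's overflow flag doubles as the does-not-fit-in-31-bits test. A sketch using a GCC/Clang builtin:

#include <cstdint>

// Returns true and writes the tagged smi on success; false means the
// value needs a HeapNumber instead.
bool TagInt32(int32_t value, int32_t* smi_out) {
  return !__builtin_add_overflow(value, value, smi_out);
}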
+
+Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
+  // We might need to loop once due to ToNumber conversion.
+  Variable var_value(this, MachineRepresentation::kTagged),
+      var_result(this, MachineRepresentation::kFloat64);
+  Label loop(this, &var_value), done_loop(this, &var_result);
+  var_value.Bind(value);
+  Goto(&loop);
+  Bind(&loop);
+  {
+    // Load the current {value}.
+    value = var_value.value();
+
+    // Check if the {value} is a Smi or a HeapObject.
+    Label if_valueissmi(this), if_valueisnotsmi(this);
+    Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+
+    Bind(&if_valueissmi);
+    {
+      // Convert the Smi {value}.
+      var_result.Bind(SmiToFloat64(value));
+      Goto(&done_loop);
+    }
+
+    Bind(&if_valueisnotsmi);
+    {
+      // Check if {value} is a HeapNumber.
+      Label if_valueisheapnumber(this),
+          if_valueisnotheapnumber(this, Label::kDeferred);
+      Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
+             &if_valueisheapnumber, &if_valueisnotheapnumber);
+
+      Bind(&if_valueisheapnumber);
+      {
+        // Load the floating point value.
+        var_result.Bind(LoadHeapNumberValue(value));
+        Goto(&done_loop);
+      }
+
+      Bind(&if_valueisnotheapnumber);
+      {
+        // Convert the {value} to a Number first.
+        Callable callable = CodeFactory::NonNumberToNumber(isolate());
+        var_value.Bind(CallStub(callable, context, value));
+        Goto(&loop);
+      }
+    }
+  }
+  Bind(&done_loop);
+  return var_result.value();
+}
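
Shape of the loop above, in pseudocode; at most one extra pass is taken, since NonNumberToNumber always yields a Smi or HeapNumber (the same shape is reused by TruncateTaggedToWord32 below):

  for (;;) {
    if (IsSmi(value))        return SmiToFloat64(value);
    if (IsHeapNumber(value)) return LoadHeapNumberValue(value);
    value = NonNumberToNumber(context, value);  // guaranteed to be a Number
  }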
+
+Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
+  // We might need to loop once due to ToNumber conversion.
+  Variable var_value(this, MachineRepresentation::kTagged),
+      var_result(this, MachineRepresentation::kWord32);
+  Label loop(this, &var_value), done_loop(this, &var_result);
+  var_value.Bind(value);
+  Goto(&loop);
+  Bind(&loop);
+  {
+    // Load the current {value}.
+    value = var_value.value();
+
+    // Check if the {value} is a Smi or a HeapObject.
+    Label if_valueissmi(this), if_valueisnotsmi(this);
+    Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+
+    Bind(&if_valueissmi);
+    {
+      // Convert the Smi {value}.
+      var_result.Bind(SmiToWord32(value));
+      Goto(&done_loop);
+    }
+
+    Bind(&if_valueisnotsmi);
+    {
+      // Check if {value} is a HeapNumber.
+      Label if_valueisheapnumber(this),
+          if_valueisnotheapnumber(this, Label::kDeferred);
+      Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
+             &if_valueisheapnumber, &if_valueisnotheapnumber);
+
+      Bind(&if_valueisheapnumber);
+      {
+        // Truncate the floating point value.
+        var_result.Bind(TruncateHeapNumberValueToWord32(value));
+        Goto(&done_loop);
+      }
+
+      Bind(&if_valueisnotheapnumber);
+      {
+        // Convert the {value} to a Number first.
+        Callable callable = CodeFactory::NonNumberToNumber(isolate());
+        var_value.Bind(CallStub(callable, context, value));
+        Goto(&loop);
+      }
+    }
+  }
+  Bind(&done_loop);
+  return var_result.value();
+}
+
+void CodeStubAssembler::BranchIf(Node* condition, Label* if_true,
+                                 Label* if_false) {
+  Label if_condition_is_true(this), if_condition_is_false(this);
+  Branch(condition, &if_condition_is_true, &if_condition_is_false);
+  Bind(&if_condition_is_true);
+  Goto(if_true);
+  Bind(&if_condition_is_false);
+  Goto(if_false);
+}
+
 Node* CodeStubAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
                                Node** args) {
   CallPrologue();
@@ -290,6 +991,11 @@
 }
 
 Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+                                         Node* context) {
+  return raw_assembler_->TailCallRuntime0(function_id, context);
+}
+
+Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
                                          Node* context, Node* arg1) {
   return raw_assembler_->TailCallRuntime1(function_id, arg1, context);
 }
@@ -314,6 +1020,12 @@
                                           context);
 }
 
+Node* CodeStubAssembler::CallStub(Callable const& callable, Node* context,
+                                  Node* arg1, size_t result_size) {
+  Node* target = HeapConstant(callable.code());
+  return CallStub(callable.descriptor(), target, context, arg1, result_size);
+}
+
 Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
                                   Node* target, Node* context, Node* arg1,
                                   size_t result_size) {
@@ -401,12 +1113,28 @@
   return CallN(call_descriptor, target, args);
 }
 
-Node* CodeStubAssembler::TailCallStub(CodeStub& stub, Node** args) {
-  Node* code_target = HeapConstant(stub.GetCode());
-  CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), stub.GetCallInterfaceDescriptor(),
-      stub.GetStackParameterCount(), CallDescriptor::kSupportsTailCalls);
-  return raw_assembler_->TailCallN(descriptor, code_target, args);
+Node* CodeStubAssembler::TailCallStub(Callable const& callable, Node* context,
+                                      Node* arg1, Node* arg2,
+                                      size_t result_size) {
+  Node* target = HeapConstant(callable.code());
+  return TailCallStub(callable.descriptor(), target, context, arg1, arg2,
+                      result_size);
+}
+
+Node* CodeStubAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
+                                      Node* target, Node* context, Node* arg1,
+                                      Node* arg2, size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  Node** args = zone()->NewArray<Node*>(3);
+  args[0] = arg1;
+  args[1] = arg2;
+  args[2] = context;
+
+  return raw_assembler_->TailCallN(call_descriptor, target, args);
 }
 
 Node* CodeStubAssembler::TailCall(
@@ -425,6 +1153,18 @@
   raw_assembler_->Goto(label->label_);
 }
 
+void CodeStubAssembler::GotoIf(Node* condition, Label* true_label) {
+  Label false_label(this);
+  Branch(condition, true_label, &false_label);
+  Bind(&false_label);
+}
+
+void CodeStubAssembler::GotoUnless(Node* condition, Label* false_label) {
+  Label true_label(this);
+  Branch(condition, &true_label, false_label);
+  Bind(&true_label);
+}
+
 void CodeStubAssembler::Branch(Node* condition,
                                CodeStubAssembler::Label* true_label,
                                CodeStubAssembler::Label* false_label) {
@@ -450,11 +1190,15 @@
 }
 
 // RawMachineAssembler delegate helpers:
-Isolate* CodeStubAssembler::isolate() { return raw_assembler_->isolate(); }
+Isolate* CodeStubAssembler::isolate() const {
+  return raw_assembler_->isolate();
+}
 
-Graph* CodeStubAssembler::graph() { return raw_assembler_->graph(); }
+Factory* CodeStubAssembler::factory() const { return isolate()->factory(); }
 
-Zone* CodeStubAssembler::zone() { return raw_assembler_->zone(); }
+Graph* CodeStubAssembler::graph() const { return raw_assembler_->graph(); }
+
+Zone* CodeStubAssembler::zone() const { return raw_assembler_->zone(); }
 
 // The core implementation of Variable is stored through an indirection so
 // that it can outlive the often block-scoped Variable declarations. This is
@@ -488,27 +1232,20 @@
   return impl_->value_ != nullptr;
 }
 
-CodeStubAssembler::Label::Label(CodeStubAssembler* assembler)
-    : bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
-  void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
-  label_ = new (buffer) RawMachineLabel();
-}
-
 CodeStubAssembler::Label::Label(CodeStubAssembler* assembler,
                                 int merged_value_count,
-                                CodeStubAssembler::Variable** merged_variables)
+                                CodeStubAssembler::Variable** merged_variables,
+                                CodeStubAssembler::Label::Type type)
     : bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
   void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
-  label_ = new (buffer) RawMachineLabel();
+  label_ = new (buffer)
+      RawMachineLabel(type == kDeferred ? RawMachineLabel::kDeferred
+                                        : RawMachineLabel::kNonDeferred);
   for (int i = 0; i < merged_value_count; ++i) {
     variable_phis_[merged_variables[i]->impl_] = nullptr;
   }
 }
 
-CodeStubAssembler::Label::Label(CodeStubAssembler* assembler,
-                                CodeStubAssembler::Variable* merged_variable)
-    : CodeStubAssembler::Label(assembler, 1, &merged_variable) {}
-
 void CodeStubAssembler::Label::MergeVariables() {
   ++merge_count_;
   for (auto var : assembler_->variables_) {
@@ -539,16 +1276,17 @@
         assembler_->raw_assembler_->AppendPhiInput(phi->second, node);
       } else {
         auto i = variable_merges_.find(var);
-        USE(i);
-        // If the following assert fires, then you've declared a variable that
-        // has the same bound value along all paths up until the point you bound
-        // this label, but then later merged a path with a new value for the
-        // variable after the label bind (it's not possible to add phis to the
-        // bound label after the fact, just make sure to list the variable in
-        // the label's constructor's list of merged variables).
-        DCHECK(find_if(i->second.begin(), i->second.end(),
-                       [node](Node* e) -> bool { return node != e; }) ==
-               i->second.end());
+        if (i != variable_merges_.end()) {
+          // If the following assert fires, then you've declared a variable that
+          // has the same bound value along all paths up until the point you
+          // bound this label, but then later merged a path with a new value for
+          // the variable after the label bind (it's not possible to add phis to
+          // the bound label after the fact, just make sure to list the variable
+          // in the label's constructor's list of merged variables).
+          DCHECK(find_if(i->second.begin(), i->second.end(),
+                         [node](Node* e) -> bool { return node != e; }) ==
+                 i->second.end());
+        }
       }
     }
   }
diff --git a/src/compiler/code-stub-assembler.h b/src/compiler/code-stub-assembler.h
index 2ab1376..9fcb890 100644
--- a/src/compiler/code-stub-assembler.h
+++ b/src/compiler/code-stub-assembler.h
@@ -19,8 +19,10 @@
 namespace v8 {
 namespace internal {
 
+class Callable;
 class CallInterfaceDescriptor;
 class Isolate;
+class Factory;
 class Zone;
 
 namespace compiler {
@@ -33,49 +35,91 @@
 class RawMachineLabel;
 class Schedule;
 
-#define CODE_STUB_ASSEMBLER_BINARY_OP_LIST(V) \
-  V(IntPtrAdd)                                \
-  V(IntPtrSub)                                \
-  V(Int32Add)                                 \
-  V(Int32Sub)                                 \
-  V(Int32Mul)                                 \
-  V(Int32GreaterThanOrEqual)                  \
-  V(WordEqual)                                \
-  V(WordNotEqual)                             \
-  V(WordOr)                                   \
-  V(WordAnd)                                  \
-  V(WordXor)                                  \
-  V(WordShl)                                  \
-  V(WordShr)                                  \
-  V(WordSar)                                  \
-  V(WordRor)                                  \
-  V(Word32Equal)                              \
-  V(Word32NotEqual)                           \
-  V(Word32Or)                                 \
-  V(Word32And)                                \
-  V(Word32Xor)                                \
-  V(Word32Shl)                                \
-  V(Word32Shr)                                \
-  V(Word32Sar)                                \
-  V(Word32Ror)                                \
-  V(Word64Equal)                              \
-  V(Word64NotEqual)                           \
-  V(Word64Or)                                 \
-  V(Word64And)                                \
-  V(Word64Xor)                                \
-  V(Word64Shr)                                \
-  V(Word64Sar)                                \
-  V(Word64Ror)                                \
-  V(UintPtrGreaterThanOrEqual)
+#define CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
+  V(Float32Equal)                                     \
+  V(Float32LessThan)                                  \
+  V(Float32LessThanOrEqual)                           \
+  V(Float32GreaterThan)                               \
+  V(Float32GreaterThanOrEqual)                        \
+  V(Float64Equal)                                     \
+  V(Float64LessThan)                                  \
+  V(Float64LessThanOrEqual)                           \
+  V(Float64GreaterThan)                               \
+  V(Float64GreaterThanOrEqual)                        \
+  V(Int32GreaterThan)                                 \
+  V(Int32GreaterThanOrEqual)                          \
+  V(Int32LessThan)                                    \
+  V(Int32LessThanOrEqual)                             \
+  V(IntPtrLessThan)                                   \
+  V(IntPtrLessThanOrEqual)                            \
+  V(Uint32LessThan)                                   \
+  V(UintPtrGreaterThanOrEqual)                        \
+  V(WordEqual)                                        \
+  V(WordNotEqual)                                     \
+  V(Word32Equal)                                      \
+  V(Word32NotEqual)                                   \
+  V(Word64Equal)                                      \
+  V(Word64NotEqual)
+
+#define CODE_STUB_ASSEMBLER_BINARY_OP_LIST(V)   \
+  CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
+  V(Float64Add)                                 \
+  V(Float64Sub)                                 \
+  V(Float64InsertLowWord32)                     \
+  V(Float64InsertHighWord32)                    \
+  V(IntPtrAdd)                                  \
+  V(IntPtrAddWithOverflow)                      \
+  V(IntPtrSub)                                  \
+  V(IntPtrSubWithOverflow)                      \
+  V(Int32Add)                                   \
+  V(Int32AddWithOverflow)                       \
+  V(Int32Sub)                                   \
+  V(Int32Mul)                                   \
+  V(WordOr)                                     \
+  V(WordAnd)                                    \
+  V(WordXor)                                    \
+  V(WordShl)                                    \
+  V(WordShr)                                    \
+  V(WordSar)                                    \
+  V(WordRor)                                    \
+  V(Word32Or)                                   \
+  V(Word32And)                                  \
+  V(Word32Xor)                                  \
+  V(Word32Shl)                                  \
+  V(Word32Shr)                                  \
+  V(Word32Sar)                                  \
+  V(Word32Ror)                                  \
+  V(Word64Or)                                   \
+  V(Word64And)                                  \
+  V(Word64Xor)                                  \
+  V(Word64Shr)                                  \
+  V(Word64Sar)                                  \
+  V(Word64Ror)
+
+#define CODE_STUB_ASSEMBLER_UNARY_OP_LIST(V) \
+  V(Float64Neg)                              \
+  V(Float64Sqrt)                             \
+  V(ChangeFloat64ToUint32)                   \
+  V(ChangeInt32ToFloat64)                    \
+  V(ChangeInt32ToInt64)                      \
+  V(ChangeUint32ToFloat64)                   \
+  V(ChangeUint32ToUint64)                    \
+  V(Word32Clz)
 
 class CodeStubAssembler {
  public:
+  // Create with CallStub linkage.
   // |result_size| specifies the number of results returned by the stub.
   // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
   CodeStubAssembler(Isolate* isolate, Zone* zone,
                     const CallInterfaceDescriptor& descriptor,
                     Code::Flags flags, const char* name,
                     size_t result_size = 1);
+
+  // Create with JSCall linkage.
+  CodeStubAssembler(Isolate* isolate, Zone* zone, int parameter_count,
+                    Code::Flags flags, const char* name);
+
   virtual ~CodeStubAssembler();
 
   Handle<Code> GenerateCode();
@@ -95,6 +139,14 @@
     Impl* impl_;
   };
 
+  enum AllocationFlag : uint8_t {
+    kNone = 0,
+    kDoubleAlignment = 1,
+    kPretenured = 1 << 1
+  };
+
+  typedef base::Flags<AllocationFlag> AllocationFlags;
+
   // ===========================================================================
   // Base Assembler
   // ===========================================================================
@@ -103,15 +155,23 @@
   Node* Int32Constant(int value);
   Node* IntPtrConstant(intptr_t value);
   Node* NumberConstant(double value);
+  Node* SmiConstant(Smi* value);
   Node* HeapConstant(Handle<HeapObject> object);
   Node* BooleanConstant(bool value);
   Node* ExternalConstant(ExternalReference address);
+  Node* Float64Constant(double value);
+  Node* BooleanMapConstant();
+  Node* HeapNumberMapConstant();
+  Node* NullConstant();
+  Node* UndefinedConstant();
 
   Node* Parameter(int value);
   void Return(Node* value);
 
   void Bind(Label* label);
   void Goto(Label* label);
+  void GotoIf(Node* condition, Label* true_label);
+  void GotoUnless(Node* condition, Label* false_label);
   void Branch(Node* condition, Label* true_label, Label* false_label);
 
   void Switch(Node* index, Label* default_label, int32_t* case_values,
@@ -142,8 +202,10 @@
 
   Node* WordShl(Node* value, int shift);
 
-  // Conversions
-  Node* ChangeInt32ToInt64(Node* value);
+// Unary
+#define DECLARE_CODE_STUB_ASSEMBER_UNARY_OP(name) Node* name(Node* a);
+  CODE_STUB_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_STUB_ASSEMBER_UNARY_OP)
+#undef DECLARE_CODE_STUB_ASSEMBER_UNARY_OP
 
   // Projections
   Node* Projection(int index, Node* value);
@@ -160,6 +222,7 @@
   Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
                     Node* arg2, Node* arg3, Node* arg4, Node* arg5);
 
+  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context);
   Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
                         Node* arg1);
   Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
@@ -169,6 +232,9 @@
   Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
                         Node* arg1, Node* arg2, Node* arg3, Node* arg4);
 
+  Node* CallStub(Callable const& callable, Node* context, Node* arg1,
+                 size_t result_size = 1);
+
   Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
                  Node* context, Node* arg1, size_t result_size = 1);
   Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
@@ -183,7 +249,13 @@
                  Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
                  Node* arg5, size_t result_size = 1);
 
-  Node* TailCallStub(CodeStub& stub, Node** args);
+  Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
+                     Node* arg2, size_t result_size = 1);
+
+  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                     Node* context, Node* arg1, Node* arg2,
+                     size_t result_size = 1);
+
   Node* TailCall(const CallInterfaceDescriptor& descriptor, Node* target,
                  Node** args, size_t result_size = 1);
 
@@ -191,31 +263,141 @@
   // Macros
   // ===========================================================================
 
-  // Tag and untag Smi values.
+  // Float64 operations.
+  Node* Float64Ceil(Node* x);
+  Node* Float64Floor(Node* x);
+  Node* Float64Round(Node* x);
+  Node* Float64Trunc(Node* x);
+
+  // Tag a Word as a Smi value.
   Node* SmiTag(Node* value);
+  // Untag a Smi value as a Word.
   Node* SmiUntag(Node* value);
 
+  // Smi conversions.
+  Node* SmiToFloat64(Node* value);
+  Node* SmiToWord32(Node* value);
+
+  // Smi operations.
+  Node* SmiAdd(Node* a, Node* b);
+  Node* SmiAddWithOverflow(Node* a, Node* b);
+  Node* SmiSub(Node* a, Node* b);
+  Node* SmiSubWithOverflow(Node* a, Node* b);
+  Node* SmiEqual(Node* a, Node* b);
+  Node* SmiLessThan(Node* a, Node* b);
+  Node* SmiLessThanOrEqual(Node* a, Node* b);
+  Node* SmiMin(Node* a, Node* b);
+
   // Load a value from the root array.
   Node* LoadRoot(Heap::RootListIndex root_index);
 
   // Check a value for smi-ness.
   Node* WordIsSmi(Node* a);
 
+  // Check that the value is a positive smi.
+  Node* WordIsPositiveSmi(Node* a);
+
   // Load an object pointer from a buffer that isn't in the heap.
-  Node* LoadBufferObject(Node* buffer, int offset);
+  Node* LoadBufferObject(Node* buffer, int offset,
+                         MachineType rep = MachineType::AnyTagged());
   // Load a field from an object on the heap.
-  Node* LoadObjectField(Node* object, int offset);
+  Node* LoadObjectField(Node* object, int offset,
+                        MachineType rep = MachineType::AnyTagged());
+  // Load the floating point value of a HeapNumber.
+  Node* LoadHeapNumberValue(Node* object);
+  // Store the floating point value of a HeapNumber.
+  Node* StoreHeapNumberValue(Node* object, Node* value);
+  // Truncate the floating point value of a HeapNumber to an Int32.
+  Node* TruncateHeapNumberValueToWord32(Node* object);
+  // Load the bit field of a Map.
+  Node* LoadMapBitField(Node* map);
+  // Load bit field 2 of a map.
+  Node* LoadMapBitField2(Node* map);
+  // Load bit field 3 of a map.
+  Node* LoadMapBitField3(Node* map);
+  // Load the instance type of a map.
+  Node* LoadMapInstanceType(Node* map);
+  // Load the instance descriptors of a map.
+  Node* LoadMapDescriptors(Node* map);
+
+  // Load the hash field of a name.
+  Node* LoadNameHash(Node* name);
 
   // Load an array element from a FixedArray.
+  Node* LoadFixedArrayElementInt32Index(Node* object, Node* int32_index,
+                                        int additional_offset = 0);
   Node* LoadFixedArrayElementSmiIndex(Node* object, Node* smi_index,
                                       int additional_offset = 0);
   Node* LoadFixedArrayElementConstantIndex(Node* object, int index);
 
+  // Allocate an object of the given size.
+  Node* Allocate(int size, AllocationFlags flags = kNone);
+  // Allocate a HeapNumber without initializing its value.
+  Node* AllocateHeapNumber();
+  // Allocate a HeapNumber with a specific value.
+  Node* AllocateHeapNumberWithValue(Node* value);
+
+  // Store an array element to a FixedArray.
+  Node* StoreFixedArrayElementNoWriteBarrier(Node* object, Node* index,
+                                             Node* value);
+  // Load the Map of a HeapObject.
+  Node* LoadMap(Node* object);
+  // Store the Map of a HeapObject.
+  Node* StoreMapNoWriteBarrier(Node* object, Node* map);
+  // Load the instance type of a HeapObject.
+  Node* LoadInstanceType(Node* object);
+
+  // Load the elements backing store of a JSObject.
+  Node* LoadElements(Node* object);
+  // Load the length of a FixedArrayBase instance.
+  Node* LoadFixedArrayBaseLength(Node* array);
+
+  // Returns a node containing the decoded (unsigned) value of the bit
+  // field |T| in |word32|.
+  template <typename T>
+  Node* BitFieldDecode(Node* word32) {
+    return BitFieldDecode(word32, T::kShift, T::kMask);
+  }
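+  // For example, assuming a BitField-style class SomeField (hypothetical)
+  // with the usual kShift and kMask constants:
+  //   Node* decoded = BitFieldDecode<SomeField>(word32);
+  // computes (word32 & SomeField::kMask) >> SomeField::kShift.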
+
+  Node* BitFieldDecode(Node* word32, uint32_t shift, uint32_t mask);
+
+  // Conversions.
+  Node* ChangeFloat64ToTagged(Node* value);
+  Node* ChangeInt32ToTagged(Node* value);
+  Node* TruncateTaggedToFloat64(Node* context, Node* value);
+  Node* TruncateTaggedToWord32(Node* context, Node* value);
+
+  // Branching helpers.
+  // TODO(danno): Can we be more cleverish wrt. edge-split?
+  void BranchIf(Node* condition, Label* if_true, Label* if_false);
+
+#define BRANCH_HELPER(name)                                                \
+  void BranchIf##name(Node* a, Node* b, Label* if_true, Label* if_false) { \
+    BranchIf(name(a, b), if_true, if_false);                               \
+  }
+  CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(BRANCH_HELPER)
+#undef BRANCH_HELPER
+
+  void BranchIfSmiLessThan(Node* a, Node* b, Label* if_true, Label* if_false) {
+    BranchIf(SmiLessThan(a, b), if_true, if_false);
+  }
+
+  void BranchIfSmiLessThanOrEqual(Node* a, Node* b, Label* if_true,
+                                  Label* if_false) {
+    BranchIf(SmiLessThanOrEqual(a, b), if_true, if_false);
+  }
+
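+  // NaN is the only Float64 value that does not compare equal to itself,
+  // so the self-comparison below routes its "equal" edge to {if_false} and
+  // its "not equal" (NaN) edge to {if_true}.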
+  void BranchIfFloat64IsNaN(Node* value, Label* if_true, Label* if_false) {
+    BranchIfFloat64Equal(value, value, if_false, if_true);
+  }
+
+  // Helpers which delegate to RawMachineAssembler.
+  Factory* factory() const;
+  Isolate* isolate() const;
+  Zone* zone() const;
+
  protected:
   // Protected helpers which delegate to RawMachineAssembler.
-  Graph* graph();
-  Isolate* isolate();
-  Zone* zone();
+  Graph* graph() const;
 
   // Enables subclasses to perform operations before and after a call.
   virtual void CallPrologue();
@@ -224,11 +406,20 @@
  private:
   friend class CodeStubAssemblerTester;
 
+  CodeStubAssembler(Isolate* isolate, Zone* zone,
+                    CallDescriptor* call_descriptor, Code::Flags flags,
+                    const char* name);
+
   Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
   Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args);
 
   Node* SmiShiftBitsConstant();
 
+  Node* AllocateRawAligned(Node* size_in_bytes, AllocationFlags flags,
+                           Node* top_address, Node* limit_address);
+  Node* AllocateRawUnaligned(Node* size_in_bytes, AllocationFlags flags,
+                             Node* top_address, Node* limit_address);
+
   base::SmartPointer<RawMachineAssembler> raw_assembler_;
   Code::Flags flags_;
   const char* name_;
@@ -238,13 +429,25 @@
   DISALLOW_COPY_AND_ASSIGN(CodeStubAssembler);
 };
 
+DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags);
+
 class CodeStubAssembler::Label {
  public:
-  explicit Label(CodeStubAssembler* assembler);
-  Label(CodeStubAssembler* assembler, int merged_variable_count,
-        CodeStubAssembler::Variable** merged_variables);
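+  // kDeferred is intended to mark labels for slow-path code, which the
+  // code generator may place out of line (an interpretation; this header
+  // does not spell the distinction out).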
+  enum Type { kDeferred, kNonDeferred };
+
+  explicit Label(CodeStubAssembler* assembler,
+                 CodeStubAssembler::Label::Type type =
+                     CodeStubAssembler::Label::kNonDeferred)
+      : CodeStubAssembler::Label(assembler, 0, nullptr, type) {}
   Label(CodeStubAssembler* assembler,
-        CodeStubAssembler::Variable* merged_variable);
+        CodeStubAssembler::Variable* merged_variable,
+        CodeStubAssembler::Label::Type type =
+            CodeStubAssembler::Label::kNonDeferred)
+      : CodeStubAssembler::Label(assembler, 1, &merged_variable, type) {}
+  Label(CodeStubAssembler* assembler, int merged_variable_count,
+        CodeStubAssembler::Variable** merged_variables,
+        CodeStubAssembler::Label::Type type =
+            CodeStubAssembler::Label::kNonDeferred);
   ~Label() {}
 
  private:
diff --git a/src/compiler/common-operator-reducer.cc b/src/compiler/common-operator-reducer.cc
index 2334541..22e16a2 100644
--- a/src/compiler/common-operator-reducer.cc
+++ b/src/compiler/common-operator-reducer.cc
@@ -57,6 +57,9 @@
   switch (node->opcode()) {
     case IrOpcode::kBranch:
       return ReduceBranch(node);
+    case IrOpcode::kDeoptimizeIf:
+    case IrOpcode::kDeoptimizeUnless:
+      return ReduceDeoptimizeConditional(node);
     case IrOpcode::kMerge:
       return ReduceMerge(node);
     case IrOpcode::kEffectPhi:
@@ -123,6 +126,37 @@
   return Replace(dead());
 }
 
+Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
+  DCHECK(node->opcode() == IrOpcode::kDeoptimizeIf ||
+         node->opcode() == IrOpcode::kDeoptimizeUnless);
+  bool condition_is_true = node->opcode() == IrOpcode::kDeoptimizeUnless;
+  Node* condition = NodeProperties::GetValueInput(node, 0);
+  Node* frame_state = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  // Swap DeoptimizeIf/DeoptimizeUnless on {node} if {condition} is a
+  // BooleanNot and use the input to the BooleanNot as the new condition for
+  // {node}. Note that we assume {condition} was already properly optimized
+  // before we get here (as guaranteed by the graph reduction logic).
+  if (condition->opcode() == IrOpcode::kBooleanNot) {
+    NodeProperties::ReplaceValueInput(node, condition->InputAt(0), 0);
+    NodeProperties::ChangeOp(node, condition_is_true
+                                       ? common()->DeoptimizeIf()
+                                       : common()->DeoptimizeUnless());
+    return Changed(node);
+  }
+  Decision const decision = DecideCondition(condition);
+  if (decision == Decision::kUnknown) return NoChange();
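+  // The condition is statically known: if it matches the opcode's "keep
+  // going" sense, the deoptimization never fires and {node} reduces to its
+  // control input; otherwise it always fires and becomes an unconditional
+  // eager Deoptimize below.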
+  if (condition_is_true == (decision == Decision::kTrue)) {
+    return Replace(control);
+  }
+  control = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+                             frame_state, effect, control);
+  // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+  NodeProperties::MergeControlToEnd(graph(), common(), control);
+  Revisit(graph()->end());
+  return Replace(dead());
+}
 
 Reduction CommonOperatorReducer::ReduceMerge(Node* node) {
   DCHECK_EQ(IrOpcode::kMerge, node->opcode());
diff --git a/src/compiler/common-operator-reducer.h b/src/compiler/common-operator-reducer.h
index 7184755..49d9f1d 100644
--- a/src/compiler/common-operator-reducer.h
+++ b/src/compiler/common-operator-reducer.h
@@ -30,6 +30,7 @@
 
  private:
   Reduction ReduceBranch(Node* node);
+  Reduction ReduceDeoptimizeConditional(Node* node);
   Reduction ReduceMerge(Node* node);
   Reduction ReduceEffectPhi(Node* node);
   Reduction ReducePhi(Node* node);
diff --git a/src/compiler/common-operator.cc b/src/compiler/common-operator.cc
index c92bae9..3bb1b34 100644
--- a/src/compiler/common-operator.cc
+++ b/src/compiler/common-operator.cc
@@ -142,21 +142,21 @@
   return os;
 }
 
-
-#define CACHED_OP_LIST(V)                                  \
-  V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1)           \
-  V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1)          \
-  V(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1)         \
-  V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1)       \
-  V(IfDefault, Operator::kKontrol, 0, 0, 1, 0, 0, 1)       \
-  V(Throw, Operator::kKontrol, 1, 1, 1, 0, 0, 1)           \
-  V(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1)       \
-  V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
-  V(OsrLoopEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1)   \
-  V(BeginRegion, Operator::kNoThrow, 0, 1, 0, 0, 1, 0)     \
+#define CACHED_OP_LIST(V)                                    \
+  V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1)             \
+  V(DeoptimizeIf, Operator::kFoldable, 2, 1, 1, 0, 0, 1)     \
+  V(DeoptimizeUnless, Operator::kFoldable, 2, 1, 1, 0, 0, 1) \
+  V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1)            \
+  V(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1)           \
+  V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1)         \
+  V(IfDefault, Operator::kKontrol, 0, 0, 1, 0, 0, 1)         \
+  V(Throw, Operator::kKontrol, 1, 1, 1, 0, 0, 1)             \
+  V(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1)         \
+  V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1)   \
+  V(OsrLoopEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1)     \
+  V(BeginRegion, Operator::kNoThrow, 0, 1, 0, 0, 1, 0)       \
   V(FinishRegion, Operator::kNoThrow, 1, 1, 0, 1, 1, 0)
 
-
 #define CACHED_RETURN_LIST(V) \
   V(1)                        \
   V(2)                        \
diff --git a/src/compiler/common-operator.h b/src/compiler/common-operator.h
index 7c3f3da..7c59f47 100644
--- a/src/compiler/common-operator.h
+++ b/src/compiler/common-operator.h
@@ -133,6 +133,8 @@
   const Operator* IfDefault();
   const Operator* Throw();
   const Operator* Deoptimize(DeoptimizeKind kind);
+  const Operator* DeoptimizeIf();
+  const Operator* DeoptimizeUnless();
   const Operator* Return(int value_input_count = 1);
   const Operator* Terminate();
 
diff --git a/src/compiler/fast-accessor-assembler.cc b/src/compiler/fast-accessor-assembler.cc
deleted file mode 100644
index 518003b..0000000
--- a/src/compiler/fast-accessor-assembler.cc
+++ /dev/null
@@ -1,262 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/fast-accessor-assembler.h"
-
-#include "src/base/logging.h"
-#include "src/code-stubs.h"  // For CallApiFunctionStub.
-#include "src/compiler/graph.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/pipeline.h"
-#include "src/compiler/raw-machine-assembler.h"
-#include "src/compiler/schedule.h"
-#include "src/compiler/verifier.h"
-#include "src/handles-inl.h"
-#include "src/objects.h"  // For FAA::GetInternalField impl.
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-FastAccessorAssembler::FastAccessorAssembler(Isolate* isolate)
-    : zone_(),
-      assembler_(new RawMachineAssembler(
-          isolate, new (zone()) Graph(zone()),
-          Linkage::GetJSCallDescriptor(&zone_, false, 1,
-                                       CallDescriptor::kNoFlags))),
-      state_(kBuilding) {}
-
-
-FastAccessorAssembler::~FastAccessorAssembler() {}
-
-
-FastAccessorAssembler::ValueId FastAccessorAssembler::IntegerConstant(
-    int const_value) {
-  CHECK_EQ(kBuilding, state_);
-  return FromRaw(assembler_->NumberConstant(const_value));
-}
-
-
-FastAccessorAssembler::ValueId FastAccessorAssembler::GetReceiver() {
-  CHECK_EQ(kBuilding, state_);
-
-  // For JS call descriptor, the receiver is parameter 0. If we use other
-  // call descriptors, this may or may not hold. So let's check.
-  CHECK(assembler_->call_descriptor()->IsJSFunctionCall());
-  return FromRaw(assembler_->Parameter(0));
-}
-
-
-FastAccessorAssembler::ValueId FastAccessorAssembler::LoadInternalField(
-    ValueId value, int field_no) {
-  CHECK_EQ(kBuilding, state_);
-  // Determine the 'value' object's instance type.
-  Node* object_map =
-      assembler_->Load(MachineType::Pointer(), FromId(value),
-                       assembler_->IntPtrConstant(
-                           Internals::kHeapObjectMapOffset - kHeapObjectTag));
-  Node* instance_type = assembler_->WordAnd(
-      assembler_->Load(
-          MachineType::Uint16(), object_map,
-          assembler_->IntPtrConstant(
-              Internals::kMapInstanceTypeAndBitFieldOffset - kHeapObjectTag)),
-      assembler_->IntPtrConstant(0xff));
-
-  // Check whether we have a proper JSObject.
-  RawMachineLabel is_jsobject, is_not_jsobject, merge;
-  assembler_->Branch(
-      assembler_->WordEqual(
-          instance_type, assembler_->IntPtrConstant(Internals::kJSObjectType)),
-      &is_jsobject, &is_not_jsobject);
-
-  // JSObject? Then load the internal field field_no.
-  assembler_->Bind(&is_jsobject);
-  Node* internal_field = assembler_->Load(
-      MachineType::Pointer(), FromId(value),
-      assembler_->IntPtrConstant(JSObject::kHeaderSize - kHeapObjectTag +
-                                 kPointerSize * field_no));
-  assembler_->Goto(&merge);
-
-  // No JSObject? Return undefined.
-  // TODO(vogelheim): Check whether this is the appropriate action, or whether
-  //                  the method should take a label instead.
-  assembler_->Bind(&is_not_jsobject);
-  Node* fail_value = assembler_->UndefinedConstant();
-  assembler_->Goto(&merge);
-
-  // Return.
-  assembler_->Bind(&merge);
-  Node* phi = assembler_->Phi(MachineRepresentation::kTagged, internal_field,
-                              fail_value);
-  return FromRaw(phi);
-}
-
-
-FastAccessorAssembler::ValueId FastAccessorAssembler::LoadValue(ValueId value,
-                                                                int offset) {
-  CHECK_EQ(kBuilding, state_);
-  return FromRaw(assembler_->Load(MachineType::IntPtr(), FromId(value),
-                                  assembler_->IntPtrConstant(offset)));
-}
-
-
-FastAccessorAssembler::ValueId FastAccessorAssembler::LoadObject(ValueId value,
-                                                                 int offset) {
-  CHECK_EQ(kBuilding, state_);
-  return FromRaw(
-      assembler_->Load(MachineType::AnyTagged(),
-                       assembler_->Load(MachineType::Pointer(), FromId(value),
-                                        assembler_->IntPtrConstant(offset))));
-}
-
-
-void FastAccessorAssembler::ReturnValue(ValueId value) {
-  CHECK_EQ(kBuilding, state_);
-  assembler_->Return(FromId(value));
-}
-
-
-void FastAccessorAssembler::CheckFlagSetOrReturnNull(ValueId value, int mask) {
-  CHECK_EQ(kBuilding, state_);
-  RawMachineLabel pass, fail;
-  assembler_->Branch(
-      assembler_->Word32Equal(
-          assembler_->Word32And(FromId(value), assembler_->Int32Constant(mask)),
-          assembler_->Int32Constant(0)),
-      &pass, &fail);
-  assembler_->Bind(&fail);
-  assembler_->Return(assembler_->NullConstant());
-  assembler_->Bind(&pass);
-}
-
-
-void FastAccessorAssembler::CheckNotZeroOrReturnNull(ValueId value) {
-  CHECK_EQ(kBuilding, state_);
-  RawMachineLabel is_null, not_null;
-  assembler_->Branch(
-      assembler_->IntPtrEqual(FromId(value), assembler_->IntPtrConstant(0)),
-      &is_null, &not_null);
-  assembler_->Bind(&is_null);
-  assembler_->Return(assembler_->NullConstant());
-  assembler_->Bind(&not_null);
-}
-
-
-FastAccessorAssembler::LabelId FastAccessorAssembler::MakeLabel() {
-  CHECK_EQ(kBuilding, state_);
-  RawMachineLabel* label =
-      new (zone()->New(sizeof(RawMachineLabel))) RawMachineLabel;
-  return FromRaw(label);
-}
-
-
-void FastAccessorAssembler::SetLabel(LabelId label_id) {
-  CHECK_EQ(kBuilding, state_);
-  assembler_->Bind(FromId(label_id));
-}
-
-
-void FastAccessorAssembler::CheckNotZeroOrJump(ValueId value_id,
-                                               LabelId label_id) {
-  CHECK_EQ(kBuilding, state_);
-  RawMachineLabel pass;
-  assembler_->Branch(
-      assembler_->IntPtrEqual(FromId(value_id), assembler_->IntPtrConstant(0)),
-      &pass, FromId(label_id));
-  assembler_->Bind(&pass);
-}
-
-FastAccessorAssembler::ValueId FastAccessorAssembler::Call(
-    FunctionCallback callback_function, ValueId arg) {
-  CHECK_EQ(kBuilding, state_);
-
-  // Create API function stub.
-  CallApiFunctionStub stub(assembler_->isolate(), true);
-
-  // Wrap the FunctionCallback in an ExternalReference.
-  ApiFunction callback_api_function(FUNCTION_ADDR(callback_function));
-  ExternalReference callback(&callback_api_function,
-                             ExternalReference::DIRECT_API_CALL,
-                             assembler_->isolate());
-
-  // The stub has 5 parameters, and kStackParam (here: 1) parameters to pass
-  // through to the callback.
-  // See: ApiFunctionDescriptor::BuildCallInterfaceDescriptorFunctionType
-  static const int kStackParam = 1;
-  Node* args[] = {
-      // Stub/register parameters:
-      assembler_->Parameter(0),                /* receiver (use accessor's) */
-      assembler_->UndefinedConstant(),         /* call_data (undefined) */
-      assembler_->NullConstant(),              /* holder (null) */
-      assembler_->ExternalConstant(callback),  /* API callback function */
-      assembler_->IntPtrConstant(kStackParam), /* # JS arguments */
-
-      // kStackParam stack parameter(s):
-      FromId(arg),
-
-      // Context parameter. (See Linkage::GetStubCallDescriptor.)
-      assembler_->UndefinedConstant()};
-  CHECK_EQ(5 + kStackParam + 1, arraysize(args));
-
-  Node* call = assembler_->CallN(
-      Linkage::GetStubCallDescriptor(
-          assembler_->isolate(), zone(), stub.GetCallInterfaceDescriptor(),
-          kStackParam + stub.GetStackParameterCount(),
-          CallDescriptor::kNoFlags),
-      assembler_->HeapConstant(stub.GetCode()), args);
-  return FromRaw(call);
-}
-
-MaybeHandle<Code> FastAccessorAssembler::Build() {
-  CHECK_EQ(kBuilding, state_);
-
-  // Cleanup: We no longer need this.
-  nodes_.clear();
-  labels_.clear();
-
-  // Export the schedule and call the compiler.
-  Schedule* schedule = assembler_->Export();
-  Code::Flags flags = Code::ComputeFlags(Code::STUB);
-  MaybeHandle<Code> code = Pipeline::GenerateCodeForCodeStub(
-      assembler_->isolate(), assembler_->call_descriptor(), assembler_->graph(),
-      schedule, flags, "FastAccessorAssembler");
-
-  // Update state & return.
-  state_ = !code.is_null() ? kBuilt : kError;
-  return code;
-}
-
-
-FastAccessorAssembler::ValueId FastAccessorAssembler::FromRaw(Node* node) {
-  nodes_.push_back(node);
-  ValueId value = {nodes_.size() - 1};
-  return value;
-}
-
-
-FastAccessorAssembler::LabelId FastAccessorAssembler::FromRaw(
-    RawMachineLabel* label) {
-  labels_.push_back(label);
-  LabelId label_id = {labels_.size() - 1};
-  return label_id;
-}
-
-
-Node* FastAccessorAssembler::FromId(ValueId value) const {
-  CHECK_LT(value.value_id, nodes_.size());
-  CHECK_NOT_NULL(nodes_.at(value.value_id));
-  return nodes_.at(value.value_id);
-}
-
-
-RawMachineLabel* FastAccessorAssembler::FromId(LabelId label) const {
-  CHECK_LT(label.label_id, labels_.size());
-  CHECK_NOT_NULL(labels_.at(label.label_id));
-  return labels_.at(label.label_id);
-}
-
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/fast-accessor-assembler.h b/src/compiler/fast-accessor-assembler.h
deleted file mode 100644
index 1cb751d..0000000
--- a/src/compiler/fast-accessor-assembler.h
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_FAST_ACCESSOR_ASSEMBLER_H_
-#define V8_COMPILER_FAST_ACCESSOR_ASSEMBLER_H_
-
-#include <stdint.h>
-#include <vector>
-
-// Clients of this interface shouldn't depend on lots of compiler internals.
-// Do not include anything from src/compiler here!
-#include "include/v8-experimental.h"
-#include "src/base/macros.h"
-#include "src/base/smart-pointers.h"
-#include "src/handles.h"
-
-
-namespace v8 {
-namespace internal {
-
-class Code;
-class Isolate;
-class Zone;
-
-namespace compiler {
-
-class Node;
-class RawMachineAssembler;
-class RawMachineLabel;
-
-
-// This interface "exports" an aggregated subset of RawMachineAssembler, for
-// use by the API to implement Fast Dom Accessors.
-//
-// This interface is made for this single purpose only and does not attempt
-// to implement a general purpose solution. If you need one, please look at
-// RawMachineAssembler instead.
-//
-// The life cycle of a FastAccessorAssembler has two phases:
-// - After creating the instance, you can call an arbitrary sequence of
-//   builder functions to build the desired function.
-// - When done, you can Build() the accessor and query for the build results.
-//
-// You cannot call any result getters before Build() was called & successful;
-// and you cannot call any builder functions after Build() was called.
-class FastAccessorAssembler {
- public:
-  typedef v8::experimental::FastAccessorBuilder::ValueId ValueId;
-  typedef v8::experimental::FastAccessorBuilder::LabelId LabelId;
-  typedef v8::FunctionCallback FunctionCallback;
-
-  explicit FastAccessorAssembler(Isolate* isolate);
-  ~FastAccessorAssembler();
-
-  // Builder / assembler functions:
-  ValueId IntegerConstant(int int_constant);
-  ValueId GetReceiver();
-  ValueId LoadInternalField(ValueId value_id, int field_no);
-  ValueId LoadValue(ValueId value_id, int offset);
-  ValueId LoadObject(ValueId value_id, int offset);
-
-  // Builder / assembler functions for control flow.
-  void ReturnValue(ValueId value_id);
-  void CheckFlagSetOrReturnNull(ValueId value_id, int mask);
-  void CheckNotZeroOrReturnNull(ValueId value_id);
-  LabelId MakeLabel();
-  void SetLabel(LabelId label_id);
-  void CheckNotZeroOrJump(ValueId value_id, LabelId label_id);
-
-  // C++ callback.
-  ValueId Call(FunctionCallback callback, ValueId arg);
-
-  // Assemble the code.
-  MaybeHandle<Code> Build();
-
- private:
-  ValueId FromRaw(Node* node);
-  LabelId FromRaw(RawMachineLabel* label);
-  Node* FromId(ValueId value) const;
-  RawMachineLabel* FromId(LabelId value) const;
-
-  Zone* zone() { return &zone_; }
-
-  Zone zone_;
-  base::SmartPointer<RawMachineAssembler> assembler_;
-
-  // To prevent exposing the RMA internals to the outside world, we'll map
-  // Node and Label pointers to integers wrapped in ValueId and LabelId
-  // instances.
-  // These vectors maintain this mapping.
-  std::vector<Node*> nodes_;
-  std::vector<RawMachineLabel*> labels_;
-
-  // Remember the current state for easy error checking. (We prefer to be
-  // strict as this class will be exposed at the API.)
-  enum { kBuilding, kBuilt, kError } state_;
-
-  DISALLOW_COPY_AND_ASSIGN(FastAccessorAssembler);
-};
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_COMPILER_FAST_ACCESSOR_ASSEMBLER_H_
diff --git a/src/compiler/frame-elider.cc b/src/compiler/frame-elider.cc
index 7c3f9b2..5ad4aad 100644
--- a/src/compiler/frame-elider.cc
+++ b/src/compiler/frame-elider.cc
@@ -19,11 +19,12 @@
 
 
 void FrameElider::MarkBlocks() {
-  for (auto block : instruction_blocks()) {
+  for (InstructionBlock* block : instruction_blocks()) {
     if (block->needs_frame()) continue;
-    for (auto i = block->code_start(); i < block->code_end(); ++i) {
-      if (InstructionAt(i)->IsCall() ||
-          InstructionAt(i)->opcode() == ArchOpcode::kArchDeoptimize) {
+    for (int i = block->code_start(); i < block->code_end(); ++i) {
+      const Instruction* instr = InstructionAt(i);
+      if (instr->IsCall() || instr->IsDeoptimizeCall() ||
+          instr->arch_opcode() == ArchOpcode::kArchStackPointer) {
         block->mark_needs_frame();
         break;
       }
@@ -33,13 +34,13 @@
 
 
 void FrameElider::PropagateMarks() {
-  while (PropagateInOrder() && PropagateReversed()) {
+  while (PropagateInOrder() || PropagateReversed()) {
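+    // Alternate forward and backward passes until a pass in each direction
+    // leaves every block's needs-frame mark unchanged.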
   }
 }
 
 
 void FrameElider::MarkDeConstruction() {
-  for (auto block : instruction_blocks()) {
+  for (InstructionBlock* block : instruction_blocks()) {
     if (block->needs_frame()) {
       // Special case: The start block needs a frame.
       if (block->predecessors().empty()) {
@@ -47,15 +48,25 @@
       }
       // Find "frame -> no frame" transitions, inserting frame
       // deconstructions.
-      for (auto succ : block->successors()) {
+      for (RpoNumber& succ : block->successors()) {
         if (!InstructionBlockAt(succ)->needs_frame()) {
           DCHECK_EQ(1U, block->SuccessorCount());
+          const Instruction* last =
+              InstructionAt(block->last_instruction_index());
+          if (last->IsThrow() || last->IsTailCall() ||
+              last->IsDeoptimizeCall()) {
+            // We need to keep the frame if we exit the block through any
+            // of these.
+            continue;
+          }
+          // The only cases in which we need to deconstruct the frame are
+          // ret and jump.
+          DCHECK(last->IsRet() || last->IsJump());
           block->mark_must_deconstruct_frame();
         }
       }
     } else {
       // Find "no frame -> frame" transitions, inserting frame constructions.
-      for (auto succ : block->successors()) {
+      for (RpoNumber& succ : block->successors()) {
         if (InstructionBlockAt(succ)->needs_frame()) {
           DCHECK_NE(1U, block->SuccessorCount());
           InstructionBlockAt(succ)->mark_must_construct_frame();
@@ -68,7 +79,7 @@
 
 bool FrameElider::PropagateInOrder() {
   bool changed = false;
-  for (auto block : instruction_blocks()) {
+  for (InstructionBlock* block : instruction_blocks()) {
     changed |= PropagateIntoBlock(block);
   }
   return changed;
@@ -77,7 +88,7 @@
 
 bool FrameElider::PropagateReversed() {
   bool changed = false;
-  for (auto block : base::Reversed(instruction_blocks())) {
+  for (InstructionBlock* block : base::Reversed(instruction_blocks())) {
     changed |= PropagateIntoBlock(block);
   }
   return changed;
@@ -94,7 +105,7 @@
 
   // Propagate towards the end ("downwards") if there is a predecessor needing
   // a frame, but don't "bleed" from deferred code to non-deferred code.
-  for (auto pred : block->predecessors()) {
+  for (RpoNumber& pred : block->predecessors()) {
     if (InstructionBlockAt(pred)->needs_frame() &&
         (!InstructionBlockAt(pred)->IsDeferred() || block->IsDeferred())) {
       block->mark_needs_frame();
@@ -104,7 +115,7 @@
 
   // Propagate towards start ("upwards") if there are successors and all of
   // them need a frame.
-  for (auto succ : block->successors()) {
+  for (RpoNumber& succ : block->successors()) {
     if (!InstructionBlockAt(succ)->needs_frame()) return false;
   }
   block->mark_needs_frame();
diff --git a/src/compiler/frame-states.cc b/src/compiler/frame-states.cc
index 387d6a9..91827d0 100644
--- a/src/compiler/frame-states.cc
+++ b/src/compiler/frame-states.cc
@@ -58,6 +58,9 @@
     case FrameStateType::kArgumentsAdaptor:
       os << "ARGUMENTS_ADAPTOR";
       break;
+    case FrameStateType::kTailCallerFunction:
+      os << "TAIL_CALLER_FRAME";
+      break;
     case FrameStateType::kConstructStub:
       os << "CONSTRUCT_STUB";
       break;
diff --git a/src/compiler/frame-states.h b/src/compiler/frame-states.h
index 60ff9b5..2552bcb 100644
--- a/src/compiler/frame-states.h
+++ b/src/compiler/frame-states.h
@@ -79,10 +79,10 @@
   kJavaScriptFunction,   // Represents an unoptimized JavaScriptFrame.
   kInterpretedFunction,  // Represents an InterpretedFrame.
   kArgumentsAdaptor,     // Represents an ArgumentsAdaptorFrame.
+  kTailCallerFunction,   // Represents a frame removed by tail call elimination.
   kConstructStub         // Represents a ConstructStubFrame.
 };
 
-
 class FrameStateFunctionInfo {
  public:
   FrameStateFunctionInfo(FrameStateType type, int parameter_count,
diff --git a/src/compiler/frame.cc b/src/compiler/frame.cc
index b08030b..3d93e15 100644
--- a/src/compiler/frame.cc
+++ b/src/compiler/frame.cc
@@ -13,17 +13,32 @@
 namespace compiler {
 
 Frame::Frame(int fixed_frame_size_in_slots, const CallDescriptor* descriptor)
-    : needs_frame_((descriptor != nullptr) &&
-                   descriptor->RequiresFrameAsIncoming()),
-      frame_slot_count_(fixed_frame_size_in_slots),
+    : frame_slot_count_(fixed_frame_size_in_slots),
       callee_saved_slot_count_(0),
       spill_slot_count_(0),
       allocated_registers_(nullptr),
       allocated_double_registers_(nullptr) {}
 
+int Frame::AlignFrame(int alignment) {
+  DCHECK_EQ(0, callee_saved_slot_count_);
+  int alignment_slots = alignment / kPointerSize;
+  int delta = alignment_slots - (frame_slot_count_ & (alignment_slots - 1));
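+  // {delta} is the padding needed to round the frame size up to a multiple
+  // of {alignment_slots}; it equals {alignment_slots} exactly when the
+  // frame is already aligned, in which case nothing is added.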
+  if (delta != alignment_slots) {
+    frame_slot_count_ += delta;
+    if (spill_slot_count_ != 0) {
+      spill_slot_count_ += delta;
+    }
+  }
+  return delta;
+}
+
+void FrameAccessState::MarkHasFrame(bool state) {
+  has_frame_ = state;
+  SetFrameAccessToDefault();
+}
 
 void FrameAccessState::SetFrameAccessToDefault() {
-  if (frame()->needs_frame() && !FLAG_turbo_sp_frame_access) {
+  if (has_frame() && !FLAG_turbo_sp_frame_access) {
     SetFrameAccessToFP();
   } else {
     SetFrameAccessToSP();
@@ -32,16 +47,12 @@
 
 
 FrameOffset FrameAccessState::GetFrameOffset(int spill_slot) const {
-  const int offset =
-      (StandardFrameConstants::kFixedSlotCountAboveFp - spill_slot - 1) *
-      kPointerSize;
+  const int frame_offset = FrameSlotToFPOffset(spill_slot);
   if (access_frame_with_fp()) {
-    DCHECK(frame()->needs_frame());
-    return FrameOffset::FromFramePointer(offset);
+    return FrameOffset::FromFramePointer(frame_offset);
   } else {
     // No frame. Retrieve all parameters relative to stack pointer.
-    int sp_offset =
-        offset + ((frame()->GetSpToFpSlotCount() + sp_delta()) * kPointerSize);
+    int sp_offset = frame_offset + GetSPToFPOffset();
     return FrameOffset::FromStackPointer(sp_offset);
   }
 }
diff --git a/src/compiler/frame.h b/src/compiler/frame.h
index 011a0f0..d413d3e 100644
--- a/src/compiler/frame.h
+++ b/src/compiler/frame.h
@@ -59,9 +59,9 @@
 //       |- - - - - - - - -|   |                        |
 //   1   | saved frame ptr | Fixed                      |
 //       |- - - - - - - - -| Header <-- frame ptr       |
-//   2   |     Context     |   |                        |
+//   2   |Context/Frm. Type|   |                        |
 //       |- - - - - - - - -|   |                        |
-//   3   |JSFunction/Marker|   v                        |
+//   3   |   [JSFunction]  |   v                        |
 //       +-----------------+----                        |
 //   4   |    spill 1      |   ^                      Callee
 //       |- - - - - - - - -|   |                   frame slots
@@ -81,26 +81,13 @@
   explicit Frame(int fixed_frame_size_in_slots,
                  const CallDescriptor* descriptor);
 
-  inline bool needs_frame() const { return needs_frame_; }
-  inline void MarkNeedsFrame() { needs_frame_ = true; }
-
   inline int GetTotalFrameSlotCount() const { return frame_slot_count_; }
 
-  inline int GetSpToFpSlotCount() const {
-    return GetTotalFrameSlotCount() -
-           StandardFrameConstants::kFixedSlotCountAboveFp;
-  }
   inline int GetSavedCalleeRegisterSlotCount() const {
     return callee_saved_slot_count_;
   }
   inline int GetSpillSlotCount() const { return spill_slot_count_; }
 
-  inline void SetElidedFrameSizeInSlots(int slots) {
-    DCHECK_EQ(0, callee_saved_slot_count_);
-    DCHECK_EQ(0, spill_slot_count_);
-    frame_slot_count_ = slots;
-  }
-
   void SetAllocatedRegisters(BitVector* regs) {
     DCHECK(allocated_registers_ == nullptr);
     allocated_registers_ = regs;
@@ -115,33 +102,34 @@
     return !allocated_double_registers_->IsEmpty();
   }
 
-  int AlignSavedCalleeRegisterSlots() {
+  int AlignSavedCalleeRegisterSlots(int alignment = kDoubleSize) {
     DCHECK_EQ(0, callee_saved_slot_count_);
-    needs_frame_ = true;
-    int delta = frame_slot_count_ & 1;
-    frame_slot_count_ += delta;
+    int alignment_slots = alignment / kPointerSize;
+    int delta = alignment_slots - (frame_slot_count_ & (alignment_slots - 1));
+    if (delta != alignment_slots) {
+      frame_slot_count_ += delta;
+    }
     return delta;
   }
 
   void AllocateSavedCalleeRegisterSlots(int count) {
-    needs_frame_ = true;
     frame_slot_count_ += count;
     callee_saved_slot_count_ += count;
   }
 
   int AllocateSpillSlot(int width) {
     DCHECK_EQ(0, callee_saved_slot_count_);
-    needs_frame_ = true;
     int frame_slot_count_before = frame_slot_count_;
     int slot = AllocateAlignedFrameSlot(width);
     spill_slot_count_ += (frame_slot_count_ - frame_slot_count_before);
     return slot;
   }
 
+  int AlignFrame(int alignment = kDoubleSize);
+
   int ReserveSpillSlots(size_t slot_count) {
     DCHECK_EQ(0, callee_saved_slot_count_);
     DCHECK_EQ(0, spill_slot_count_);
-    needs_frame_ = true;
     spill_slot_count_ += static_cast<int>(slot_count);
     frame_slot_count_ += static_cast<int>(slot_count);
     return frame_slot_count_ - 1;
@@ -163,7 +151,6 @@
   }
 
  private:
-  bool needs_frame_;
   int frame_slot_count_;
   int callee_saved_slot_count_;
   int spill_slot_count_;
@@ -205,21 +192,36 @@
 class FrameAccessState : public ZoneObject {
  public:
   explicit FrameAccessState(Frame* const frame)
-      : frame_(frame), access_frame_with_fp_(false), sp_delta_(0) {
-    SetFrameAccessToDefault();
-  }
+      : frame_(frame),
+        access_frame_with_fp_(false),
+        sp_delta_(0),
+        has_frame_(false) {}
 
   Frame* frame() const { return frame_; }
+  void MarkHasFrame(bool state);
 
   int sp_delta() const { return sp_delta_; }
   void ClearSPDelta() { sp_delta_ = 0; }
   void IncreaseSPDelta(int amount) { sp_delta_ += amount; }
 
   bool access_frame_with_fp() const { return access_frame_with_fp_; }
+
+  // Whether we have a frame at the current stage of code generation,
+  // regardless of how stack slots are accessed (via sp or fp).
+  bool has_frame() const { return has_frame_; }
+
   void SetFrameAccessToDefault();
   void SetFrameAccessToFP() { access_frame_with_fp_ = true; }
   void SetFrameAccessToSP() { access_frame_with_fp_ = false; }
 
+  int GetSPToFPSlotCount() const {
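+    // The distance from sp to fp in slots: the frame's total slot count
+    // (or kElidedFrameSlots if there is no frame) minus the fixed slots
+    // above fp, plus the current {sp_delta}.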
+    int frame_slot_count =
+        (has_frame() ? frame()->GetTotalFrameSlotCount() : kElidedFrameSlots) -
+        StandardFrameConstants::kFixedSlotCountAboveFp;
+    return frame_slot_count + sp_delta();
+  }
+  int GetSPToFPOffset() const { return GetSPToFPSlotCount() * kPointerSize; }
+
   // Get the frame offset for a given spill slot. The location depends on the
   // calling convention and the specific frame layout, and may thus be
   // architecture-specific. Negative spill slots indicate arguments on the
@@ -230,6 +232,7 @@
   Frame* const frame_;
   bool access_frame_with_fp_;
   int sp_delta_;
+  bool has_frame_;
 };
 }  // namespace compiler
 }  // namespace internal
diff --git a/src/compiler/gap-resolver.cc b/src/compiler/gap-resolver.cc
index 4107b0f..35e91fa 100644
--- a/src/compiler/gap-resolver.cc
+++ b/src/compiler/gap-resolver.cc
@@ -29,7 +29,7 @@
   auto it =
       std::remove_if(moves->begin(), moves->end(), std::ptr_fun(IsRedundant));
   moves->erase(it, moves->end());
-  for (auto move : *moves) {
+  for (MoveOperands* move : *moves) {
     if (!move->IsEliminated()) PerformMove(moves, move);
   }
 }
@@ -53,7 +53,7 @@
   // Perform a depth-first traversal of the move graph to resolve dependencies.
   // Any unperformed, unpending move with a source the same as this one's
   // destination blocks this one so recursively perform all such moves.
-  for (auto other : *moves) {
+  for (MoveOperands* other : *moves) {
     if (other->Blocks(destination) && !other->IsPending()) {
       // Though PerformMove can change any source operand in the move graph,
       // this call cannot create a blocking move via a swap (this loop does not
@@ -103,7 +103,7 @@
   // Any unperformed (including pending) move with a source of either this
   // move's source or destination needs to have their source changed to
   // reflect the state of affairs after the swap.
-  for (auto other : *moves) {
+  for (MoveOperands* other : *moves) {
     if (other->Blocks(source)) {
       other->set_source(destination);
     } else if (other->Blocks(destination)) {
diff --git a/src/compiler/graph-replay.cc b/src/compiler/graph-replay.cc
index 7f4cc95..cb775e9 100644
--- a/src/compiler/graph-replay.cc
+++ b/src/compiler/graph-replay.cc
@@ -20,7 +20,7 @@
 void GraphReplayPrinter::PrintReplay(Graph* graph) {
   GraphReplayPrinter replay;
   PrintF("  Node* nil = graph()->NewNode(common()->Dead());\n");
-  Zone zone;
+  Zone zone(graph->zone()->allocator());
   AllNodes nodes(&zone, graph);
 
   // Allocate the nodes first.
diff --git a/src/compiler/graph-visualizer.cc b/src/compiler/graph-visualizer.cc
index 0785176..301e390 100644
--- a/src/compiler/graph-visualizer.cc
+++ b/src/compiler/graph-visualizer.cc
@@ -197,7 +197,8 @@
 
 
 std::ostream& operator<<(std::ostream& os, const AsJSON& ad) {
-  Zone tmp_zone;
+  base::AccountingAllocator allocator;
+  Zone tmp_zone(&allocator);
   os << "{\n\"nodes\":[";
   JSONGraphNodeWriter(os, &tmp_zone, &ad.graph, ad.positions).Print();
   os << "],\n\"edges\":[";
@@ -231,8 +232,8 @@
   void PrintInputs(InputIterator* i, int count, const char* prefix);
   void PrintType(Node* node);
 
-  void PrintLiveRange(LiveRange* range, const char* type, int vreg);
-  void PrintLiveRangeChain(TopLevelLiveRange* range, const char* type);
+  void PrintLiveRange(const LiveRange* range, const char* type, int vreg);
+  void PrintLiveRangeChain(const TopLevelLiveRange* range, const char* type);
 
   class Tag final BASE_EMBEDDED {
    public:
@@ -505,31 +506,30 @@
   Tag tag(this, "intervals");
   PrintStringProperty("name", phase);
 
-  for (auto range : data->fixed_double_live_ranges()) {
+  for (const TopLevelLiveRange* range : data->fixed_double_live_ranges()) {
     PrintLiveRangeChain(range, "fixed");
   }
 
-  for (auto range : data->fixed_live_ranges()) {
+  for (const TopLevelLiveRange* range : data->fixed_live_ranges()) {
     PrintLiveRangeChain(range, "fixed");
   }
 
-  for (auto range : data->live_ranges()) {
+  for (const TopLevelLiveRange* range : data->live_ranges()) {
     PrintLiveRangeChain(range, "object");
   }
 }
 
-
-void GraphC1Visualizer::PrintLiveRangeChain(TopLevelLiveRange* range,
+void GraphC1Visualizer::PrintLiveRangeChain(const TopLevelLiveRange* range,
                                             const char* type) {
   if (range == nullptr || range->IsEmpty()) return;
   int vreg = range->vreg();
-  for (LiveRange* child = range; child != nullptr; child = child->next()) {
+  for (const LiveRange* child = range; child != nullptr;
+       child = child->next()) {
     PrintLiveRange(child, type, vreg);
   }
 }
 
-
-void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type,
+void GraphC1Visualizer::PrintLiveRange(const LiveRange* range, const char* type,
                                        int vreg) {
   if (range != nullptr && !range->IsEmpty()) {
     PrintIndent();
@@ -545,7 +545,7 @@
         os_ << " \"" << assigned_reg.ToString() << "\"";
       }
     } else if (range->spilled()) {
-      auto top = range->TopLevel();
+      const TopLevelLiveRange* top = range->TopLevel();
       int index = -1;
       if (top->HasSpillRange()) {
         index = kMaxInt;  // This hasn't been set yet.
@@ -564,8 +564,8 @@
     }
 
     os_ << " " << vreg;
-    for (auto interval = range->first_interval(); interval != nullptr;
-         interval = interval->next()) {
+    for (const UseInterval* interval = range->first_interval();
+         interval != nullptr; interval = interval->next()) {
       os_ << " [" << interval->start().value() << ", "
           << interval->end().value() << "[";
     }
@@ -584,14 +584,16 @@
 
 
 std::ostream& operator<<(std::ostream& os, const AsC1VCompilation& ac) {
-  Zone tmp_zone;
+  base::AccountingAllocator allocator;
+  Zone tmp_zone(&allocator);
   GraphC1Visualizer(os, &tmp_zone).PrintCompilation(ac.info_);
   return os;
 }
 
 
 std::ostream& operator<<(std::ostream& os, const AsC1V& ac) {
-  Zone tmp_zone;
+  base::AccountingAllocator allocator;
+  Zone tmp_zone(&allocator);
   GraphC1Visualizer(os, &tmp_zone)
       .PrintSchedule(ac.phase_, ac.schedule_, ac.positions_, ac.instructions_);
   return os;
@@ -600,7 +602,8 @@
 
 std::ostream& operator<<(std::ostream& os,
                          const AsC1VRegisterAllocationData& ac) {
-  Zone tmp_zone;
+  base::AccountingAllocator allocator;
+  Zone tmp_zone(&allocator);
   GraphC1Visualizer(os, &tmp_zone).PrintLiveRanges(ac.phase_, ac.data_);
   return os;
 }
@@ -610,7 +613,8 @@
 const int kVisited = 2;
 
 std::ostream& operator<<(std::ostream& os, const AsRPO& ar) {
-  Zone local_zone;
+  base::AccountingAllocator allocator;
+  Zone local_zone(&allocator);
   ZoneVector<byte> state(ar.graph.NodeCount(), kUnvisited, &local_zone);
   ZoneStack<Node*> stack(&local_zone);
 
diff --git a/src/compiler/graph.cc b/src/compiler/graph.cc
index ba69617..ff1a17e 100644
--- a/src/compiler/graph.cc
+++ b/src/compiler/graph.cc
@@ -25,7 +25,7 @@
 
 
 void Graph::Decorate(Node* node) {
-  for (auto const decorator : decorators_) {
+  for (GraphDecorator* const decorator : decorators_) {
     decorator->Decorate(node);
   }
 }
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
index 1f61af8..ee05ad0 100644
--- a/src/compiler/ia32/code-generator-ia32.cc
+++ b/src/compiler/ia32/code-generator-ia32.cc
@@ -49,16 +49,13 @@
       return Operand(ToDoubleRegister(op));
     }
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    FrameOffset offset = frame_access_state()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index());
-    return Operand(offset.from_stack_pointer() ? esp : ebp,
-                   offset.offset() + extra);
+    return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
   }
 
-  Operand ToMaterializableOperand(int materializable_offset) {
-    FrameOffset offset = frame_access_state()->GetFrameOffset(
-        FPOffsetToFrameSlot(materializable_offset));
-    return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
+  Operand SlotToOperand(int slot, int extra = 0) {
+    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
+    return Operand(offset.from_stack_pointer() ? esp : ebp,
+                   offset.offset() + extra);
   }
 
   Operand HighOperand(InstructionOperand* op) {
@@ -333,6 +330,39 @@
     __ bind(&done);                                          \
   } while (false)
 
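+// Emits a compare- or test-style instruction in both forms: against a
+// memory operand when an addressing mode is encoded in the opcode, and in
+// the plain register/operand/immediate combinations otherwise.
+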
+#define ASSEMBLE_COMPARE(asm_instr)                                   \
+  do {                                                                \
+    if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
+      size_t index = 0;                                               \
+      Operand left = i.MemoryOperand(&index);                         \
+      if (HasImmediateInput(instr, index)) {                          \
+        __ asm_instr(left, i.InputImmediate(index));                  \
+      } else {                                                        \
+        __ asm_instr(left, i.InputRegister(index));                   \
+      }                                                               \
+    } else {                                                          \
+      if (HasImmediateInput(instr, 1)) {                              \
+        if (instr->InputAt(0)->IsRegister()) {                        \
+          __ asm_instr(i.InputRegister(0), i.InputImmediate(1));      \
+        } else {                                                      \
+          __ asm_instr(i.InputOperand(0), i.InputImmediate(1));       \
+        }                                                             \
+      } else {                                                        \
+        if (instr->InputAt(1)->IsRegister()) {                        \
+          __ asm_instr(i.InputRegister(0), i.InputRegister(1));       \
+        } else {                                                      \
+          __ asm_instr(i.InputRegister(0), i.InputOperand(1));        \
+        }                                                             \
+      }                                                               \
+    }                                                                 \
+  } while (0)
+
+void CodeGenerator::AssembleDeconstructFrame() {
+  __ mov(esp, ebp);
+  __ pop(ebp);
+}
+
+void CodeGenerator::AssembleSetupStackPointer() {}
 
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
@@ -349,18 +379,56 @@
     __ sub(esp, Immediate(-sp_slot_delta * kPointerSize));
     frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
   }
-  if (frame()->needs_frame()) {
+  if (frame_access_state()->has_frame()) {
     __ mov(ebp, MemOperand(ebp, 0));
   }
   frame_access_state()->SetFrameAccessToSP();
 }
 
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+                                                     Register, Register,
+                                                     Register) {
+  // There are not enough temp registers left on ia32 for a call instruction,
+  // so we pick some scratch registers and save/restore them manually here.
+  int scratch_count = 3;
+  Register scratch1 = ebx;
+  Register scratch2 = ecx;
+  Register scratch3 = edx;
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Label done;
+
+  // Check if current frame is an arguments adaptor frame.
+  __ cmp(Operand(ebp, StandardFrameConstants::kContextOffset),
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &done, Label::kNear);
+
+  __ push(scratch1);
+  __ push(scratch2);
+  __ push(scratch3);
+
+  // Load the arguments count from the current arguments adaptor frame (note
+  // that it does not include the receiver).
+  Register caller_args_count_reg = scratch1;
+  __ mov(caller_args_count_reg,
+         Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3, ReturnAddressState::kOnStack, scratch_count);
+  __ pop(scratch3);
+  __ pop(scratch2);
+  __ pop(scratch1);
+
+  __ bind(&done);
+}
 
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   IA32OperandConverter i(this, instr);
-
-  switch (ArchOpcodeField::decode(instr->opcode())) {
+  InstructionCode opcode = instr->opcode();
+  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+  switch (arch_opcode) {
     case kArchCallCodeObject: {
       EnsureSpaceForLazyDeopt();
       if (HasImmediateInput(instr, 0)) {
@@ -375,9 +443,14 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         no_reg, no_reg, no_reg);
+      }
       if (HasImmediateInput(instr, 0)) {
         Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
         __ jmp(code, RelocInfo::CODE_TARGET);
@@ -402,6 +475,7 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallJSFunctionFromJSFunction:
     case kArchTailCallJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
@@ -411,6 +485,10 @@
       }
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         no_reg, no_reg, no_reg);
+      }
       __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
       frame_access_state()->ClearSPDelta();
       break;
@@ -469,7 +547,7 @@
       __ mov(i.OutputRegister(), ebp);
       break;
     case kArchParentFramePointer:
-      if (frame_access_state()->frame()->needs_frame()) {
+      if (frame_access_state()->has_frame()) {
         __ mov(i.OutputRegister(), Operand(ebp, 0));
       } else {
         __ mov(i.OutputRegister(), ebp);
@@ -530,38 +608,22 @@
       }
       break;
     case kIA32Cmp:
-      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
-        size_t index = 0;
-        Operand operand = i.MemoryOperand(&index);
-        if (HasImmediateInput(instr, index)) {
-          __ cmp(operand, i.InputImmediate(index));
-        } else {
-          __ cmp(operand, i.InputRegister(index));
-        }
-      } else {
-        if (HasImmediateInput(instr, 1)) {
-          __ cmp(i.InputOperand(0), i.InputImmediate(1));
-        } else {
-          __ cmp(i.InputRegister(0), i.InputOperand(1));
-        }
-      }
+      ASSEMBLE_COMPARE(cmp);
+      break;
+    case kIA32Cmp16:
+      ASSEMBLE_COMPARE(cmpw);
+      break;
+    case kIA32Cmp8:
+      ASSEMBLE_COMPARE(cmpb);
       break;
     case kIA32Test:
-      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
-        size_t index = 0;
-        Operand operand = i.MemoryOperand(&index);
-        if (HasImmediateInput(instr, index)) {
-          __ test(operand, i.InputImmediate(index));
-        } else {
-          __ test(i.InputRegister(index), operand);
-        }
-      } else {
-        if (HasImmediateInput(instr, 1)) {
-          __ test(i.InputOperand(0), i.InputImmediate(1));
-        } else {
-          __ test(i.InputRegister(0), i.InputOperand(1));
-        }
-      }
+      ASSEMBLE_COMPARE(test);
+      break;
+    case kIA32Test16:
+      ASSEMBLE_COMPARE(test_w);
+      break;
+    case kIA32Test8:
+      ASSEMBLE_COMPARE(test_b);
       break;
     case kIA32Imul:
       if (HasImmediateInput(instr, 1)) {
@@ -632,6 +694,92 @@
         __ sar_cl(i.OutputOperand());
       }
       break;
+    case kIA32AddPair: {
+      // i.OutputRegister(0) == i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
+      bool use_temp = false;
+      if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
+          i.OutputRegister(0).code() == i.InputRegister(3).code()) {
+        // We cannot write to the output register directly, because it would
+        // overwrite an input for adc. We have to use the temp register.
+        use_temp = true;
+        __ Move(i.TempRegister(0), i.InputRegister(0));
+        __ add(i.TempRegister(0), i.InputRegister(2));
+      } else {
+        __ add(i.OutputRegister(0), i.InputRegister(2));
+      }
+      __ adc(i.InputRegister(1), Operand(i.InputRegister(3)));
+      if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
+        __ Move(i.OutputRegister(1), i.InputRegister(1));
+      }
+      if (use_temp) {
+        __ Move(i.OutputRegister(0), i.TempRegister(0));
+      }
+      break;
+    }
+    case kIA32SubPair: {
+      // i.OutputRegister(0) == i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
+      bool use_temp = false;
+      if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
+          i.OutputRegister(0).code() == i.InputRegister(3).code()) {
+        // We cannot write to the output register directly, because it would
+        // overwrite an input for sbb. We have to use the temp register.
+        use_temp = true;
+        __ Move(i.TempRegister(0), i.InputRegister(0));
+        __ sub(i.TempRegister(0), i.InputRegister(2));
+      } else {
+        __ sub(i.OutputRegister(0), i.InputRegister(2));
+      }
+      __ sbb(i.InputRegister(1), Operand(i.InputRegister(3)));
+      if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
+        __ Move(i.OutputRegister(1), i.InputRegister(1));
+      }
+      if (use_temp) {
+        __ Move(i.OutputRegister(0), i.TempRegister(0));
+      }
+      break;
+    }
+    case kIA32MulPair: {
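+      // Sketch of the scheme: the two cross products are summed into the
+      // high output register, then a widening mul of the low words leaves
+      // its high half in edx, which is assumed to be the allocated temp
+      // here, so the final add folds it into the high word.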
+      __ imul(i.OutputRegister(1), i.InputOperand(0));
+      __ mov(i.TempRegister(0), i.InputOperand(1));
+      __ imul(i.TempRegister(0), i.InputOperand(2));
+      __ add(i.OutputRegister(1), i.TempRegister(0));
+      __ mov(i.OutputRegister(0), i.InputOperand(0));
+      // Widening multiply of the low words; the product's low half is left
+      // in eax and its high half in edx.
+      __ mul(i.InputRegister(2));
+      __ add(i.OutputRegister(1), i.TempRegister(0));
+
+      break;
+    }
+    case kIA32ShlPair:
+      if (HasImmediateInput(instr, 2)) {
+        __ ShlPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
+      } else {
+        // Shift has been loaded into CL by the register allocator.
+        __ ShlPair_cl(i.InputRegister(1), i.InputRegister(0));
+      }
+      break;
+    case kIA32ShrPair:
+      if (HasImmediateInput(instr, 2)) {
+        __ ShrPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
+      } else {
+        // Shift has been loaded into CL by the register allocator.
+        __ ShrPair_cl(i.InputRegister(1), i.InputRegister(0));
+      }
+      break;
+    case kIA32SarPair:
+      if (HasImmediateInput(instr, 2)) {
+        __ SarPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
+      } else {
+        // Shift has been loaded into CL by the register allocator.
+        __ SarPair_cl(i.InputRegister(1), i.InputRegister(0));
+      }
+      break;
     case kIA32Ror:
       if (HasImmediateInput(instr, 1)) {
         __ ror(i.OutputOperand(), i.InputInt5(1));
@@ -1476,21 +1624,16 @@
 
 void CodeGenerator::AssemblePrologue() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-  if (descriptor->IsCFunctionCall()) {
-    // Assemble a prologue similar to the cdecl calling convention.
-    __ push(ebp);
-    __ mov(ebp, esp);
-  } else if (descriptor->IsJSFunctionCall()) {
-    // TODO(turbofan): this prologue is redundant with OSR, but still needed for
-    // code aging.
-    __ Prologue(this->info()->GeneratePreagedPrologue());
-  } else if (frame()->needs_frame()) {
-    __ StubPrologue();
-  } else {
-    frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
+  if (frame_access_state()->has_frame()) {
+    if (descriptor->IsCFunctionCall()) {
+      __ push(ebp);
+      __ mov(ebp, esp);
+    } else if (descriptor->IsJSFunctionCall()) {
+      __ Prologue(this->info()->GeneratePreagedPrologue());
+    } else {
+      __ StubPrologue(info()->GetOutputStackFrameType());
+    }
   }
-  frame_access_state()->SetFrameAccessToDefault();
-
   int stack_shrink_slots = frame()->GetSpillSlotCount();
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
@@ -1536,17 +1679,15 @@
   }
 
   if (descriptor->IsCFunctionCall()) {
-    __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
-    __ pop(ebp);       // Pop caller's frame pointer.
-  } else if (frame()->needs_frame()) {
+    AssembleDeconstructFrame();
+  } else if (frame_access_state()->has_frame()) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ jmp(&return_label_);
       return;
     } else {
       __ bind(&return_label_);
-      __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
-      __ pop(ebp);       // Pop caller's frame pointer.
+      AssembleDeconstructFrame();
     }
   }
   size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
@@ -1581,15 +1722,15 @@
     Constant src_constant = g.ToConstant(source);
     if (src_constant.type() == Constant::kHeapObject) {
       Handle<HeapObject> src = src_constant.ToHeapObject();
-      int offset;
-      if (IsMaterializableFromFrame(src, &offset)) {
+      int slot;
+      if (IsMaterializableFromFrame(src, &slot)) {
         if (destination->IsRegister()) {
           Register dst = g.ToRegister(destination);
-          __ mov(dst, g.ToMaterializableOperand(offset));
+          __ mov(dst, g.SlotToOperand(slot));
         } else {
           DCHECK(destination->IsStackSlot());
           Operand dst = g.ToOperand(destination);
-          __ push(g.ToMaterializableOperand(offset));
+          __ push(g.SlotToOperand(slot));
           __ pop(dst);
         }
       } else if (destination->IsRegister()) {
diff --git a/src/compiler/ia32/instruction-codes-ia32.h b/src/compiler/ia32/instruction-codes-ia32.h
index 61fd035..3cf2094 100644
--- a/src/compiler/ia32/instruction-codes-ia32.h
+++ b/src/compiler/ia32/instruction-codes-ia32.h
@@ -15,7 +15,11 @@
   V(IA32Add)                       \
   V(IA32And)                       \
   V(IA32Cmp)                       \
+  V(IA32Cmp16)                     \
+  V(IA32Cmp8)                      \
   V(IA32Test)                      \
+  V(IA32Test16)                    \
+  V(IA32Test8)                     \
   V(IA32Or)                        \
   V(IA32Xor)                       \
   V(IA32Sub)                       \
@@ -29,6 +33,12 @@
   V(IA32Shl)                       \
   V(IA32Shr)                       \
   V(IA32Sar)                       \
+  V(IA32AddPair)                   \
+  V(IA32SubPair)                   \
+  V(IA32MulPair)                   \
+  V(IA32ShlPair)                   \
+  V(IA32ShrPair)                   \
+  V(IA32SarPair)                   \
   V(IA32Ror)                       \
   V(IA32Lzcnt)                     \
   V(IA32Tzcnt)                     \
@@ -105,7 +115,6 @@
   V(IA32Poke)                      \
   V(IA32StackCheck)
 
-
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
 // are encoded into the InstructionCode of the instruction and tell the
diff --git a/src/compiler/ia32/instruction-scheduler-ia32.cc b/src/compiler/ia32/instruction-scheduler-ia32.cc
index 093bc22..803fdf6 100644
--- a/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -17,7 +17,11 @@
     case kIA32Add:
     case kIA32And:
     case kIA32Cmp:
+    case kIA32Cmp16:
+    case kIA32Cmp8:
     case kIA32Test:
+    case kIA32Test16:
+    case kIA32Test8:
     case kIA32Or:
     case kIA32Xor:
     case kIA32Sub:
@@ -31,6 +35,12 @@
     case kIA32Shl:
     case kIA32Shr:
     case kIA32Sar:
+    case kIA32AddPair:
+    case kIA32SubPair:
+    case kIA32MulPair:
+    case kIA32ShlPair:
+    case kIA32ShrPair:
+    case kIA32SarPair:
     case kIA32Ror:
     case kIA32Lzcnt:
     case kIA32Tzcnt:
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
index f649ba9..5c4acce 100644
--- a/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/src/compiler/ia32/instruction-selector-ia32.cc
@@ -27,6 +27,30 @@
     return DefineAsRegister(node);
   }
 
+  bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input) {
+    if (input->opcode() != IrOpcode::kLoad ||
+        !selector()->CanCover(node, input)) {
+      return false;
+    }
+    MachineRepresentation rep =
+        LoadRepresentationOf(input->op()).representation();
+    switch (opcode) {
+      case kIA32Cmp:
+      case kIA32Test:
+        return rep == MachineRepresentation::kWord32 ||
+               rep == MachineRepresentation::kTagged;
+      case kIA32Cmp16:
+      case kIA32Test16:
+        return rep == MachineRepresentation::kWord16;
+      case kIA32Cmp8:
+      case kIA32Test8:
+        return rep == MachineRepresentation::kWord8;
+      default:
+        break;
+    }
+    return false;
+  }
+
   bool CanBeImmediate(Node* node) {
     switch (node->opcode()) {
       case IrOpcode::kInt32Constant:
@@ -232,9 +256,7 @@
       inputs[input_count++] = g.UseUniqueRegister(index);
       addressing_mode = kMode_MR1;
     }
-    inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
-                                ? g.UseRegister(value)
-                                : g.UseUniqueRegister(value);
+    inputs[input_count++] = g.UseUniqueRegister(value);
     RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
     switch (write_barrier_kind) {
       case kNoWriteBarrier:
@@ -404,10 +426,11 @@
   }
 }
 
+namespace {
 
 // Shared routine for multiple binary operations.
-static void VisitBinop(InstructionSelector* selector, Node* node,
-                       InstructionCode opcode, FlagsContinuation* cont) {
+void VisitBinop(InstructionSelector* selector, Node* node,
+                InstructionCode opcode, FlagsContinuation* cont) {
   IA32OperandGenerator g(selector);
   Int32BinopMatcher m(node);
   Node* left = m.left().node();
@@ -456,18 +479,24 @@
   DCHECK_GE(arraysize(inputs), input_count);
   DCHECK_GE(arraysize(outputs), output_count);
 
-  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
-                 inputs);
+  opcode = cont->Encode(opcode);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
 }
 
 
 // Shared routine for multiple binary operations.
-static void VisitBinop(InstructionSelector* selector, Node* node,
-                       InstructionCode opcode) {
+void VisitBinop(InstructionSelector* selector, Node* node,
+                InstructionCode opcode) {
   FlagsContinuation cont;
   VisitBinop(selector, node, opcode, &cont);
 }
 
+}  // namespace
 
 void InstructionSelector::VisitWord32And(Node* node) {
   VisitBinop(this, node, kIA32And);
@@ -579,6 +608,93 @@
   VisitShift(this, node, kIA32Sar);
 }
 
+void InstructionSelector::VisitInt32PairAdd(Node* node) {
+  IA32OperandGenerator g(this);
+
+  // We use UseUniqueRegister here to avoid register sharing with the temp
+  // register.
+  InstructionOperand inputs[] = {
+      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+  InstructionOperand outputs[] = {
+      g.DefineSameAsFirst(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  InstructionOperand temps[] = {g.TempRegister()};
+
+  Emit(kIA32AddPair, 2, outputs, 4, inputs, 1, temps);
+}
+
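// A standalone sketch (plain C++; names are illustrative) of the pair
// addition kIA32AddPair performs: add the low words first, then fold the
// carry into the sum of the high words, i.e. the add/adc idiom on ia32.
#include <cstdint>

void AddPairReference(uint32_t a_low, uint32_t a_high, uint32_t b_low,
                      uint32_t b_high, uint32_t* out_low, uint32_t* out_high) {
  *out_low = a_low + b_low;                     // add
  uint32_t carry = (*out_low < a_low) ? 1 : 0;  // unsigned wrap means carry
  *out_high = a_high + b_high + carry;          // adc
}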
+void InstructionSelector::VisitInt32PairSub(Node* node) {
+  IA32OperandGenerator g(this);
+
+  // We use UseUniqueRegister here to avoid register sharing with the temp
+  // register.
+  InstructionOperand inputs[] = {
+      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+  InstructionOperand outputs[] = {
+      g.DefineSameAsFirst(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  InstructionOperand temps[] = {g.TempRegister()};
+
+  Emit(kIA32SubPair, 2, outputs, 4, inputs, 1, temps);
+}
+
+void InstructionSelector::VisitInt32PairMul(Node* node) {
+  IA32OperandGenerator g(this);
+
+  // InputAt(3) explicitly shares ecx with OutputRegister(1) to save one
+  // register and one mov instruction.
+  InstructionOperand inputs[] = {
+      g.UseUnique(node->InputAt(0)), g.UseUnique(node->InputAt(1)),
+      g.UseUniqueRegister(node->InputAt(2)), g.UseFixed(node->InputAt(3), ecx)};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsFixed(node, eax),
+      g.DefineAsFixed(NodeProperties::FindProjection(node, 1), ecx)};
+
+  InstructionOperand temps[] = {g.TempRegister(edx)};
+
+  Emit(kIA32MulPair, 2, outputs, 4, inputs, 1, temps);
+}
+
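// Likewise, a standalone sketch (plain C++; names are illustrative) of the
// 64-bit multiply kIA32MulPair computes. The product is taken modulo 2^64,
// so one widening 32x32 multiply plus two truncating cross products suffice:
#include <cstdint>

void MulPairReference(uint32_t a_low, uint32_t a_high, uint32_t b_low,
                      uint32_t b_high, uint32_t* out_low, uint32_t* out_high) {
  uint64_t low_product = static_cast<uint64_t>(a_low) * b_low;  // widening mul
  *out_low = static_cast<uint32_t>(low_product);
  *out_high = static_cast<uint32_t>(low_product >> 32) +
              a_low * b_high + a_high * b_low;  // cross terms wrap mod 2^32
}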
+void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
+                          Node* node) {
+  IA32OperandGenerator g(selector);
+
+  Node* shift = node->InputAt(2);
+  InstructionOperand shift_operand;
+  if (g.CanBeImmediate(shift)) {
+    shift_operand = g.UseImmediate(shift);
+  } else {
+    shift_operand = g.UseFixed(shift, ecx);
+  }
+  InstructionOperand inputs[] = {g.UseFixed(node->InputAt(0), eax),
+                                 g.UseFixed(node->InputAt(1), edx),
+                                 shift_operand};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsFixed(node, eax),
+      g.DefineAsFixed(NodeProperties::FindProjection(node, 1), edx)};
+
+  selector->Emit(opcode, 2, outputs, 3, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShl(Node* node) {
+  VisitWord32PairShift(this, kIA32ShlPair, node);
+}
+
+void InstructionSelector::VisitWord32PairShr(Node* node) {
+  VisitWord32PairShift(this, kIA32ShrPair, node);
+}
+
+void InstructionSelector::VisitWord32PairSar(Node* node) {
+  VisitWord32PairShift(this, kIA32SarPair, node);
+}
 
 void InstructionSelector::VisitWord32Ror(Node* node) {
   VisitShift(this, node, kIA32Ror);
@@ -746,6 +862,9 @@
   VisitRO(this, node, kSSEFloat64ToUint32);
 }
 
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+  VisitRO(this, node, kSSEFloat64ToUint32);
+}
 
 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
   VisitRO(this, node, kSSEFloat64ToFloat32);
@@ -987,6 +1106,7 @@
 
 bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
 
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 0; }
 
 namespace {
 
@@ -1008,6 +1128,9 @@
     inputs[input_count++] = g.Label(cont->true_block());
     inputs[input_count++] = g.Label(cont->false_block());
     selector->Emit(opcode, 0, nullptr, input_count, inputs);
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     InstructionOperand output = g.DefineAsRegister(cont->result());
@@ -1015,33 +1138,21 @@
   }
 }
 
-// Determines if {input} of {node} can be replaced by a memory operand.
-bool CanUseMemoryOperand(InstructionSelector* selector, InstructionCode opcode,
-                         Node* node, Node* input) {
-  if (input->opcode() != IrOpcode::kLoad || !selector->CanCover(node, input)) {
-    return false;
-  }
-  MachineRepresentation load_representation =
-      LoadRepresentationOf(input->op()).representation();
-  if (load_representation == MachineRepresentation::kWord32 ||
-      load_representation == MachineRepresentation::kTagged) {
-    return opcode == kIA32Cmp || opcode == kIA32Test;
-  }
-  return false;
-}
-
 // Shared routine for multiple compare operations.
 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                   InstructionOperand left, InstructionOperand right,
                   FlagsContinuation* cont) {
   IA32OperandGenerator g(selector);
+  opcode = cont->Encode(opcode);
   if (cont->IsBranch()) {
-    selector->Emit(cont->Encode(opcode), g.NoOutput(), left, right,
+    selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
-    selector->Emit(cont->Encode(opcode), g.DefineAsByteRegister(cont->result()),
-                   left, right);
+    selector->Emit(opcode, g.DefineAsByteRegister(cont->result()), left, right);
   }
 }
 
@@ -1057,6 +1168,36 @@
   VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
 }
 
+// Tries to match the size of the given opcode to that of the operands, if
+// possible.
+InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
+                                    Node* right) {
+  if (opcode != kIA32Cmp && opcode != kIA32Test) {
+    return opcode;
+  }
+  // Currently, if one of the two operands is not a Load, we don't know what its
+  // machine representation is, so we bail out.
+  // TODO(epertoso): we can probably get some size information out of immediates
+  // and phi nodes.
+  if (left->opcode() != IrOpcode::kLoad || right->opcode() != IrOpcode::kLoad) {
+    return opcode;
+  }
+  // If the load representations don't match, both operands will be
+  // zero/sign-extended to 32 bits.
+  LoadRepresentation left_representation = LoadRepresentationOf(left->op());
+  if (left_representation != LoadRepresentationOf(right->op())) {
+    return opcode;
+  }
+  switch (left_representation.representation()) {
+    case MachineRepresentation::kBit:
+    case MachineRepresentation::kWord8:
+      return opcode == kIA32Cmp ? kIA32Cmp8 : kIA32Test8;
+    case MachineRepresentation::kWord16:
+      return opcode == kIA32Cmp ? kIA32Cmp16 : kIA32Test16;
+    default:
+      return opcode;
+  }
+}
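// Why the matching-representation check above matters: a standalone example
// (plain C++, hypothetical values). The same byte 0xFF sign-extends to -1
// but zero-extends to 255, so the full-width compare and a narrowed 8-bit
// compare of the raw bytes can disagree unless both operands were extended
// the same way.
#include <cassert>
#include <cstdint>

int main() {
  uint8_t byte = 0xFF;
  int32_t sign_extended = static_cast<int8_t>(byte);  // -1 after sign extend
  int32_t zero_extended = byte;                       // 255 after zero extend
  assert(sign_extended != zero_extended);             // what kIA32Cmp sees
  assert(static_cast<uint8_t>(sign_extended) ==
         static_cast<uint8_t>(zero_extended));  // what kIA32Cmp8 would see
  return 0;
}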
 
 // Shared routine for multiple float32 compare operations (inputs commuted).
 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
@@ -1082,15 +1223,22 @@
   Node* left = node->InputAt(0);
   Node* right = node->InputAt(1);
 
-  // If one of the two inputs is an immediate, make sure it's on the right.
-  if (!g.CanBeImmediate(right) && g.CanBeImmediate(left)) {
+  InstructionCode narrowed_opcode = TryNarrowOpcodeSize(opcode, left, right);
+
+  // If one of the two inputs is an immediate, make sure it's on the right, or
+  // if one of the two inputs is a memory operand, make sure it's on the left.
+  if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
+      (g.CanBeMemoryOperand(narrowed_opcode, node, right) &&
+       !g.CanBeMemoryOperand(narrowed_opcode, node, left))) {
     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
     std::swap(left, right);
   }
 
   // Match immediates on right side of comparison.
   if (g.CanBeImmediate(right)) {
-    if (CanUseMemoryOperand(selector, opcode, node, left)) {
+    if (g.CanBeMemoryOperand(opcode, node, left)) {
+      // TODO(epertoso): we should use `narrowed_opcode' here once we match
+      // immediates too.
       return VisitCompareWithMemoryOperand(selector, opcode, left,
                                            g.UseImmediate(right), cont);
     }
@@ -1098,15 +1246,21 @@
                         cont);
   }
 
+  // Match memory operands on left side of comparison.
+  if (g.CanBeMemoryOperand(narrowed_opcode, node, left)) {
+    bool needs_byte_register =
+        narrowed_opcode == kIA32Test8 || narrowed_opcode == kIA32Cmp8;
+    return VisitCompareWithMemoryOperand(
+        selector, narrowed_opcode, left,
+        needs_byte_register ? g.UseByteRegister(right) : g.UseRegister(right),
+        cont);
+  }
+
   if (g.CanBeBetterLeftOperand(right)) {
     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
     std::swap(left, right);
   }
 
-  if (CanUseMemoryOperand(selector, opcode, node, left)) {
-    return VisitCompareWithMemoryOperand(selector, opcode, left,
-                                         g.UseRegister(right), cont);
-  }
   return VisitCompare(selector, opcode, left, right, cont,
                       node->op()->HasProperty(Operator::kCommutative));
 }
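// Note on the swaps above: whenever the operands change sides, the
// continuation's condition must be commuted as well. A standalone sketch
// (plain C++; the enum and function are illustrative and cover only the
// ordering conditions) of what such a commute does:
enum IllustrativeCondition {
  kLessThan, kGreaterThan, kLessThanOrEqual, kGreaterThanOrEqual
};

IllustrativeCondition Commute(IllustrativeCondition cond) {
  switch (cond) {
    case kLessThan:           return kGreaterThan;         // a < b  <=> b > a
    case kGreaterThan:        return kLessThan;
    case kLessThanOrEqual:    return kGreaterThanOrEqual;  // a <= b <=> b >= a
    case kGreaterThanOrEqual: return kLessThanOrEqual;
  }
  return cond;  // unreachable; silences missing-return warnings
}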
@@ -1126,6 +1280,9 @@
       if (cont->IsBranch()) {
         selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
                        g.Label(cont->false_block()));
+      } else if (cont->IsDeoptimize()) {
+        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr,
+                                 cont->frame_state());
       } else {
         DCHECK(cont->IsSet());
         selector->Emit(opcode, g.DefineAsRegister(cont->result()));
@@ -1228,13 +1385,23 @@
 
 }  // namespace
 
-
 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                       BasicBlock* fbranch) {
   FlagsContinuation cont(kNotEqual, tbranch, fbranch);
   VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
 }
 
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
 
 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   IA32OperandGenerator g(this);
@@ -1265,7 +1432,7 @@
 
 
 void InstructionSelector::VisitWord32Equal(Node* const node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int32BinopMatcher m(node);
   if (m.right().Is(0)) {
     return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
@@ -1275,32 +1442,34 @@
 
 
 void InstructionSelector::VisitInt32LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kIA32Add, &cont);
   }
   FlagsContinuation cont;
@@ -1310,7 +1479,7 @@
 
 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kIA32Sub, &cont);
   }
   FlagsContinuation cont;
@@ -1319,37 +1488,41 @@
 
 
 void InstructionSelector::VisitFloat32Equal(Node* node) {
-  FlagsContinuation cont(kUnorderedEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThan, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64Equal(Node* node) {
-  FlagsContinuation cont(kUnorderedEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThan, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
diff --git a/src/compiler/instruction-codes.h b/src/compiler/instruction-codes.h
index d2144cf..b005083 100644
--- a/src/compiler/instruction-codes.h
+++ b/src/compiler/instruction-codes.h
@@ -21,6 +21,8 @@
 #include "src/compiler/x64/instruction-codes-x64.h"
 #elif V8_TARGET_ARCH_PPC
 #include "src/compiler/ppc/instruction-codes-ppc.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/compiler/s390/instruction-codes-s390.h"
 #elif V8_TARGET_ARCH_X87
 #include "src/compiler/x87/instruction-codes-x87.h"
 #else
@@ -39,40 +41,42 @@
 
 // Target-specific opcodes that specify which assembly sequence to emit.
 // Most opcodes specify a single instruction.
-#define COMMON_ARCH_OPCODE_LIST(V) \
-  V(ArchCallCodeObject)            \
-  V(ArchTailCallCodeObject)        \
-  V(ArchCallJSFunction)            \
-  V(ArchTailCallJSFunction)        \
-  V(ArchPrepareCallCFunction)      \
-  V(ArchCallCFunction)             \
-  V(ArchPrepareTailCall)           \
-  V(ArchJmp)                       \
-  V(ArchLookupSwitch)              \
-  V(ArchTableSwitch)               \
-  V(ArchNop)                       \
-  V(ArchThrowTerminator)           \
-  V(ArchDeoptimize)                \
-  V(ArchRet)                       \
-  V(ArchStackPointer)              \
-  V(ArchFramePointer)              \
-  V(ArchParentFramePointer)        \
-  V(ArchTruncateDoubleToI)         \
-  V(ArchStoreWithWriteBarrier)     \
-  V(CheckedLoadInt8)               \
-  V(CheckedLoadUint8)              \
-  V(CheckedLoadInt16)              \
-  V(CheckedLoadUint16)             \
-  V(CheckedLoadWord32)             \
-  V(CheckedLoadWord64)             \
-  V(CheckedLoadFloat32)            \
-  V(CheckedLoadFloat64)            \
-  V(CheckedStoreWord8)             \
-  V(CheckedStoreWord16)            \
-  V(CheckedStoreWord32)            \
-  V(CheckedStoreWord64)            \
-  V(CheckedStoreFloat32)           \
-  V(CheckedStoreFloat64)           \
+#define COMMON_ARCH_OPCODE_LIST(V)        \
+  V(ArchCallCodeObject)                   \
+  V(ArchTailCallCodeObjectFromJSFunction) \
+  V(ArchTailCallCodeObject)               \
+  V(ArchCallJSFunction)                   \
+  V(ArchTailCallJSFunctionFromJSFunction) \
+  V(ArchTailCallJSFunction)               \
+  V(ArchPrepareCallCFunction)             \
+  V(ArchCallCFunction)                    \
+  V(ArchPrepareTailCall)                  \
+  V(ArchJmp)                              \
+  V(ArchLookupSwitch)                     \
+  V(ArchTableSwitch)                      \
+  V(ArchNop)                              \
+  V(ArchThrowTerminator)                  \
+  V(ArchDeoptimize)                       \
+  V(ArchRet)                              \
+  V(ArchStackPointer)                     \
+  V(ArchFramePointer)                     \
+  V(ArchParentFramePointer)               \
+  V(ArchTruncateDoubleToI)                \
+  V(ArchStoreWithWriteBarrier)            \
+  V(CheckedLoadInt8)                      \
+  V(CheckedLoadUint8)                     \
+  V(CheckedLoadInt16)                     \
+  V(CheckedLoadUint16)                    \
+  V(CheckedLoadWord32)                    \
+  V(CheckedLoadWord64)                    \
+  V(CheckedLoadFloat32)                   \
+  V(CheckedLoadFloat64)                   \
+  V(CheckedStoreWord8)                    \
+  V(CheckedStoreWord16)                   \
+  V(CheckedStoreWord32)                   \
+  V(CheckedStoreWord64)                   \
+  V(CheckedStoreFloat32)                  \
+  V(CheckedStoreFloat64)                  \
   V(ArchStackSlot)
 
 #define ARCH_OPCODE_LIST(V)  \
@@ -110,7 +114,12 @@
 std::ostream& operator<<(std::ostream& os, const AddressingMode& am);
 
 // The mode of the flags continuation (see below).
-enum FlagsMode { kFlags_none = 0, kFlags_branch = 1, kFlags_set = 2 };
+enum FlagsMode {
+  kFlags_none = 0,
+  kFlags_branch = 1,
+  kFlags_deoptimize = 2,
+  kFlags_set = 3
+};
 
 std::ostream& operator<<(std::ostream& os, const FlagsMode& fm);
 
diff --git a/src/compiler/instruction-scheduler.cc b/src/compiler/instruction-scheduler.cc
index adbfd5d..b612cd1 100644
--- a/src/compiler/instruction-scheduler.cc
+++ b/src/compiler/instruction-scheduler.cc
@@ -115,7 +115,7 @@
   if (IsBlockTerminator(instr)) {
     // Make sure that basic block terminators are not moved by adding them
     // as successor of every instruction.
-    for (auto node : graph_) {
+    for (ScheduleGraphNode* node : graph_) {
       node->AddSuccessor(new_node);
     }
   } else if (IsFixedRegisterParameter(instr)) {
@@ -134,7 +134,7 @@
       if (last_side_effect_instr_ != nullptr) {
         last_side_effect_instr_->AddSuccessor(new_node);
       }
-      for (auto load : pending_loads_) {
+      for (ScheduleGraphNode* load : pending_loads_) {
         load->AddSuccessor(new_node);
       }
       pending_loads_.clear();
@@ -149,7 +149,7 @@
     }
 
     // Look for operand dependencies.
-    for (auto node : graph_) {
+    for (ScheduleGraphNode* node : graph_) {
       if (HasOperandDependency(node->instruction(), instr)) {
         node->AddSuccessor(new_node);
       }
@@ -168,7 +168,7 @@
   ComputeTotalLatencies();
 
   // Add nodes which don't have dependencies to the ready list.
-  for (auto node : graph_) {
+  for (ScheduleGraphNode* node : graph_) {
     if (!node->HasUnscheduledPredecessor()) {
       ready_list.AddNode(node);
     }
@@ -177,12 +177,12 @@
   // Go through the ready list and schedule the instructions.
   int cycle = 0;
   while (!ready_list.IsEmpty()) {
-    auto candidate = ready_list.PopBestCandidate(cycle);
+    ScheduleGraphNode* candidate = ready_list.PopBestCandidate(cycle);
 
     if (candidate != nullptr) {
       sequence()->AddInstruction(candidate->instruction());
 
-      for (auto successor : candidate->successors()) {
+      for (ScheduleGraphNode* successor : candidate->successors()) {
         successor->DropUnscheduledPredecessor();
         successor->set_start_cycle(
             std::max(successor->start_cycle(),
@@ -220,7 +220,9 @@
     case kArchCallJSFunction:
       return kHasSideEffect;
 
+    case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject:
+    case kArchTailCallJSFunctionFromJSFunction:
     case kArchTailCallJSFunction:
       return kHasSideEffect | kIsBlockTerminator;
 
@@ -296,10 +298,10 @@
 
 
 void InstructionScheduler::ComputeTotalLatencies() {
-  for (auto node : base::Reversed(graph_)) {
+  for (ScheduleGraphNode* node : base::Reversed(graph_)) {
     int max_latency = 0;
 
-    for (auto successor : node->successors()) {
+    for (ScheduleGraphNode* successor : node->successors()) {
       DCHECK(successor->total_latency() != -1);
       if (successor->total_latency() > max_latency) {
         max_latency = successor->total_latency();
diff --git a/src/compiler/instruction-selector-impl.h b/src/compiler/instruction-selector-impl.h
index 5cca888..e750aed 100644
--- a/src/compiler/instruction-selector-impl.h
+++ b/src/compiler/instruction-selector-impl.h
@@ -303,22 +303,32 @@
     DCHECK_NOT_NULL(false_block);
   }
 
-  // Creates a new flags continuation from the given condition and result node.
-  FlagsContinuation(FlagsCondition condition, Node* result)
-      : mode_(kFlags_set), condition_(condition), result_(result) {
-    DCHECK_NOT_NULL(result);
+  // Creates a new flags continuation for an eager deoptimization exit.
+  static FlagsContinuation ForDeoptimize(FlagsCondition condition,
+                                         Node* frame_state) {
+    return FlagsContinuation(kFlags_deoptimize, condition, frame_state);
+  }
+
+  // Creates a new flags continuation for a boolean value.
+  static FlagsContinuation ForSet(FlagsCondition condition, Node* result) {
+    return FlagsContinuation(kFlags_set, condition, result);
   }
 
   bool IsNone() const { return mode_ == kFlags_none; }
   bool IsBranch() const { return mode_ == kFlags_branch; }
+  bool IsDeoptimize() const { return mode_ == kFlags_deoptimize; }
   bool IsSet() const { return mode_ == kFlags_set; }
   FlagsCondition condition() const {
     DCHECK(!IsNone());
     return condition_;
   }
+  Node* frame_state() const {
+    DCHECK(IsDeoptimize());
+    return frame_state_or_result_;
+  }
   Node* result() const {
     DCHECK(IsSet());
-    return result_;
+    return frame_state_or_result_;
   }
   BasicBlock* true_block() const {
     DCHECK(IsBranch());
@@ -355,11 +365,20 @@
   }
 
  private:
-  FlagsMode mode_;
+  FlagsContinuation(FlagsMode mode, FlagsCondition condition,
+                    Node* frame_state_or_result)
+      : mode_(mode),
+        condition_(condition),
+        frame_state_or_result_(frame_state_or_result) {
+    DCHECK_NOT_NULL(frame_state_or_result);
+  }
+
+  FlagsMode const mode_;
   FlagsCondition condition_;
-  Node* result_;             // Only valid if mode_ == kFlags_set.
-  BasicBlock* true_block_;   // Only valid if mode_ == kFlags_branch.
-  BasicBlock* false_block_;  // Only valid if mode_ == kFlags_branch.
+  Node* frame_state_or_result_;  // Only valid if mode_ == kFlags_deoptimize
+                                 // or mode_ == kFlags_set.
+  BasicBlock* true_block_;       // Only valid if mode_ == kFlags_branch.
+  BasicBlock* false_block_;      // Only valid if mode_ == kFlags_branch.
 };
 
 }  // namespace compiler
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
index 0f27e50..d172ed1 100644
--- a/src/compiler/instruction-selector.cc
+++ b/src/compiler/instruction-selector.cc
@@ -82,6 +82,9 @@
     }
     EndBlock(RpoNumber::FromInt(block->rpo_number()));
   }
+#if DEBUG
+  sequence()->ValidateSSA();
+#endif
 }
 
 
@@ -218,10 +221,25 @@
 
 
 bool InstructionSelector::CanCover(Node* user, Node* node) const {
-  return node->OwnedBy(user) &&
-         schedule()->block(node) == schedule()->block(user) &&
-         (node->op()->HasProperty(Operator::kPure) ||
-          GetEffectLevel(node) == GetEffectLevel(user));
+  // 1. Both {user} and {node} must be in the same basic block.
+  if (schedule()->block(node) != schedule()->block(user)) {
+    return false;
+  }
+  // 2. Pure {node}s must be owned by the {user}.
+  if (node->op()->HasProperty(Operator::kPure)) {
+    return node->OwnedBy(user);
+  }
+  // 3. Impure {node}s must match the effect level of {user}.
+  if (GetEffectLevel(node) != GetEffectLevel(user)) {
+    return false;
+  }
+  // 4. All of {node}'s value edges must point to {user}.
+  for (Edge const edge : node->use_edges()) {
+    if (edge.from() != user && NodeProperties::IsValueEdge(edge)) {
+      return false;
+    }
+  }
+  return true;
 }
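// A toy standalone model (plain C++; not the V8 Node API) of the new rule 4:
// an impure node may still be covered when all of its *value* edges lead to
// the covering user, even if effect or control edges lead elsewhere.
#include <vector>

struct UseEdge {
  int from;            // id of the using node
  bool is_value_edge;  // false for effect/control edges
};

bool OnlyValueEdgesPointTo(const std::vector<UseEdge>& use_edges, int user) {
  for (const UseEdge& edge : use_edges) {
    if (edge.is_value_edge && edge.from != user) return false;
  }
  return true;
}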
 
 int InstructionSelector::GetVirtualRegister(const Node* node) {
@@ -597,15 +615,17 @@
         call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
 
     // If it was a syntactic tail call we need to drop the current frame and
-    // an arguments adaptor frame on top of it (if the latter is present).
+    // all the frames on top of it that are either an arguments adaptor frame
+    // or a tail caller frame.
     if (buffer->descriptor->SupportsTailCalls()) {
       frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
       buffer->frame_state_descriptor =
           buffer->frame_state_descriptor->outer_state();
-
-      if (buffer->frame_state_descriptor != nullptr &&
-          buffer->frame_state_descriptor->type() ==
-              FrameStateType::kArgumentsAdaptor) {
+      while (buffer->frame_state_descriptor != nullptr &&
+             (buffer->frame_state_descriptor->type() ==
+                  FrameStateType::kArgumentsAdaptor ||
+              buffer->frame_state_descriptor->type() ==
+                  FrameStateType::kTailCallerFunction)) {
         frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
         buffer->frame_state_descriptor =
             buffer->frame_state_descriptor->outer_state();
@@ -873,6 +893,10 @@
     }
     case IrOpcode::kCall:
       return VisitCall(node);
+    case IrOpcode::kDeoptimizeIf:
+      return VisitDeoptimizeIf(node);
+    case IrOpcode::kDeoptimizeUnless:
+      return VisitDeoptimizeUnless(node);
     case IrOpcode::kFrameState:
     case IrOpcode::kStateValues:
     case IrOpcode::kObjectState:
@@ -998,6 +1022,8 @@
       return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
     case IrOpcode::kChangeFloat64ToUint32:
       return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
+    case IrOpcode::kTruncateFloat64ToUint32:
+      return MarkAsWord32(node), VisitTruncateFloat64ToUint32(node);
     case IrOpcode::kTruncateFloat32ToInt32:
       return MarkAsWord32(node), VisitTruncateFloat32ToInt32(node);
     case IrOpcode::kTruncateFloat32ToUint32:
@@ -1128,6 +1154,30 @@
     }
     case IrOpcode::kCheckedStore:
       return VisitCheckedStore(node);
+    case IrOpcode::kInt32PairAdd:
+      MarkAsWord32(NodeProperties::FindProjection(node, 0));
+      MarkAsWord32(NodeProperties::FindProjection(node, 1));
+      return VisitInt32PairAdd(node);
+    case IrOpcode::kInt32PairSub:
+      MarkAsWord32(NodeProperties::FindProjection(node, 0));
+      MarkAsWord32(NodeProperties::FindProjection(node, 1));
+      return VisitInt32PairSub(node);
+    case IrOpcode::kInt32PairMul:
+      MarkAsWord32(NodeProperties::FindProjection(node, 0));
+      MarkAsWord32(NodeProperties::FindProjection(node, 1));
+      return VisitInt32PairMul(node);
+    case IrOpcode::kWord32PairShl:
+      MarkAsWord32(NodeProperties::FindProjection(node, 0));
+      MarkAsWord32(NodeProperties::FindProjection(node, 1));
+      return VisitWord32PairShl(node);
+    case IrOpcode::kWord32PairShr:
+      MarkAsWord32(NodeProperties::FindProjection(node, 0));
+      MarkAsWord32(NodeProperties::FindProjection(node, 1));
+      return VisitWord32PairShr(node);
+    case IrOpcode::kWord32PairSar:
+      MarkAsWord32(NodeProperties::FindProjection(node, 0));
+      MarkAsWord32(NodeProperties::FindProjection(node, 1));
+      return VisitWord32PairSar(node);
     default:
       V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
                node->opcode(), node->op()->mnemonic(), node->id());
@@ -1144,7 +1194,6 @@
 
 void InstructionSelector::VisitLoadFramePointer(Node* node) {
   OperandGenerator g(this);
-  frame_->MarkNeedsFrame();
   Emit(kArchFramePointer, g.DefineAsRegister(node));
 }
 
@@ -1351,6 +1400,20 @@
 
 #endif  // V8_TARGET_ARCH_32_BIT
 
+// 64-bit targets do not implement the following instructions.
+#if V8_TARGET_ARCH_64_BIT
+void InstructionSelector::VisitInt32PairAdd(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32PairSub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32PairMul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitWord32PairShl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
+#endif  // V8_TARGET_ARCH_64_BIT
 
 void InstructionSelector::VisitFinishRegion(Node* node) {
   OperandGenerator g(this);
@@ -1429,6 +1492,12 @@
     case IrOpcode::kTryTruncateFloat64ToInt64:
     case IrOpcode::kTryTruncateFloat32ToUint64:
     case IrOpcode::kTryTruncateFloat64ToUint64:
+    case IrOpcode::kInt32PairAdd:
+    case IrOpcode::kInt32PairSub:
+    case IrOpcode::kInt32PairMul:
+    case IrOpcode::kWord32PairShl:
+    case IrOpcode::kWord32PairShr:
+    case IrOpcode::kWord32PairSar:
       if (ProjectionIndexOf(node->op()) == 0u) {
         Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
       } else {
@@ -1484,13 +1553,15 @@
     buffer.instruction_args.push_back(g.Label(handler));
   }
 
-  // (arm64 only) caller uses JSSP but callee might destroy it.
-  if (descriptor->UseNativeStack() &&
-      !linkage()->GetIncomingDescriptor()->UseNativeStack()) {
-    flags |= CallDescriptor::kRestoreJSSP;
+  bool from_native_stack = linkage()->GetIncomingDescriptor()->UseNativeStack();
+  bool to_native_stack = descriptor->UseNativeStack();
+  if (from_native_stack != to_native_stack) {
+    // (arm64 only) Mismatch in the use of stack pointers. One or the other
+    // has to be restored manually by the code generator.
+    flags |= to_native_stack ? CallDescriptor::kRestoreJSSP
+                             : CallDescriptor::kRestoreCSP;
   }
 
-
   // Select the appropriate opcode based on the call type.
   InstructionCode opcode = kArchNop;
   switch (descriptor->kind()) {
@@ -1539,16 +1610,35 @@
 
     // Select the appropriate opcode based on the call type.
     InstructionCode opcode;
-    switch (descriptor->kind()) {
-      case CallDescriptor::kCallCodeObject:
-        opcode = kArchTailCallCodeObject;
-        break;
-      case CallDescriptor::kCallJSFunction:
-        opcode = kArchTailCallJSFunction;
-        break;
-      default:
-        UNREACHABLE();
-        return;
+    InstructionOperandVector temps(zone());
+    if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
+      switch (descriptor->kind()) {
+        case CallDescriptor::kCallCodeObject:
+          opcode = kArchTailCallCodeObjectFromJSFunction;
+          break;
+        case CallDescriptor::kCallJSFunction:
+          opcode = kArchTailCallJSFunctionFromJSFunction;
+          break;
+        default:
+          UNREACHABLE();
+          return;
+      }
+      int temps_count = GetTempsCountForTailCallFromJSFunction();
+      for (int i = 0; i < temps_count; i++) {
+        temps.push_back(g.TempRegister());
+      }
+    } else {
+      switch (descriptor->kind()) {
+        case CallDescriptor::kCallCodeObject:
+          opcode = kArchTailCallCodeObject;
+          break;
+        case CallDescriptor::kCallJSFunction:
+          opcode = kArchTailCallJSFunction;
+          break;
+        default:
+          UNREACHABLE();
+          return;
+      }
     }
     opcode |= MiscField::encode(descriptor->flags());
 
@@ -1559,7 +1649,8 @@
 
     // Emit the tailcall instruction.
     Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
-         &buffer.instruction_args.front());
+         &buffer.instruction_args.front(), temps.size(),
+         temps.empty() ? nullptr : &temps.front());
   } else {
     FrameStateDescriptor* frame_state_descriptor =
         descriptor->NeedsFrameState()
@@ -1627,25 +1718,41 @@
   }
 }
 
+Instruction* InstructionSelector::EmitDeoptimize(InstructionCode opcode,
+                                                 InstructionOperand output,
+                                                 InstructionOperand a,
+                                                 InstructionOperand b,
+                                                 Node* frame_state) {
+  size_t output_count = output.IsInvalid() ? 0 : 1;
+  InstructionOperand inputs[] = {a, b};
+  size_t input_count = arraysize(inputs);
+  return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
+                        frame_state);
+}
+
+Instruction* InstructionSelector::EmitDeoptimize(
+    InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
+    size_t input_count, InstructionOperand* inputs, Node* frame_state) {
+  OperandGenerator g(this);
+  FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
+  InstructionOperandVector args(instruction_zone());
+  args.reserve(input_count + 1 + descriptor->GetTotalSize());
+  for (size_t i = 0; i < input_count; ++i) {
+    args.push_back(inputs[i]);
+  }
+  opcode |= MiscField::encode(static_cast<int>(input_count));
+  InstructionSequence::StateId const state_id =
+      sequence()->AddFrameStateDescriptor(descriptor);
+  args.push_back(g.TempImmediate(state_id.ToInt()));
+  StateObjectDeduplicator deduplicator(instruction_zone());
+  AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
+                                  &args, FrameStateInputKind::kAny,
+                                  instruction_zone());
+  return Emit(opcode, output_count, outputs, args.size(), &args.front(), 0,
+              nullptr);
+}
 
 void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind, Node* value) {
-  OperandGenerator g(this);
-
-  FrameStateDescriptor* desc = GetFrameStateDescriptor(value);
-
-  InstructionOperandVector args(instruction_zone());
-  args.reserve(desc->GetTotalSize() + 1);  // Include deopt id.
-
-  InstructionSequence::StateId state_id =
-      sequence()->AddFrameStateDescriptor(desc);
-  args.push_back(g.TempImmediate(state_id.ToInt()));
-
-  StateObjectDeduplicator deduplicator(instruction_zone());
-
-  AddInputsToFrameStateDescriptor(desc, value, &g, &deduplicator, &args,
-                                  FrameStateInputKind::kAny,
-                                  instruction_zone());
-
   InstructionCode opcode = kArchDeoptimize;
   switch (kind) {
     case DeoptimizeKind::kEager:
@@ -1655,7 +1762,7 @@
       opcode |= MiscField::encode(Deoptimizer::SOFT);
       break;
   }
-  Emit(opcode, 0, nullptr, args.size(), &args.front(), 0, nullptr);
+  EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, value);
 }
 
 
diff --git a/src/compiler/instruction-selector.h b/src/compiler/instruction-selector.h
index a01cab4..9c1cd4c 100644
--- a/src/compiler/instruction-selector.h
+++ b/src/compiler/instruction-selector.h
@@ -100,6 +100,17 @@
   Instruction* Emit(Instruction* instr);
 
   // ===========================================================================
+  // ===== Architecture-independent deoptimization exit emission methods. =====
+  // ===========================================================================
+
+  Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
+                              InstructionOperand a, InstructionOperand b,
+                              Node* frame_state);
+  Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
+                              InstructionOperand* outputs, size_t input_count,
+                              InstructionOperand* inputs, Node* frame_state);
+
+  // ===========================================================================
   // ============== Architecture-independent CPU feature methods. ==============
   // ===========================================================================
 
@@ -213,6 +224,7 @@
   void InitializeCallBuffer(Node* call, CallBuffer* buffer,
                             CallBufferFlags flags, int stack_param_delta = 0);
   bool IsTailCallAddressImmediate();
+  int GetTempsCountForTailCallFromJSFunction();
 
   FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
 
@@ -243,6 +255,8 @@
   void VisitProjection(Node* node);
   void VisitConstant(Node* node);
   void VisitCall(Node* call, BasicBlock* handler = nullptr);
+  void VisitDeoptimizeIf(Node* node);
+  void VisitDeoptimizeUnless(Node* node);
   void VisitTailCall(Node* call);
   void VisitGoto(BasicBlock* target);
   void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
diff --git a/src/compiler/instruction.cc b/src/compiler/instruction.cc
index d4ec6bc..c757557 100644
--- a/src/compiler/instruction.cc
+++ b/src/compiler/instruction.cc
@@ -114,7 +114,7 @@
       return os << "[constant:" << ConstantOperand::cast(op).virtual_register()
                 << "]";
     case InstructionOperand::IMMEDIATE: {
-      auto imm = ImmediateOperand::cast(op);
+      ImmediateOperand imm = ImmediateOperand::cast(op);
       switch (imm.type()) {
         case ImmediateOperand::INLINE:
           return os << "#" << imm.inline_value();
@@ -124,7 +124,7 @@
     }
     case InstructionOperand::EXPLICIT:
     case InstructionOperand::ALLOCATED: {
-      auto allocated = LocationOperand::cast(op);
+      LocationOperand allocated = LocationOperand::cast(op);
       if (op.IsStackSlot()) {
         os << "[stack:" << LocationOperand::cast(op).index();
       } else if (op.IsDoubleStackSlot()) {
@@ -214,7 +214,7 @@
 
 
 bool ParallelMove::IsRedundant() const {
-  for (auto move : *this) {
+  for (MoveOperands* move : *this) {
     if (!move->IsRedundant()) return false;
   }
   return true;
@@ -224,7 +224,7 @@
 MoveOperands* ParallelMove::PrepareInsertAfter(MoveOperands* move) const {
   MoveOperands* replacement = nullptr;
   MoveOperands* to_eliminate = nullptr;
-  for (auto curr : *this) {
+  for (MoveOperands* curr : *this) {
     if (curr->IsEliminated()) continue;
     if (curr->destination().EqualsCanonicalized(move->source())) {
       DCHECK(!replacement);
@@ -321,7 +321,7 @@
                          const PrintableParallelMove& printable) {
   const ParallelMove& pm = *printable.parallel_move_;
   bool first = true;
-  for (auto move : pm) {
+  for (MoveOperands* move : pm) {
     if (move->IsEliminated()) continue;
     if (!first) os << " ";
     first = false;
@@ -346,7 +346,7 @@
   PrintableInstructionOperand poi = {
       RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
       InstructionOperand()};
-  for (auto& op : pm.reference_operands_) {
+  for (const InstructionOperand& op : pm.reference_operands_) {
     if (!first) {
       os << ";";
     } else {
@@ -393,6 +393,8 @@
       return os;
     case kFlags_branch:
       return os << "branch";
+    case kFlags_deoptimize:
+      return os << "deoptimize";
     case kFlags_set:
       return os << "set";
   }
@@ -618,7 +620,7 @@
   return blocks;
 }
 
-void InstructionSequence::Validate() {
+void InstructionSequence::ValidateEdgeSplitForm() {
   // Validate blocks are in edge-split form: no block with multiple successors
   // has an edge to a block (== a successor) with more than one predecessors.
   for (const InstructionBlock* block : instruction_blocks()) {
@@ -633,14 +635,40 @@
   }
 }
 
+void InstructionSequence::ValidateDeferredBlockExitPaths() {
+  // A deferred block with more than one successor must have all its successors
+  // deferred.
+  for (const InstructionBlock* block : instruction_blocks()) {
+    if (!block->IsDeferred() || block->SuccessorCount() <= 1) continue;
+    for (RpoNumber successor_id : block->successors()) {
+      CHECK(InstructionBlockAt(successor_id)->IsDeferred());
+    }
+  }
+}
+
+void InstructionSequence::ValidateSSA() {
+  // TODO(mtrofin): We could use a local zone here instead.
+  BitVector definitions(VirtualRegisterCount(), zone());
+  for (const Instruction* instruction : *this) {
+    for (size_t i = 0; i < instruction->OutputCount(); ++i) {
+      const InstructionOperand* output = instruction->OutputAt(i);
+      int vreg = (output->IsConstant())
+                     ? ConstantOperand::cast(output)->virtual_register()
+                     : UnallocatedOperand::cast(output)->virtual_register();
+      CHECK(!definitions.Contains(vreg));
+      definitions.Add(vreg);
+    }
+  }
+}
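// The invariant ValidateSSA checks, as a standalone sketch (plain C++;
// the types are illustrative): every virtual register is defined by at most
// one instruction output, verifiable in one pass with one bit per register.
#include <cassert>
#include <vector>

void CheckSingleDefinition(
    const std::vector<std::vector<int>>& outputs_per_instruction,
    int virtual_register_count) {
  std::vector<bool> defined(virtual_register_count, false);
  for (const std::vector<int>& outputs : outputs_per_instruction) {
    for (int vreg : outputs) {
      assert(!defined[vreg]);  // a second definition would break SSA form
      defined[vreg] = true;
    }
  }
}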
+
 void InstructionSequence::ComputeAssemblyOrder(InstructionBlocks* blocks) {
   int ao = 0;
-  for (auto const block : *blocks) {
+  for (InstructionBlock* const block : *blocks) {
     if (!block->IsDeferred()) {
       block->set_ao_number(RpoNumber::FromInt(ao++));
     }
   }
-  for (auto const block : *blocks) {
+  for (InstructionBlock* const block : *blocks) {
     if (block->IsDeferred()) {
       block->set_ao_number(RpoNumber::FromInt(ao++));
     }
@@ -665,10 +693,6 @@
       representations_(zone()),
       deoptimization_entries_(zone()) {
   block_starts_.reserve(instruction_blocks_->size());
-
-#if DEBUG
-  Validate();
-#endif
 }
 
 
@@ -730,7 +754,7 @@
   if (end == block_starts_.end() || *end > instruction_index) --end;
   DCHECK(*end <= instruction_index);
   size_t index = std::distance(begin, end);
-  auto block = instruction_blocks_->at(index);
+  InstructionBlock* block = instruction_blocks_->at(index);
   DCHECK(block->code_start() <= instruction_index &&
          instruction_index < block->code_end());
   return block;
@@ -861,15 +885,15 @@
   os << "  instructions: [" << block->code_start() << ", " << block->code_end()
      << ")\n  predecessors:";
 
-  for (auto pred : block->predecessors()) {
+  for (RpoNumber pred : block->predecessors()) {
     os << " B" << pred.ToInt();
   }
   os << "\n";
 
-  for (auto phi : block->phis()) {
+  for (const PhiInstruction* phi : block->phis()) {
     PrintableInstructionOperand printable_op = {config, phi->output()};
     os << "     phi: " << printable_op << " =";
-    for (auto input : phi->operands()) {
+    for (int input : phi->operands()) {
       os << " v" << input;
     }
     os << "\n";
@@ -886,7 +910,7 @@
     os << "   " << buf.start() << ": " << printable_instr << "\n";
   }
 
-  for (auto succ : block->successors()) {
+  for (RpoNumber succ : block->successors()) {
     os << " B" << succ.ToInt();
   }
   os << "\n";
diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h
index 9c978ce..a1fe494 100644
--- a/src/compiler/instruction.h
+++ b/src/compiler/instruction.h
@@ -773,6 +773,9 @@
     DCHECK(output_count == 0 || outputs != nullptr);
     DCHECK(input_count == 0 || inputs != nullptr);
     DCHECK(temp_count == 0 || temps != nullptr);
+    // TODO(jarin/mstarzinger): Handle this gracefully. See crbug.com/582702.
+    CHECK(InputCountField::is_valid(input_count));
+
     size_t total_extra_ops = output_count + input_count + temp_count;
     if (total_extra_ops != 0) total_extra_ops--;
     int size = static_cast<int>(
@@ -812,6 +815,23 @@
            OutputCount() == 0 && TempCount() == 0;
   }
 
+  bool IsDeoptimizeCall() const {
+    return arch_opcode() == ArchOpcode::kArchDeoptimize ||
+           FlagsModeField::decode(opcode()) == kFlags_deoptimize;
+  }
+
+  bool IsJump() const { return arch_opcode() == ArchOpcode::kArchJmp; }
+  bool IsRet() const { return arch_opcode() == ArchOpcode::kArchRet; }
+  bool IsTailCall() const {
+    return arch_opcode() == ArchOpcode::kArchTailCallCodeObject ||
+           arch_opcode() == ArchOpcode::kArchTailCallCodeObjectFromJSFunction ||
+           arch_opcode() == ArchOpcode::kArchTailCallJSFunction ||
+           arch_opcode() == ArchOpcode::kArchTailCallJSFunctionFromJSFunction;
+  }
+  bool IsThrow() const {
+    return arch_opcode() == ArchOpcode::kArchThrowTerminator;
+  }
+
   enum GapPosition {
     START,
     END,
@@ -1354,7 +1374,9 @@
   void PrintBlock(const RegisterConfiguration* config, int block_id) const;
   void PrintBlock(int block_id) const;
 
-  void Validate();
+  void ValidateEdgeSplitForm();
+  void ValidateDeferredBlockExitPaths();
+  void ValidateSSA();
 
  private:
   friend std::ostream& operator<<(std::ostream& os,
diff --git a/src/compiler/int64-lowering.cc b/src/compiler/int64-lowering.cc
index ff31abe..8824a03 100644
--- a/src/compiler/int64-lowering.cc
+++ b/src/compiler/int64-lowering.cc
@@ -4,9 +4,11 @@
 
 #include "src/compiler/int64-lowering.h"
 #include "src/compiler/common-operator.h"
+#include "src/compiler/diamond.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/machine-operator.h"
+#include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
 
 #include "src/compiler/node.h"
@@ -24,36 +26,44 @@
       graph_(graph),
       machine_(machine),
       common_(common),
-      state_(graph, 4),
+      state_(graph, 3),
       stack_(zone),
-      replacements_(zone->NewArray<Replacement>(graph->NodeCount())),
-      signature_(signature) {
+      replacements_(nullptr),
+      signature_(signature),
+      placeholder_(graph->NewNode(common->Parameter(-2, "placeholder"),
+                                  graph->start())) {
+  replacements_ = zone->NewArray<Replacement>(graph->NodeCount());
   memset(replacements_, 0, sizeof(Replacement) * graph->NodeCount());
 }
 
 void Int64Lowering::LowerGraph() {
-  if (4 != kPointerSize) {
+  if (!machine()->Is32()) {
     return;
   }
-  stack_.push(graph()->end());
+  stack_.push_back({graph()->end(), 0});
   state_.Set(graph()->end(), State::kOnStack);
 
   while (!stack_.empty()) {
-    Node* top = stack_.top();
-    if (state_.Get(top) == State::kInputsPushed) {
-      stack_.pop();
-      state_.Set(top, State::kVisited);
-      // All inputs of top have already been reduced, now reduce top.
-      LowerNode(top);
+    NodeState& top = stack_.back();
+    if (top.input_index == top.node->InputCount()) {
+      // All inputs of top have already been lowered, now lower top.
+      stack_.pop_back();
+      state_.Set(top.node, State::kVisited);
+      LowerNode(top.node);
     } else {
-      // Push all children onto the stack.
-      for (Node* input : top->inputs()) {
-        if (state_.Get(input) == State::kUnvisited) {
-          stack_.push(input);
-          state_.Set(input, State::kOnStack);
+      // Push the next input onto the stack.
+      Node* input = top.node->InputAt(top.input_index++);
+      if (state_.Get(input) == State::kUnvisited) {
+        if (input->opcode() == IrOpcode::kPhi) {
+          // To break cycles with phi nodes we push phis to the front of the
+          // stack, so that they are processed only after all other nodes.
+          PreparePhiReplacement(input);
+          stack_.push_front({input, 0});
+        } else {
+          stack_.push_back({input, 0});
         }
+        state_.Set(input, State::kOnStack);
       }
-      state_.Set(top, State::kInputsPushed);
     }
   }
 }
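// A toy standalone model (plain C++; not the V8 API) of the traversal above:
// a node is lowered only after all of its inputs, and phis are deferred by
// pushing them onto the front of the deque while ordinary nodes go on the
// back, which breaks input cycles through phis.
#include <deque>
#include <vector>

struct WorkEntry {
  int node;
  size_t input_index;
};

void TraverseForLowering(const std::vector<std::vector<int>>& inputs,
                         const std::vector<bool>& is_phi, int end_node,
                         std::vector<int>* lowering_order) {
  std::vector<bool> seen(inputs.size(), false);
  std::deque<WorkEntry> stack;
  stack.push_back({end_node, 0});
  seen[end_node] = true;
  while (!stack.empty()) {
    WorkEntry& top = stack.back();
    if (top.input_index == inputs[top.node].size()) {
      lowering_order->push_back(top.node);  // all inputs done: lower the node
      stack.pop_back();
    } else {
      int input = inputs[top.node][top.input_index++];
      if (!seen[input]) {
        seen[input] = true;
        if (is_phi[input]) {
          stack.push_front({input, 0});  // defer phis to break cycles
        } else {
          stack.push_back({input, 0});
        }
      }
    }
  }
}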
@@ -122,6 +132,8 @@
         }
         NodeProperties::ChangeOp(node, load_op);
         ReplaceNode(node, node, high_node);
+      } else {
+        DefaultLowering(node);
       }
       break;
     }
@@ -164,30 +176,11 @@
         node->ReplaceInput(2, GetReplacementLow(value));
         NodeProperties::ChangeOp(node, store_op);
         ReplaceNode(node, node, high_node);
+      } else {
+        DefaultLowering(node);
       }
       break;
     }
-    case IrOpcode::kWord64And: {
-      DCHECK(node->InputCount() == 2);
-      Node* left = node->InputAt(0);
-      Node* right = node->InputAt(1);
-
-      Node* low_node =
-          graph()->NewNode(machine()->Word32And(), GetReplacementLow(left),
-                           GetReplacementLow(right));
-      Node* high_node =
-          graph()->NewNode(machine()->Word32And(), GetReplacementHigh(left),
-                           GetReplacementHigh(right));
-      ReplaceNode(node, low_node, high_node);
-      break;
-    }
-    case IrOpcode::kTruncateInt64ToInt32: {
-      DCHECK(node->InputCount() == 1);
-      Node* input = node->InputAt(0);
-      ReplaceNode(node, GetReplacementLow(input), nullptr);
-      node->NullAllInputs();
-      break;
-    }
     case IrOpcode::kStart: {
       int parameter_count = GetParameterCountAfterLowering(signature());
       // Only exchange the node if the parameter count actually changed.
@@ -248,8 +241,517 @@
       }
       break;
     }
+    case IrOpcode::kWord64And: {
+      DCHECK(node->InputCount() == 2);
+      Node* left = node->InputAt(0);
+      Node* right = node->InputAt(1);
+
+      Node* low_node =
+          graph()->NewNode(machine()->Word32And(), GetReplacementLow(left),
+                           GetReplacementLow(right));
+      Node* high_node =
+          graph()->NewNode(machine()->Word32And(), GetReplacementHigh(left),
+                           GetReplacementHigh(right));
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+    case IrOpcode::kTruncateInt64ToInt32: {
+      DCHECK(node->InputCount() == 1);
+      Node* input = node->InputAt(0);
+      ReplaceNode(node, GetReplacementLow(input), nullptr);
+      node->NullAllInputs();
+      break;
+    }
+    // TODO(ahaas): I added a list of missing instructions here to make
+    // merging easier when I do them one by one.
+    // kExprI64Add:
+    case IrOpcode::kInt64Add: {
+      DCHECK(node->InputCount() == 2);
+
+      Node* right = node->InputAt(1);
+      node->ReplaceInput(1, GetReplacementLow(right));
+      node->AppendInput(zone(), GetReplacementHigh(right));
+
+      Node* left = node->InputAt(0);
+      node->ReplaceInput(0, GetReplacementLow(left));
+      node->InsertInput(zone(), 1, GetReplacementHigh(left));
+
+      NodeProperties::ChangeOp(node, machine()->Int32PairAdd());
+      // We access the additional return values through projections.
+      Node* low_node = graph()->NewNode(common()->Projection(0), node);
+      Node* high_node = graph()->NewNode(common()->Projection(1), node);
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+
+    // kExprI64Sub:
+    case IrOpcode::kInt64Sub: {
+      DCHECK(node->InputCount() == 2);
+
+      Node* right = node->InputAt(1);
+      node->ReplaceInput(1, GetReplacementLow(right));
+      node->AppendInput(zone(), GetReplacementHigh(right));
+
+      Node* left = node->InputAt(0);
+      node->ReplaceInput(0, GetReplacementLow(left));
+      node->InsertInput(zone(), 1, GetReplacementHigh(left));
+
+      NodeProperties::ChangeOp(node, machine()->Int32PairSub());
+      // We access the additional return values through projections.
+      Node* low_node = graph()->NewNode(common()->Projection(0), node);
+      Node* high_node = graph()->NewNode(common()->Projection(1), node);
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+    // kExprI64Mul:
+    case IrOpcode::kInt64Mul: {
+      DCHECK(node->InputCount() == 2);
+
+      Node* right = node->InputAt(1);
+      node->ReplaceInput(1, GetReplacementLow(right));
+      node->AppendInput(zone(), GetReplacementHigh(right));
+
+      Node* left = node->InputAt(0);
+      node->ReplaceInput(0, GetReplacementLow(left));
+      node->InsertInput(zone(), 1, GetReplacementHigh(left));
+
+      NodeProperties::ChangeOp(node, machine()->Int32PairMul());
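+      // Int32PairMul produces the low 64 bits of the full product, combining
+      // the 32x32->64 partial products of the word halves schoolbook-style.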
+      // We access the additional return values through projections.
+      Node* low_node = graph()->NewNode(common()->Projection(0), node);
+      Node* high_node = graph()->NewNode(common()->Projection(1), node);
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+    // kExprI64DivS:
+    // kExprI64DivU:
+    // kExprI64RemS:
+    // kExprI64RemU:
+    // kExprI64Ior:
+    case IrOpcode::kWord64Or: {
+      DCHECK(node->InputCount() == 2);
+      Node* left = node->InputAt(0);
+      Node* right = node->InputAt(1);
+
+      Node* low_node =
+          graph()->NewNode(machine()->Word32Or(), GetReplacementLow(left),
+                           GetReplacementLow(right));
+      Node* high_node =
+          graph()->NewNode(machine()->Word32Or(), GetReplacementHigh(left),
+                           GetReplacementHigh(right));
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+
+    // kExprI64Xor:
+    case IrOpcode::kWord64Xor: {
+      DCHECK(node->InputCount() == 2);
+      Node* left = node->InputAt(0);
+      Node* right = node->InputAt(1);
+
+      Node* low_node =
+          graph()->NewNode(machine()->Word32Xor(), GetReplacementLow(left),
+                           GetReplacementLow(right));
+      Node* high_node =
+          graph()->NewNode(machine()->Word32Xor(), GetReplacementHigh(left),
+                           GetReplacementHigh(right));
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+    // kExprI64Shl:
+    case IrOpcode::kWord64Shl: {
+      // TODO(turbofan): if the shift count >= 32, then we can set the low word
+      // of the output to 0 and just calculate the high word.
+      DCHECK(node->InputCount() == 2);
+      Node* shift = node->InputAt(1);
+      if (HasReplacementLow(shift)) {
+        // We do not have to care about the high word replacement, because
+        // the shift can only be between 0 and 63 anyway.
+        node->ReplaceInput(1, GetReplacementLow(shift));
+      }
+
+      Node* value = node->InputAt(0);
+      node->ReplaceInput(0, GetReplacementLow(value));
+      node->InsertInput(zone(), 1, GetReplacementHigh(value));
+
+      NodeProperties::ChangeOp(node, machine()->Word32PairShl());
+      // We access the additional return values through projections.
+      Node* low_node = graph()->NewNode(common()->Projection(0), node);
+      Node* high_node = graph()->NewNode(common()->Projection(1), node);
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+    // kExprI64ShrU:
+    case IrOpcode::kWord64Shr: {
+      // TODO(turbofan): if the shift count >= 32, then we can set the high
+      // word of the output to 0 and just calculate the low word.
+      DCHECK(node->InputCount() == 2);
+      Node* shift = node->InputAt(1);
+      if (HasReplacementLow(shift)) {
+        // We do not have to care about the high word replacement, because
+        // the shift can only be between 0 and 63 anyway.
+        node->ReplaceInput(1, GetReplacementLow(shift));
+      }
+
+      Node* value = node->InputAt(0);
+      node->ReplaceInput(0, GetReplacementLow(value));
+      node->InsertInput(zone(), 1, GetReplacementHigh(value));
+
+      NodeProperties::ChangeOp(node, machine()->Word32PairShr());
+      // We access the additional return values through projections.
+      Node* low_node = graph()->NewNode(common()->Projection(0), node);
+      Node* high_node = graph()->NewNode(common()->Projection(1), node);
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+    // kExprI64ShrS:
+    case IrOpcode::kWord64Sar: {
+      // TODO(turbofan): if the shift count >= 32, then we can fill the high
+      // word of the output with the sign and just calculate the low word.
+      DCHECK(node->InputCount() == 2);
+      Node* shift = node->InputAt(1);
+      if (HasReplacementLow(shift)) {
+        // We do not have to care about the high word replacement, because
+        // the shift can only be between 0 and 63 anyway.
+        node->ReplaceInput(1, GetReplacementLow(shift));
+      }
+
+      Node* value = node->InputAt(0);
+      node->ReplaceInput(0, GetReplacementLow(value));
+      node->InsertInput(zone(), 1, GetReplacementHigh(value));
+
+      NodeProperties::ChangeOp(node, machine()->Word32PairSar());
+      // We access the additional return values through projections.
+      Node* low_node = graph()->NewNode(common()->Projection(0), node);
+      Node* high_node = graph()->NewNode(common()->Projection(1), node);
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+    // kExprI64Eq:
+    case IrOpcode::kWord64Equal: {
+      DCHECK(node->InputCount() == 2);
+      Node* left = node->InputAt(0);
+      Node* right = node->InputAt(1);
+
+      // TODO(wasm): Use explicit comparisons and && here?
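+      // The XOR of each word pair is zero iff the words match, so OR-ing the
+      // two differences and comparing against 0 checks both words at once.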
+      Node* replacement = graph()->NewNode(
+          machine()->Word32Equal(),
+          graph()->NewNode(
+              machine()->Word32Or(),
+              graph()->NewNode(machine()->Word32Xor(), GetReplacementLow(left),
+                               GetReplacementLow(right)),
+              graph()->NewNode(machine()->Word32Xor(), GetReplacementHigh(left),
+                               GetReplacementHigh(right))),
+          graph()->NewNode(common()->Int32Constant(0)));
+
+      ReplaceNode(node, replacement, nullptr);
+      break;
+    }
+    // kExprI64LtS:
+    case IrOpcode::kInt64LessThan: {
+      LowerComparison(node, machine()->Int32LessThan(),
+                      machine()->Uint32LessThan());
+      break;
+    }
+    case IrOpcode::kInt64LessThanOrEqual: {
+      LowerComparison(node, machine()->Int32LessThan(),
+                      machine()->Uint32LessThanOrEqual());
+      break;
+    }
+    case IrOpcode::kUint64LessThan: {
+      LowerComparison(node, machine()->Uint32LessThan(),
+                      machine()->Uint32LessThan());
+      break;
+    }
+    case IrOpcode::kUint64LessThanOrEqual: {
+      LowerComparison(node, machine()->Uint32LessThan(),
+                      machine()->Uint32LessThanOrEqual());
+      break;
+    }
+
+    // kExprI64SConvertI32:
+    case IrOpcode::kChangeInt32ToInt64: {
+      DCHECK(node->InputCount() == 1);
+      Node* input = node->InputAt(0);
+      if (HasReplacementLow(input)) {
+        input = GetReplacementLow(input);
+      }
+      // We use SAR to preserve the sign in the high word.
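+      // E.g. int32 -5 (0xFFFFFFFB) >> 31 yields 0xFFFFFFFF, giving the pair
+      // (low = 0xFFFFFFFB, high = 0xFFFFFFFF).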
+      ReplaceNode(
+          node, input,
+          graph()->NewNode(machine()->Word32Sar(), input,
+                           graph()->NewNode(common()->Int32Constant(31))));
+      node->NullAllInputs();
+      break;
+    }
+    // kExprI64UConvertI32:
+    case IrOpcode::kChangeUint32ToUint64: {
+      DCHECK(node->InputCount() == 1);
+      Node* input = node->InputAt(0);
+      if (HasReplacementLow(input)) {
+        input = GetReplacementLow(input);
+      }
+      ReplaceNode(node, input, graph()->NewNode(common()->Int32Constant(0)));
+      node->NullAllInputs();
+      break;
+    }
+    // kExprF64ReinterpretI64:
+    case IrOpcode::kBitcastInt64ToFloat64: {
+      DCHECK(node->InputCount() == 1);
+      Node* input = node->InputAt(0);
+      Node* stack_slot = graph()->NewNode(
+          machine()->StackSlot(MachineRepresentation::kWord64));
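+      // Spill both words into the slot and reload the same bits as a
+      // float64. The byte offsets 0 (low) and 4 (high) assume the
+      // little-endian layout of the 32-bit targets this lowering runs on.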
+
+      Node* store_high_word = graph()->NewNode(
+          machine()->Store(
+              StoreRepresentation(MachineRepresentation::kWord32,
+                                  WriteBarrierKind::kNoWriteBarrier)),
+          stack_slot, graph()->NewNode(common()->Int32Constant(4)),
+          GetReplacementHigh(input), graph()->start(), graph()->start());
+
+      Node* store_low_word = graph()->NewNode(
+          machine()->Store(
+              StoreRepresentation(MachineRepresentation::kWord32,
+                                  WriteBarrierKind::kNoWriteBarrier)),
+          stack_slot, graph()->NewNode(common()->Int32Constant(0)),
+          GetReplacementLow(input), store_high_word, graph()->start());
+
+      Node* load =
+          graph()->NewNode(machine()->Load(MachineType::Float64()), stack_slot,
+                           graph()->NewNode(common()->Int32Constant(0)),
+                           store_low_word, graph()->start());
+
+      ReplaceNode(node, load, nullptr);
+      break;
+    }
+    // kExprI64ReinterpretF64:
+    case IrOpcode::kBitcastFloat64ToInt64: {
+      DCHECK(node->InputCount() == 1);
+      Node* input = node->InputAt(0);
+      if (HasReplacementLow(input)) {
+        input = GetReplacementLow(input);
+      }
+      Node* stack_slot = graph()->NewNode(
+          machine()->StackSlot(MachineRepresentation::kWord64));
+      Node* store = graph()->NewNode(
+          machine()->Store(
+              StoreRepresentation(MachineRepresentation::kFloat64,
+                                  WriteBarrierKind::kNoWriteBarrier)),
+          stack_slot, graph()->NewNode(common()->Int32Constant(0)), input,
+          graph()->start(), graph()->start());
+
+      Node* high_node =
+          graph()->NewNode(machine()->Load(MachineType::Int32()), stack_slot,
+                           graph()->NewNode(common()->Int32Constant(4)), store,
+                           graph()->start());
+
+      Node* low_node =
+          graph()->NewNode(machine()->Load(MachineType::Int32()), stack_slot,
+                           graph()->NewNode(common()->Int32Constant(0)), store,
+                           graph()->start());
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+    case IrOpcode::kWord64Ror: {
+      DCHECK(node->InputCount() == 2);
+      Node* input = node->InputAt(0);
+      Node* shift = HasReplacementLow(node->InputAt(1))
+                        ? GetReplacementLow(node->InputAt(1))
+                        : node->InputAt(1);
+      Int32Matcher m(shift);
+      if (m.HasValue()) {
+        // Precondition: 0 <= shift < 64.
+        int32_t shift_value = m.Value() & 0x3f;
+        if (shift_value == 0) {
+          ReplaceNode(node, GetReplacementLow(input),
+                      GetReplacementHigh(input));
+        } else if (shift_value == 32) {
+          ReplaceNode(node, GetReplacementHigh(input),
+                      GetReplacementLow(input));
+        } else {
+          Node* low_input;
+          Node* high_input;
+          if (shift_value < 32) {
+            low_input = GetReplacementLow(input);
+            high_input = GetReplacementHigh(input);
+          } else {
+            low_input = GetReplacementHigh(input);
+            high_input = GetReplacementLow(input);
+          }
+          int32_t masked_shift_value = shift_value & 0x1f;
+          Node* masked_shift =
+              graph()->NewNode(common()->Int32Constant(masked_shift_value));
+          Node* inv_shift = graph()->NewNode(
+              common()->Int32Constant(32 - masked_shift_value));
+
+          Node* low_node = graph()->NewNode(
+              machine()->Word32Or(),
+              graph()->NewNode(machine()->Word32Shr(), low_input, masked_shift),
+              graph()->NewNode(machine()->Word32Shl(), high_input, inv_shift));
+          Node* high_node = graph()->NewNode(
+              machine()->Word32Or(), graph()->NewNode(machine()->Word32Shr(),
+                                                      high_input, masked_shift),
+              graph()->NewNode(machine()->Word32Shl(), low_input, inv_shift));
+          ReplaceNode(node, low_node, high_node);
+        }
+      } else {
+        Node* safe_shift = shift;
+        if (!machine()->Word32ShiftIsSafe()) {
+          safe_shift =
+              graph()->NewNode(machine()->Word32And(), shift,
+                               graph()->NewNode(common()->Int32Constant(0x1f)));
+        }
+
+        // By creating this bit-mask with SAR and SHL we do not have to deal
+        // with shift == 0 as a special case.
+        Node* inv_mask = graph()->NewNode(
+            machine()->Word32Shl(),
+            graph()->NewNode(machine()->Word32Sar(),
+                             graph()->NewNode(common()->Int32Constant(
+                                 std::numeric_limits<int32_t>::min())),
+                             safe_shift),
+            graph()->NewNode(common()->Int32Constant(1)));
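+        // E.g. for safe_shift == 8: the SAR yields 0xFF800000, the SHL makes
+        // inv_mask 0xFF000000, and bit_mask below becomes 0x00FFFFFF.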
+
+        Node* bit_mask =
+            graph()->NewNode(machine()->Word32Xor(), inv_mask,
+                             graph()->NewNode(common()->Int32Constant(-1)));
+
+        // We have to mask the shift value for this comparison. If
+        // !machine()->Word32ShiftIsSafe() then the masking should already be
+        // part of the graph.
+        Node* masked_shift6 = shift;
+        if (machine()->Word32ShiftIsSafe()) {
+          masked_shift6 =
+              graph()->NewNode(machine()->Word32And(), shift,
+                               graph()->NewNode(common()->Int32Constant(0x3f)));
+        }
+
+        Diamond lt32(
+            graph(), common(),
+            graph()->NewNode(machine()->Int32LessThan(), masked_shift6,
+                             graph()->NewNode(common()->Int32Constant(32))));
+
+        // The low word and the high word can be swapped either at the input
+        // or at the output. We swap the inputs so that {shift} does not have
+        // to be kept in a register for as long.
+        Node* input_low =
+            lt32.Phi(MachineRepresentation::kWord32, GetReplacementLow(input),
+                     GetReplacementHigh(input));
+        Node* input_high =
+            lt32.Phi(MachineRepresentation::kWord32, GetReplacementHigh(input),
+                     GetReplacementLow(input));
+
+        Node* rotate_low =
+            graph()->NewNode(machine()->Word32Ror(), input_low, safe_shift);
+        Node* rotate_high =
+            graph()->NewNode(machine()->Word32Ror(), input_high, safe_shift);
+
+        Node* low_node = graph()->NewNode(
+            machine()->Word32Or(),
+            graph()->NewNode(machine()->Word32And(), rotate_low, bit_mask),
+            graph()->NewNode(machine()->Word32And(), rotate_high, inv_mask));
+
+        Node* high_node = graph()->NewNode(
+            machine()->Word32Or(),
+            graph()->NewNode(machine()->Word32And(), rotate_high, bit_mask),
+            graph()->NewNode(machine()->Word32And(), rotate_low, inv_mask));
+
+        ReplaceNode(node, low_node, high_node);
+      }
+      break;
+    }
+    // kExprI64Clz:
+    case IrOpcode::kWord64Clz: {
+      DCHECK(node->InputCount() == 1);
+      Node* input = node->InputAt(0);
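+      // clz64 is clz32(high) unless the high word is zero, in which case it
+      // is 32 + clz32(low); e.g. clz64(0x0000000000FFFFFF) == 40.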
+      Diamond d(
+          graph(), common(),
+          graph()->NewNode(machine()->Word32Equal(), GetReplacementHigh(input),
+                           graph()->NewNode(common()->Int32Constant(0))));
+
+      Node* low_node = d.Phi(
+          MachineRepresentation::kWord32,
+          graph()->NewNode(machine()->Int32Add(),
+                           graph()->NewNode(machine()->Word32Clz(),
+                                            GetReplacementLow(input)),
+                           graph()->NewNode(common()->Int32Constant(32))),
+          graph()->NewNode(machine()->Word32Clz(), GetReplacementHigh(input)));
+      ReplaceNode(node, low_node, graph()->NewNode(common()->Int32Constant(0)));
+      break;
+    }
+    // kExprI64Ctz:
+    case IrOpcode::kWord64Ctz: {
+      DCHECK(node->InputCount() == 1);
+      DCHECK(machine()->Word32Ctz().IsSupported());
+      Node* input = node->InputAt(0);
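+      // ctz64 is ctz32(low) unless the low word is zero, in which case it is
+      // 32 + ctz32(high); e.g. ctz64(0x0000000100000000) == 32.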
+      Diamond d(
+          graph(), common(),
+          graph()->NewNode(machine()->Word32Equal(), GetReplacementLow(input),
+                           graph()->NewNode(common()->Int32Constant(0))));
+      Node* low_node =
+          d.Phi(MachineRepresentation::kWord32,
+                graph()->NewNode(machine()->Int32Add(),
+                                 graph()->NewNode(machine()->Word32Ctz().op(),
+                                                  GetReplacementHigh(input)),
+                                 graph()->NewNode(common()->Int32Constant(32))),
+                graph()->NewNode(machine()->Word32Ctz().op(),
+                                 GetReplacementLow(input)));
+      ReplaceNode(node, low_node, graph()->NewNode(common()->Int32Constant(0)));
+      break;
+    }
+    // kExprI64Popcnt:
+    case IrOpcode::kWord64Popcnt: {
+      DCHECK(node->InputCount() == 1);
+      Node* input = node->InputAt(0);
+      // We assume that a Word64Popcnt node is only created if Word32Popcnt
+      // is actually supported.
+      DCHECK(machine()->Word32Popcnt().IsSupported());
+      ReplaceNode(node, graph()->NewNode(
+                            machine()->Int32Add(),
+                            graph()->NewNode(machine()->Word32Popcnt().op(),
+                                             GetReplacementLow(input)),
+                            graph()->NewNode(machine()->Word32Popcnt().op(),
+                                             GetReplacementHigh(input))),
+                  graph()->NewNode(common()->Int32Constant(0)));
+      break;
+    }
+    case IrOpcode::kPhi: {
+      MachineRepresentation rep = PhiRepresentationOf(node->op());
+      if (rep == MachineRepresentation::kWord64) {
+        // The replacement nodes have already been created; we only have to
+        // replace the placeholder inputs.
+        Node* low_node = GetReplacementLow(node);
+        Node* high_node = GetReplacementHigh(node);
+        for (int i = 0; i < node->op()->ValueInputCount(); i++) {
+          low_node->ReplaceInput(i, GetReplacementLow(node->InputAt(i)));
+          high_node->ReplaceInput(i, GetReplacementHigh(node->InputAt(i)));
+        }
+      } else {
+        DefaultLowering(node);
+      }
+      break;
+    }
+
     default: { DefaultLowering(node); }
   }
+}  // NOLINT(readability/fn_size)
+
+void Int64Lowering::LowerComparison(Node* node, const Operator* high_word_op,
+                                    const Operator* low_word_op) {
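+  // A 64-bit comparison decomposes into 32-bit comparisons:
+  //   a OP b  <=>  high(a) high_op high(b)
+  //                || (high(a) == high(b) && low(a) low_op low(b))
+  // The high-word operator carries the signedness (and stays strict even for
+  // <=); the low words always compare unsigned since they hold no sign bit.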
+  DCHECK(node->InputCount() == 2);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  Node* replacement = graph()->NewNode(
+      machine()->Word32Or(),
+      graph()->NewNode(high_word_op, GetReplacementHigh(left),
+                       GetReplacementHigh(right)),
+      graph()->NewNode(
+          machine()->Word32And(),
+          graph()->NewNode(machine()->Word32Equal(), GetReplacementHigh(left),
+                           GetReplacementHigh(right)),
+          graph()->NewNode(low_word_op, GetReplacementLow(left),
+                           GetReplacementLow(right))));
+
+  ReplaceNode(node, replacement, nullptr);
 }
 
 bool Int64Lowering::DefaultLowering(Node* node) {
@@ -294,6 +796,32 @@
   DCHECK(result);
   return result;
 }
+
+void Int64Lowering::PreparePhiReplacement(Node* phi) {
+  MachineRepresentation rep = PhiRepresentationOf(phi->op());
+  if (rep == MachineRepresentation::kWord64) {
+    // We have to create the replacements for a phi node before we actually
+    // lower the phi to break potential cycles in the graph. The replacements of
+    // input nodes do not exist yet, so we use a placeholder node to pass the
+    // graph verifier.
+    int value_count = phi->op()->ValueInputCount();
+    Node** inputs_low = zone()->NewArray<Node*>(value_count + 1);
+    Node** inputs_high = zone()->NewArray<Node*>(value_count + 1);
+    for (int i = 0; i < value_count; i++) {
+      inputs_low[i] = placeholder_;
+      inputs_high[i] = placeholder_;
+    }
+    inputs_low[value_count] = NodeProperties::GetControlInput(phi, 0);
+    inputs_high[value_count] = NodeProperties::GetControlInput(phi, 0);
+    ReplaceNode(phi,
+                graph()->NewNode(
+                    common()->Phi(MachineRepresentation::kWord32, value_count),
+                    value_count + 1, inputs_low, false),
+                graph()->NewNode(
+                    common()->Phi(MachineRepresentation::kWord32, value_count),
+                    value_count + 1, inputs_high, false));
+  }
+}
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/int64-lowering.h b/src/compiler/int64-lowering.h
index 79a95dc..7f6ef9a 100644
--- a/src/compiler/int64-lowering.h
+++ b/src/compiler/int64-lowering.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_COMPILER_INT64_REDUCER_H_
-#define V8_COMPILER_INT64_REDUCER_H_
+#ifndef V8_COMPILER_INT64_LOWERING_H_
+#define V8_COMPILER_INT64_LOWERING_H_
 
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph.h"
@@ -24,7 +24,7 @@
   void LowerGraph();
 
  private:
-  enum class State : uint8_t { kUnvisited, kOnStack, kInputsPushed, kVisited };
+  enum class State : uint8_t { kUnvisited, kOnStack, kVisited };
 
   struct Replacement {
     Node* low;
@@ -37,27 +37,39 @@
   CommonOperatorBuilder* common() const { return common_; }
   Signature<MachineRepresentation>* signature() const { return signature_; }
 
+  void PrepareReplacements(Node* node);
+  void PushNode(Node* node);
   void LowerNode(Node* node);
   bool DefaultLowering(Node* node);
+  void LowerComparison(Node* node, const Operator* high_word_op,
+                       const Operator* low_word_op);
+  void PrepareProjectionReplacements(Node* node);
 
   void ReplaceNode(Node* old, Node* new_low, Node* new_high);
   bool HasReplacementLow(Node* node);
   Node* GetReplacementLow(Node* node);
   bool HasReplacementHigh(Node* node);
   Node* GetReplacementHigh(Node* node);
+  void PreparePhiReplacement(Node* phi);
+
+  struct NodeState {
+    Node* node;
+    int input_index;
+  };
 
   Zone* zone_;
   Graph* const graph_;
   MachineOperatorBuilder* machine_;
   CommonOperatorBuilder* common_;
   NodeMarker<State> state_;
-  ZoneStack<Node*> stack_;
+  ZoneDeque<NodeState> stack_;
   Replacement* replacements_;
   Signature<MachineRepresentation>* signature_;
+  Node* placeholder_;
 };
 
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
 
-#endif  // V8_COMPILER_INT64_REDUCER_H_
+#endif  // V8_COMPILER_INT64_LOWERING_H_
diff --git a/src/compiler/js-builtin-reducer.cc b/src/compiler/js-builtin-reducer.cc
index 3023031..41f9c30 100644
--- a/src/compiler/js-builtin-reducer.cc
+++ b/src/compiler/js-builtin-reducer.cc
@@ -117,18 +117,59 @@
   return NoChange();
 }
 
-
-// ES6 draft 08-24-14, section 20.2.2.19.
+// ES6 section 20.2.2.19 Math.imul ( x, y )
 Reduction JSBuiltinReducer::ReduceMathImul(Node* node) {
   JSCallReduction r(node);
-  if (r.InputsMatchTwo(Type::Integral32(), Type::Integral32())) {
-    // Math.imul(a:int32, b:int32) -> Int32Mul(a, b)
-    Node* value = graph()->NewNode(machine()->Int32Mul(), r.left(), r.right());
+  if (r.InputsMatchTwo(Type::Number(), Type::Number())) {
+    // Math.imul(a:number, b:number) -> NumberImul(NumberToUint32(a),
+    //                                             NumberToUint32(b))
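+    // E.g. Math.imul(0xFFFFFFFF, 5) == -5: both inputs wrap to uint32 and
+    // the low 32 bits of the product come back as a signed int32.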
+    Node* a = graph()->NewNode(simplified()->NumberToUint32(), r.left());
+    Node* b = graph()->NewNode(simplified()->NumberToUint32(), r.right());
+    Node* value = graph()->NewNode(simplified()->NumberImul(), a, b);
     return Replace(value);
   }
   return NoChange();
 }
 
+// ES6 section 20.2.2.10 Math.ceil ( x )
+Reduction JSBuiltinReducer::ReduceMathCeil(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.ceil(a:number) -> NumberCeil(a)
+    Node* value = graph()->NewNode(simplified()->NumberCeil(), r.left());
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.11 Math.clz32 ( x )
+Reduction JSBuiltinReducer::ReduceMathClz32(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Unsigned32())) {
+    // Math.clz32(a:unsigned32) -> NumberClz32(a)
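+    // E.g. Math.clz32(1) == 31 and Math.clz32(0) == 32.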
+    Node* value = graph()->NewNode(simplified()->NumberClz32(), r.left());
+    return Replace(value);
+  }
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.clz32(a:number) -> NumberClz32(NumberToUint32(a))
+    Node* value = graph()->NewNode(
+        simplified()->NumberClz32(),
+        graph()->NewNode(simplified()->NumberToUint32(), r.left()));
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.16 Math.floor ( x )
+Reduction JSBuiltinReducer::ReduceMathFloor(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.floor(a:number) -> NumberFloor(a)
+    Node* value = graph()->NewNode(simplified()->NumberFloor(), r.left());
+    return Replace(value);
+  }
+  return NoChange();
+}
 
 // ES6 draft 08-24-14, section 20.2.2.17.
 Reduction JSBuiltinReducer::ReduceMathFround(Node* node) {
@@ -145,25 +186,32 @@
 // ES6 section 20.2.2.28 Math.round ( x )
 Reduction JSBuiltinReducer::ReduceMathRound(Node* node) {
   JSCallReduction r(node);
-  if (r.InputsMatchOne(type_cache_.kIntegerOrMinusZeroOrNaN)) {
-    // Math.round(a:integer \/ -0 \/ NaN) -> a
-    return Replace(r.left());
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.round(a:number) -> NumberRound(a)
+    Node* value = graph()->NewNode(simplified()->NumberRound(), r.left());
+    return Replace(value);
   }
-  if (r.InputsMatchOne(Type::Number()) &&
-      machine()->Float64RoundUp().IsSupported()) {
-    // Math.round(a:number) -> Select(Float64LessThan(#0.5, Float64Sub(i, a)),
-    //                                Float64Sub(i, #1.0), i)
-    //   where i = Float64RoundUp(a)
-    Node* value = r.left();
-    Node* integer = graph()->NewNode(machine()->Float64RoundUp().op(), value);
-    Node* real = graph()->NewNode(machine()->Float64Sub(), integer, value);
-    return Replace(graph()->NewNode(
-        common()->Select(MachineRepresentation::kFloat64),
-        graph()->NewNode(machine()->Float64LessThan(),
-                         jsgraph()->Float64Constant(0.5), real),
-        graph()->NewNode(machine()->Float64Sub(), integer,
-                         jsgraph()->Float64Constant(1.0)),
-        integer));
+  return NoChange();
+}
+
+// ES6 section 20.2.2.32 Math.sqrt ( x )
+Reduction JSBuiltinReducer::ReduceMathSqrt(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.sqrt(a:number) -> Float64Sqrt(a)
+    Node* value = graph()->NewNode(machine()->Float64Sqrt(), r.left());
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+// ES6 section 20.2.2.35 Math.trunc ( x )
+Reduction JSBuiltinReducer::ReduceMathTrunc(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.trunc(a:number) -> NumberTrunc(a)
+    Node* value = graph()->NewNode(simplified()->NumberTrunc(), r.left());
+    return Replace(value);
   }
   return NoChange();
 }
@@ -181,12 +229,27 @@
     case kMathImul:
       reduction = ReduceMathImul(node);
       break;
+    case kMathClz32:
+      reduction = ReduceMathClz32(node);
+      break;
+    case kMathCeil:
+      reduction = ReduceMathCeil(node);
+      break;
+    case kMathFloor:
+      reduction = ReduceMathFloor(node);
+      break;
     case kMathFround:
       reduction = ReduceMathFround(node);
       break;
     case kMathRound:
       reduction = ReduceMathRound(node);
       break;
+    case kMathSqrt:
+      reduction = ReduceMathSqrt(node);
+      break;
+    case kMathTrunc:
+      reduction = ReduceMathTrunc(node);
+      break;
     default:
       break;
   }
diff --git a/src/compiler/js-builtin-reducer.h b/src/compiler/js-builtin-reducer.h
index b64b335..dfeb409 100644
--- a/src/compiler/js-builtin-reducer.h
+++ b/src/compiler/js-builtin-reducer.h
@@ -33,8 +33,13 @@
   Reduction ReduceFunctionCall(Node* node);
   Reduction ReduceMathMax(Node* node);
   Reduction ReduceMathImul(Node* node);
+  Reduction ReduceMathCeil(Node* node);
+  Reduction ReduceMathClz32(Node* node);
+  Reduction ReduceMathFloor(Node* node);
   Reduction ReduceMathFround(Node* node);
   Reduction ReduceMathRound(Node* node);
+  Reduction ReduceMathSqrt(Node* node);
+  Reduction ReduceMathTrunc(Node* node);
 
   Graph* graph() const;
   JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/src/compiler/js-call-reducer.cc b/src/compiler/js-call-reducer.cc
index 34217e7..892dcc7 100644
--- a/src/compiler/js-call-reducer.cc
+++ b/src/compiler/js-call-reducer.cc
@@ -329,16 +329,8 @@
     Node* check = effect =
         graph()->NewNode(javascript()->StrictEqual(), target, array_function,
                          context, effect, control);
-    Node* branch =
-        graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-    Node* deoptimize =
-        graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
-                         frame_state, effect, if_false);
-    // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-    NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
-    Revisit(graph()->end());
-    control = graph()->NewNode(common()->IfTrue(), branch);
+    control = graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+                               effect, control);
 
     // Turn the {node} into a {JSCreateArray} call.
     NodeProperties::ReplaceValueInput(node, array_function, 0);
@@ -355,16 +347,8 @@
       Node* check = effect =
           graph()->NewNode(javascript()->StrictEqual(), target, target_function,
                            context, effect, control);
-      Node* branch =
-          graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-      Node* deoptimize =
-          graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
-                           frame_state, effect, if_false);
-      // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-      NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
-      Revisit(graph()->end());
-      control = graph()->NewNode(common()->IfTrue(), branch);
+      control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                 frame_state, effect, control);
 
       // Specialize the JSCallFunction node to the {target_function}.
       NodeProperties::ReplaceValueInput(node, target_function, 0);
@@ -473,16 +457,8 @@
     Node* check = effect =
         graph()->NewNode(javascript()->StrictEqual(), target, array_function,
                          context, effect, control);
-    Node* branch =
-        graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-    Node* deoptimize =
-        graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
-                         frame_state, effect, if_false);
-    // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-    NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
-    Revisit(graph()->end());
-    control = graph()->NewNode(common()->IfTrue(), branch);
+    control = graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+                               effect, control);
 
     // Turn the {node} into a {JSCreateArray} call.
     NodeProperties::ReplaceEffectInput(node, effect);
@@ -505,16 +481,8 @@
       Node* check = effect =
           graph()->NewNode(javascript()->StrictEqual(), target, target_function,
                            context, effect, control);
-      Node* branch =
-          graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-      Node* deoptimize =
-          graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
-                           frame_state, effect, if_false);
-      // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-      NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
-      Revisit(graph()->end());
-      control = graph()->NewNode(common()->IfTrue(), branch);
+      control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                 frame_state, effect, control);
 
       // Specialize the JSCallConstruct node to the {target_function}.
       NodeProperties::ReplaceValueInput(node, target_function, 0);
diff --git a/src/compiler/js-call-reducer.h b/src/compiler/js-call-reducer.h
index f40f05d..9ffae15 100644
--- a/src/compiler/js-call-reducer.h
+++ b/src/compiler/js-call-reducer.h
@@ -20,7 +20,7 @@
 
 // Performs strength reduction on {JSCallConstruct} and {JSCallFunction} nodes,
 // which might allow inlining or other optimizations to be performed afterwards.
-class JSCallReducer final : public AdvancedReducer {
+class JSCallReducer final : public Reducer {
  public:
   // Flags that control the mode of operation.
   enum Flag {
@@ -29,12 +29,9 @@
   };
   typedef base::Flags<Flag> Flags;
 
-  JSCallReducer(Editor* editor, JSGraph* jsgraph, Flags flags,
+  JSCallReducer(JSGraph* jsgraph, Flags flags,
                 MaybeHandle<Context> native_context)
-      : AdvancedReducer(editor),
-        jsgraph_(jsgraph),
-        flags_(flags),
-        native_context_(native_context) {}
+      : jsgraph_(jsgraph), flags_(flags), native_context_(native_context) {}
 
   Reduction Reduce(Node* node) final;
 
diff --git a/src/compiler/js-create-lowering.cc b/src/compiler/js-create-lowering.cc
index df5c8d0..2003363 100644
--- a/src/compiler/js-create-lowering.cc
+++ b/src/compiler/js-create-lowering.cc
@@ -905,8 +905,17 @@
         site_context->ExitScope(current_site, boilerplate_object);
       } else if (property_details.representation().IsDouble()) {
         // Allocate a mutable HeapNumber box and store the value into it.
-        value = effect = AllocateMutableHeapNumber(
-            Handle<HeapNumber>::cast(boilerplate_value)->value(),
+        Callable callable = CodeFactory::AllocateMutableHeapNumber(isolate());
+        CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+            isolate(), jsgraph()->zone(), callable.descriptor(), 0,
+            CallDescriptor::kNoFlags, Operator::kNoThrow);
+        value = effect = graph()->NewNode(
+            common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
+            jsgraph()->NoContextConstant(), effect, control);
+        effect = graph()->NewNode(
+            simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
+            value, jsgraph()->Constant(
+                       Handle<HeapNumber>::cast(boilerplate_value)->value()),
             effect, control);
       } else if (property_details.representation().IsSmi()) {
         // Ensure that value is stored as smi.
@@ -1028,23 +1037,6 @@
   return builder.Finish();
 }
 
-Node* JSCreateLowering::AllocateMutableHeapNumber(double value, Node* effect,
-                                                  Node* control) {
-  // TODO(turbofan): Support inline allocation of MutableHeapNumber
-  // (requires proper alignment on Allocate, and Begin/FinishRegion).
-  Callable callable = CodeFactory::AllocateMutableHeapNumber(isolate());
-  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-      isolate(), jsgraph()->zone(), callable.descriptor(), 0,
-      CallDescriptor::kNoFlags, Operator::kNoThrow);
-  Node* result = effect = graph()->NewNode(
-      common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
-      jsgraph()->NoContextConstant(), effect, control);
-  effect = graph()->NewNode(
-      simplified()->StoreField(AccessBuilder::ForHeapNumberValue()), result,
-      jsgraph()->Constant(value), effect, control);
-  return result;
-}
-
 MaybeHandle<LiteralsArray> JSCreateLowering::GetSpecializationLiterals(
     Node* node) {
   Node* const closure = NodeProperties::GetValueInput(node, 0);
diff --git a/src/compiler/js-create-lowering.h b/src/compiler/js-create-lowering.h
index d9d184b..52e7ec2 100644
--- a/src/compiler/js-create-lowering.h
+++ b/src/compiler/js-create-lowering.h
@@ -70,7 +70,6 @@
                                     Handle<JSObject> boilerplate,
                                     PretenureFlag pretenure,
                                     AllocationSiteUsageContext* site_context);
-  Node* AllocateMutableHeapNumber(double value, Node* effect, Node* control);
 
   // Infers the LiteralsArray to use for a given {node}.
   MaybeHandle<LiteralsArray> GetSpecializationLiterals(Node* node);
diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc
index df2d908..1f12579 100644
--- a/src/compiler/js-generic-lowering.cc
+++ b/src/compiler/js-generic-lowering.cc
@@ -45,6 +45,8 @@
     JS_OP_LIST(DECLARE_CASE)
 #undef DECLARE_CASE
     case IrOpcode::kBranch:
+    case IrOpcode::kDeoptimizeIf:
+    case IrOpcode::kDeoptimizeUnless:
       // TODO(mstarzinger): If typing is enabled then simplified lowering will
       // have inserted the correct ChangeBoolToBit, otherwise we need to perform
       // poor-man's representation inference here and insert manual change.
@@ -68,14 +70,9 @@
     ReplaceWithStubCall(node, CodeFactory::BinaryOpIC(isolate(), token),    \
                         CallDescriptor::kPatchableCallSiteWithNop | flags); \
   }
-REPLACE_BINARY_OP_IC_CALL(JSBitwiseOr, Token::BIT_OR)
-REPLACE_BINARY_OP_IC_CALL(JSBitwiseXor, Token::BIT_XOR)
-REPLACE_BINARY_OP_IC_CALL(JSBitwiseAnd, Token::BIT_AND)
 REPLACE_BINARY_OP_IC_CALL(JSShiftLeft, Token::SHL)
 REPLACE_BINARY_OP_IC_CALL(JSShiftRight, Token::SAR)
 REPLACE_BINARY_OP_IC_CALL(JSShiftRightLogical, Token::SHR)
-REPLACE_BINARY_OP_IC_CALL(JSAdd, Token::ADD)
-REPLACE_BINARY_OP_IC_CALL(JSSubtract, Token::SUB)
 REPLACE_BINARY_OP_IC_CALL(JSMultiply, Token::MUL)
 REPLACE_BINARY_OP_IC_CALL(JSDivide, Token::DIV)
 REPLACE_BINARY_OP_IC_CALL(JSModulus, Token::MOD)
@@ -85,19 +82,39 @@
   void JSGenericLowering::Lower##op(Node* node) { \
     ReplaceWithRuntimeCall(node, fun);            \
   }
-REPLACE_RUNTIME_CALL(JSEqual, Runtime::kEqual)
-REPLACE_RUNTIME_CALL(JSNotEqual, Runtime::kNotEqual)
-REPLACE_RUNTIME_CALL(JSStrictEqual, Runtime::kStrictEqual)
-REPLACE_RUNTIME_CALL(JSStrictNotEqual, Runtime::kStrictNotEqual)
-REPLACE_RUNTIME_CALL(JSLessThan, Runtime::kLessThan)
-REPLACE_RUNTIME_CALL(JSGreaterThan, Runtime::kGreaterThan)
-REPLACE_RUNTIME_CALL(JSLessThanOrEqual, Runtime::kLessThanOrEqual)
-REPLACE_RUNTIME_CALL(JSGreaterThanOrEqual, Runtime::kGreaterThanOrEqual)
 REPLACE_RUNTIME_CALL(JSCreateWithContext, Runtime::kPushWithContext)
 REPLACE_RUNTIME_CALL(JSCreateModuleContext, Runtime::kPushModuleContext)
 REPLACE_RUNTIME_CALL(JSConvertReceiver, Runtime::kConvertReceiver)
 #undef REPLACE_RUNTIME_CALL
 
+#define REPLACE_STUB_CALL(Name)                                   \
+  void JSGenericLowering::LowerJS##Name(Node* node) {             \
+    CallDescriptor::Flags flags = AdjustFrameStatesForCall(node); \
+    Callable callable = CodeFactory::Name(isolate());             \
+    ReplaceWithStubCall(node, callable, flags);                   \
+  }
+REPLACE_STUB_CALL(Add)
+REPLACE_STUB_CALL(Subtract)
+REPLACE_STUB_CALL(BitwiseAnd)
+REPLACE_STUB_CALL(BitwiseOr)
+REPLACE_STUB_CALL(BitwiseXor)
+REPLACE_STUB_CALL(LessThan)
+REPLACE_STUB_CALL(LessThanOrEqual)
+REPLACE_STUB_CALL(GreaterThan)
+REPLACE_STUB_CALL(GreaterThanOrEqual)
+REPLACE_STUB_CALL(Equal)
+REPLACE_STUB_CALL(NotEqual)
+REPLACE_STUB_CALL(StrictEqual)
+REPLACE_STUB_CALL(StrictNotEqual)
+REPLACE_STUB_CALL(ToBoolean)
+REPLACE_STUB_CALL(ToInteger)
+REPLACE_STUB_CALL(ToLength)
+REPLACE_STUB_CALL(ToNumber)
+REPLACE_STUB_CALL(ToName)
+REPLACE_STUB_CALL(ToObject)
+REPLACE_STUB_CALL(ToString)
+#undef REPLACE_STUB_CALL
+
 void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
                                             CallDescriptor::Flags flags) {
   Operator::Properties properties = node->op()->properties();
@@ -134,42 +151,6 @@
 }
 
 
-void JSGenericLowering::LowerJSToBoolean(Node* node) {
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
-  Callable callable = CodeFactory::ToBoolean(isolate());
-  ReplaceWithStubCall(node, callable,
-                      CallDescriptor::kPatchableCallSite | flags);
-}
-
-
-void JSGenericLowering::LowerJSToNumber(Node* node) {
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
-  Callable callable = CodeFactory::ToNumber(isolate());
-  ReplaceWithStubCall(node, callable, flags);
-}
-
-
-void JSGenericLowering::LowerJSToString(Node* node) {
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
-  Callable callable = CodeFactory::ToString(isolate());
-  ReplaceWithStubCall(node, callable, flags);
-}
-
-
-void JSGenericLowering::LowerJSToName(Node* node) {
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
-  Callable callable = CodeFactory::ToName(isolate());
-  ReplaceWithStubCall(node, callable, flags);
-}
-
-
-void JSGenericLowering::LowerJSToObject(Node* node) {
-  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
-  Callable callable = CodeFactory::ToObject(isolate());
-  ReplaceWithStubCall(node, callable, flags);
-}
-
-
 void JSGenericLowering::LowerJSLoadProperty(Node* node) {
   Node* closure = NodeProperties::GetValueInput(node, 2);
   Node* effect = NodeProperties::GetEffectInput(node);
@@ -546,15 +527,13 @@
 void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
   CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
   CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
-  int const length = Handle<FixedArray>::cast(p.constant())->length();
   node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
   node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
 
   // Use the FastCloneShallowArrayStub only for shallow boilerplates up to the
   // initial length limit for arrays with "fast" elements kind.
   if ((p.flags() & ArrayLiteral::kShallowElements) != 0 &&
-      (p.flags() & ArrayLiteral::kIsStrong) == 0 &&
-      length < JSArray::kInitialMaxFastElementArray) {
+      p.length() < JSArray::kInitialMaxFastElementArray) {
     Callable callable = CodeFactory::FastCloneShallowArray(isolate());
     ReplaceWithStubCall(node, callable, flags);
   } else {
@@ -567,7 +546,6 @@
 void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
   CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
   CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
-  int const length = Handle<FixedArray>::cast(p.constant())->length();
   node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
   node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
   node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
@@ -575,8 +553,9 @@
   // Use the FastCloneShallowObjectStub only for shallow boilerplates without
   // elements up to the number of properties that the stubs can handle.
   if ((p.flags() & ObjectLiteral::kShallowProperties) != 0 &&
-      length <= FastCloneShallowObjectStub::kMaximumClonedProperties) {
-    Callable callable = CodeFactory::FastCloneShallowObject(isolate(), length);
+      p.length() <= FastCloneShallowObjectStub::kMaximumClonedProperties) {
+    Callable callable =
+        CodeFactory::FastCloneShallowObject(isolate(), p.length());
     ReplaceWithStubCall(node, callable, flags);
   } else {
     ReplaceWithRuntimeCall(node, Runtime::kCreateObjectLiteral);
diff --git a/src/compiler/js-global-object-specialization.cc b/src/compiler/js-global-object-specialization.cc
index 132dec6..d8c9f17 100644
--- a/src/compiler/js-global-object-specialization.cc
+++ b/src/compiler/js-global-object-specialization.cc
@@ -171,16 +171,8 @@
       Node* check =
           graph()->NewNode(simplified()->ReferenceEqual(Type::Tagged()), value,
                            jsgraph()->Constant(property_cell_value));
-      Node* branch =
-          graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-      Node* deoptimize =
-          graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
-                           frame_state, effect, if_false);
-      // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-      NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
-      Revisit(graph()->end());
-      control = graph()->NewNode(common()->IfTrue(), branch);
+      control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                 frame_state, effect, control);
       break;
     }
     case PropertyCellType::kConstantType: {
@@ -191,16 +183,8 @@
       Type* property_cell_value_type = Type::TaggedSigned();
       if (property_cell_value->IsHeapObject()) {
         // Deoptimize if the {value} is a Smi.
-        Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                        check, control);
-        Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-        Node* deoptimize =
-            graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
-                             frame_state, effect, if_true);
-        // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-        NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
-        Revisit(graph()->end());
-        control = graph()->NewNode(common()->IfFalse(), branch);
+        control = graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+                                   effect, control);
 
         // Load the {value} map check against the {property_cell} map.
         Node* value_map = effect =
@@ -213,16 +197,8 @@
             jsgraph()->HeapConstant(property_cell_value_map));
         property_cell_value_type = Type::TaggedPointer();
       }
-      Node* branch =
-          graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-      Node* deoptimize =
-          graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
-                           frame_state, effect, if_false);
-      // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-      NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
-      Revisit(graph()->end());
-      control = graph()->NewNode(common()->IfTrue(), branch);
+      control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                 frame_state, effect, control);
       effect = graph()->NewNode(
           simplified()->StoreField(
               AccessBuilder::ForPropertyCellValue(property_cell_value_type)),
diff --git a/src/compiler/js-graph.cc b/src/compiler/js-graph.cc
index e938798..98ca7aa 100644
--- a/src/compiler/js-graph.cc
+++ b/src/compiler/js-graph.cc
@@ -29,6 +29,10 @@
                 HeapConstant(factory()->empty_fixed_array()));
 }
 
+Node* JSGraph::OptimizedOutConstant() {
+  return CACHED(kOptimizedOutConstant,
+                HeapConstant(factory()->optimized_out()));
+}
 
 Node* JSGraph::UndefinedConstant() {
   return CACHED(kUndefinedConstant, HeapConstant(factory()->undefined_value()));
diff --git a/src/compiler/js-graph.h b/src/compiler/js-graph.h
index 5a25ed0..06e8030 100644
--- a/src/compiler/js-graph.h
+++ b/src/compiler/js-graph.h
@@ -41,6 +41,7 @@
   // Canonicalized global constants.
   Node* CEntryStubConstant(int result_size);
   Node* EmptyFixedArrayConstant();
+  Node* OptimizedOutConstant();
   Node* UndefinedConstant();
   Node* TheHoleConstant();
   Node* TrueConstant();
@@ -136,6 +137,7 @@
   enum CachedNode {
     kCEntryStubConstant,
     kEmptyFixedArrayConstant,
+    kOptimizedOutConstant,
     kUndefinedConstant,
     kTheHoleConstant,
     kTrueConstant,
diff --git a/src/compiler/js-inlining-heuristic.cc b/src/compiler/js-inlining-heuristic.cc
index cd5637b..0e0508b 100644
--- a/src/compiler/js-inlining-heuristic.cc
+++ b/src/compiler/js-inlining-heuristic.cc
@@ -121,7 +121,10 @@
 
 bool JSInliningHeuristic::CandidateCompare::operator()(
     const Candidate& left, const Candidate& right) const {
-  return left.node != right.node && left.calls >= right.calls;
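+  // Prefer candidates with more call sites; break ties by node identity so
+  // that the relation is a strict weak ordering, as ordered containers
+  // require.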
+  if (left.calls != right.calls) {
+    return left.calls > right.calls;
+  }
+  return left.node < right.node;
 }
 
 
diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc
index 2244f9b..e3254bd 100644
--- a/src/compiler/js-inlining.cc
+++ b/src/compiler/js-inlining.cc
@@ -263,6 +263,35 @@
                                     node->InputAt(0), outer_frame_state);
 }
 
+Node* JSInliner::CreateTailCallerFrameState(Node* node, Node* frame_state) {
+  FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
+  Handle<SharedFunctionInfo> shared;
+  frame_info.shared_info().ToHandle(&shared);
+
+  Node* function = frame_state->InputAt(kFrameStateFunctionInput);
+
+  // If we are inlining a tail call, drop the caller's frame state and the
+  // arguments adaptor frame state if one exists.
+  frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+  if (frame_state->opcode() == IrOpcode::kFrameState) {
+    FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
+    if (frame_info.type() == FrameStateType::kArgumentsAdaptor) {
+      frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+    }
+  }
+
+  const FrameStateFunctionInfo* state_info =
+      jsgraph_->common()->CreateFrameStateFunctionInfo(
+          FrameStateType::kTailCallerFunction, 0, 0, shared);
+
+  const Operator* op = jsgraph_->common()->FrameState(
+      BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
+  const Operator* op0 = jsgraph_->common()->StateValues(0);
+  Node* node0 = jsgraph_->graph()->NewNode(op0);
+  return jsgraph_->graph()->NewNode(op, node0, node0, node0,
+                                    jsgraph_->UndefinedConstant(), function,
+                                    frame_state);
+}
 
 namespace {
 
@@ -271,7 +300,10 @@
   DisallowHeapAllocation no_gc;
   Isolate* const isolate = shared_info->GetIsolate();
   Code* const construct_stub = shared_info->construct_stub();
-  return construct_stub != *isolate->builtins()->JSBuiltinsConstructStub();
+  return construct_stub != *isolate->builtins()->JSBuiltinsConstructStub() &&
+         construct_stub !=
+             *isolate->builtins()->JSBuiltinsConstructStubForDerived() &&
+         construct_stub != *isolate->builtins()->JSConstructStubApi();
 }
 
 bool IsNonConstructible(Handle<SharedFunctionInfo> shared_info) {
@@ -380,7 +412,7 @@
     return NoChange();
   }
 
-  Zone zone;
+  Zone zone(info_->isolate()->allocator());
   ParseInfo parse_info(&zone, function);
   CompilationInfo info(&parse_info);
   if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
@@ -395,17 +427,6 @@
     return NoChange();
   }
 
-  // In strong mode, in case of too few arguments we need to throw a TypeError
-  // so we must not inline this call.
-  int parameter_count = info.literal()->parameter_count();
-  if (is_strong(info.language_mode()) &&
-      call.formal_arguments() < parameter_count) {
-    TRACE("Not inlining %s into %s because too few arguments for strong mode\n",
-          shared_info->DebugName()->ToCString().get(),
-          info_->shared_info()->DebugName()->ToCString().get());
-    return NoChange();
-  }
-
   if (!Compiler::EnsureDeoptimizationSupport(&info)) {
     TRACE("Not inlining %s into %s because deoptimization support failed\n",
           shared_info->DebugName()->ToCString().get(),
@@ -508,10 +529,25 @@
     NodeProperties::ReplaceEffectInput(node, convert);
   }
 
+  // If we are inlining a JS call at tail position, we have to pop the current
+  // frame state and its potential arguments adaptor frame state in order to
+  // keep the call stack consistent with the non-inlined case.
+  // After that we add a tail caller frame state which lets the deoptimizer
+  // handle the case where the outermost function inlines a tail call (it
+  // should remove the potential arguments adaptor frame that belongs to the
+  // outermost function when a deopt happens).
+  if (node->opcode() == IrOpcode::kJSCallFunction) {
+    const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
+    if (p.tail_call_mode() == TailCallMode::kAllow) {
+      frame_state = CreateTailCallerFrameState(node, frame_state);
+    }
+  }
+
   // Insert argument adaptor frame if required. The callees formal parameter
   // count (i.e. value outputs of start node minus target, receiver, new target,
   // arguments count and context) have to match the number of arguments passed
   // to the call.
+  int parameter_count = info.literal()->parameter_count();
   DCHECK_EQ(parameter_count, start->op()->ValueOutputCount() - 5);
   if (call.formal_arguments() != parameter_count) {
     frame_state = CreateArtificialFrameState(
diff --git a/src/compiler/js-inlining.h b/src/compiler/js-inlining.h
index 99eff96..d0ab7c0 100644
--- a/src/compiler/js-inlining.h
+++ b/src/compiler/js-inlining.h
@@ -45,6 +45,8 @@
                                    FrameStateType frame_state_type,
                                    Handle<SharedFunctionInfo> shared);
 
+  Node* CreateTailCallerFrameState(Node* node, Node* outer_frame_state);
+
   Reduction InlineCall(Node* call, Node* new_target, Node* context,
                        Node* frame_state, Node* start, Node* end);
 };
diff --git a/src/compiler/js-intrinsic-lowering.cc b/src/compiler/js-intrinsic-lowering.cc
index abeb110..034ee6f 100644
--- a/src/compiler/js-intrinsic-lowering.cc
+++ b/src/compiler/js-intrinsic-lowering.cc
@@ -15,7 +15,6 @@
 #include "src/compiler/operator-properties.h"
 #include "src/counters.h"
 #include "src/objects-inl.h"
-#include "src/type-cache.h"
 
 namespace v8 {
 namespace internal {
@@ -23,11 +22,7 @@
 
 JSIntrinsicLowering::JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph,
                                          DeoptimizationMode mode)
-    : AdvancedReducer(editor),
-      jsgraph_(jsgraph),
-      mode_(mode),
-      type_cache_(TypeCache::Get()) {}
-
+    : AdvancedReducer(editor), jsgraph_(jsgraph), mode_(mode) {}
 
 Reduction JSIntrinsicLowering::Reduce(Node* node) {
   if (node->opcode() != IrOpcode::kJSCallRuntime) return NoChange();
@@ -45,8 +40,6 @@
       return ReduceDoubleHi(node);
     case Runtime::kInlineDoubleLo:
       return ReduceDoubleLo(node);
-    case Runtime::kInlineIncrementStatsCounter:
-      return ReduceIncrementStatsCounter(node);
     case Runtime::kInlineIsArray:
       return ReduceIsInstanceType(node, JS_ARRAY_TYPE);
     case Runtime::kInlineIsTypedArray:
@@ -57,12 +50,6 @@
       return ReduceIsJSReceiver(node);
     case Runtime::kInlineIsSmi:
       return ReduceIsSmi(node);
-    case Runtime::kInlineMathClz32:
-      return ReduceMathClz32(node);
-    case Runtime::kInlineMathFloor:
-      return ReduceMathFloor(node);
-    case Runtime::kInlineMathSqrt:
-      return ReduceMathSqrt(node);
     case Runtime::kInlineValueOf:
       return ReduceValueOf(node);
     case Runtime::kInlineFixedArrayGet:
@@ -95,10 +82,12 @@
       return ReduceToString(node);
     case Runtime::kInlineCall:
       return ReduceCall(node);
-    case Runtime::kInlineTailCall:
-      return ReduceTailCall(node);
+    case Runtime::kInlineNewObject:
+      return ReduceNewObject(node);
     case Runtime::kInlineGetSuperConstructor:
       return ReduceGetSuperConstructor(node);
+    case Runtime::kInlineGetOrdinaryHasInstance:
+      return ReduceGetOrdinaryHasInstance(node);
     default:
       break;
   }
@@ -149,40 +138,23 @@
 
 
 Reduction JSIntrinsicLowering::ReduceDoubleHi(Node* node) {
+  // Tell the compiler to assume number input.
+  Node* renamed = graph()->NewNode(common()->Guard(Type::Number()),
+                                   node->InputAt(0), graph()->start());
+  node->ReplaceInput(0, renamed);
   return Change(node, machine()->Float64ExtractHighWord32());
 }
 
 
 Reduction JSIntrinsicLowering::ReduceDoubleLo(Node* node) {
+  // Tell the compiler to assume number input.
+  Node* renamed = graph()->NewNode(common()->Guard(Type::Number()),
+                                   node->InputAt(0), graph()->start());
+  node->ReplaceInput(0, renamed);
   return Change(node, machine()->Float64ExtractLowWord32());
 }
 
 
-Reduction JSIntrinsicLowering::ReduceIncrementStatsCounter(Node* node) {
-  if (!FLAG_native_code_counters) return ChangeToUndefined(node);
-  HeapObjectMatcher m(NodeProperties::GetValueInput(node, 0));
-  if (!m.HasValue() || !m.Value()->IsString()) {
-    return ChangeToUndefined(node);
-  }
-  base::SmartArrayPointer<char> name =
-      Handle<String>::cast(m.Value())->ToCString();
-  StatsCounter counter(jsgraph()->isolate(), name.get());
-  if (!counter.Enabled()) return ChangeToUndefined(node);
-
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-  FieldAccess access = AccessBuilder::ForStatsCounter();
-  Node* cnt = jsgraph()->ExternalConstant(ExternalReference(&counter));
-  Node* load =
-      graph()->NewNode(simplified()->LoadField(access), cnt, effect, control);
-  Node* inc =
-      graph()->NewNode(machine()->Int32Add(), load, jsgraph()->OneConstant());
-  Node* store = graph()->NewNode(simplified()->StoreField(access), cnt, inc,
-                                 load, control);
-  return ChangeToUndefined(node, store);
-}
-
-
 Reduction JSIntrinsicLowering::ReduceIsInstanceType(
     Node* node, InstanceType instance_type) {
   // if (%_IsSmi(value)) {
@@ -232,22 +204,6 @@
 }
 
 
-Reduction JSIntrinsicLowering::ReduceMathClz32(Node* node) {
-  return Change(node, machine()->Word32Clz());
-}
-
-
-Reduction JSIntrinsicLowering::ReduceMathFloor(Node* node) {
-  if (!machine()->Float64RoundDown().IsSupported()) return NoChange();
-  return Change(node, machine()->Float64RoundDown().op());
-}
-
-
-Reduction JSIntrinsicLowering::ReduceMathSqrt(Node* node) {
-  return Change(node, machine()->Float64Sqrt());
-}
-
-
 Reduction JSIntrinsicLowering::ReduceValueOf(Node* node) {
   // if (%_IsSmi(value)) {
   //   return value;
@@ -385,44 +341,8 @@
 
 
 Reduction JSIntrinsicLowering::ReduceToInteger(Node* node) {
-  Node* value = NodeProperties::GetValueInput(node, 0);
-  Node* context = NodeProperties::GetContextInput(node);
-  Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-
-  // ToInteger is a no-op on integer values and -0.
-  Type* value_type = NodeProperties::GetType(value);
-  if (value_type->Is(type_cache().kIntegerOrMinusZero)) {
-    ReplaceWithValue(node, value);
-    return Replace(value);
-  }
-
-  Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = value;
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    vfalse = efalse =
-        graph()->NewNode(javascript()->CallRuntime(Runtime::kToInteger), value,
-                         context, frame_state, efalse, if_false);
-    if_false = graph()->NewNode(common()->IfSuccess(), vfalse);
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                           vtrue, vfalse, control);
-  // TODO(bmeurer, mstarzinger): Rewire IfException inputs to {vfalse}.
-  ReplaceWithValue(node, value, effect, control);
-  return Changed(value);
+  NodeProperties::ChangeOp(node, javascript()->ToInteger());
+  return Changed(node);
 }
 
 
@@ -439,38 +359,8 @@
 
 
 Reduction JSIntrinsicLowering::ReduceToLength(Node* node) {
-  Node* value = NodeProperties::GetValueInput(node, 0);
-  Type* value_type = NodeProperties::GetType(value);
-  if (value_type->Is(type_cache().kIntegerOrMinusZero)) {
-    if (value_type->Max() <= 0.0) {
-      value = jsgraph()->ZeroConstant();
-    } else if (value_type->Min() >= kMaxSafeInteger) {
-      value = jsgraph()->Constant(kMaxSafeInteger);
-    } else {
-      if (value_type->Min() <= 0.0) {
-        value = graph()->NewNode(
-            common()->Select(MachineRepresentation::kTagged),
-            graph()->NewNode(simplified()->NumberLessThanOrEqual(), value,
-                             jsgraph()->ZeroConstant()),
-            jsgraph()->ZeroConstant(), value);
-        value_type = Type::Range(0.0, value_type->Max(), graph()->zone());
-        NodeProperties::SetType(value, value_type);
-      }
-      if (value_type->Max() > kMaxSafeInteger) {
-        value = graph()->NewNode(
-            common()->Select(MachineRepresentation::kTagged),
-            graph()->NewNode(simplified()->NumberLessThanOrEqual(),
-                             jsgraph()->Constant(kMaxSafeInteger), value),
-            jsgraph()->Constant(kMaxSafeInteger), value);
-        value_type =
-            Type::Range(value_type->Min(), kMaxSafeInteger, graph()->zone());
-        NodeProperties::SetType(value, value_type);
-      }
-    }
-    ReplaceWithValue(node, value);
-    return Replace(value);
-  }
-  return Change(node, CodeFactory::ToLength(isolate()), 0);
+  NodeProperties::ChangeOp(node, javascript()->ToLength());
+  return Changed(node);
 }
 
 
@@ -506,17 +396,18 @@
   return Changed(node);
 }
 
-
-Reduction JSIntrinsicLowering::ReduceTailCall(Node* node) {
-  size_t const arity = CallRuntimeParametersOf(node->op()).arity();
-  NodeProperties::ChangeOp(node,
-                           javascript()->CallFunction(arity, VectorSlotPair(),
-                                                      ConvertReceiverMode::kAny,
-                                                      TailCallMode::kAllow));
-  return Changed(node);
+Reduction JSIntrinsicLowering::ReduceNewObject(Node* node) {
+  Node* constructor = NodeProperties::GetValueInput(node, 0);
+  Node* new_target = NodeProperties::GetValueInput(node, 1);
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+  Node* value = graph()->NewNode(javascript()->Create(), constructor,
+                                 new_target, context, frame_state, effect);
+  ReplaceWithValue(node, value, value);
+  return Replace(value);
 }
 
-
 Reduction JSIntrinsicLowering::ReduceGetSuperConstructor(Node* node) {
   Node* active_function = NodeProperties::GetValueInput(node, 0);
   Node* effect = NodeProperties::GetEffectInput(node);
@@ -528,6 +419,16 @@
                 active_function_map, effect, control);
 }
 
+Reduction JSIntrinsicLowering::ReduceGetOrdinaryHasInstance(Node* node) {
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* native_context = effect = graph()->NewNode(
+      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+      context, context, effect);
+  return Change(node, javascript()->LoadContext(
+                          0, Context::ORDINARY_HAS_INSTANCE_INDEX, true),
+                native_context, context, effect);
+}
 
 Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
                                       Node* b) {
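
The Guard renaming used by ReduceDoubleHi and ReduceDoubleLo above is the interesting part of this hunk. A minimal sketch of the idiom, folding the two reducers into one hypothetical helper (graph(), common(), machine() and Change() as in the class above):

  Reduction ReduceDoubleWord32(Node* node, bool high_word) {
    // Guard(Type::Number()) renames the input under a narrower type. It emits
    // no runtime check; it only lets later phases assume a number flows here.
    Node* renamed = graph()->NewNode(common()->Guard(Type::Number()),
                                     node->InputAt(0), graph()->start());
    node->ReplaceInput(0, renamed);
    return Change(node, high_word ? machine()->Float64ExtractHighWord32()
                                  : machine()->Float64ExtractLowWord32());
  }
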
diff --git a/src/compiler/js-intrinsic-lowering.h b/src/compiler/js-intrinsic-lowering.h
index d8e1102..a43ed01 100644
--- a/src/compiler/js-intrinsic-lowering.h
+++ b/src/compiler/js-intrinsic-lowering.h
@@ -13,7 +13,6 @@
 
 // Forward declarations.
 class Callable;
-class TypeCache;
 
 
 namespace compiler {
@@ -43,13 +42,9 @@
   Reduction ReduceDeoptimizeNow(Node* node);
   Reduction ReduceDoubleHi(Node* node);
   Reduction ReduceDoubleLo(Node* node);
-  Reduction ReduceIncrementStatsCounter(Node* node);
   Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
   Reduction ReduceIsJSReceiver(Node* node);
   Reduction ReduceIsSmi(Node* node);
-  Reduction ReduceMathClz32(Node* node);
-  Reduction ReduceMathFloor(Node* node);
-  Reduction ReduceMathSqrt(Node* node);
   Reduction ReduceValueOf(Node* node);
   Reduction ReduceFixedArrayGet(Node* node);
   Reduction ReduceFixedArraySet(Node* node);
@@ -66,8 +61,9 @@
   Reduction ReduceToPrimitive(Node* node);
   Reduction ReduceToString(Node* node);
   Reduction ReduceCall(Node* node);
-  Reduction ReduceTailCall(Node* node);
+  Reduction ReduceNewObject(Node* node);
   Reduction ReduceGetSuperConstructor(Node* node);
+  Reduction ReduceGetOrdinaryHasInstance(Node* node);
 
   Reduction Change(Node* node, const Operator* op);
   Reduction Change(Node* node, const Operator* op, Node* a, Node* b);
@@ -86,11 +82,9 @@
   MachineOperatorBuilder* machine() const;
   SimplifiedOperatorBuilder* simplified() const;
   DeoptimizationMode mode() const { return mode_; }
-  TypeCache const& type_cache() const { return type_cache_; }
 
   JSGraph* const jsgraph_;
   DeoptimizationMode const mode_;
-  TypeCache const& type_cache_;
 };
 
 }  // namespace compiler
diff --git a/src/compiler/js-native-context-specialization.cc b/src/compiler/js-native-context-specialization.cc
index 2c11794..d1353d2 100644
--- a/src/compiler/js-native-context-specialization.cc
+++ b/src/compiler/js-native-context-specialization.cc
@@ -108,27 +108,36 @@
   ZoneVector<Node*> effects(zone());
   ZoneVector<Node*> controls(zone());
 
-  // The list of "exiting" controls, which currently go to a single deoptimize.
-  // TODO(bmeurer): Consider using an IC as fallback.
-  Node* const exit_effect = effect;
-  ZoneVector<Node*> exit_controls(zone());
-
   // Ensure that {index} matches the specified {name} (if {index} is given).
   if (index != nullptr) {
     Node* check = graph()->NewNode(simplified()->ReferenceEqual(Type::Name()),
                                    index, jsgraph()->HeapConstant(name));
-    Node* branch =
-        graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-    exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
-    control = graph()->NewNode(common()->IfTrue(), branch);
+    control = graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+                               effect, control);
+  }
+
+  // Check if {receiver} may be a number.
+  bool receiverissmi_possible = false;
+  for (PropertyAccessInfo const& access_info : access_infos) {
+    if (access_info.receiver_type()->Is(Type::Number())) {
+      receiverissmi_possible = true;
+      break;
+    }
   }
 
   // Ensure that {receiver} is a heap object.
   Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
-  Node* branch = graph()->NewNode(common()->Branch(), check, control);
-  control = graph()->NewNode(common()->IfFalse(), branch);
-  Node* receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
+  Node* receiverissmi_control = nullptr;
   Node* receiverissmi_effect = effect;
+  if (receiverissmi_possible) {
+    Node* branch = graph()->NewNode(common()->Branch(), check, control);
+    control = graph()->NewNode(common()->IfFalse(), branch);
+    receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
+    receiverissmi_effect = effect;
+  } else {
+    control = graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+                               effect, control);
+  }
 
   // Load the {receiver} map. The resulting effect is the dominating effect for
   // all (polymorphic) branches.
@@ -138,7 +147,8 @@
 
   // Generate code for the various different property access patterns.
   Node* fallthrough_control = control;
-  for (PropertyAccessInfo const& access_info : access_infos) {
+  for (size_t j = 0; j < access_infos.size(); ++j) {
+    PropertyAccessInfo const& access_info = access_infos[j];
     Node* this_value = value;
     Node* this_receiver = receiver;
     Node* this_effect = effect;
@@ -154,37 +164,52 @@
       Node* check =
           graph()->NewNode(machine()->Uint32LessThan(), receiver_instance_type,
                            jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
-      Node* branch =
-          graph()->NewNode(common()->Branch(), check, fallthrough_control);
-      fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
-      this_control = graph()->NewNode(common()->IfTrue(), branch);
+      if (j == access_infos.size() - 1) {
+        this_control =
+            graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+                             this_effect, fallthrough_control);
+        fallthrough_control = nullptr;
+      } else {
+        Node* branch =
+            graph()->NewNode(common()->Branch(), check, fallthrough_control);
+        fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+        this_control = graph()->NewNode(common()->IfTrue(), branch);
+      }
     } else {
       // Emit a (sequence of) map checks for other {receiver}s.
       ZoneVector<Node*> this_controls(zone());
       ZoneVector<Node*> this_effects(zone());
+      int num_classes = access_info.receiver_type()->NumClasses();
       for (auto i = access_info.receiver_type()->Classes(); !i.Done();
            i.Advance()) {
+        DCHECK_LT(0, num_classes);
         Handle<Map> map = i.Current();
         Node* check =
             graph()->NewNode(simplified()->ReferenceEqual(Type::Internal()),
                              receiver_map, jsgraph()->Constant(map));
-        Node* branch =
-            graph()->NewNode(common()->Branch(), check, fallthrough_control);
-        fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
-        this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
-        this_effects.push_back(this_effect);
+        if (--num_classes == 0 && j == access_infos.size() - 1) {
+          this_controls.push_back(
+              graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+                               this_effect, fallthrough_control));
+          this_effects.push_back(this_effect);
+          fallthrough_control = nullptr;
+        } else {
+          Node* branch =
+              graph()->NewNode(common()->Branch(), check, fallthrough_control);
+          fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+          this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+          this_effects.push_back(this_effect);
+        }
       }
 
       // The Number case requires special treatment to also deal with Smis.
       if (receiver_type->Is(Type::Number())) {
-        // Join this check with the "receiver is smi" check above, and mark the
-        // "receiver is smi" check as "consumed" so that we don't deoptimize if
-        // the {receiver} is actually a Smi.
-        if (receiverissmi_control != nullptr) {
-          this_controls.push_back(receiverissmi_control);
-          this_effects.push_back(receiverissmi_effect);
-          receiverissmi_control = receiverissmi_effect = nullptr;
-        }
+        // Join this check with the "receiver is smi" check above.
+        DCHECK_NOT_NULL(receiverissmi_effect);
+        DCHECK_NOT_NULL(receiverissmi_control);
+        this_effects.push_back(receiverissmi_effect);
+        this_controls.push_back(receiverissmi_control);
+        receiverissmi_effect = receiverissmi_control = nullptr;
       }
 
       // Create dominating Merge+EffectPhi for this {receiver} type.
@@ -212,23 +237,14 @@
     // Generate the actual property access.
     if (access_info.IsNotFound()) {
       DCHECK_EQ(AccessMode::kLoad, access_mode);
-      if (is_strong(language_mode)) {
-        // TODO(bmeurer/mstarzinger): Add support for lowering inside try
-        // blocks rewiring the IfException edge to a runtime call/throw.
-        exit_controls.push_back(this_control);
-        continue;
-      } else {
-        this_value = jsgraph()->UndefinedConstant();
-      }
+      this_value = jsgraph()->UndefinedConstant();
     } else if (access_info.IsDataConstant()) {
       this_value = jsgraph()->Constant(access_info.constant());
       if (access_mode == AccessMode::kStore) {
         Node* check = graph()->NewNode(
             simplified()->ReferenceEqual(Type::Tagged()), value, this_value);
-        Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                        check, this_control);
-        exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
-        this_control = graph()->NewNode(common()->IfTrue(), branch);
+        this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                        frame_state, this_effect, this_control);
       }
     } else {
       DCHECK(access_info.IsDataField());
@@ -253,10 +269,9 @@
                                jsgraph()->Int32Constant(
                                    1 << JSArrayBuffer::WasNeutered::kShift)),
               jsgraph()->Int32Constant(0));
-          Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                          check, this_control);
-          exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
-          this_control = graph()->NewNode(common()->IfFalse(), branch);
+          this_control =
+              graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+                               this_effect, this_control);
           break;
         }
       }
@@ -292,11 +307,9 @@
         if (field_type->Is(Type::UntaggedFloat64())) {
           Node* check =
               graph()->NewNode(simplified()->ObjectIsNumber(), this_value);
-          Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                          check, this_control);
-          exit_controls.push_back(
-              graph()->NewNode(common()->IfFalse(), branch));
-          this_control = graph()->NewNode(common()->IfTrue(), branch);
+          this_control =
+              graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+                               this_effect, this_control);
           this_value = graph()->NewNode(common()->Guard(Type::Number()),
                                         this_value, this_control);
 
@@ -335,46 +348,30 @@
         } else if (field_type->Is(Type::TaggedSigned())) {
           Node* check =
               graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
-          Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                          check, this_control);
-          exit_controls.push_back(
-              graph()->NewNode(common()->IfFalse(), branch));
-          this_control = graph()->NewNode(common()->IfTrue(), branch);
+          this_control =
+              graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+                               this_effect, this_control);
           this_value = graph()->NewNode(common()->Guard(type_cache_.kSmi),
                                         this_value, this_control);
         } else if (field_type->Is(Type::TaggedPointer())) {
           Node* check =
               graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
-          Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                          check, this_control);
-          exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
-          this_control = graph()->NewNode(common()->IfFalse(), branch);
-          if (field_type->NumClasses() > 0) {
-            // Emit a (sequence of) map checks for the value.
-            ZoneVector<Node*> this_controls(zone());
+          this_control =
+              graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+                               this_effect, this_control);
+          if (field_type->NumClasses() == 1) {
+            // Emit a map check for the value.
             Node* this_value_map = this_effect = graph()->NewNode(
                 simplified()->LoadField(AccessBuilder::ForMap()), this_value,
                 this_effect, this_control);
-            for (auto i = field_type->Classes(); !i.Done(); i.Advance()) {
-              Handle<Map> field_map(i.Current());
-              check = graph()->NewNode(
-                  simplified()->ReferenceEqual(Type::Internal()),
-                  this_value_map, jsgraph()->Constant(field_map));
-              branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                        check, this_control);
-              this_control = graph()->NewNode(common()->IfFalse(), branch);
-              this_controls.push_back(
-                  graph()->NewNode(common()->IfTrue(), branch));
-            }
-            exit_controls.push_back(this_control);
-            int const this_control_count =
-                static_cast<int>(this_controls.size());
+            Node* check = graph()->NewNode(
+                simplified()->ReferenceEqual(Type::Internal()), this_value_map,
+                jsgraph()->Constant(field_type->Classes().Current()));
             this_control =
-                (this_control_count == 1)
-                    ? this_controls.front()
-                    : graph()->NewNode(common()->Merge(this_control_count),
-                                       this_control_count,
-                                       &this_controls.front());
+                graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                 frame_state, this_effect, this_control);
+          } else {
+            DCHECK_EQ(0, field_type->NumClasses());
           }
         } else {
           DCHECK(field_type->Is(Type::Tagged()));
@@ -403,39 +400,7 @@
     controls.push_back(this_control);
   }
 
-  // Collect the fallthrough control as final "exit" control.
-  if (fallthrough_control != control) {
-    // Mark the last fallthrough branch as deferred.
-    MarkAsDeferred(fallthrough_control);
-  }
-  exit_controls.push_back(fallthrough_control);
-
-  // Also collect the "receiver is smi" control if we didn't handle the case of
-  // Number primitives in the polymorphic branches above.
-  if (receiverissmi_control != nullptr) {
-    // Mark the "receiver is smi" case as deferred.
-    MarkAsDeferred(receiverissmi_control);
-    DCHECK_EQ(exit_effect, receiverissmi_effect);
-    exit_controls.push_back(receiverissmi_control);
-  }
-
-  // Generate the single "exit" point, where we end up if either all
-  // map/instance type checks failed, or one of the assumptions inside one of
-  // the cases fails (i.e. a failing prototype chain check).
-  // TODO(bmeurer): Consider falling back to IC here if deoptimization is
-  // disabled.
-  int const exit_control_count = static_cast<int>(exit_controls.size());
-  Node* exit_control =
-      (exit_control_count == 1)
-          ? exit_controls.front()
-          : graph()->NewNode(common()->Merge(exit_control_count),
-                             exit_control_count, &exit_controls.front());
-  Node* deoptimize =
-      graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
-                       frame_state, exit_effect, exit_control);
-  // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-  NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
-  Revisit(graph()->end());
+  DCHECK_NULL(fallthrough_control);
 
   // Generate the final merge point for all (polymorphic) branches.
   int const control_count = static_cast<int>(controls.size());
@@ -562,17 +527,10 @@
   ZoneVector<Node*> effects(zone());
   ZoneVector<Node*> controls(zone());
 
-  // The list of "exiting" controls, which currently go to a single deoptimize.
-  // TODO(bmeurer): Consider using an IC as fallback.
-  Node* const exit_effect = effect;
-  ZoneVector<Node*> exit_controls(zone());
-
   // Ensure that {receiver} is a heap object.
   Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
-  exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
-  control = graph()->NewNode(common()->IfFalse(), branch);
+  control = graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+                             effect, control);
 
   // Load the {receiver} map. The resulting effect is the dominating effect for
   // all (polymorphic) branches.
@@ -582,7 +540,8 @@
 
   // Generate code for the various different element access patterns.
   Node* fallthrough_control = control;
-  for (ElementAccessInfo const& access_info : access_infos) {
+  for (size_t j = 0; j < access_infos.size(); ++j) {
+    ElementAccessInfo const& access_info = access_infos[j];
     Node* this_receiver = receiver;
     Node* this_value = value;
     Node* this_index = index;
@@ -595,35 +554,61 @@
     {
       ZoneVector<Node*> this_controls(zone());
       ZoneVector<Node*> this_effects(zone());
+      size_t num_transitions = access_info.transitions().size();
+      int num_classes = access_info.receiver_type()->NumClasses();
       for (auto i = access_info.receiver_type()->Classes(); !i.Done();
            i.Advance()) {
+        DCHECK_LT(0, num_classes);
         Handle<Map> map = i.Current();
         Node* check =
             graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
                              receiver_map, jsgraph()->Constant(map));
-        Node* branch =
-            graph()->NewNode(common()->Branch(), check, fallthrough_control);
-        this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+        if (--num_classes == 0 && num_transitions == 0 &&
+            j == access_infos.size() - 1) {
+          // Last map check on the fallthrough control path, do a conditional
+          // eager deoptimization exit here.
+          // TODO(turbofan): This is ugly as hell! We should probably introduce
+          // macro-ish operators for property access that encapsulate this whole
+          // mess.
+          this_controls.push_back(graph()->NewNode(common()->DeoptimizeUnless(),
+                                                   check, frame_state, effect,
+                                                   fallthrough_control));
+          fallthrough_control = nullptr;
+        } else {
+          Node* branch =
+              graph()->NewNode(common()->Branch(), check, fallthrough_control);
+          this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+          fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+        }
         this_effects.push_back(effect);
-        fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
         if (!map->IsJSArrayMap()) receiver_is_jsarray = false;
       }
 
       // Generate possible elements kind transitions.
       for (auto transition : access_info.transitions()) {
+        DCHECK_LT(0u, num_transitions);
         Handle<Map> transition_source = transition.first;
         Handle<Map> transition_target = transition.second;
+        Node* transition_control;
+        Node* transition_effect = effect;
 
         // Check if {receiver} has the specified {transition_source} map.
         Node* check = graph()->NewNode(
             simplified()->ReferenceEqual(Type::Any()), receiver_map,
             jsgraph()->HeapConstant(transition_source));
-        Node* branch =
-            graph()->NewNode(common()->Branch(), check, fallthrough_control);
+        if (--num_transitions == 0 && j == access_infos.size() - 1) {
+          transition_control =
+              graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+                               transition_effect, fallthrough_control);
+          fallthrough_control = nullptr;
+        } else {
+          Node* branch =
+              graph()->NewNode(common()->Branch(), check, fallthrough_control);
+          fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+          transition_control = graph()->NewNode(common()->IfTrue(), branch);
+        }
 
         // Migrate {receiver} from {transition_source} to {transition_target}.
-        Node* transition_control = graph()->NewNode(common()->IfTrue(), branch);
-        Node* transition_effect = effect;
         if (IsSimpleMapChangeTransition(transition_source->elements_kind(),
                                         transition_target->elements_kind())) {
           // In-place migration, just store the {transition_target} map.
@@ -647,8 +632,6 @@
         }
         this_controls.push_back(transition_control);
         this_effects.push_back(transition_effect);
-
-        fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
       }
 
       // Create single chokepoint for the control.
@@ -679,10 +662,8 @@
     if (!NumberMatcher(this_index).HasValue()) {
       Node* check =
           graph()->NewNode(simplified()->ObjectIsNumber(), this_index);
-      Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                      check, this_control);
-      exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
-      this_control = graph()->NewNode(common()->IfTrue(), branch);
+      this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                      frame_state, this_effect, this_control);
       this_index = graph()->NewNode(common()->Guard(Type::Number()), this_index,
                                     this_control);
     }
@@ -694,10 +675,8 @@
           graph()->NewNode(simplified()->NumberToUint32(), this_index);
       Node* check = graph()->NewNode(simplified()->NumberEqual(), this_index32,
                                      this_index);
-      Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                      check, this_control);
-      exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
-      this_control = graph()->NewNode(common()->IfTrue(), branch);
+      this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                      frame_state, this_effect, this_control);
       this_index = this_index32;
     }
 
@@ -716,13 +695,11 @@
       Node* this_elements_map = this_effect =
           graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
                            this_elements, this_effect, this_control);
-      check = graph()->NewNode(
+      Node* check = graph()->NewNode(
           simplified()->ReferenceEqual(Type::Any()), this_elements_map,
           jsgraph()->HeapConstant(factory()->fixed_array_map()));
-      branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), check,
-                                this_control);
-      exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
-      this_control = graph()->NewNode(common()->IfTrue(), branch);
+      this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                      frame_state, this_effect, this_control);
     }
 
     // Load the length of the {receiver}.
@@ -739,10 +716,8 @@
     // Check that the {index} is in the valid range for the {receiver}.
     Node* check = graph()->NewNode(simplified()->NumberLessThan(), this_index,
                                    this_length);
-    Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), check,
-                                    this_control);
-    exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
-    this_control = graph()->NewNode(common()->IfTrue(), branch);
+    this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                    frame_state, this_effect, this_control);
 
     // Compute the element access.
     Type* element_type = Type::Any();
@@ -781,16 +756,16 @@
         Node* check =
             graph()->NewNode(simplified()->ReferenceEqual(element_access.type),
                              this_value, jsgraph()->TheHoleConstant());
-        Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                        check, this_control);
-        Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-        Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
         // Check if we are allowed to turn the hole into undefined.
         Type* initial_holey_array_type = Type::Class(
             handle(isolate()->get_initial_js_array_map(elements_kind)),
             graph()->zone());
         if (receiver_type->NowIs(initial_holey_array_type) &&
             isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
+          Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                          check, this_control);
+          Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+          Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
           // Add a code dependency on the array protector cell.
           AssumePrototypesStable(receiver_type, native_context,
                                  isolate()->initial_object_prototype());
@@ -805,8 +780,9 @@
               Type::Union(element_type, Type::Undefined(), graph()->zone());
         } else {
           // Deoptimize in case of the hole.
-          exit_controls.push_back(if_true);
-          this_control = if_false;
+          this_control =
+              graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+                               this_effect, this_control);
         }
         // Rename the result to represent the actual type (not polluted by the
         // hole).
@@ -833,29 +809,24 @@
               check, jsgraph()->UndefinedConstant(), this_value);
         } else {
           // Deoptimize in case of the hole.
-          Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                          check, this_control);
-          this_control = graph()->NewNode(common()->IfFalse(), branch);
-          exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+          this_control =
+              graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+                               this_effect, this_control);
         }
       }
     } else {
       DCHECK_EQ(AccessMode::kStore, access_mode);
       if (IsFastSmiElementsKind(elements_kind)) {
         Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
-        Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                        check, this_control);
-        exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
-        this_control = graph()->NewNode(common()->IfTrue(), branch);
+        this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                        frame_state, this_effect, this_control);
         this_value = graph()->NewNode(common()->Guard(type_cache_.kSmi),
                                       this_value, this_control);
       } else if (IsFastDoubleElementsKind(elements_kind)) {
         Node* check =
             graph()->NewNode(simplified()->ObjectIsNumber(), this_value);
-        Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                        check, this_control);
-        exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
-        this_control = graph()->NewNode(common()->IfTrue(), branch);
+        this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                        frame_state, this_effect, this_control);
         this_value = graph()->NewNode(common()->Guard(Type::Number()),
                                       this_value, this_control);
       }
@@ -870,30 +841,7 @@
     controls.push_back(this_control);
   }
 
-  // Collect the fallthrough control as final "exit" control.
-  if (fallthrough_control != control) {
-    // Mark the last fallthrough branch as deferred.
-    MarkAsDeferred(fallthrough_control);
-  }
-  exit_controls.push_back(fallthrough_control);
-
-  // Generate the single "exit" point, where we end up if either all
-  // map/instance type checks failed, or one of the assumptions inside one of
-  // the cases fails (i.e. a failing prototype chain check).
-  // TODO(bmeurer): Consider falling back to IC here if deoptimization is
-  // disabled.
-  int const exit_control_count = static_cast<int>(exit_controls.size());
-  Node* exit_control =
-      (exit_control_count == 1)
-          ? exit_controls.front()
-          : graph()->NewNode(common()->Merge(exit_control_count),
-                             exit_control_count, &exit_controls.front());
-  Node* deoptimize =
-      graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
-                       frame_state, exit_effect, exit_control);
-  // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-  NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
-  Revisit(graph()->end());
+  DCHECK_NULL(fallthrough_control);
 
   // Generate the final merge point for all (polymorphic) branches.
   int const control_count = static_cast<int>(controls.size());
@@ -1048,18 +996,6 @@
 }
 
 
-void JSNativeContextSpecialization::MarkAsDeferred(Node* if_projection) {
-  Node* branch = NodeProperties::GetControlInput(if_projection);
-  DCHECK_EQ(IrOpcode::kBranch, branch->opcode());
-  if (if_projection->opcode() == IrOpcode::kIfTrue) {
-    NodeProperties::ChangeOp(branch, common()->Branch(BranchHint::kFalse));
-  } else {
-    DCHECK_EQ(IrOpcode::kIfFalse, if_projection->opcode());
-    NodeProperties::ChangeOp(branch, common()->Branch(BranchHint::kTrue));
-  }
-}
-
-
 MaybeHandle<Context> JSNativeContextSpecialization::GetNativeContext(
     Node* node) {
   Node* const context = NodeProperties::GetContextInput(node);
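
The change that repeats throughout this file is easiest to see side by side. A sketch of the before/after shape (not a quote of any single call site):

  // Old: every failing check branched to a collected exit control, and all
  // exits merged into one shared Deoptimize at the end of the lowering:
  //
  //   Node* branch = graph()->NewNode(common()->Branch(), check, control);
  //   exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
  //   control = graph()->NewNode(common()->IfTrue(), branch);
  //   ...
  //   // once, at the end: Merge(exit_controls) -> Deoptimize(frame_state, ...)
  //
  // New: a single node per check, deoptimizing eagerly in place.
  // DeoptimizeUnless() bails out when {check} is false; DeoptimizeIf() is the
  // dual and bails out when it is true (used for the ObjectIsSmi fast-fail):
  //
  //   control = graph()->NewNode(common()->DeoptimizeUnless(), check,
  //                              frame_state, effect, control);
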
diff --git a/src/compiler/js-native-context-specialization.h b/src/compiler/js-native-context-specialization.h
index 4251d72..5562c6e 100644
--- a/src/compiler/js-native-context-specialization.h
+++ b/src/compiler/js-native-context-specialization.h
@@ -85,10 +85,6 @@
                               Handle<Context> native_context,
                               Handle<JSObject> holder);
 
-  // Assuming that {if_projection} is either IfTrue or IfFalse, adds a hint on
-  // the dominating Branch that {if_projection} is the unlikely (deferred) case.
-  void MarkAsDeferred(Node* if_projection);
-
   // Retrieve the native context from the given {node} if known.
   MaybeHandle<Context> GetNativeContext(Node* node);
 
diff --git a/src/compiler/js-operator.cc b/src/compiler/js-operator.cc
index 5fcd519..98e090b 100644
--- a/src/compiler/js-operator.cc
+++ b/src/compiler/js-operator.cc
@@ -346,7 +346,8 @@
 bool operator==(CreateLiteralParameters const& lhs,
                 CreateLiteralParameters const& rhs) {
   return lhs.constant().location() == rhs.constant().location() &&
-         lhs.flags() == rhs.flags() && lhs.index() == rhs.index();
+         lhs.length() == rhs.length() && lhs.flags() == rhs.flags() &&
+         lhs.index() == rhs.index();
 }
 
 
@@ -357,12 +358,14 @@
 
 
 size_t hash_value(CreateLiteralParameters const& p) {
-  return base::hash_combine(p.constant().location(), p.flags(), p.index());
+  return base::hash_combine(p.constant().location(), p.length(), p.flags(),
+                            p.index());
 }
 
 
 std::ostream& operator<<(std::ostream& os, CreateLiteralParameters const& p) {
-  return os << Brief(*p.constant()) << ", " << p.flags() << ", " << p.index();
+  return os << Brief(*p.constant()) << ", " << p.length() << ", " << p.flags()
+            << ", " << p.index();
 }
 
 
@@ -382,10 +385,12 @@
   V(GreaterThan, Operator::kNoProperties, 2, 1)            \
   V(LessThanOrEqual, Operator::kNoProperties, 2, 1)        \
   V(GreaterThanOrEqual, Operator::kNoProperties, 2, 1)     \
-  V(ToNumber, Operator::kNoProperties, 1, 1)               \
-  V(ToString, Operator::kNoProperties, 1, 1)               \
+  V(ToInteger, Operator::kNoProperties, 1, 1)              \
+  V(ToLength, Operator::kNoProperties, 1, 1)               \
   V(ToName, Operator::kNoProperties, 1, 1)                 \
+  V(ToNumber, Operator::kNoProperties, 1, 1)               \
   V(ToObject, Operator::kNoProperties, 1, 1)               \
+  V(ToString, Operator::kNoProperties, 1, 1)               \
   V(Yield, Operator::kNoProperties, 1, 1)                  \
   V(Create, Operator::kEliminatable, 2, 1)                 \
   V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
@@ -733,12 +738,11 @@
       parameters);                                         // parameter
 }
 
-
 const Operator* JSOperatorBuilder::CreateLiteralArray(
-    Handle<FixedArray> constant_elements, int literal_flags,
-    int literal_index) {
-  CreateLiteralParameters parameters(constant_elements, literal_flags,
-                                     literal_index);
+    Handle<FixedArray> constant_elements, int literal_flags, int literal_index,
+    int number_of_elements) {
+  CreateLiteralParameters parameters(constant_elements, number_of_elements,
+                                     literal_flags, literal_index);
   return new (zone()) Operator1<CreateLiteralParameters>(        // --
       IrOpcode::kJSCreateLiteralArray, Operator::kNoProperties,  // opcode
       "JSCreateLiteralArray",                                    // name
@@ -746,12 +750,11 @@
       parameters);                                               // parameter
 }
 
-
 const Operator* JSOperatorBuilder::CreateLiteralObject(
     Handle<FixedArray> constant_properties, int literal_flags,
-    int literal_index) {
-  CreateLiteralParameters parameters(constant_properties, literal_flags,
-                                     literal_index);
+    int literal_index, int number_of_properties) {
+  CreateLiteralParameters parameters(constant_properties, number_of_properties,
+                                     literal_flags, literal_index);
   return new (zone()) Operator1<CreateLiteralParameters>(         // --
       IrOpcode::kJSCreateLiteralObject, Operator::kNoProperties,  // opcode
       "JSCreateLiteralObject",                                    // name
@@ -762,7 +765,7 @@
 
 const Operator* JSOperatorBuilder::CreateLiteralRegExp(
     Handle<String> constant_pattern, int literal_flags, int literal_index) {
-  CreateLiteralParameters parameters(constant_pattern, literal_flags,
+  CreateLiteralParameters parameters(constant_pattern, -1, literal_flags,
                                      literal_index);
   return new (zone()) Operator1<CreateLiteralParameters>(         // --
       IrOpcode::kJSCreateLiteralRegExp, Operator::kNoProperties,  // opcode
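
A usage sketch for the widened parameter class (constant_elements, literal_flags and literal_index stand for whatever the caller has at hand; the value 3 is illustrative):

  CreateLiteralParameters p(constant_elements, /* number_of_elements */ 3,
                            literal_flags, literal_index);
  DCHECK_EQ(3, p.length());
  // RegExp literals carry no element/property count, so CreateLiteralRegExp
  // passes -1 as a sentinel for {length}, as seen above.
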
diff --git a/src/compiler/js-operator.h b/src/compiler/js-operator.h
index 070e71e..eb323c9 100644
--- a/src/compiler/js-operator.h
+++ b/src/compiler/js-operator.h
@@ -350,15 +350,18 @@
 // JSCreateLiteralRegExp operators.
 class CreateLiteralParameters final {
  public:
-  CreateLiteralParameters(Handle<HeapObject> constant, int flags, int index)
-      : constant_(constant), flags_(flags), index_(index) {}
+  CreateLiteralParameters(Handle<HeapObject> constant, int length, int flags,
+                          int index)
+      : constant_(constant), length_(length), flags_(flags), index_(index) {}
 
   Handle<HeapObject> constant() const { return constant_; }
+  int length() const { return length_; }
   int flags() const { return flags_; }
   int index() const { return index_; }
 
  private:
   Handle<HeapObject> const constant_;
+  int const length_;
   int const flags_;
   int const index_;
 };
@@ -401,10 +404,12 @@
   const Operator* Modulus(BinaryOperationHints hints);
 
   const Operator* ToBoolean(ToBooleanHints hints);
-  const Operator* ToNumber();
-  const Operator* ToString();
+  const Operator* ToInteger();
+  const Operator* ToLength();
   const Operator* ToName();
+  const Operator* ToNumber();
   const Operator* ToObject();
+  const Operator* ToString();
   const Operator* Yield();
 
   const Operator* Create();
@@ -414,9 +419,11 @@
                                 PretenureFlag pretenure);
   const Operator* CreateIterResultObject();
   const Operator* CreateLiteralArray(Handle<FixedArray> constant_elements,
-                                     int literal_flags, int literal_index);
+                                     int literal_flags, int literal_index,
+                                     int number_of_elements);
   const Operator* CreateLiteralObject(Handle<FixedArray> constant_properties,
-                                      int literal_flags, int literal_index);
+                                      int literal_flags, int literal_index,
+                                      int number_of_properties);
   const Operator* CreateLiteralRegExp(Handle<String> constant_pattern,
                                       int literal_flags, int literal_index);
 
diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc
index 11ae3a9..7e1a0dc 100644
--- a/src/compiler/js-typed-lowering.cc
+++ b/src/compiler/js-typed-lowering.cc
@@ -533,15 +533,11 @@
     return r.ChangeToPureOperator(
         simplified()->ReferenceEqual(Type::Receiver()), invert);
   }
-  if (r.OneInputIs(Type::NullOrUndefined())) {
-    Callable const callable = CodeFactory::CompareNilIC(isolate(), kNullValue);
-    CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
-        isolate(), graph()->zone(), callable.descriptor(), 0,
-        CallDescriptor::kNeedsFrameState, node->op()->properties());
-    node->RemoveInput(r.LeftInputIs(Type::NullOrUndefined()) ? 0 : 1);
-    node->InsertInput(graph()->zone(), 0,
-                      jsgraph()->HeapConstant(callable.code()));
-    NodeProperties::ChangeOp(node, common()->Call(desc));
+  if (r.OneInputIs(Type::Undetectable())) {
+    RelaxEffectsAndControls(node);
+    node->RemoveInput(r.LeftInputIs(Type::Undetectable()) ? 0 : 1);
+    node->TrimInputCount(1);
+    NodeProperties::ChangeOp(node, simplified()->ObjectIsUndetectable());
     if (invert) {
       // Insert a boolean not to invert the value.
       Node* value = graph()->NewNode(simplified()->BooleanNot(), node);
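
On a concrete input, the rewrite above does the following (a sketch of the semantics; Undetectable covers undefined, null and document.all-style undetectable objects, which is exactly the set that abstract equality groups with null):

  //   x == null, with x typed Undetectable
  //     => ObjectIsUndetectable(x)
  //   x != null, with x typed Undetectable
  //     => BooleanNot(ObjectIsUndetectable(x))
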
@@ -648,6 +644,51 @@
   return NoChange();
 }
 
+Reduction JSTypedLowering::ReduceJSToInteger(Node* node) {
+  Node* const input = NodeProperties::GetValueInput(node, 0);
+  Type* const input_type = NodeProperties::GetType(input);
+  if (input_type->Is(type_cache_.kIntegerOrMinusZero)) {
+    // JSToInteger(x:integer) => x
+    ReplaceWithValue(node, input);
+    return Replace(input);
+  }
+  return NoChange();
+}
+
+Reduction JSTypedLowering::ReduceJSToLength(Node* node) {
+  Node* input = NodeProperties::GetValueInput(node, 0);
+  Type* input_type = NodeProperties::GetType(input);
+  if (input_type->Is(type_cache_.kIntegerOrMinusZero)) {
+    if (input_type->Max() <= 0.0) {
+      input = jsgraph()->ZeroConstant();
+    } else if (input_type->Min() >= kMaxSafeInteger) {
+      input = jsgraph()->Constant(kMaxSafeInteger);
+    } else {
+      if (input_type->Min() <= 0.0) {
+        input = graph()->NewNode(
+            common()->Select(MachineRepresentation::kTagged),
+            graph()->NewNode(simplified()->NumberLessThanOrEqual(), input,
+                             jsgraph()->ZeroConstant()),
+            jsgraph()->ZeroConstant(), input);
+        input_type = Type::Range(0.0, input_type->Max(), graph()->zone());
+        NodeProperties::SetType(input, input_type);
+      }
+      if (input_type->Max() > kMaxSafeInteger) {
+        input = graph()->NewNode(
+            common()->Select(MachineRepresentation::kTagged),
+            graph()->NewNode(simplified()->NumberLessThanOrEqual(),
+                             jsgraph()->Constant(kMaxSafeInteger), input),
+            jsgraph()->Constant(kMaxSafeInteger), input);
+        input_type =
+            Type::Range(input_type->Min(), kMaxSafeInteger, graph()->zone());
+        NodeProperties::SetType(input, input_type);
+      }
+    }
+    ReplaceWithValue(node, input);
+    return Replace(input);
+  }
+  return NoChange();
+}
 
 Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
   if (input->opcode() == IrOpcode::kJSToNumber) {
@@ -699,7 +740,10 @@
     // JSToNumber(x:boolean) => BooleanToNumber(x)
     return Replace(graph()->NewNode(simplified()->BooleanToNumber(), input));
   }
-  // TODO(turbofan): js-typed-lowering of ToNumber(x:string)
+  if (input_type->Is(Type::String())) {
+    // JSToNumber(x:string) => StringToNumber(x)
+    return Replace(graph()->NewNode(simplified()->StringToNumber(), input));
+  }
   return NoChange();
 }
 
@@ -1684,6 +1728,10 @@
       return ReduceJSModulus(node);
     case IrOpcode::kJSToBoolean:
       return ReduceJSToBoolean(node);
+    case IrOpcode::kJSToInteger:
+      return ReduceJSToInteger(node);
+    case IrOpcode::kJSToLength:
+      return ReduceJSToLength(node);
     case IrOpcode::kJSToNumber:
       return ReduceJSToNumber(node);
     case IrOpcode::kJSToString:
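
The Select chains in ReduceJSToLength above implement a simple scalar clamp; stated directly (a sketch of the semantics, using V8's kMaxSafeInteger, i.e. 2^53 - 1):

  double ToLengthOnIntegers(double x) {
    // Only valid under the kIntegerOrMinusZero input-type check above.
    if (x <= 0.0) return 0.0;  // also canonicalizes -0 to +0
    if (x >= kMaxSafeInteger) return kMaxSafeInteger;
    return x;
  }
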
diff --git a/src/compiler/js-typed-lowering.h b/src/compiler/js-typed-lowering.h
index 4621a45..1517871 100644
--- a/src/compiler/js-typed-lowering.h
+++ b/src/compiler/js-typed-lowering.h
@@ -62,6 +62,8 @@
   Reduction ReduceJSEqual(Node* node, bool invert);
   Reduction ReduceJSStrictEqual(Node* node, bool invert);
   Reduction ReduceJSToBoolean(Node* node);
+  Reduction ReduceJSToInteger(Node* node);
+  Reduction ReduceJSToLength(Node* node);
   Reduction ReduceJSToNumberInput(Node* input);
   Reduction ReduceJSToNumber(Node* node);
   Reduction ReduceJSToStringInput(Node* input);
diff --git a/src/compiler/jump-threading.cc b/src/compiler/jump-threading.cc
index 5abd346..5554282 100644
--- a/src/compiler/jump-threading.cc
+++ b/src/compiler/jump-threading.cc
@@ -96,7 +96,9 @@
           // the frame at start. So we should move the decision of whether
           // to build a frame or not in the register allocator, and trickle it
           // here and to the code generator.
-          if (frame_at_start || !block->must_deconstruct_frame()) {
+          if (frame_at_start ||
+              !(block->must_deconstruct_frame() ||
+                block->must_construct_frame())) {
             fw = code->InputRpo(instr, 0);
           }
           fallthru = false;
@@ -141,7 +143,7 @@
                                     InstructionSequence* code) {
   if (!FLAG_turbo_jt) return;
 
-  Zone local_zone;
+  Zone local_zone(code->isolate()->allocator());
   ZoneVector<bool> skip(static_cast<int>(result.size()), false, &local_zone);
 
   // Skip empty blocks when the previous block doesn't fall through.
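
The amended condition is the heart of this fix; restated as a predicate (a sketch, names as in the hunk above):

  bool CanForwardJump(const InstructionBlock* block, bool frame_at_start) {
    // A jump may only be threaded through {block} if doing so skips no frame
    // work: either the function builds its frame once at the start, or the
    // block neither constructs nor deconstructs a frame.
    return frame_at_start ||
           !(block->must_deconstruct_frame() || block->must_construct_frame());
  }
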
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
index d4a3665..105bd35 100644
--- a/src/compiler/linkage.cc
+++ b/src/compiler/linkage.cc
@@ -160,12 +160,17 @@
     case Runtime::kPushCatchContext:
     case Runtime::kReThrow:
     case Runtime::kStringCompare:
-    case Runtime::kStringEquals:
-    case Runtime::kToFastProperties:  // TODO(jarin): Is it safe?
+    case Runtime::kStringEqual:
+    case Runtime::kStringNotEqual:
+    case Runtime::kStringLessThan:
+    case Runtime::kStringLessThanOrEqual:
+    case Runtime::kStringGreaterThan:
+    case Runtime::kStringGreaterThanOrEqual:
     case Runtime::kTraceEnter:
     case Runtime::kTraceExit:
       return 0;
     case Runtime::kInlineGetPrototype:
+    case Runtime::kInlineNewObject:
     case Runtime::kInlineRegExpConstructResult:
     case Runtime::kInlineRegExpExec:
     case Runtime::kInlineSubString:
@@ -174,13 +179,12 @@
     case Runtime::kInlineToName:
     case Runtime::kInlineToNumber:
     case Runtime::kInlineToObject:
+    case Runtime::kInlineToPrimitive:
     case Runtime::kInlineToPrimitive_Number:
     case Runtime::kInlineToPrimitive_String:
-    case Runtime::kInlineToPrimitive:
     case Runtime::kInlineToString:
       return 1;
     case Runtime::kInlineCall:
-    case Runtime::kInlineTailCall:
     case Runtime::kInlineDeoptimizeNow:
     case Runtime::kInlineThrowNotDateError:
       return 2;
@@ -319,8 +323,9 @@
   MachineType target_type = MachineType::AnyTagged();
   // When entering into an OSR function from unoptimized code the JSFunction
   // is not in a register, but it is on the stack in the marker spill slot.
-  LinkageLocation target_loc = is_osr ? LinkageLocation::ForSavedCallerMarker()
-                                      : regloc(kJSFunctionRegister);
+  LinkageLocation target_loc = is_osr
+                                   ? LinkageLocation::ForSavedCallerFunction()
+                                   : regloc(kJSFunctionRegister);
   return new (zone) CallDescriptor(     // --
       CallDescriptor::kCallJSFunction,  // kind
       target_type,                      // target MachineType
@@ -401,7 +406,8 @@
       properties,                       // properties
       kNoCalleeSaved,                   // callee-saved registers
       kNoCalleeSaved,                   // callee-saved fp
-      flags,                            // flags
+      CallDescriptor::kCanUseRoots |    // flags
+          flags,                        // flags
       descriptor.DebugName(isolate));
 }
 
@@ -431,7 +437,7 @@
 
 
 bool Linkage::ParameterHasSecondaryLocation(int index) const {
-  if (incoming_->kind() != CallDescriptor::kCallJSFunction) return false;
+  if (!incoming_->IsJSFunctionCall()) return false;
   LinkageLocation loc = GetParameterLocation(index);
   return (loc == regloc(kJSFunctionRegister) ||
           loc == regloc(kContextRegister));
diff --git a/src/compiler/linkage.h b/src/compiler/linkage.h
index 3012f56..a0434f8 100644
--- a/src/compiler/linkage.h
+++ b/src/compiler/linkage.h
@@ -76,9 +76,9 @@
                               kPointerSize);
   }
 
-  static LinkageLocation ForSavedCallerMarker() {
+  static LinkageLocation ForSavedCallerFunction() {
     return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
-                               StandardFrameConstants::kMarkerOffset) /
+                               StandardFrameConstants::kFunctionOffset) /
                               kPointerSize);
   }
 
@@ -160,10 +160,11 @@
     kCanUseRoots = 1u << 6,
     // (arm64 only) native stack should be used for arguments.
     kUseNativeStack = 1u << 7,
-    // (arm64 only) call instruction has to restore JSSP.
+    // (arm64 only) call instruction has to restore JSSP or CSP.
     kRestoreJSSP = 1u << 8,
+    kRestoreCSP = 1u << 9,
     // Causes the code generator to initialize the root register.
-    kInitializeRootRegister = 1u << 9,
+    kInitializeRootRegister = 1u << 10,
     kPatchableCallSiteWithNop = kPatchableCallSite | kNeedsNopAfterCall
   };
   typedef base::Flags<Flag> Flags;
@@ -367,6 +368,11 @@
   // Get the location where an incoming OSR value is stored.
   LinkageLocation GetOsrValueLocation(int index) const;
 
+  // A special {Parameter} index for Stub Calls that represents context.
+  static int GetStubCallContextParamIndex(int parameter_count) {
+    return parameter_count + 0;  // Parameter (arity + 0) is special.
+  }
+
   // A special {Parameter} index for JSCalls that represents the new target.
   static int GetJSCallNewTargetParamIndex(int parameter_count) {
     return parameter_count + 0;  // Parameter (arity + 0) is special.
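
Since kRestoreCSP now occupies bit 9, kInitializeRootRegister moves to bit 10; a compile-time check one could add to catch accidental overlaps (a sketch, not part of the patch):

  static_assert((CallDescriptor::kRestoreJSSP ^ CallDescriptor::kRestoreCSP) ==
                    ((1u << 8) | (1u << 9)),
                "JSSP/CSP restore flags must occupy distinct bits");
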
diff --git a/src/compiler/load-elimination.cc b/src/compiler/load-elimination.cc
index 97f1ab0..e19368d 100644
--- a/src/compiler/load-elimination.cc
+++ b/src/compiler/load-elimination.cc
@@ -4,8 +4,11 @@
 
 #include "src/compiler/load-elimination.h"
 
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/simplified-operator.h"
+#include "src/types.h"
 
 namespace v8 {
 namespace internal {
@@ -13,7 +16,6 @@
 
 LoadElimination::~LoadElimination() {}
 
-
 Reduction LoadElimination::Reduce(Node* node) {
   switch (node->opcode()) {
     case IrOpcode::kLoadField:
@@ -24,7 +26,6 @@
   return NoChange();
 }
 
-
 Reduction LoadElimination::ReduceLoadField(Node* node) {
   DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
   FieldAccess const access = FieldAccessOf(node->op());
@@ -45,8 +46,22 @@
         if (access == FieldAccessOf(effect->op())) {
           if (object == NodeProperties::GetValueInput(effect, 0)) {
             Node* const value = NodeProperties::GetValueInput(effect, 1);
-            ReplaceWithValue(node, value);
-            return Replace(value);
+            Type* stored_value_type = NodeProperties::GetType(value);
+            Type* load_type = NodeProperties::GetType(node);
+            // Make sure the replacement's type is a subtype of the node's
+            // type. Otherwise we could confuse optimizations that were
+            // based on the original type.
+            if (stored_value_type->Is(load_type)) {
+              ReplaceWithValue(node, value);
+              return Replace(value);
+            } else {
+              Node* renamed = graph()->NewNode(
+                  common()->Guard(Type::Intersect(stored_value_type, load_type,
+                                                  graph()->zone())),
+                  value, NodeProperties::GetControlInput(node));
+              ReplaceWithValue(node, renamed);
+              return Replace(renamed);
+            }
           }
           // TODO(turbofan): Alias analysis to the rescue?
           return NoChange();
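
The subtype check above is the safety condition for store-to-load forwarding:
the stored value may replace the load directly only if its type refines the
load's type; otherwise it is routed through a Guard node typed with the
intersection. A self-contained sketch of the same rule over a toy bitset type
lattice (this is not V8's Type API; simple set semantics are assumed):

#include <cstdint>
#include <cstdio>

using ToyType = uint32_t;  // each bit stands for a primitive type

bool Is(ToyType a, ToyType b) { return (a & ~b) == 0; }  // a subtype of b?
ToyType Intersect(ToyType a, ToyType b) { return a & b; }

// Returns the type the load site may assume after the replacement.
ToyType ReplaceLoad(ToyType stored_value_type, ToyType load_type) {
  if (Is(stored_value_type, load_type)) {
    return stored_value_type;  // safe to forward the stored value directly
  }
  // Otherwise a Guard-like node narrows the value to the intersection, so
  // later phases cannot assume more than both types allow.
  return Intersect(stored_value_type, load_type);
}

int main() {
  const ToyType kSmi = 1u << 0, kHeapNumber = 1u << 1;
  const ToyType kNumber = kSmi | kHeapNumber;
  std::printf("%u\n", ReplaceLoad(kSmi, kNumber));  // 1: forwarded directly
  std::printf("%u\n", ReplaceLoad(kNumber, kSmi));  // 1: guarded narrowing
  return 0;
}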
diff --git a/src/compiler/load-elimination.h b/src/compiler/load-elimination.h
index db87d9a..92c6dd0 100644
--- a/src/compiler/load-elimination.h
+++ b/src/compiler/load-elimination.h
@@ -11,15 +11,25 @@
 namespace internal {
 namespace compiler {
 
+class CommonOperatorBuilder;
+class Graph;
+
 class LoadElimination final : public AdvancedReducer {
  public:
-  explicit LoadElimination(Editor* editor) : AdvancedReducer(editor) {}
+  explicit LoadElimination(Editor* editor, Graph* graph,
+                           CommonOperatorBuilder* common)
+      : AdvancedReducer(editor), graph_(graph), common_(common) {}
   ~LoadElimination() final;
 
   Reduction Reduce(Node* node) final;
 
  private:
+  CommonOperatorBuilder* common() const { return common_; }
+  Graph* graph() { return graph_; }
+
   Reduction ReduceLoadField(Node* node);
+  Graph* graph_;
+  CommonOperatorBuilder* common_;
 };
 
 }  // namespace compiler
diff --git a/src/compiler/loop-analysis.h b/src/compiler/loop-analysis.h
index 2ed5bc2..b8bc395 100644
--- a/src/compiler/loop-analysis.h
+++ b/src/compiler/loop-analysis.h
@@ -116,6 +116,8 @@
     return nullptr;
   }
 
+  Zone* zone() const { return zone_; }
+
  private:
   friend class LoopFinderImpl;
 
diff --git a/src/compiler/loop-peeling.cc b/src/compiler/loop-peeling.cc
index b553a9f..5379596 100644
--- a/src/compiler/loop-peeling.cc
+++ b/src/compiler/loop-peeling.cc
@@ -184,7 +184,7 @@
 
 
 bool LoopPeeler::CanPeel(LoopTree* loop_tree, LoopTree::Loop* loop) {
-  Zone zone;
+  Zone zone(loop_tree->zone()->allocator());
   NodeVector exits(&zone);
   NodeVector rets(&zone);
   FindLoopExits(loop_tree, loop, exits, rets);
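
Zone construction now takes an explicit allocator instead of an ambient
default, the same shape as the ZonePool changes in pipeline.cc below. A
minimal arena sketch of that injected-allocator pattern (names here are
hypothetical, not V8's):

#include <cstddef>
#include <cstdlib>
#include <vector>

struct Allocator {
  void* Allocate(size_t n) { return std::malloc(n); }
  void Free(void* p) { std::free(p); }
};

class Arena {
 public:
  explicit Arena(Allocator* allocator) : allocator_(allocator) {}
  ~Arena() {
    for (void* p : blocks_) allocator_->Free(p);  // bulk release on exit
  }
  void* New(size_t n) {
    void* p = allocator_->Allocate(n);
    blocks_.push_back(p);
    return p;
  }

 private:
  Allocator* allocator_;
  std::vector<void*> blocks_;
};

int main() {
  Allocator alloc;
  Arena arena(&alloc);  // cf. Zone zone(loop_tree->zone()->allocator());
  int* x = static_cast<int*>(arena.New(sizeof(int)));
  *x = 42;
  return *x == 42 ? 0 : 1;
}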
diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc
index 3b6f21b..6a506d2 100644
--- a/src/compiler/machine-operator.cc
+++ b/src/compiler/machine-operator.cc
@@ -148,6 +148,7 @@
   V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1)                 \
   V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1)                   \
   V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1)                  \
+  V(TruncateFloat64ToUint32, Operator::kNoProperties, 1, 0, 1)                \
   V(TruncateFloat32ToInt32, Operator::kNoProperties, 1, 0, 1)                 \
   V(TruncateFloat32ToUint32, Operator::kNoProperties, 1, 0, 1)                \
   V(TryTruncateFloat32ToInt64, Operator::kNoProperties, 1, 0, 2)              \
@@ -195,7 +196,13 @@
   V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1)                \
   V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1)                       \
   V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1)                       \
-  V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1)
+  V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1)                 \
+  V(Int32PairAdd, Operator::kNoProperties, 4, 0, 2)                           \
+  V(Int32PairSub, Operator::kNoProperties, 4, 0, 2)                           \
+  V(Int32PairMul, Operator::kNoProperties, 4, 0, 2)                           \
+  V(Word32PairShl, Operator::kNoProperties, 3, 0, 2)                          \
+  V(Word32PairShr, Operator::kNoProperties, 3, 0, 2)                          \
+  V(Word32PairSar, Operator::kNoProperties, 3, 0, 2)
 
 #define PURE_OPTIONAL_OP_LIST(V)                            \
   V(Word32Ctz, Operator::kNoProperties, 1, 0, 1)            \
@@ -467,6 +474,19 @@
   return nullptr;
 }
 
+// On 32-bit platforms we need a reference to the optional operators of
+// 64-bit instructions for later Int64Lowering, even though the platform
+// itself does not support the original 64-bit instruction.
+const Operator* MachineOperatorBuilder::Word64PopcntPlaceholder() {
+  return &cache_.kWord64Popcnt;
+}
+
+// On 32-bit platforms we need a reference to the optional operators of
+// 64-bit instructions for later Int64Lowering, even though the platform
+// itself does not support the original 64-bit instruction.
+const Operator* MachineOperatorBuilder::Word64CtzPlaceholder() {
+  return &cache_.kWord64Ctz;
+}
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
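
The placeholder accessors above let Int64Lowering on 32-bit targets name a
64-bit operator that it will immediately split into 32-bit halves. A
standalone sketch of the lowered form for popcount (plain C++, not TurboFan
nodes):

#include <cassert>
#include <cstdint>

uint32_t Popcnt32(uint32_t x) {
  uint32_t n = 0;
  while (x != 0) {
    x &= x - 1;  // clear the lowest set bit
    ++n;
  }
  return n;
}

// Word64Popcnt lowered to two Word32Popcnt operations plus an add.
uint32_t Popcnt64ViaPair(uint32_t lo, uint32_t hi) {
  return Popcnt32(lo) + Popcnt32(hi);
}

int main() {
  uint64_t v = 0xF0F0F0F00F0F0F0Full;
  assert(Popcnt64ViaPair(static_cast<uint32_t>(v),
                         static_cast<uint32_t>(v >> 32)) == 32);
  return 0;
}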
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
index c5a80aa..68e393a 100644
--- a/src/compiler/machine-operator.h
+++ b/src/compiler/machine-operator.h
@@ -164,6 +164,7 @@
   const OptionalOperator Word32Ctz();
   const OptionalOperator Word32Popcnt();
   const OptionalOperator Word64Popcnt();
+  const Operator* Word64PopcntPlaceholder();
   const OptionalOperator Word32ReverseBits();
   const OptionalOperator Word64ReverseBits();
   bool Word32ShiftIsSafe() const { return flags_ & kWord32ShiftIsSafe; }
@@ -177,8 +178,16 @@
   const Operator* Word64Ror();
   const Operator* Word64Clz();
   const OptionalOperator Word64Ctz();
+  const Operator* Word64CtzPlaceholder();
   const Operator* Word64Equal();
 
+  const Operator* Int32PairAdd();
+  const Operator* Int32PairSub();
+  const Operator* Int32PairMul();
+  const Operator* Word32PairShl();
+  const Operator* Word32PairShr();
+  const Operator* Word32PairSar();
+
   const Operator* Int32Add();
   const Operator* Int32AddWithOverflow();
   const Operator* Int32Sub();
@@ -219,6 +228,7 @@
   const Operator* ChangeFloat32ToFloat64();
   const Operator* ChangeFloat64ToInt32();   // narrowing
   const Operator* ChangeFloat64ToUint32();  // narrowing
+  const Operator* TruncateFloat64ToUint32();
   const Operator* TruncateFloat32ToInt32();
   const Operator* TruncateFloat32ToUint32();
   const Operator* TryTruncateFloat32ToInt64();
@@ -340,6 +350,7 @@
   V(Word, Shr)            \
   V(Word, Sar)            \
   V(Word, Ror)            \
+  V(Word, Clz)            \
   V(Word, Equal)          \
   V(Int, Add)             \
   V(Int, Sub)             \
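
TruncateFloat64ToUint32 differs from the narrowing ChangeFloat64ToUint32 in
that it only rounds toward zero; it makes no promise that the input was
exactly representable. A rough sketch of the semantics for in-range inputs
(out-of-range behavior varies by architecture and is deliberately not
modeled here):

#include <cassert>
#include <cmath>
#include <cstdint>

uint32_t TruncateFloat64ToUint32(double x) {
  // Round toward zero, then narrow. Defined here only for 0 <= x < 2^32.
  return static_cast<uint32_t>(std::trunc(x));
}

int main() {
  assert(TruncateFloat64ToUint32(3.9) == 3);
  assert(TruncateFloat64ToUint32(4294967295.0) == 4294967295u);
  return 0;
}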
diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips/code-generator-mips.cc
index cdd7e34..9b0d706 100644
--- a/src/compiler/mips/code-generator-mips.cc
+++ b/src/compiler/mips/code-generator-mips.cc
@@ -120,8 +120,11 @@
   MemOperand ToMemOperand(InstructionOperand* op) const {
     DCHECK_NOT_NULL(op);
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    FrameOffset offset = frame_access_state()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index());
+    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
+  }
+
+  MemOperand SlotToMemOperand(int slot) const {
+    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
     return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
   }
 };
@@ -221,7 +224,8 @@
         value_(value),
         scratch0_(scratch0),
         scratch1_(scratch1),
-        mode_(mode) {}
+        mode_(mode),
+        must_save_lr_(!gen->frame_access_state()->has_frame()) {}
 
   void Generate() final {
     if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -235,7 +239,7 @@
                                              : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
-    if (!frame()->needs_frame()) {
+    if (must_save_lr_) {
       // We need to save and restore ra if the frame was elided.
       __ Push(ra);
     }
@@ -243,7 +247,7 @@
                          remembered_set_action, save_fp_mode);
     __ Addu(scratch1_, object_, index_);
     __ CallStub(&stub);
-    if (!frame()->needs_frame()) {
+    if (must_save_lr_) {
       __ Pop(ra);
     }
   }
@@ -255,6 +259,7 @@
   Register const scratch0_;
   Register const scratch1_;
   RecordWriteMode const mode_;
+  bool must_save_lr_;
 };
 
 
@@ -467,6 +472,13 @@
     __ bind(&done);                                                           \
   }
 
+void CodeGenerator::AssembleDeconstructFrame() {
+  __ mov(sp, fp);
+  __ Pop(ra, fp);
+}
+
+void CodeGenerator::AssembleSetupStackPointer() {}
+
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
   if (sp_slot_delta > 0) {
@@ -482,20 +494,44 @@
     __ Subu(sp, sp, Operand(-sp_slot_delta * kPointerSize));
     frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
   }
-  if (frame()->needs_frame()) {
+  if (frame_access_state()->has_frame()) {
     __ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
     __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   }
   frame_access_state()->SetFrameAccessToSP();
 }
 
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+                                                     Register scratch1,
+                                                     Register scratch2,
+                                                     Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Label done;
+
+  // Check if current frame is an arguments adaptor frame.
+  __ lw(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ Branch(&done, ne, scratch1,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Load the arguments count from the current arguments adaptor frame
+  // (note that it does not include the receiver).
+  Register caller_args_count_reg = scratch1;
+  __ lw(caller_args_count_reg,
+        MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
+  __ bind(&done);
+}
 
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   MipsOperandConverter i(this, instr);
   InstructionCode opcode = instr->opcode();
-
-  switch (ArchOpcodeField::decode(opcode)) {
+  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+  switch (arch_opcode) {
     case kArchCallCodeObject: {
       EnsureSpaceForLazyDeopt();
       if (instr->InputAt(0)->IsImmediate()) {
@@ -509,9 +545,15 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       if (instr->InputAt(0)->IsImmediate()) {
         __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
                 RelocInfo::CODE_TARGET);
@@ -537,6 +579,7 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallJSFunctionFromJSFunction:
     case kArchTailCallJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
@@ -547,6 +590,11 @@
 
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       __ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Jump(at);
       frame_access_state()->ClearSPDelta();
@@ -606,7 +654,7 @@
       __ mov(i.OutputRegister(), fp);
       break;
     case kArchParentFramePointer:
-      if (frame_access_state()->frame()->needs_frame()) {
+      if (frame_access_state()->has_frame()) {
         __ lw(i.OutputRegister(), MemOperand(fp, 0));
       } else {
         __ mov(i.OutputRegister(), fp);
@@ -875,6 +923,14 @@
     case kMipsCmpD:
       // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
       break;
+    case kMipsMulPair: {
+      __ Mulu(i.OutputRegister(1), i.OutputRegister(0), i.InputRegister(0),
+              i.InputRegister(2));
+      __ mul(kScratchReg, i.InputRegister(0), i.InputRegister(3));
+      __ mul(kScratchReg2, i.InputRegister(1), i.InputRegister(2));
+      __ Addu(i.OutputRegister(1), i.OutputRegister(1), kScratchReg);
+      __ Addu(i.OutputRegister(1), i.OutputRegister(1), kScratchReg2);
+    } break;
     case kMipsAddD:
       // TODO(plind): add special case: combine mult & add.
       __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1525,17 +1581,16 @@
 void CodeGenerator::AssemblePrologue() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   int stack_shrink_slots = frame()->GetSpillSlotCount();
-  if (descriptor->IsCFunctionCall()) {
-    __ Push(ra, fp);
-    __ mov(fp, sp);
-  } else if (descriptor->IsJSFunctionCall()) {
-    __ Prologue(this->info()->GeneratePreagedPrologue());
-  } else if (frame()->needs_frame()) {
-    __ StubPrologue();
-  } else {
-    frame()->SetElidedFrameSizeInSlots(0);
+  if (frame_access_state()->has_frame()) {
+    if (descriptor->IsCFunctionCall()) {
+      __ Push(ra, fp);
+      __ mov(fp, sp);
+    } else if (descriptor->IsJSFunctionCall()) {
+      __ Prologue(this->info()->GeneratePreagedPrologue());
+    } else {
+      __ StubPrologue(info()->GetOutputStackFrameType());
+    }
   }
-  frame_access_state()->SetFrameAccessToDefault();
 
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
@@ -1597,17 +1652,15 @@
   }
 
   if (descriptor->IsCFunctionCall()) {
-    __ mov(sp, fp);
-    __ Pop(ra, fp);
-  } else if (frame()->needs_frame()) {
+    AssembleDeconstructFrame();
+  } else if (frame_access_state()->has_frame()) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ Branch(&return_label_);
       return;
     } else {
       __ bind(&return_label_);
-      __ mov(sp, fp);
-      __ Pop(ra, fp);
+      AssembleDeconstructFrame();
     }
   }
   if (pop_count != 0) {
@@ -1665,9 +1718,9 @@
         case Constant::kHeapObject: {
           Handle<HeapObject> src_object = src.ToHeapObject();
           Heap::RootListIndex index;
-          int offset;
-          if (IsMaterializableFromFrame(src_object, &offset)) {
-            __ lw(dst, MemOperand(fp, offset));
+          int slot;
+          if (IsMaterializableFromFrame(src_object, &slot)) {
+            __ lw(dst, g.SlotToMemOperand(slot));
           } else if (IsMaterializableFromRoot(src_object, &index)) {
             __ LoadRoot(dst, index);
           } else {
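
The kMipsMulPair sequence earlier in this file is the schoolbook identity
for the low 64 bits of a 64x64 product built from 32-bit halves: one full
32x32 multiply plus the two low-half cross products folded into the high
word. A standalone check of that identity (plain C++ mirroring the Mulu,
mul, and Addu steps):

#include <cassert>
#include <cstdint>

void MulPair(uint32_t a_lo, uint32_t a_hi, uint32_t b_lo, uint32_t b_hi,
             uint32_t* out_lo, uint32_t* out_hi) {
  uint64_t lo_product = static_cast<uint64_t>(a_lo) * b_lo;  // like Mulu
  *out_lo = static_cast<uint32_t>(lo_product);
  *out_hi = static_cast<uint32_t>(lo_product >> 32) +
            a_lo * b_hi +  // low 32 bits only, like mul
            a_hi * b_lo;   // carries past bit 63 are discarded
}

int main() {
  uint64_t a = 0x123456789ABCDEF0ull, b = 0x0FEDCBA987654321ull;
  uint32_t lo, hi;
  MulPair(static_cast<uint32_t>(a), static_cast<uint32_t>(a >> 32),
          static_cast<uint32_t>(b), static_cast<uint32_t>(b >> 32), &lo, &hi);
  uint64_t expected = a * b;  // unsigned multiply already wraps mod 2^64
  assert(lo == static_cast<uint32_t>(expected));
  assert(hi == static_cast<uint32_t>(expected >> 32));
  return 0;
}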
diff --git a/src/compiler/mips/instruction-codes-mips.h b/src/compiler/mips/instruction-codes-mips.h
index 64aecd0..d85c2a7 100644
--- a/src/compiler/mips/instruction-codes-mips.h
+++ b/src/compiler/mips/instruction-codes-mips.h
@@ -59,6 +59,7 @@
   V(MipsSqrtD)                     \
   V(MipsMaxD)                      \
   V(MipsMinD)                      \
+  V(MipsMulPair)                   \
   V(MipsFloat32RoundDown)          \
   V(MipsFloat32RoundTruncate)      \
   V(MipsFloat32RoundUp)            \
diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc
index df972f7..f86ffe7 100644
--- a/src/compiler/mips/instruction-selector-mips.cc
+++ b/src/compiler/mips/instruction-selector-mips.cc
@@ -114,8 +114,13 @@
   DCHECK_GE(arraysize(inputs), input_count);
   DCHECK_GE(arraysize(outputs), output_count);
 
-  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
-                 inputs);
+  opcode = cont->Encode(opcode);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
 }
 
 
@@ -189,9 +194,7 @@
     size_t input_count = 0;
     inputs[input_count++] = g.UseUniqueRegister(base);
     inputs[input_count++] = g.UseUniqueRegister(index);
-    inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
-                                ? g.UseRegister(value)
-                                : g.UseUniqueRegister(value);
+    inputs[input_count++] = g.UseUniqueRegister(value);
     RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
     switch (write_barrier_kind) {
       case kNoWriteBarrier:
@@ -392,6 +395,27 @@
   VisitRRO(this, kMipsSar, node);
 }
 
+void InstructionSelector::VisitInt32PairAdd(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32PairSub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32PairMul(Node* node) {
+  MipsOperandGenerator g(this);
+  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+                                 g.UseUniqueRegister(node->InputAt(1)),
+                                 g.UseUniqueRegister(node->InputAt(2)),
+                                 g.UseUniqueRegister(node->InputAt(3))};
+  InstructionOperand outputs[] = {
+      g.DefineAsRegister(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+  Emit(kMipsMulPair, 2, outputs, 4, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
 
 void InstructionSelector::VisitWord32Ror(Node* node) {
   VisitRRO(this, kMipsRor, node);
@@ -612,6 +636,9 @@
   VisitRR(this, kMipsTruncUwD, node);
 }
 
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+  VisitRR(this, kMipsTruncUwD, node);
+}
 
 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
   MipsOperandGenerator g(this);
@@ -878,6 +905,7 @@
 
 bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
 
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
 
 void InstructionSelector::VisitCheckedLoad(Node* node) {
   CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
@@ -981,6 +1009,9 @@
   if (cont->IsBranch()) {
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -1086,9 +1117,6 @@
   VisitWordCompare(selector, node, kMipsCmp, cont, false);
 }
 
-}  // namespace
-
-
 // Shared routine for word comparisons against zero.
 void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                           Node* value, FlagsContinuation* cont) {
@@ -1177,12 +1205,17 @@
   if (cont->IsBranch()) {
     selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
+                             g.TempImmediate(0), cont->frame_state());
   } else {
+    DCHECK(cont->IsSet());
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
                    g.TempImmediate(0));
   }
 }
 
+}  // namespace
 
 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                       BasicBlock* fbranch) {
@@ -1190,6 +1223,17 @@
   VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
 }
 
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
 
 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   MipsOperandGenerator g(this);
@@ -1220,7 +1264,7 @@
 
 
 void InstructionSelector::VisitWord32Equal(Node* const node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int32BinopMatcher m(node);
   if (m.right().Is(0)) {
     return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
@@ -1230,32 +1274,34 @@
 
 
 void InstructionSelector::VisitInt32LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kMipsAddOvf, &cont);
   }
   FlagsContinuation cont;
@@ -1265,7 +1311,7 @@
 
 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kMipsSubOvf, &cont);
   }
   FlagsContinuation cont;
@@ -1274,37 +1320,39 @@
 
 
 void InstructionSelector::VisitFloat32Equal(Node* node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64Equal(Node* node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
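
The FlagsContinuation changes throughout this file replace an overloaded
constructor with named factories (ForSet, ForDeoptimize), so each use site
states how the condition is consumed. A toy sketch of that pattern (not the
real V8 class):

#include <cassert>

enum class Mode { kNone, kBranch, kSet, kDeoptimize };

class Continuation {
 public:
  static Continuation ForSet(int condition) {
    return Continuation(Mode::kSet, condition);
  }
  static Continuation ForDeoptimize(int condition) {
    return Continuation(Mode::kDeoptimize, condition);
  }
  bool IsSet() const { return mode_ == Mode::kSet; }
  bool IsDeoptimize() const { return mode_ == Mode::kDeoptimize; }
  int condition() const { return condition_; }

 private:
  Continuation(Mode mode, int condition)
      : mode_(mode), condition_(condition) {}
  Mode mode_;
  int condition_;
};

int main() {
  Continuation cont = Continuation::ForDeoptimize(/*kEqual=*/0);
  assert(cont.IsDeoptimize() && !cont.IsSet());
  assert(cont.condition() == 0);
  return 0;
}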
 
diff --git a/src/compiler/mips64/code-generator-mips64.cc b/src/compiler/mips64/code-generator-mips64.cc
index 373a1a6..c6341b1 100644
--- a/src/compiler/mips64/code-generator-mips64.cc
+++ b/src/compiler/mips64/code-generator-mips64.cc
@@ -120,8 +120,11 @@
   MemOperand ToMemOperand(InstructionOperand* op) const {
     DCHECK_NOT_NULL(op);
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    FrameOffset offset = frame_access_state()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index());
+    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
+  }
+
+  MemOperand SlotToMemOperand(int slot) const {
+    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
     return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
   }
 };
@@ -221,7 +224,8 @@
         value_(value),
         scratch0_(scratch0),
         scratch1_(scratch1),
-        mode_(mode) {}
+        mode_(mode),
+        must_save_lr_(!gen->frame_access_state()->has_frame()) {}
 
   void Generate() final {
     if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -235,7 +239,7 @@
                                              : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
-    if (!frame()->needs_frame()) {
+    if (must_save_lr_) {
       // We need to save and restore ra if the frame was elided.
       __ Push(ra);
     }
@@ -243,7 +247,7 @@
                          remembered_set_action, save_fp_mode);
     __ Daddu(scratch1_, object_, index_);
     __ CallStub(&stub);
-    if (!frame()->needs_frame()) {
+    if (must_save_lr_) {
       __ Pop(ra);
     }
   }
@@ -255,6 +259,7 @@
   Register const scratch0_;
   Register const scratch1_;
   RecordWriteMode const mode_;
+  bool must_save_lr_;
 };
 
 
@@ -479,6 +484,13 @@
     __ bind(&done);                                                           \
   }
 
+void CodeGenerator::AssembleDeconstructFrame() {
+  __ mov(sp, fp);
+  __ Pop(ra, fp);
+}
+
+void CodeGenerator::AssembleSetupStackPointer() {}
+
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
   if (sp_slot_delta > 0) {
@@ -494,20 +506,44 @@
     __ Dsubu(sp, sp, Operand(-sp_slot_delta * kPointerSize));
     frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
   }
-  if (frame()->needs_frame()) {
+  if (frame_access_state()->has_frame()) {
     __ ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
     __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   }
   frame_access_state()->SetFrameAccessToSP();
 }
 
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+                                                     Register scratch1,
+                                                     Register scratch2,
+                                                     Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Label done;
+
+  // Check if current frame is an arguments adaptor frame.
+  __ ld(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ Branch(&done, ne, scratch3,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Load the arguments count from the current arguments adaptor frame
+  // (note that it does not include the receiver).
+  Register caller_args_count_reg = scratch1;
+  __ ld(caller_args_count_reg,
+        MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
+  __ bind(&done);
+}
 
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   MipsOperandConverter i(this, instr);
   InstructionCode opcode = instr->opcode();
-
-  switch (ArchOpcodeField::decode(opcode)) {
+  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+  switch (arch_opcode) {
     case kArchCallCodeObject: {
       EnsureSpaceForLazyDeopt();
       if (instr->InputAt(0)->IsImmediate()) {
@@ -521,9 +557,15 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       if (instr->InputAt(0)->IsImmediate()) {
         __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
                 RelocInfo::CODE_TARGET);
@@ -548,6 +590,7 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallJSFunctionFromJSFunction:
     case kArchTailCallJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
@@ -557,6 +600,11 @@
       }
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Jump(at);
       frame_access_state()->ClearSPDelta();
@@ -616,7 +664,7 @@
       __ mov(i.OutputRegister(), fp);
       break;
     case kArchParentFramePointer:
-      if (frame_access_state()->frame()->needs_frame()) {
+      if (frame_access_state()->has_frame()) {
         __ ld(i.OutputRegister(), MemOperand(fp, 0));
       } else {
         __ mov(i.OutputRegister(), fp);
@@ -1831,17 +1879,16 @@
 
 void CodeGenerator::AssemblePrologue() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-  if (descriptor->IsCFunctionCall()) {
-    __ Push(ra, fp);
-    __ mov(fp, sp);
-  } else if (descriptor->IsJSFunctionCall()) {
-    __ Prologue(this->info()->GeneratePreagedPrologue());
-  } else if (frame()->needs_frame()) {
-    __ StubPrologue();
-  } else {
-    frame()->SetElidedFrameSizeInSlots(0);
+  if (frame_access_state()->has_frame()) {
+    if (descriptor->IsCFunctionCall()) {
+      __ Push(ra, fp);
+      __ mov(fp, sp);
+    } else if (descriptor->IsJSFunctionCall()) {
+      __ Prologue(this->info()->GeneratePreagedPrologue());
+    } else {
+      __ StubPrologue(info()->GetOutputStackFrameType());
+    }
   }
-  frame_access_state()->SetFrameAccessToDefault();
 
   int stack_shrink_slots = frame()->GetSpillSlotCount();
   if (info()->is_osr()) {
@@ -1900,17 +1947,15 @@
   }
 
   if (descriptor->IsCFunctionCall()) {
-    __ mov(sp, fp);
-    __ Pop(ra, fp);
-  } else if (frame()->needs_frame()) {
+    AssembleDeconstructFrame();
+  } else if (frame_access_state()->has_frame()) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ Branch(&return_label_);
       return;
     } else {
       __ bind(&return_label_);
-      __ mov(sp, fp);
-      __ Pop(ra, fp);
+      AssembleDeconstructFrame();
     }
   }
   int pop_count = static_cast<int>(descriptor->StackParameterCount());
@@ -1969,9 +2014,9 @@
         case Constant::kHeapObject: {
           Handle<HeapObject> src_object = src.ToHeapObject();
           Heap::RootListIndex index;
-          int offset;
-          if (IsMaterializableFromFrame(src_object, &offset)) {
-            __ ld(dst, MemOperand(fp, offset));
+          int slot;
+          if (IsMaterializableFromFrame(src_object, &slot)) {
+            __ ld(dst, g.SlotToMemOperand(slot));
           } else if (IsMaterializableFromRoot(src_object, &index)) {
             __ LoadRoot(dst, index);
           } else {
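
AssemblePopArgumentsAdaptorFrame above relies on the adaptor frame storing
its argument count as a Smi, hence the SmiUntag after the load. A rough
sketch of the tagging scheme assumed here (a shift of 32 on 64-bit targets
such as mips64; 32-bit targets use a single tag bit instead):

#include <cassert>
#include <cstdint>

constexpr int kSmiShift64 = 32;

int64_t SmiTag(int32_t value) {
  return static_cast<int64_t>(value) << kSmiShift64;
}

int32_t SmiUntag(int64_t tagged) {
  return static_cast<int32_t>(tagged >> kSmiShift64);
}

int main() {
  int64_t length_slot = SmiTag(3);  // e.g. the caller passed 3 arguments
  assert(SmiUntag(length_slot) == 3);
  assert((length_slot & 1) == 0);   // a clear low bit marks a Smi
  return 0;
}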
diff --git a/src/compiler/mips64/instruction-selector-mips64.cc b/src/compiler/mips64/instruction-selector-mips64.cc
index 44a5470..5e2b5f2 100644
--- a/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/src/compiler/mips64/instruction-selector-mips64.cc
@@ -119,8 +119,13 @@
   DCHECK_GE(arraysize(inputs), input_count);
   DCHECK_GE(arraysize(outputs), output_count);
 
-  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
-                 inputs);
+  opcode = cont->Encode(opcode);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
 }
 
 
@@ -196,9 +201,7 @@
     size_t input_count = 0;
     inputs[input_count++] = g.UseUniqueRegister(base);
     inputs[input_count++] = g.UseUniqueRegister(index);
-    inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
-                                ? g.UseRegister(value)
-                                : g.UseUniqueRegister(value);
+    inputs[input_count++] = g.UseUniqueRegister(value);
     RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
     switch (write_barrier_kind) {
       case kNoWriteBarrier:
@@ -922,6 +925,9 @@
   VisitRR(this, kMips64TruncUwD, node);
 }
 
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+  VisitRR(this, kMips64TruncUwD, node);
+}
 
 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
   Mips64OperandGenerator g(this);
@@ -1320,6 +1326,7 @@
 
 bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
 
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
 
 void InstructionSelector::VisitCheckedLoad(Node* node) {
   CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
@@ -1431,6 +1438,9 @@
   if (cont->IsBranch()) {
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -1542,7 +1552,6 @@
   VisitWordCompare(selector, node, kMips64Cmp, cont, false);
 }
 
-}  // namespace
 
 
 void EmitWordCompareZero(InstructionSelector* selector, Node* value,
@@ -1553,6 +1562,9 @@
   if (cont->IsBranch()) {
     selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
+                             g.TempImmediate(0), cont->frame_state());
   } else {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
                    g.TempImmediate(0));
@@ -1677,6 +1689,7 @@
   EmitWordCompareZero(selector, value, cont);
 }
 
+}  // namespace
 
 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                       BasicBlock* fbranch) {
@@ -1684,6 +1697,17 @@
   VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
 }
 
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
 
 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   Mips64OperandGenerator g(this);
@@ -1714,7 +1738,7 @@
 
 
 void InstructionSelector::VisitWord32Equal(Node* const node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int32BinopMatcher m(node);
   if (m.right().Is(0)) {
     return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
@@ -1725,32 +1749,34 @@
 
 
 void InstructionSelector::VisitInt32LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kMips64Dadd, &cont);
   }
   FlagsContinuation cont;
@@ -1760,7 +1786,7 @@
 
 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kMips64Dsub, &cont);
   }
   FlagsContinuation cont;
@@ -1770,7 +1796,7 @@
 
 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kMips64DaddOvf, &cont);
   }
   FlagsContinuation cont;
@@ -1780,7 +1806,7 @@
 
 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kMips64DsubOvf, &cont);
   }
   FlagsContinuation cont;
@@ -1789,7 +1815,7 @@
 
 
 void InstructionSelector::VisitWord64Equal(Node* const node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int64BinopMatcher m(node);
   if (m.right().Is(0)) {
     return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
@@ -1800,61 +1826,65 @@
 
 
 void InstructionSelector::VisitInt64LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWord64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWord64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint64LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWord64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWord64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32Equal(Node* node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64Equal(Node* node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h
index c78e15e..b038d15 100644
--- a/src/compiler/opcodes.h
+++ b/src/compiler/opcodes.h
@@ -21,6 +21,8 @@
   V(IfDefault)             \
   V(Merge)                 \
   V(Deoptimize)            \
+  V(DeoptimizeIf)          \
+  V(DeoptimizeUnless)      \
   V(Return)                \
   V(TailCall)              \
   V(Terminate)             \
@@ -94,10 +96,12 @@
 
 #define JS_CONVERSION_UNOP_LIST(V) \
   V(JSToBoolean)                   \
-  V(JSToNumber)                    \
-  V(JSToString)                    \
+  V(JSToInteger)                   \
+  V(JSToLength)                    \
   V(JSToName)                      \
-  V(JSToObject)
+  V(JSToNumber)                    \
+  V(JSToObject)                    \
+  V(JSToString)
 
 #define JS_OTHER_UNOP_LIST(V) \
   V(JSTypeOf)
@@ -181,10 +185,17 @@
   V(NumberShiftLeft)               \
   V(NumberShiftRight)              \
   V(NumberShiftRightLogical)       \
+  V(NumberImul)                    \
+  V(NumberClz32)                   \
+  V(NumberCeil)                    \
+  V(NumberFloor)                   \
+  V(NumberRound)                   \
+  V(NumberTrunc)                   \
   V(NumberToInt32)                 \
   V(NumberToUint32)                \
   V(NumberIsHoleNaN)               \
   V(PlainPrimitiveToNumber)        \
+  V(StringToNumber)                \
   V(ChangeTaggedToInt32)           \
   V(ChangeTaggedToUint32)          \
   V(ChangeTaggedToFloat64)         \
@@ -202,7 +213,8 @@
   V(StoreElement)                  \
   V(ObjectIsNumber)                \
   V(ObjectIsReceiver)              \
-  V(ObjectIsSmi)
+  V(ObjectIsSmi)                   \
+  V(ObjectIsUndetectable)
 
 // Opcodes for Machine-level operators.
 #define MACHINE_COMPARE_BINOP_LIST(V) \
@@ -273,6 +285,7 @@
   V(ChangeFloat32ToFloat64)     \
   V(ChangeFloat64ToInt32)       \
   V(ChangeFloat64ToUint32)      \
+  V(TruncateFloat64ToUint32)    \
   V(TruncateFloat32ToInt32)     \
   V(TruncateFloat32ToUint32)    \
   V(TryTruncateFloat32ToInt64)  \
@@ -330,7 +343,13 @@
   V(LoadFramePointer)           \
   V(LoadParentFramePointer)     \
   V(CheckedLoad)                \
-  V(CheckedStore)
+  V(CheckedStore)               \
+  V(Int32PairAdd)               \
+  V(Int32PairSub)               \
+  V(Int32PairMul)               \
+  V(Word32PairShl)              \
+  V(Word32PairShr)              \
+  V(Word32PairSar)
 
 #define VALUE_OP_LIST(V) \
   COMMON_OP_LIST(V)      \
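
Among the new simplified operators above, NumberClz32 corresponds to
JavaScript's Math.clz32. An illustrative C++ version of the expected
semantics (not the TurboFan operator itself):

#include <cassert>
#include <cstdint>

uint32_t Clz32(uint32_t x) {
  if (x == 0) return 32;  // Math.clz32(0) === 32
  uint32_t n = 0;
  while ((x & 0x80000000u) == 0) {
    x <<= 1;
    ++n;
  }
  return n;
}

int main() {
  assert(Clz32(1) == 31);
  assert(Clz32(0x80000000u) == 0);
  assert(Clz32(0) == 32);
  return 0;
}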
diff --git a/src/compiler/operator-properties.cc b/src/compiler/operator-properties.cc
index 1ee31d5..7f38ca7 100644
--- a/src/compiler/operator-properties.cc
+++ b/src/compiler/operator-properties.cc
@@ -58,6 +58,8 @@
     case IrOpcode::kJSCreateScriptContext:
 
     // Conversions
+    case IrOpcode::kJSToInteger:
+    case IrOpcode::kJSToLength:
     case IrOpcode::kJSToName:
     case IrOpcode::kJSToNumber:
     case IrOpcode::kJSToObject:
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index 21c34fc..1d7e967 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -267,23 +267,26 @@
     register_allocation_data_ = nullptr;
   }
 
-  void InitializeInstructionSequence() {
+  void InitializeInstructionSequence(const CallDescriptor* descriptor) {
     DCHECK(sequence_ == nullptr);
     InstructionBlocks* instruction_blocks =
         InstructionSequence::InstructionBlocksFor(instruction_zone(),
                                                   schedule());
     sequence_ = new (instruction_zone()) InstructionSequence(
         info()->isolate(), instruction_zone(), instruction_blocks);
+    if (descriptor && descriptor->RequiresFrameAsIncoming()) {
+      sequence_->instruction_blocks()[0]->mark_needs_frame();
+    } else if (descriptor != nullptr) {
+      DCHECK_EQ(0, descriptor->CalleeSavedFPRegisters());
+      DCHECK_EQ(0, descriptor->CalleeSavedRegisters());
+    }
   }
 
   void InitializeFrameData(CallDescriptor* descriptor) {
     DCHECK(frame_ == nullptr);
     int fixed_frame_size = 0;
     if (descriptor != nullptr) {
-      fixed_frame_size = (descriptor->IsCFunctionCall())
-                             ? StandardFrameConstants::kFixedSlotCountAboveFp +
-                                   StandardFrameConstants::kCPSlotCount
-                             : StandardFrameConstants::kFixedSlotCount;
+      fixed_frame_size = CalculateFixedFrameSize(descriptor);
     }
     frame_ = new (instruction_zone()) Frame(fixed_frame_size, descriptor);
   }
@@ -338,6 +341,16 @@
   Zone* register_allocation_zone_;
   RegisterAllocationData* register_allocation_data_;
 
+  int CalculateFixedFrameSize(CallDescriptor* descriptor) {
+    if (descriptor->IsJSFunctionCall()) {
+      return StandardFrameConstants::kFixedSlotCount;
+    }
+    return descriptor->IsCFunctionCall()
+               ? (CommonFrameConstants::kFixedSlotCountAboveFp +
+                  CommonFrameConstants::kCPSlotCount)
+               : TypedFrameConstants::kFixedSlotCount;
+  }
+
   DISALLOW_COPY_AND_ASSIGN(PipelineData);
 };
 
@@ -539,7 +552,7 @@
                                               data->common());
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                          data->common(), data->machine());
-    JSCallReducer call_reducer(&graph_reducer, data->jsgraph(),
+    JSCallReducer call_reducer(data->jsgraph(),
                                data->info()->is_deoptimization_enabled()
                                    ? JSCallReducer::kDeoptimizationEnabled
                                    : JSCallReducer::kNoFlags,
@@ -615,7 +628,8 @@
     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                               data->common());
-    LoadElimination load_elimination(&graph_reducer);
+    LoadElimination load_elimination(&graph_reducer, data->graph(),
+                                     data->common());
     JSBuiltinReducer builtin_reducer(&graph_reducer, data->jsgraph());
     MaybeHandle<LiteralsArray> literals_array =
         data->info()->is_native_context_specializing()
@@ -639,6 +653,7 @@
         data->info()->is_deoptimization_enabled()
             ? JSIntrinsicLowering::kDeoptimizationEnabled
             : JSIntrinsicLowering::kDeoptimizationDisabled);
+    SimplifiedOperatorReducer simple_reducer(data->jsgraph());
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                          data->common(), data->machine());
     AddReducer(data, &graph_reducer, &dead_code_elimination);
@@ -649,6 +664,7 @@
     AddReducer(data, &graph_reducer, &typed_lowering);
     AddReducer(data, &graph_reducer, &intrinsic_lowering);
     AddReducer(data, &graph_reducer, &load_elimination);
+    AddReducer(data, &graph_reducer, &simple_reducer);
     AddReducer(data, &graph_reducer, &common_reducer);
     graph_reducer.ReduceGraph();
   }
@@ -1079,7 +1095,7 @@
 
 
 Handle<Code> Pipeline::GenerateCode() {
-  ZonePool zone_pool;
+  ZonePool zone_pool(isolate()->allocator());
   base::SmartPointer<PipelineStatistics> pipeline_statistics;
 
   if (FLAG_turbo_stats) {
@@ -1240,7 +1256,7 @@
   CompilationInfo info(debug_name, isolate, graph->zone(), flags);
 
   // Construct a pipeline for scheduling and code generation.
-  ZonePool zone_pool;
+  ZonePool zone_pool(isolate->allocator());
   PipelineData data(&zone_pool, &info, graph, schedule);
   base::SmartPointer<PipelineStatistics> pipeline_statistics;
   if (FLAG_turbo_stats) {
@@ -1281,7 +1297,7 @@
                                               Graph* graph,
                                               Schedule* schedule) {
   // Construct a pipeline for scheduling and code generation.
-  ZonePool zone_pool;
+  ZonePool zone_pool(info->isolate()->allocator());
   PipelineData data(&zone_pool, info, graph, schedule);
   base::SmartPointer<PipelineStatistics> pipeline_statistics;
   if (FLAG_turbo_stats) {
@@ -1304,7 +1320,7 @@
                                            InstructionSequence* sequence,
                                            bool run_verifier) {
   CompilationInfo info("testing", sequence->isolate(), sequence->zone());
-  ZonePool zone_pool;
+  ZonePool zone_pool(sequence->isolate()->allocator());
   PipelineData data(&zone_pool, &info, sequence);
   Pipeline pipeline(&info);
   pipeline.data_ = &data;
@@ -1329,7 +1345,7 @@
                                                        data->schedule());
   }
 
-  data->InitializeInstructionSequence();
+  data->InitializeInstructionSequence(call_descriptor);
 
   data->InitializeFrameData(call_descriptor);
   // Select and schedule instructions covering the scheduled graph.
@@ -1358,6 +1374,7 @@
   AllocateRegisters(
       RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
       call_descriptor, run_verifier);
+  Run<FrameElisionPhase>();
   if (data->compilation_failed()) {
     info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
     return Handle<Code>();
@@ -1366,11 +1383,7 @@
   BeginPhaseKind("code generation");
   // TODO(mtrofin): move this off to the register allocator.
   bool generate_frame_at_start =
-      !FLAG_turbo_frame_elision || !data_->info()->IsStub() ||
-      !data_->frame()->needs_frame() ||
-      data_->sequence()->instruction_blocks().front()->needs_frame() ||
-      linkage.GetIncomingDescriptor()->CalleeSavedFPRegisters() != 0 ||
-      linkage.GetIncomingDescriptor()->CalleeSavedRegisters() != 0;
+      data_->sequence()->instruction_blocks().front()->must_construct_frame();
   // Optimize jumps.
   if (FLAG_turbo_jt) {
     Run<JumpThreadingPhase>(generate_frame_at_start);
@@ -1430,7 +1443,7 @@
   base::SmartPointer<Zone> verifier_zone;
   RegisterAllocatorVerifier* verifier = nullptr;
   if (run_verifier) {
-    verifier_zone.Reset(new Zone());
+    verifier_zone.Reset(new Zone(isolate()->allocator()));
     verifier = new (verifier_zone.get()) RegisterAllocatorVerifier(
         verifier_zone.get(), config, data->sequence());
   }
@@ -1438,6 +1451,8 @@
   base::SmartArrayPointer<char> debug_name;
 #ifdef DEBUG
   debug_name = info()->GetDebugName();
+  data_->sequence()->ValidateEdgeSplitForm();
+  data_->sequence()->ValidateDeferredBlockExitPaths();
 #endif
 
   data->InitializeRegisterAllocationData(config, descriptor, debug_name.get());
@@ -1477,12 +1492,6 @@
     Run<MergeSplintersPhase>();
   }
 
-  // We plan to enable frame elision only for stubs and bytecode handlers.
-  if (FLAG_turbo_frame_elision && info()->IsStub()) {
-    Run<LocateSpillSlotsPhase>();
-    Run<FrameElisionPhase>();
-  }
-
   Run<AssignSpillSlotsPhase>();
 
   Run<CommitAssignmentPhase>();
@@ -1493,6 +1502,8 @@
     Run<OptimizeMovesPhase>();
   }
 
+  Run<LocateSpillSlotsPhase>();
+
   if (FLAG_trace_turbo_graph) {
     OFStream os(stdout);
     PrintableInstructionSequence printable = {config, data->sequence()};
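
CalculateFixedFrameSize above now distinguishes typed (stub) frames from
standard JS and C frames. A standalone sketch of the three-way split, with
placeholder slot counts rather than V8's real frame constants:

#include <cstdio>

enum class CallKind { kJSFunction, kCFunction, kStub };

int FixedFrameSlots(CallKind kind) {
  const int kStandardSlots = 4;  // fp, pc, context, function (assumed)
  const int kAboveFpSlots = 2;   // fp, pc (assumed)
  const int kTypedSlots = 3;     // fp, pc, frame-type marker (assumed)
  switch (kind) {
    case CallKind::kJSFunction: return kStandardSlots;
    case CallKind::kCFunction:  return kAboveFpSlots;  // + constant pool slot
    case CallKind::kStub:       return kTypedSlots;
  }
  return 0;
}

int main() {
  std::printf("js=%d c=%d stub=%d\n",
              FixedFrameSlots(CallKind::kJSFunction),
              FixedFrameSlots(CallKind::kCFunction),
              FixedFrameSlots(CallKind::kStub));
  return 0;
}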
diff --git a/src/compiler/ppc/code-generator-ppc.cc b/src/compiler/ppc/code-generator-ppc.cc
index 7fc6dd9..6f1e588 100644
--- a/src/compiler/ppc/code-generator-ppc.cc
+++ b/src/compiler/ppc/code-generator-ppc.cc
@@ -32,6 +32,7 @@
   RCBit OutputRCBit() const {
     switch (instr_->flags_mode()) {
       case kFlags_branch:
+      case kFlags_deoptimize:
       case kFlags_set:
         return SetRC;
       case kFlags_none:
@@ -103,8 +104,11 @@
   MemOperand ToMemOperand(InstructionOperand* op) const {
     DCHECK_NOT_NULL(op);
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    FrameOffset offset = frame_access_state()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index());
+    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
+  }
+
+  MemOperand SlotToMemOperand(int slot) const {
+    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
     return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
   }
 };
@@ -183,7 +187,8 @@
         value_(value),
         scratch0_(scratch0),
         scratch1_(scratch1),
-        mode_(mode) {}
+        mode_(mode),
+        must_save_lr_(!gen->frame_access_state()->has_frame()) {}
 
   void Generate() final {
     if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -197,7 +202,7 @@
                                              : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
-    if (!frame()->needs_frame()) {
+    if (must_save_lr_) {
       // We need to save and restore lr if the frame was elided.
       __ mflr(scratch1_);
       __ Push(scratch1_);
@@ -211,7 +216,7 @@
       __ add(scratch1_, object_, offset_);
     }
     __ CallStub(&stub);
-    if (!frame()->needs_frame()) {
+    if (must_save_lr_) {
       // We need to save and restore lr if the frame was elided.
       __ Pop(scratch1_);
       __ mtlr(scratch1_);
@@ -226,6 +231,7 @@
   Register const scratch0_;
   Register const scratch1_;
   RecordWriteMode const mode_;
+  bool must_save_lr_;
 };
 
 
@@ -293,20 +299,24 @@
 
 }  // namespace
 
-#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr)                            \
+#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr, round)                     \
   do {                                                               \
     __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                  i.OutputRCBit());                                   \
+    if (round) {                                                     \
+      __ frsp(i.OutputDoubleRegister(), i.OutputDoubleRegister());   \
+    }                                                                \
   } while (0)
 
-
-#define ASSEMBLE_FLOAT_BINOP_RC(asm_instr)                           \
+#define ASSEMBLE_FLOAT_BINOP_RC(asm_instr, round)                    \
   do {                                                               \
     __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                  i.InputDoubleRegister(1), i.OutputRCBit());         \
+    if (round) {                                                     \
+      __ frsp(i.OutputDoubleRegister(), i.OutputDoubleRegister());   \
+    }                                                                \
   } while (0)
 
-
 #define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm)           \
   do {                                                         \
     if (HasRegisterInput(instr, 1)) {                          \
@@ -662,6 +672,11 @@
     DCHECK_EQ(LeaveRC, i.OutputRCBit());                      \
   } while (0)
 
+void CodeGenerator::AssembleDeconstructFrame() {
+  __ LeaveFrame(StackFrame::MANUAL);
+}
+
+void CodeGenerator::AssembleSetupStackPointer() {}
 
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
@@ -678,12 +693,36 @@
     __ Add(sp, sp, sp_slot_delta * kPointerSize, r0);
     frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
   }
-  if (frame()->needs_frame()) {
+  if (frame_access_state()->has_frame()) {
     __ RestoreFrameStateForTailCall();
   }
   frame_access_state()->SetFrameAccessToSP();
 }
 
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+                                                     Register scratch1,
+                                                     Register scratch2,
+                                                     Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
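+  // If the caller went through an arguments adaptor frame, pop that frame
+  // and its (possibly larger) argument count as well before tail-calling.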
+  Label done;
+
+  // Check if current frame is an arguments adaptor frame.
+  __ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ CmpSmiLiteral(scratch1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ bne(&done);
+
+  // Load the arguments count from the current arguments adaptor frame
+  // (note that it does not include the receiver).
+  Register caller_args_count_reg = scratch1;
+  __ LoadP(caller_args_count_reg,
+           MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
+  __ bind(&done);
+}
 
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
@@ -708,9 +747,15 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (opcode == kArchTailCallCodeObjectFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       if (HasRegisterInput(instr, 0)) {
         __ addi(ip, i.InputRegister(0),
                 Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -745,6 +790,7 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallJSFunctionFromJSFunction:
     case kArchTailCallJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
@@ -756,6 +802,11 @@
       }
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (opcode == kArchTailCallJSFunctionFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Jump(ip);
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
@@ -823,7 +874,7 @@
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
     case kArchParentFramePointer:
-      if (frame_access_state()->frame()->needs_frame()) {
+      if (frame_access_state()->has_frame()) {
         __ LoadP(i.OutputRegister(), MemOperand(fp, 0));
       } else {
         __ mr(i.OutputRegister(), fp);
@@ -928,6 +979,71 @@
       ASSEMBLE_BINOP_INT_RC(srad, sradi);
       break;
 #endif
+#if !V8_TARGET_ARCH_PPC64
+    case kPPC_AddPair:
+      // i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
+      __ addc(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2));
+      __ adde(i.OutputRegister(1), i.InputRegister(1), i.InputRegister(3));
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kPPC_SubPair:
+      // i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
+      __ subc(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2));
+      __ sube(i.OutputRegister(1), i.InputRegister(1), i.InputRegister(3));
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
+    case kPPC_MulPair:
+      // i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
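+      // Low 64 bits of the full product, built from 32-bit halves:
+      //   low  = low(aL*bL)
+      //   high = high(aL*bL) + low(aL*bH) + low(aH*bL)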
+      __ mullw(i.TempRegister(0), i.InputRegister(0), i.InputRegister(3));
+      __ mullw(i.TempRegister(1), i.InputRegister(2), i.InputRegister(1));
+      __ add(i.TempRegister(0), i.TempRegister(0), i.TempRegister(1));
+      __ mullw(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2));
+      __ mulhwu(i.OutputRegister(1), i.InputRegister(0), i.InputRegister(2));
+      __ add(i.OutputRegister(1), i.OutputRegister(1), i.TempRegister(0));
+      break;
+    case kPPC_ShiftLeftPair:
+      if (instr->InputAt(2)->IsImmediate()) {
+        __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
+                         i.InputRegister(0), i.InputRegister(1),
+                         i.InputInt32(2));
+      } else {
+        __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
+                         i.InputRegister(0), i.InputRegister(1), kScratchReg,
+                         i.InputRegister(2));
+      }
+      break;
+    case kPPC_ShiftRightPair:
+      if (instr->InputAt(2)->IsImmediate()) {
+        __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
+                          i.InputRegister(0), i.InputRegister(1),
+                          i.InputInt32(2));
+      } else {
+        __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
+                          i.InputRegister(0), i.InputRegister(1), kScratchReg,
+                          i.InputRegister(2));
+      }
+      break;
+    case kPPC_ShiftRightAlgPair:
+      if (instr->InputAt(2)->IsImmediate()) {
+        __ ShiftRightAlgPair(i.OutputRegister(0), i.OutputRegister(1),
+                             i.InputRegister(0), i.InputRegister(1),
+                             i.InputInt32(2));
+      } else {
+        __ ShiftRightAlgPair(i.OutputRegister(0), i.OutputRegister(1),
+                             i.InputRegister(0), i.InputRegister(1),
+                             kScratchReg, i.InputRegister(2));
+      }
+      break;
+#endif
     case kPPC_RotRight32:
       if (HasRegisterInput(instr, 1)) {
         __ subfic(kScratchReg, i.InputRegister(1), Operand(32));
@@ -992,7 +1108,7 @@
       ASSEMBLE_ADD_WITH_OVERFLOW32();
       break;
     case kPPC_AddDouble:
-      ASSEMBLE_FLOAT_BINOP_RC(fadd);
+      ASSEMBLE_FLOAT_BINOP_RC(fadd, MiscField::decode(instr->opcode()));
       break;
     case kPPC_Sub:
 #if V8_TARGET_ARCH_PPC64
@@ -1015,7 +1131,7 @@
       ASSEMBLE_SUB_WITH_OVERFLOW32();
       break;
     case kPPC_SubDouble:
-      ASSEMBLE_FLOAT_BINOP_RC(fsub);
+      ASSEMBLE_FLOAT_BINOP_RC(fsub, MiscField::decode(instr->opcode()));
       break;
     case kPPC_Mul32:
       __ mullw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
@@ -1036,7 +1152,7 @@
                 i.OutputRCBit());
       break;
     case kPPC_MulDouble:
-      ASSEMBLE_FLOAT_BINOP_RC(fmul);
+      ASSEMBLE_FLOAT_BINOP_RC(fmul, MiscField::decode(instr->opcode()));
       break;
     case kPPC_Div32:
       __ divw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
@@ -1059,7 +1175,7 @@
       break;
 #endif
     case kPPC_DivDouble:
-      ASSEMBLE_FLOAT_BINOP_RC(fdiv);
+      ASSEMBLE_FLOAT_BINOP_RC(fdiv, MiscField::decode(instr->opcode()));
       break;
     case kPPC_Mod32:
       ASSEMBLE_MODULO(divw, mullw);
@@ -1092,25 +1208,25 @@
       ASSEMBLE_FLOAT_MIN(kScratchDoubleReg);
       break;
     case kPPC_AbsDouble:
-      ASSEMBLE_FLOAT_UNOP_RC(fabs);
+      ASSEMBLE_FLOAT_UNOP_RC(fabs, 0);
       break;
     case kPPC_SqrtDouble:
-      ASSEMBLE_FLOAT_UNOP_RC(fsqrt);
+      ASSEMBLE_FLOAT_UNOP_RC(fsqrt, MiscField::decode(instr->opcode()));
       break;
     case kPPC_FloorDouble:
-      ASSEMBLE_FLOAT_UNOP_RC(frim);
+      ASSEMBLE_FLOAT_UNOP_RC(frim, MiscField::decode(instr->opcode()));
       break;
     case kPPC_CeilDouble:
-      ASSEMBLE_FLOAT_UNOP_RC(frip);
+      ASSEMBLE_FLOAT_UNOP_RC(frip, MiscField::decode(instr->opcode()));
       break;
     case kPPC_TruncateDouble:
-      ASSEMBLE_FLOAT_UNOP_RC(friz);
+      ASSEMBLE_FLOAT_UNOP_RC(friz, MiscField::decode(instr->opcode()));
       break;
     case kPPC_RoundDouble:
-      ASSEMBLE_FLOAT_UNOP_RC(frin);
+      ASSEMBLE_FLOAT_UNOP_RC(frin, MiscField::decode(instr->opcode()));
       break;
     case kPPC_NegDouble:
-      ASSEMBLE_FLOAT_UNOP_RC(fneg);
+      ASSEMBLE_FLOAT_UNOP_RC(fneg, 0);
       break;
     case kPPC_Cntlz32:
       __ cntlzw_(i.OutputRegister(), i.InputRegister(0));
@@ -1316,7 +1432,7 @@
     }
 #endif
     case kPPC_DoubleToFloat32:
-      ASSEMBLE_FLOAT_UNOP_RC(frsp);
+      ASSEMBLE_FLOAT_UNOP_RC(frsp, 0);
       break;
     case kPPC_Float32ToDouble:
       // Nothing to do.
@@ -1589,36 +1705,36 @@
     int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
+  // TODO(turbofan): We should be able to generate better code by sharing the
+  // actual final call site and just bl'ing to it here, similar to what we do
+  // in the lithium backend.
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
 }
 
 
 void CodeGenerator::AssemblePrologue() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-  if (descriptor->IsCFunctionCall()) {
-    __ function_descriptor();
-    __ mflr(r0);
-    if (FLAG_enable_embedded_constant_pool) {
-      __ Push(r0, fp, kConstantPoolRegister);
-      // Adjust FP to point to saved FP.
-      __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+  if (frame_access_state()->has_frame()) {
+    if (descriptor->IsCFunctionCall()) {
+      __ function_descriptor();
+      __ mflr(r0);
+      if (FLAG_enable_embedded_constant_pool) {
+        __ Push(r0, fp, kConstantPoolRegister);
+        // Adjust FP to point to saved FP.
+        __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+      } else {
+        __ Push(r0, fp);
+        __ mr(fp, sp);
+      }
+    } else if (descriptor->IsJSFunctionCall()) {
+      __ Prologue(this->info()->GeneratePreagedPrologue(), ip);
     } else {
-      __ Push(r0, fp);
-      __ mr(fp, sp);
+      StackFrame::Type type = info()->GetOutputStackFrameType();
+      // TODO(mbrandy): Detect cases where ip is the entrypoint (for
+      // efficient initialization of the constant pool pointer register).
+      __ StubPrologue(type);
     }
-  } else if (descriptor->IsJSFunctionCall()) {
-    __ Prologue(this->info()->GeneratePreagedPrologue(), ip);
-  } else if (frame()->needs_frame()) {
-    if (!ABI_CALL_VIA_IP && info()->output_code_kind() == Code::WASM_FUNCTION) {
-      // TODO(mbrandy): Restrict only to the wasm wrapper case.
-      __ StubPrologue();
-    } else {
-      __ StubPrologue(ip);
-    }
-  } else {
-    frame()->SetElidedFrameSizeInSlots(0);
   }
-  frame_access_state()->SetFrameAccessToDefault();
 
   int stack_shrink_slots = frame()->GetSpillSlotCount();
   if (info()->is_osr()) {
@@ -1687,20 +1803,18 @@
   }
 
   if (descriptor->IsCFunctionCall()) {
-    __ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
-  } else if (frame()->needs_frame()) {
+    AssembleDeconstructFrame();
+  } else if (frame_access_state()->has_frame()) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ b(&return_label_);
       return;
     } else {
       __ bind(&return_label_);
-      __ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
+      AssembleDeconstructFrame();
     }
-  } else {
-    __ Drop(pop_count);
   }
-  __ Ret();
+  __ Ret(pop_count);
 }
 
 
@@ -1753,9 +1867,9 @@
         case Constant::kHeapObject: {
           Handle<HeapObject> src_object = src.ToHeapObject();
           Heap::RootListIndex index;
-          int offset;
-          if (IsMaterializableFromFrame(src_object, &offset)) {
-            __ LoadP(dst, MemOperand(fp, offset));
+          int slot;
+          if (IsMaterializableFromFrame(src_object, &slot)) {
+            __ LoadP(dst, g.SlotToMemOperand(slot));
           } else if (IsMaterializableFromRoot(src_object, &index)) {
             __ LoadRoot(dst, index);
           } else {
diff --git a/src/compiler/ppc/instruction-codes-ppc.h b/src/compiler/ppc/instruction-codes-ppc.h
index 877ebb5..66c2e99 100644
--- a/src/compiler/ppc/instruction-codes-ppc.h
+++ b/src/compiler/ppc/instruction-codes-ppc.h
@@ -19,10 +19,13 @@
   V(PPC_Xor)                       \
   V(PPC_ShiftLeft32)               \
   V(PPC_ShiftLeft64)               \
+  V(PPC_ShiftLeftPair)             \
   V(PPC_ShiftRight32)              \
   V(PPC_ShiftRight64)              \
+  V(PPC_ShiftRightPair)            \
   V(PPC_ShiftRightAlg32)           \
   V(PPC_ShiftRightAlg64)           \
+  V(PPC_ShiftRightAlgPair)         \
   V(PPC_RotRight32)                \
   V(PPC_RotRight64)                \
   V(PPC_Not)                       \
@@ -32,14 +35,17 @@
   V(PPC_RotLeftAndClearRight64)    \
   V(PPC_Add)                       \
   V(PPC_AddWithOverflow32)         \
+  V(PPC_AddPair)                   \
   V(PPC_AddDouble)                 \
   V(PPC_Sub)                       \
   V(PPC_SubWithOverflow32)         \
+  V(PPC_SubPair)                   \
   V(PPC_SubDouble)                 \
   V(PPC_Mul32)                     \
   V(PPC_Mul64)                     \
   V(PPC_MulHigh32)                 \
   V(PPC_MulHighU32)                \
+  V(PPC_MulPair)                   \
   V(PPC_MulDouble)                 \
   V(PPC_Div32)                     \
   V(PPC_Div64)                     \
diff --git a/src/compiler/ppc/instruction-scheduler-ppc.cc b/src/compiler/ppc/instruction-scheduler-ppc.cc
index fd1df6a..e7d7719 100644
--- a/src/compiler/ppc/instruction-scheduler-ppc.cc
+++ b/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -21,10 +21,13 @@
     case kPPC_Xor:
     case kPPC_ShiftLeft32:
     case kPPC_ShiftLeft64:
+    case kPPC_ShiftLeftPair:
     case kPPC_ShiftRight32:
     case kPPC_ShiftRight64:
+    case kPPC_ShiftRightPair:
     case kPPC_ShiftRightAlg32:
     case kPPC_ShiftRightAlg64:
+    case kPPC_ShiftRightAlgPair:
     case kPPC_RotRight32:
     case kPPC_RotRight64:
     case kPPC_Not:
@@ -34,14 +37,17 @@
     case kPPC_RotLeftAndClearRight64:
     case kPPC_Add:
     case kPPC_AddWithOverflow32:
+    case kPPC_AddPair:
     case kPPC_AddDouble:
     case kPPC_Sub:
     case kPPC_SubWithOverflow32:
+    case kPPC_SubPair:
     case kPPC_SubDouble:
     case kPPC_Mul32:
     case kPPC_Mul64:
     case kPPC_MulHigh32:
     case kPPC_MulHighU32:
+    case kPPC_MulPair:
     case kPPC_MulDouble:
     case kPPC_Div32:
     case kPPC_Div64:
diff --git a/src/compiler/ppc/instruction-selector-ppc.cc b/src/compiler/ppc/instruction-selector-ppc.cc
index 244e6f4..5abb5f1 100644
--- a/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/src/compiler/ppc/instruction-selector-ppc.cc
@@ -71,22 +71,22 @@
 
 namespace {
 
-void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+void VisitRR(InstructionSelector* selector, InstructionCode opcode,
+             Node* node) {
   PPCOperandGenerator g(selector);
   selector->Emit(opcode, g.DefineAsRegister(node),
                  g.UseRegister(node->InputAt(0)));
 }
 
-
-void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+void VisitRRR(InstructionSelector* selector, InstructionCode opcode,
+              Node* node) {
   PPCOperandGenerator g(selector);
   selector->Emit(opcode, g.DefineAsRegister(node),
                  g.UseRegister(node->InputAt(0)),
                  g.UseRegister(node->InputAt(1)));
 }
 
-
-void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
+void VisitRRO(InstructionSelector* selector, InstructionCode opcode, Node* node,
               ImmediateMode operand_mode) {
   PPCOperandGenerator g(selector);
   selector->Emit(opcode, g.DefineAsRegister(node),
@@ -96,8 +96,8 @@
 
 
 #if V8_TARGET_ARCH_PPC64
-void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
-                            Node* node) {
+void VisitTryTruncateDouble(InstructionSelector* selector,
+                            InstructionCode opcode, Node* node) {
   PPCOperandGenerator g(selector);
   InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
   InstructionOperand outputs[2];
@@ -144,15 +144,20 @@
   DCHECK_GE(arraysize(inputs), input_count);
   DCHECK_GE(arraysize(outputs), output_count);
 
-  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
-                 inputs);
+  opcode = cont->Encode(opcode);
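+  // A deoptimizing continuation carries a frame state as an additional
+  // input and therefore has to go through EmitDeoptimize.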
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
 }
 
 
 // Shared routine for multiple binary operations.
 template <typename Matcher>
-void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
-                ImmediateMode operand_mode) {
+void VisitBinop(InstructionSelector* selector, Node* node,
+                InstructionCode opcode, ImmediateMode operand_mode) {
   FlagsContinuation cont;
   VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
 }
@@ -247,9 +252,7 @@
       inputs[input_count++] = g.UseUniqueRegister(offset);
       addressing_mode = kMode_MRR;
     }
-    inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
-                                ? g.UseRegister(value)
-                                : g.UseUniqueRegister(value);
+    inputs[input_count++] = g.UseUniqueRegister(value);
     RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
     switch (write_barrier_kind) {
       case kNoWriteBarrier:
@@ -720,7 +723,6 @@
   VisitRRO(this, kPPC_ShiftRight32, node, kShift32Imm);
 }
 
-
 #if V8_TARGET_ARCH_PPC64
 void InstructionSelector::VisitWord64Shr(Node* node) {
   PPCOperandGenerator g(this);
@@ -782,9 +784,109 @@
   VisitRRO(this, kPPC_ShiftRightAlg32, node, kShift32Imm);
 }
 
+#if !V8_TARGET_ARCH_PPC64
+void VisitPairBinop(InstructionSelector* selector, InstructionCode opcode,
+                    Node* node) {
+  PPCOperandGenerator g(selector);
+
+  // UseUniqueRegister for the high-word inputs keeps them from sharing a
+  // register with the outputs, since the low output word is written before
+  // the high words are read.
+  InstructionOperand inputs[] = {
+      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsRegister(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  selector->Emit(opcode, 2, outputs, 4, inputs);
+}
+
+void InstructionSelector::VisitInt32PairAdd(Node* node) {
+  VisitPairBinop(this, kPPC_AddPair, node);
+}
+
+void InstructionSelector::VisitInt32PairSub(Node* node) {
+  VisitPairBinop(this, kPPC_SubPair, node);
+}
+
+void InstructionSelector::VisitInt32PairMul(Node* node) {
+  PPCOperandGenerator g(this);
+  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+                                 g.UseUniqueRegister(node->InputAt(1)),
+                                 g.UseUniqueRegister(node->InputAt(2)),
+                                 g.UseRegister(node->InputAt(3))};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsRegister(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+
+  Emit(kPPC_MulPair, 2, outputs, 4, inputs, 2, temps);
+}
+
+void VisitPairShift(InstructionSelector* selector, InstructionCode opcode,
+                    Node* node) {
+  PPCOperandGenerator g(selector);
+  Int32Matcher m(node->InputAt(2));
+  InstructionOperand shift_operand;
+  if (m.HasValue()) {
+    shift_operand = g.UseImmediate(m.node());
+  } else {
+    shift_operand = g.UseUniqueRegister(m.node());
+  }
+
+  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
+                                 g.UseRegister(node->InputAt(1)),
+                                 shift_operand};
+
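+  // The low output word reuses the register of the low input word; the
+  // high word of the result is produced in a freshly allocated register.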
+  InstructionOperand outputs[] = {
+      g.DefineSameAsFirst(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  selector->Emit(opcode, 2, outputs, 3, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShl(Node* node) {
+  VisitPairShift(this, kPPC_ShiftLeftPair, node);
+}
+
+void InstructionSelector::VisitWord32PairShr(Node* node) {
+  VisitPairShift(this, kPPC_ShiftRightPair, node);
+}
+
+void InstructionSelector::VisitWord32PairSar(Node* node) {
+  VisitPairShift(this, kPPC_ShiftRightAlgPair, node);
+}
+#endif
 
 #if V8_TARGET_ARCH_PPC64
 void InstructionSelector::VisitWord64Sar(Node* node) {
+  PPCOperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if (CanCover(m.node(), m.left().node()) && m.left().IsLoad() &&
+      m.right().Is(32)) {
+    // Just load and sign-extend the interesting 4 bytes instead. This happens,
+    // for example, when we're loading and untagging SMIs.
+    BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(), true);
+    if (mleft.matches() && mleft.index() == nullptr) {
+      int64_t offset = 0;
+      Node* displacement = mleft.displacement();
+      if (displacement != nullptr) {
+        Int64Matcher mdisplacement(displacement);
+        DCHECK(mdisplacement.HasValue());
+        offset = mdisplacement.Value();
+      }
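+      // Point the load at the 32-bit word holding the Smi payload (the
+      // offset adjustment is endianness-dependent).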
+      offset = SmiWordOffset(offset);
+      if (g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)) {
+        Emit(kPPC_LoadWordS32 | AddressingModeField::encode(kMode_MRI),
+             g.DefineAsRegister(node), g.UseRegister(mleft.base()),
+             g.TempImmediate(offset));
+        return;
+      }
+    }
+  }
   VisitRRO(this, kPPC_ShiftRightAlg64, node, kShift64Imm);
 }
 #endif
@@ -861,7 +963,6 @@
 }
 #endif
 
-
 void InstructionSelector::VisitInt32Sub(Node* node) {
   PPCOperandGenerator g(this);
   Int32BinopMatcher m(node);
@@ -994,6 +1095,9 @@
   VisitRR(this, kPPC_DoubleToUint32, node);
 }
 
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+  VisitRR(this, kPPC_DoubleToUint32, node);
+}
 
 #if V8_TARGET_ARCH_PPC64
 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
@@ -1108,7 +1212,7 @@
 
 
 void InstructionSelector::VisitFloat32Add(Node* node) {
-  VisitRRR(this, kPPC_AddDouble, node);
+  VisitRRR(this, kPPC_AddDouble | MiscField::encode(1), node);
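+  // MiscField(1) requests rounding of the double result back to single
+  // precision in the code generator.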
 }
 
 
@@ -1122,11 +1226,11 @@
   PPCOperandGenerator g(this);
   Float32BinopMatcher m(node);
   if (m.left().IsMinusZero()) {
-    Emit(kPPC_NegDouble, g.DefineAsRegister(node),
+    Emit(kPPC_NegDouble | MiscField::encode(1), g.DefineAsRegister(node),
          g.UseRegister(m.right().node()));
     return;
   }
-  VisitRRR(this, kPPC_SubDouble, node);
+  VisitRRR(this, kPPC_SubDouble | MiscField::encode(1), node);
 }
 
 
@@ -1157,7 +1261,7 @@
 
 
 void InstructionSelector::VisitFloat32Mul(Node* node) {
-  VisitRRR(this, kPPC_MulDouble, node);
+  VisitRRR(this, kPPC_MulDouble | MiscField::encode(1), node);
 }
 
 
@@ -1168,7 +1272,7 @@
 
 
 void InstructionSelector::VisitFloat32Div(Node* node) {
-  VisitRRR(this, kPPC_DivDouble, node);
+  VisitRRR(this, kPPC_DivDouble | MiscField::encode(1), node);
 }
 
 
@@ -1198,7 +1302,7 @@
 
 
 void InstructionSelector::VisitFloat32Abs(Node* node) {
-  VisitRR(this, kPPC_AbsDouble, node);
+  VisitRR(this, kPPC_AbsDouble | MiscField::encode(1), node);
 }
 
 
@@ -1208,7 +1312,7 @@
 
 
 void InstructionSelector::VisitFloat32Sqrt(Node* node) {
-  VisitRR(this, kPPC_SqrtDouble, node);
+  VisitRR(this, kPPC_SqrtDouble | MiscField::encode(1), node);
 }
 
 
@@ -1218,7 +1322,7 @@
 
 
 void InstructionSelector::VisitFloat32RoundDown(Node* node) {
-  VisitRR(this, kPPC_FloorDouble, node);
+  VisitRR(this, kPPC_FloorDouble | MiscField::encode(1), node);
 }
 
 
@@ -1228,7 +1332,7 @@
 
 
 void InstructionSelector::VisitFloat32RoundUp(Node* node) {
-  VisitRR(this, kPPC_CeilDouble, node);
+  VisitRR(this, kPPC_CeilDouble | MiscField::encode(1), node);
 }
 
 
@@ -1238,7 +1342,7 @@
 
 
 void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
-  VisitRR(this, kPPC_TruncateDouble, node);
+  VisitRR(this, kPPC_TruncateDouble | MiscField::encode(1), node);
 }
 
 
@@ -1264,7 +1368,7 @@
 
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop<Int32BinopMatcher>(this, node, kPPC_AddWithOverflow32,
                                          kInt16Imm, &cont);
   }
@@ -1276,7 +1380,7 @@
 
 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop<Int32BinopMatcher>(this, node, kPPC_SubWithOverflow32,
                                          kInt16Imm_Negate, &cont);
   }
@@ -1289,7 +1393,7 @@
 #if V8_TARGET_ARCH_PPC64
 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm,
                                          &cont);
   }
@@ -1300,7 +1404,7 @@
 
 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate,
                                          &cont);
   }
@@ -1336,6 +1440,9 @@
   if (cont->IsBranch()) {
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -1573,6 +1680,17 @@
   VisitWord32CompareZero(this, branch, branch->InputAt(0), &cont);
 }
 
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
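+  // Deoptimize when the condition (input 0) is non-zero; input 1 is the
+  // frame state to deoptimize to.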
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
 
 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   PPCOperandGenerator g(this);
@@ -1603,7 +1721,7 @@
 
 
 void InstructionSelector::VisitWord32Equal(Node* const node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int32BinopMatcher m(node);
   if (m.right().Is(0)) {
     return VisitWord32CompareZero(this, m.node(), m.left().node(), &cont);
@@ -1613,32 +1731,34 @@
 
 
 void InstructionSelector::VisitInt32LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWord32Compare(this, node, &cont);
 }
 
 
 #if V8_TARGET_ARCH_PPC64
 void InstructionSelector::VisitWord64Equal(Node* const node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int64BinopMatcher m(node);
   if (m.right().Is(0)) {
     return VisitWord64CompareZero(this, m.node(), m.left().node(), &cont);
@@ -1648,62 +1768,66 @@
 
 
 void InstructionSelector::VisitInt64LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWord64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWord64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint64LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWord64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWord64Compare(this, node, &cont);
 }
 #endif
 
 
 void InstructionSelector::VisitFloat32Equal(Node* node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64Equal(Node* node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
@@ -1750,6 +1874,7 @@
 
 bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
 
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
 
 void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
   PPCOperandGenerator g(this);
diff --git a/src/compiler/raw-machine-assembler.cc b/src/compiler/raw-machine-assembler.cc
index 0d4b8cb..728d79a 100644
--- a/src/compiler/raw-machine-assembler.cc
+++ b/src/compiler/raw-machine-assembler.cc
@@ -39,6 +39,17 @@
 Schedule* RawMachineAssembler::Export() {
   // Compute the correct codegen order.
   DCHECK(schedule_->rpo_order()->empty());
+  OFStream os(stdout);
+  if (FLAG_trace_turbo_scheduler) {
+    PrintF("--- RAW SCHEDULE -------------------------------------------\n");
+    os << *schedule_;
+  }
+  schedule_->EnsureSplitEdgeForm();
+  schedule_->PropagateDeferredMark();
+  if (FLAG_trace_turbo_scheduler) {
+    PrintF("--- EDGE SPLIT AND PROPAGATED DEFERRED SCHEDULE ------------\n");
+    os << *schedule_;
+  }
   Scheduler::ComputeSpecialRPO(zone(), schedule_);
   // Invalidate RawMachineAssembler.
   Schedule* schedule = schedule_;
@@ -79,15 +90,17 @@
   BasicBlock** succ_blocks = zone()->NewArray<BasicBlock*>(succ_count);
   for (size_t index = 0; index < case_count; ++index) {
     int32_t case_value = case_values[index];
-    BasicBlock* case_block = Use(case_labels[index]);
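+    // Give each case its own block that jumps to the user-supplied label,
+    // so that the switch's successors are unique (split-edge form).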
+    BasicBlock* case_block = schedule()->NewBasicBlock();
     Node* case_node =
         graph()->NewNode(common()->IfValue(case_value), switch_node);
     schedule()->AddNode(case_block, case_node);
+    schedule()->AddGoto(case_block, Use(case_labels[index]));
     succ_blocks[index] = case_block;
   }
-  BasicBlock* default_block = Use(default_label);
+  BasicBlock* default_block = schedule()->NewBasicBlock();
   Node* default_node = graph()->NewNode(common()->IfDefault(), switch_node);
   schedule()->AddNode(default_block, default_node);
+  schedule()->AddGoto(default_block, Use(default_label));
   succ_blocks[case_count] = default_block;
   schedule()->AddSwitch(CurrentBlock(), switch_node, succ_blocks, succ_count);
   current_block_ = nullptr;
@@ -247,6 +260,27 @@
   return tail_call;
 }
 
+Node* RawMachineAssembler::TailCallRuntime0(Runtime::FunctionId function,
+                                            Node* context) {
+  const int kArity = 0;
+  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+      zone(), function, kArity, Operator::kNoProperties,
+      CallDescriptor::kSupportsTailCalls);
+  int return_count = static_cast<int>(desc->ReturnCount());
+
+  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+  Node* ref = AddNode(
+      common()->ExternalConstant(ExternalReference(function, isolate())));
+  Node* arity = Int32Constant(kArity);
+
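+  // A runtime tail call goes through the CEntry stub, passing the external
+  // reference of the runtime function, the argument count and the context.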
+  Node* nodes[] = {centry, ref, arity, context};
+  Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
+
+  NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
+  schedule()->AddTailCall(CurrentBlock(), tail_call);
+  current_block_ = nullptr;
+  return tail_call;
+}
 
 Node* RawMachineAssembler::TailCallRuntime1(Runtime::FunctionId function,
                                             Node* arg1, Node* context) {
@@ -407,6 +441,7 @@
   DCHECK(!label->bound_);
   label->bound_ = true;
   current_block_ = EnsureBlock(label);
+  current_block_->set_deferred(label->deferred_);
 }
 
 
@@ -459,11 +494,6 @@
   return graph()->NewNodeUnchecked(op, input_count, inputs);
 }
 
-
-RawMachineLabel::RawMachineLabel()
-    : block_(nullptr), used_(false), bound_(false) {}
-
-
 RawMachineLabel::~RawMachineLabel() { DCHECK(bound_ || !used_); }
 
 }  // namespace compiler
diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h
index a0cb7a0..f3445ac 100644
--- a/src/compiler/raw-machine-assembler.h
+++ b/src/compiler/raw-machine-assembler.h
@@ -324,6 +324,24 @@
   Node* Uint64Mod(Node* a, Node* b) {
     return AddNode(machine()->Uint64Mod(), a, b);
   }
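+  // 64-bit operations for 32-bit targets: operands and results are split
+  // into (low word, high word) pairs; the second result word is accessed
+  // via Projection(1).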
+  Node* Int32PairAdd(Node* a_low, Node* a_high, Node* b_low, Node* b_high) {
+    return AddNode(machine()->Int32PairAdd(), a_low, a_high, b_low, b_high);
+  }
+  Node* Int32PairSub(Node* a_low, Node* a_high, Node* b_low, Node* b_high) {
+    return AddNode(machine()->Int32PairSub(), a_low, a_high, b_low, b_high);
+  }
+  Node* Int32PairMul(Node* a_low, Node* a_high, Node* b_low, Node* b_high) {
+    return AddNode(machine()->Int32PairMul(), a_low, a_high, b_low, b_high);
+  }
+  Node* Word32PairShl(Node* low_word, Node* high_word, Node* shift) {
+    return AddNode(machine()->Word32PairShl(), low_word, high_word, shift);
+  }
+  Node* Word32PairShr(Node* low_word, Node* high_word, Node* shift) {
+    return AddNode(machine()->Word32PairShr(), low_word, high_word, shift);
+  }
+  Node* Word32PairSar(Node* low_word, Node* high_word, Node* shift) {
+    return AddNode(machine()->Word32PairSar(), low_word, high_word, shift);
+  }
 
 #define INTPTR_BINOP(prefix, name)                     \
   Node* IntPtr##name(Node* a, Node* b) {               \
@@ -332,7 +350,9 @@
   }
 
   INTPTR_BINOP(Int, Add);
+  INTPTR_BINOP(Int, AddWithOverflow);
   INTPTR_BINOP(Int, Sub);
+  INTPTR_BINOP(Int, SubWithOverflow);
   INTPTR_BINOP(Int, LessThan);
   INTPTR_BINOP(Int, LessThanOrEqual);
   INTPTR_BINOP(Word, Equal);
@@ -374,6 +394,7 @@
     return AddNode(machine()->Float32Min().op(), a, b);
   }
   Node* Float32Abs(Node* a) { return AddNode(machine()->Float32Abs(), a); }
+  Node* Float32Neg(Node* a) { return Float32Sub(Float32Constant(-0.0f), a); }
   Node* Float32Sqrt(Node* a) { return AddNode(machine()->Float32Sqrt(), a); }
   Node* Float32Equal(Node* a, Node* b) {
     return AddNode(machine()->Float32Equal(), a, b);
@@ -414,6 +435,7 @@
     return AddNode(machine()->Float64Min().op(), a, b);
   }
   Node* Float64Abs(Node* a) { return AddNode(machine()->Float64Abs(), a); }
+  Node* Float64Neg(Node* a) { return Float64Sub(Float64Constant(-0.0), a); }
   Node* Float64Sqrt(Node* a) { return AddNode(machine()->Float64Sqrt(), a); }
   Node* Float64Equal(Node* a, Node* b) {
     return AddNode(machine()->Float64Equal(), a, b);
@@ -448,6 +470,9 @@
   Node* ChangeFloat64ToUint32(Node* a) {
     return AddNode(machine()->ChangeFloat64ToUint32(), a);
   }
+  Node* TruncateFloat64ToUint32(Node* a) {
+    return AddNode(machine()->TruncateFloat64ToUint32(), a);
+  }
   Node* TruncateFloat32ToInt32(Node* a) {
     return AddNode(machine()->TruncateFloat32ToInt32(), a);
   }
@@ -457,22 +482,12 @@
   Node* TryTruncateFloat32ToInt64(Node* a) {
     return AddNode(machine()->TryTruncateFloat32ToInt64(), a);
   }
-  Node* TruncateFloat64ToInt64(Node* a) {
-    // TODO(ahaas): Remove this function as soon as it is not used anymore in
-    // WebAssembly.
-    return AddNode(machine()->TryTruncateFloat64ToInt64(), a);
-  }
   Node* TryTruncateFloat64ToInt64(Node* a) {
     return AddNode(machine()->TryTruncateFloat64ToInt64(), a);
   }
   Node* TryTruncateFloat32ToUint64(Node* a) {
     return AddNode(machine()->TryTruncateFloat32ToUint64(), a);
   }
-  Node* TruncateFloat64ToUint64(Node* a) {
-    // TODO(ahaas): Remove this function as soon as it is not used anymore in
-    // WebAssembly.
-    return AddNode(machine()->TryTruncateFloat64ToUint64(), a);
-  }
   Node* TryTruncateFloat64ToUint64(Node* a) {
     return AddNode(machine()->TryTruncateFloat64ToUint64(), a);
   }
@@ -622,6 +637,8 @@
 
   // Tail call the given call descriptor and the given arguments.
   Node* TailCallN(CallDescriptor* call_descriptor, Node* function, Node** args);
+  // Tail call to a runtime function with zero arguments.
+  Node* TailCallRuntime0(Runtime::FunctionId function, Node* context);
   // Tail call to a runtime function with one argument.
   Node* TailCallRuntime1(Runtime::FunctionId function, Node* arg0,
                          Node* context);
@@ -708,13 +725,17 @@
 
 class RawMachineLabel final {
  public:
-  RawMachineLabel();
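+  // Blocks bound to a kDeferred label are marked deferred (cold) in the
+  // schedule.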
+  enum Type { kDeferred, kNonDeferred };
+
+  explicit RawMachineLabel(Type type = kNonDeferred)
+      : deferred_(type == kDeferred) {}
   ~RawMachineLabel();
 
  private:
-  BasicBlock* block_;
-  bool used_;
-  bool bound_;
+  BasicBlock* block_ = nullptr;
+  bool used_ = false;
+  bool bound_ = false;
+  bool deferred_;
   friend class RawMachineAssembler;
   DISALLOW_COPY_AND_ASSIGN(RawMachineLabel);
 };
diff --git a/src/compiler/register-allocator-verifier.cc b/src/compiler/register-allocator-verifier.cc
index 0b12e14..f2160f5 100644
--- a/src/compiler/register-allocator-verifier.cc
+++ b/src/compiler/register-allocator-verifier.cc
@@ -32,9 +32,9 @@
        i <= Instruction::LAST_GAP_POSITION; i++) {
     Instruction::GapPosition inner_pos =
         static_cast<Instruction::GapPosition>(i);
-    auto moves = instr->GetParallelMove(inner_pos);
+    const ParallelMove* moves = instr->GetParallelMove(inner_pos);
     if (moves == nullptr) continue;
-    for (auto move : *moves) {
+    for (const MoveOperands* move : *moves) {
       if (move->IsRedundant()) continue;
       CHECK(move->source().IsAllocated() || move->source().IsConstant());
       CHECK(move->destination().IsAllocated());
@@ -81,11 +81,12 @@
   // TODO(dcarney): model unique constraints.
   // Construct OperandConstraints for all InstructionOperands, eliminating
   // kSameAsFirst along the way.
-  for (const auto* instr : sequence->instructions()) {
+  for (const Instruction* instr : sequence->instructions()) {
     // All gaps should be totally unallocated at this point.
     VerifyEmptyGaps(instr);
     const size_t operand_count = OperandCount(instr);
-    auto* op_constraints = zone->NewArray<OperandConstraint>(operand_count);
+    OperandConstraint* op_constraints =
+        zone->NewArray<OperandConstraint>(operand_count);
     size_t count = 0;
     for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
       BuildConstraint(instr->InputAt(i), &op_constraints[count]);
@@ -115,11 +116,12 @@
   CHECK(sequence()->instructions().size() == constraints()->size());
   auto instr_it = sequence()->begin();
   for (const auto& instr_constraint : *constraints()) {
-    const auto* instr = instr_constraint.instruction_;
+    const Instruction* instr = instr_constraint.instruction_;
     // All gaps should be totally allocated at this point.
     VerifyAllocatedGaps(instr);
     const size_t operand_count = instr_constraint.operand_constaints_size_;
-    const auto* op_constraints = instr_constraint.operand_constraints_;
+    const OperandConstraint* op_constraints =
+        instr_constraint.operand_constraints_;
     CHECK_EQ(instr, *instr_it);
     CHECK(operand_count == OperandCount(instr));
     size_t count = 0;
@@ -148,14 +150,14 @@
   } else if (op->IsExplicit()) {
     constraint->type_ = kExplicit;
   } else if (op->IsImmediate()) {
-    auto imm = ImmediateOperand::cast(op);
+    const ImmediateOperand* imm = ImmediateOperand::cast(op);
     int value = imm->type() == ImmediateOperand::INLINE ? imm->inline_value()
                                                         : imm->indexed_value();
     constraint->type_ = kImmediate;
     constraint->value_ = value;
   } else {
     CHECK(op->IsUnallocated());
-    const auto* unallocated = UnallocatedOperand::cast(op);
+    const UnallocatedOperand* unallocated = UnallocatedOperand::cast(op);
     int vreg = unallocated->virtual_register();
     constraint->virtual_register_ = vreg;
     if (unallocated->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
@@ -213,7 +215,7 @@
       return;
     case kImmediate: {
       CHECK(op->IsImmediate());
-      auto imm = ImmediateOperand::cast(op);
+      const ImmediateOperand* imm = ImmediateOperand::cast(op);
       int value = imm->type() == ImmediateOperand::INLINE
                       ? imm->inline_value()
                       : imm->indexed_value();
@@ -324,7 +326,7 @@
       if (this->empty()) return;
       auto it = this->begin();
       OperandLess less;
-      for (const auto& o : other) {
+      for (const std::pair<const InstructionOperand*, MapValue*>& o : other) {
         while (less(it->first, o.first)) {
           this->erase(it++);
           if (it == this->end()) return;
@@ -346,7 +348,7 @@
   void RunParallelMoves(Zone* zone, const ParallelMove* moves) {
     // Compute outgoing mappings.
     Map to_insert(zone);
-    for (auto move : *moves) {
+    for (const MoveOperands* move : *moves) {
       if (move->IsEliminated()) continue;
       auto cur = map().find(&move->source());
       CHECK(cur != map().end());
@@ -356,7 +358,7 @@
       CHECK(res.second);
     }
     // Drop current mappings.
-    for (auto move : *moves) {
+    for (const MoveOperands* move : *moves) {
       if (move->IsEliminated()) continue;
       auto cur = map().find(&move->destination());
       if (cur != map().end()) map().erase(cur);
@@ -368,8 +370,9 @@
   void RunGaps(Zone* zone, const Instruction* instr) {
     for (int i = Instruction::FIRST_GAP_POSITION;
          i <= Instruction::LAST_GAP_POSITION; i++) {
-      auto inner_pos = static_cast<Instruction::GapPosition>(i);
-      auto move = instr->GetParallelMove(inner_pos);
+      Instruction::GapPosition inner_pos =
+          static_cast<Instruction::GapPosition>(i);
+      const ParallelMove* move = instr->GetParallelMove(inner_pos);
       if (move == nullptr) continue;
       RunParallelMoves(zone, move);
     }
@@ -383,7 +386,7 @@
   void DropRegisters(const RegisterConfiguration* config) {
     // TODO(dcarney): sort map by kind and drop range.
     for (auto it = map().begin(); it != map().end();) {
-      auto op = it->first;
+      const InstructionOperand* op = it->first;
       if (op->IsRegister() || op->IsDoubleRegister()) {
         map().erase(it++);
       } else {
@@ -394,7 +397,7 @@
 
   MapValue* Define(Zone* zone, const InstructionOperand* op,
                    int virtual_register) {
-    auto value = new (zone) MapValue();
+    MapValue* value = new (zone) MapValue();
     value->define_vreg = virtual_register;
     auto res = map().insert(std::make_pair(op, value));
     if (!res.second) res.first->second = value;
@@ -404,7 +407,7 @@
   void Use(const InstructionOperand* op, int use_vreg, bool initial_pass) {
     auto it = map().find(op);
     CHECK(it != map().end());
-    auto v = it->second;
+    MapValue* v = it->second;
     if (v->define_vreg != kInvalidVreg) {
       CHECK_EQ(v->define_vreg, use_vreg);
     }
@@ -445,7 +448,7 @@
               bool initial_pass) {
     auto it = map().find(op);
     CHECK(it != map().end());
-    auto v = it->second;
+    MapValue* v = it->second;
     int use_vreg = phi->virtual_register;
     // Phis are not defined.
     CHECK_EQ(kInvalidVreg, v->define_vreg);
@@ -473,7 +476,7 @@
         CHECK(v->define_vreg == phi->first_pred_vreg);
       } else if (v->use_vreg != phi->first_pred_vreg) {
         // Walk the phi chain, hunting for a matching phi use.
-        auto p = phi;
+        const PhiData* p = phi;
         for (; p != nullptr; p = p->first_pred_phi) {
           if (p->virtual_register == v->use_vreg) break;
         }
@@ -529,12 +532,12 @@
     BlockIds block_ids((BlockIds::key_compare()),
                        zone_allocator<size_t>(zone()));
     // First ensure that incoming contains only keys in all predecessors.
-    for (auto block : sequence()->instruction_blocks()) {
+    for (const InstructionBlock* block : sequence()->instruction_blocks()) {
       size_t index = block->rpo_number().ToSize();
       block_ids.insert(index);
-      auto& succ_map = incoming_maps_[index]->map();
+      OperandMap::Map& succ_map = incoming_maps_[index]->map();
       for (size_t i = 0; i < block->PredecessorCount(); ++i) {
-        auto pred_rpo = block->predecessors()[i];
+        RpoNumber pred_rpo = block->predecessors()[i];
         succ_map.Intersect(outgoing_maps_[pred_rpo.ToSize()]->map());
       }
     }
@@ -545,8 +548,9 @@
       const size_t succ_index = *block_id_it;
       block_ids.erase(block_id_it);
       // Propagate uses back to their definition blocks using succ_vreg.
-      auto block = sequence()->instruction_blocks()[succ_index];
-      auto& succ_map = incoming_maps_[succ_index]->map();
+      const InstructionBlock* block =
+          sequence()->instruction_blocks()[succ_index];
+      OperandMap::Map& succ_map = incoming_maps_[succ_index]->map();
       for (size_t i = 0; i < block->PredecessorCount(); ++i) {
         for (auto& succ_val : succ_map) {
           // An incoming map contains no defines.
@@ -561,15 +565,15 @@
           if (succ_vreg == kInvalidVreg) continue;
           // May need to transition phi.
           if (IsPhi(succ_vreg)) {
-            auto phi = GetPhi(succ_vreg);
+            const PhiData* phi = GetPhi(succ_vreg);
             if (phi->definition_rpo.ToSize() == succ_index) {
               // phi definition block, transition to pred value.
               succ_vreg = phi->operands[i];
             }
           }
           // Push succ_vreg up to all predecessors.
-          auto pred_rpo = block->predecessors()[i];
-          auto& pred_map = outgoing_maps_[pred_rpo.ToSize()]->map();
+          RpoNumber pred_rpo = block->predecessors()[i];
+          OperandMap::Map& pred_map = outgoing_maps_[pred_rpo.ToSize()]->map();
           auto& pred_val = *pred_map.find(succ_val.first);
           if (pred_val.second->use_vreg != kInvalidVreg) {
             CHECK_EQ(succ_vreg, pred_val.second->use_vreg);
@@ -606,7 +610,7 @@
       }
     }
     // Clear uses and back links for second pass.
-    for (auto operand_map : incoming_maps_) {
+    for (OperandMap* operand_map : incoming_maps_) {
       for (auto& succ_val : operand_map->map()) {
         succ_val.second->incoming = nullptr;
         succ_val.second->use_vreg = kInvalidVreg;
@@ -616,18 +620,19 @@
 
  private:
   OperandMap* InitializeFromFirstPredecessor(size_t block_index) {
-    auto to_init = outgoing_maps_[block_index];
+    OperandMap* to_init = outgoing_maps_[block_index];
     CHECK(to_init->map().empty());
-    auto block = sequence()->instruction_blocks()[block_index];
+    const InstructionBlock* block =
+        sequence()->instruction_blocks()[block_index];
     if (block->predecessors().empty()) return to_init;
     size_t predecessor_index = block->predecessors()[0].ToSize();
     // Ensure not a backedge.
     CHECK(predecessor_index < block->rpo_number().ToSize());
-    auto incoming = outgoing_maps_[predecessor_index];
+    OperandMap* incoming = outgoing_maps_[predecessor_index];
     // Copy map and replace values.
     to_init->map() = incoming->map();
     for (auto& it : to_init->map()) {
-      auto incoming = it.second;
+      OperandMap::MapValue* incoming = it.second;
       it.second = new (zone()) OperandMap::MapValue();
       it.second->incoming = incoming;
     }
@@ -653,8 +658,9 @@
   void InitializePhis() {
     const size_t block_count = sequence()->instruction_blocks().size();
     for (size_t block_index = 0; block_index < block_count; ++block_index) {
-      const auto block = sequence()->instruction_blocks()[block_index];
-      for (auto phi : block->phis()) {
+      const InstructionBlock* block =
+          sequence()->instruction_blocks()[block_index];
+      for (const PhiInstruction* phi : block->phis()) {
         int first_pred_vreg = phi->operands()[0];
         const PhiData* first_pred_phi = nullptr;
         if (IsPhi(first_pred_vreg)) {
@@ -662,7 +668,7 @@
           first_pred_vreg = first_pred_phi->first_pred_vreg;
         }
         CHECK(!IsPhi(first_pred_vreg));
-        auto phi_data = new (zone()) PhiData(
+        PhiData* phi_data = new (zone()) PhiData(
             block->rpo_number(), phi, first_pred_vreg, first_pred_phi, zone());
         auto res =
             phi_map_.insert(std::make_pair(phi->virtual_register(), phi_data));
@@ -700,14 +706,17 @@
                                                bool initial_pass) {
   const size_t block_count = sequence()->instruction_blocks().size();
   for (size_t block_index = 0; block_index < block_count; ++block_index) {
-    auto current = block_maps->InitializeIncoming(block_index, initial_pass);
-    const auto block = sequence()->instruction_blocks()[block_index];
+    OperandMap* current =
+        block_maps->InitializeIncoming(block_index, initial_pass);
+    const InstructionBlock* block =
+        sequence()->instruction_blocks()[block_index];
     for (int instr_index = block->code_start(); instr_index < block->code_end();
          ++instr_index) {
-      const auto& instr_constraint = constraints_[instr_index];
-      const auto instr = instr_constraint.instruction_;
+      const InstructionConstraint& instr_constraint = constraints_[instr_index];
+      const Instruction* instr = instr_constraint.instruction_;
       current->RunGaps(zone(), instr);
-      const auto op_constraints = instr_constraint.operand_constraints_;
+      const OperandConstraint* op_constraints =
+          instr_constraint.operand_constraints_;
       size_t count = 0;
       for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
         if (op_constraints[count].type_ == kImmediate ||
@@ -715,11 +724,11 @@
           continue;
         }
         int virtual_register = op_constraints[count].virtual_register_;
-        auto op = instr->InputAt(i);
+        const InstructionOperand* op = instr->InputAt(i);
         if (!block_maps->IsPhi(virtual_register)) {
           current->Use(op, virtual_register, initial_pass);
         } else {
-          auto phi = block_maps->GetPhi(virtual_register);
+          const PhiData* phi = block_maps->GetPhi(virtual_register);
           current->UsePhi(op, phi, initial_pass);
         }
       }
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 02ba1f1..82faf75 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -25,7 +25,6 @@
   v->erase(it);
 }
 
-
 int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) {
   return kind == DOUBLE_REGISTERS ? cfg->num_double_registers()
                                   : cfg->num_general_registers();
@@ -1561,7 +1560,6 @@
   return true;
 }
 
-
 SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
     TopLevelLiveRange* range) {
   DCHECK(!range->HasSpillOperand());
@@ -2197,8 +2195,22 @@
     int phi_vreg = phi->virtual_register();
     live->Remove(phi_vreg);
     InstructionOperand* hint = nullptr;
-    Instruction* instr = GetLastInstruction(
-        code(), code()->InstructionBlockAt(block->predecessors()[0]));
+    const InstructionBlock::Predecessors& predecessors = block->predecessors();
+    const InstructionBlock* predecessor_block =
+        code()->InstructionBlockAt(predecessors[0]);
+    const Instruction* instr = GetLastInstruction(code(), predecessor_block);
+    if (predecessor_block->IsDeferred()) {
+      // "Prefer the hint from the first non-deferred predecessor, if any.
+      for (size_t i = 1; i < predecessors.size(); ++i) {
+        predecessor_block = code()->InstructionBlockAt(predecessors[i]);
+        if (!predecessor_block->IsDeferred()) {
+          instr = GetLastInstruction(code(), predecessor_block);
+          break;
+        }
+      }
+    }
+    DCHECK_NOT_NULL(instr);
+
     for (MoveOperands* move : *instr->GetParallelMove(Instruction::END)) {
       InstructionOperand& to = move->destination();
       if (to.IsUnallocated() &&
@@ -2322,11 +2334,79 @@
   for (auto& hint : phi_hints_) {
     CHECK(hint.second->IsResolved());
   }
-  for (TopLevelLiveRange* current : data()->live_ranges()) {
-    if (current != nullptr && !current->IsEmpty()) current->Verify();
+  for (const TopLevelLiveRange* current : data()->live_ranges()) {
+    if (current != nullptr && !current->IsEmpty()) {
+      // New LiveRanges should not be split.
+      CHECK_NULL(current->next());
+      // General integrity check.
+      current->Verify();
+      const UseInterval* first = current->first_interval();
+      if (first->next() == nullptr) continue;
+
+      // Consecutive intervals should not end and start in the same block,
+      // otherwise the intervals should have been joined, because the
+      // variable is live throughout that block.
+      CHECK(NextIntervalStartsInDifferentBlocks(first));
+
+      for (const UseInterval* i = first->next(); i != nullptr; i = i->next()) {
+        // Except for the first interval, the other intervals must start at
+        // a block boundary, otherwise data wouldn't flow to them.
+        CHECK(IntervalStartsAtBlockBoundary(i));
+        // The last instruction of each predecessor of the block where the
+        // interval starts must be covered by the range.
+        CHECK(IntervalPredecessorsCoveredByRange(i, current));
+        if (i->next() != nullptr) {
+          // Check the consecutive intervals property, except for the last
+          // interval, where it doesn't apply.
+          CHECK(NextIntervalStartsInDifferentBlocks(i));
+        }
+      }
+    }
   }
 }
 
+bool LiveRangeBuilder::IntervalStartsAtBlockBoundary(
+    const UseInterval* interval) const {
+  LifetimePosition start = interval->start();
+  if (!start.IsFullStart()) return false;
+  int instruction_index = start.ToInstructionIndex();
+  const InstructionBlock* block =
+      data()->code()->GetInstructionBlock(instruction_index);
+  return block->first_instruction_index() == instruction_index;
+}
+
+bool LiveRangeBuilder::IntervalPredecessorsCoveredByRange(
+    const UseInterval* interval, const TopLevelLiveRange* range) const {
+  LifetimePosition start = interval->start();
+  int instruction_index = start.ToInstructionIndex();
+  const InstructionBlock* block =
+      data()->code()->GetInstructionBlock(instruction_index);
+  for (RpoNumber pred_index : block->predecessors()) {
+    const InstructionBlock* predecessor =
+        data()->code()->InstructionBlockAt(pred_index);
+    LifetimePosition last_pos = LifetimePosition::GapFromInstructionIndex(
+        predecessor->last_instruction_index());
+    last_pos = last_pos.NextStart().End();
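+    // last_pos now denotes the end position of the predecessor block's
+    // last instruction.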
+    if (!range->Covers(last_pos)) return false;
+  }
+  return true;
+}
+
+bool LiveRangeBuilder::NextIntervalStartsInDifferentBlocks(
+    const UseInterval* interval) const {
+  DCHECK_NOT_NULL(interval->next());
+  LifetimePosition end = interval->end();
+  LifetimePosition next_start = interval->next()->start();
+  // Since the end position itself is not covered but the previous position
+  // is, step back to the last covered position.
+  end = end.IsStart() ? end.PrevStart().End() : end.Start();
+  int last_covered_index = end.ToInstructionIndex();
+  const InstructionBlock* block =
+      data()->code()->GetInstructionBlock(last_covered_index);
+  const InstructionBlock* next_block =
+      data()->code()->GetInstructionBlock(next_start.ToInstructionIndex());
+  return block->rpo_number() < next_block->rpo_number();
+}
 
 RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
                                      RegisterKind kind)
@@ -3081,21 +3161,14 @@
   for (TopLevelLiveRange* range : data()->live_ranges()) {
     if (range == nullptr || range->IsEmpty()) continue;
     // We care only about ranges which spill in the frame.
-    if (!range->HasSpillRange()) continue;
-    if (range->IsSpilledOnlyInDeferredBlocks()) {
-      for (LiveRange* child = range; child != nullptr; child = child->next()) {
-        if (child->spilled()) {
-          code->GetInstructionBlock(child->Start().ToInstructionIndex())
-              ->mark_needs_frame();
-        }
-      }
-    } else {
-      TopLevelLiveRange::SpillMoveInsertionList* spills =
-          range->GetSpillMoveInsertionLocations();
-      DCHECK_NOT_NULL(spills);
-      for (; spills != nullptr; spills = spills->next) {
-        code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
-      }
+    if (!range->HasSpillRange() || range->IsSpilledOnlyInDeferredBlocks()) {
+      continue;
+    }
+    TopLevelLiveRange::SpillMoveInsertionList* spills =
+        range->GetSpillMoveInsertionLocations();
+    DCHECK_NOT_NULL(spills);
+    for (; spills != nullptr; spills = spills->next) {
+      code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
     }
   }
 }
@@ -3558,7 +3631,7 @@
     worklist.pop();
     if (done_blocks.Contains(block_id)) continue;
     done_blocks.Add(block_id);
-    const InstructionBlock* spill_block =
+    InstructionBlock* spill_block =
         code->InstructionBlockAt(RpoNumber::FromInt(block_id));
 
     for (const RpoNumber& pred : spill_block->predecessors()) {
@@ -3578,6 +3651,7 @@
         data()->AddGapMove(spill_block->first_instruction_index(),
                            Instruction::GapPosition::START, pred_op,
                            spill_operand);
+        spill_block->mark_needs_frame();
       }
     }
   }
diff --git a/src/compiler/register-allocator.h b/src/compiler/register-allocator.h
index 38fad05..d6ed005 100644
--- a/src/compiler/register-allocator.h
+++ b/src/compiler/register-allocator.h
@@ -919,7 +919,12 @@
     return data()->live_in_sets();
   }
 
+  // Verification.
   void Verify() const;
+  bool IntervalStartsAtBlockBoundary(const UseInterval* interval) const;
+  bool IntervalPredecessorsCoveredByRange(const UseInterval* interval,
+                                          const TopLevelLiveRange* range) const;
+  bool NextIntervalStartsInDifferentBlocks(const UseInterval* interval) const;
 
   // Liveness analysis support.
   void AddInitialIntervals(const InstructionBlock* block, BitVector* live_out);
diff --git a/src/compiler/representation-change.cc b/src/compiler/representation-change.cc
index 2f7720b..f59c8bc 100644
--- a/src/compiler/representation-change.cc
+++ b/src/compiler/representation-change.cc
@@ -142,7 +142,8 @@
     case MachineRepresentation::kWord8:
     case MachineRepresentation::kWord16:
     case MachineRepresentation::kWord32:
-      return GetWord32RepresentationFor(node, output_rep, output_type);
+      return GetWord32RepresentationFor(node, output_rep, output_type,
+                                        truncation);
     case MachineRepresentation::kWord64:
       return GetWord64RepresentationFor(node, output_rep, output_type);
     case MachineRepresentation::kSimd128:  // Fall through.
@@ -234,30 +235,34 @@
       break;
   }
   // Select the correct X -> Float32 operator.
-  const Operator* op;
-  if (output_rep == MachineRepresentation::kBit) {
-    return TypeError(node, output_rep, output_type,
-                     MachineRepresentation::kFloat32);
-  } else if (IsWord(output_rep)) {
+  const Operator* op = nullptr;
+  if (IsWord(output_rep)) {
     if (output_type->Is(Type::Signed32())) {
+      // int32 -> float64 -> float32
       op = machine()->ChangeInt32ToFloat64();
-    } else {
-      // Either the output is int32 or the uses only care about the
-      // low 32 bits (so we can pick int32 safely).
-      DCHECK(output_type->Is(Type::Unsigned32()) ||
-             truncation.TruncatesToWord32());
+      node = jsgraph()->graph()->NewNode(op, node);
+      op = machine()->TruncateFloat64ToFloat32();
+    } else if (output_type->Is(Type::Unsigned32()) ||
+               truncation.TruncatesToWord32()) {
+      // Either the output is uint32 or the uses only care about the
+      // low 32 bits (so we can pick uint32 safely).
+
+      // uint32 -> float64 -> float32
       op = machine()->ChangeUint32ToFloat64();
+      node = jsgraph()->graph()->NewNode(op, node);
+      op = machine()->TruncateFloat64ToFloat32();
     }
-    // int32 -> float64 -> float32
-    node = jsgraph()->graph()->NewNode(op, node);
-    op = machine()->TruncateFloat64ToFloat32();
   } else if (output_rep == MachineRepresentation::kTagged) {
-    op = simplified()->ChangeTaggedToFloat64();  // tagged -> float64 -> float32
-    node = jsgraph()->graph()->NewNode(op, node);
-    op = machine()->TruncateFloat64ToFloat32();
+    if (output_type->Is(Type::Number())) {
+      op = simplified()
+               ->ChangeTaggedToFloat64();  // tagged -> float64 -> float32
+      node = jsgraph()->graph()->NewNode(op, node);
+      op = machine()->TruncateFloat64ToFloat32();
+    }
   } else if (output_rep == MachineRepresentation::kFloat64) {
     op = machine()->TruncateFloat64ToFloat32();
-  } else {
+  }
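+  // A null op at this point means no valid conversion was selected above.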
+  if (op == nullptr) {
     return TypeError(node, output_rep, output_type,
                      MachineRepresentation::kFloat32);
   }
@@ -289,25 +294,24 @@
       break;
   }
   // Select the correct X -> Float64 operator.
-  const Operator* op;
-  if (output_rep == MachineRepresentation::kBit) {
-    return TypeError(node, output_rep, output_type,
-                     MachineRepresentation::kFloat64);
-  } else if (IsWord(output_rep)) {
+  const Operator* op = nullptr;
+  if (IsWord(output_rep)) {
     if (output_type->Is(Type::Signed32())) {
       op = machine()->ChangeInt32ToFloat64();
-    } else {
-      // Either the output is int32 or the uses only care about the
-      // low 32 bits (so we can pick int32 safely).
-      DCHECK(output_type->Is(Type::Unsigned32()) ||
-             truncation.TruncatesToWord32());
+    } else if (output_type->Is(Type::Unsigned32()) ||
+               truncation.TruncatesToWord32()) {
+      // Either the output is uint32 or the uses only care about the
+      // low 32 bits (so we can pick uint32 safely).
       op = machine()->ChangeUint32ToFloat64();
     }
   } else if (output_rep == MachineRepresentation::kTagged) {
-    op = simplified()->ChangeTaggedToFloat64();
+    if (output_type->Is(Type::Number())) {
+      op = simplified()->ChangeTaggedToFloat64();
+    }
   } else if (output_rep == MachineRepresentation::kFloat32) {
     op = machine()->ChangeFloat32ToFloat64();
-  } else {
+  }
+  if (op == nullptr) {
     return TypeError(node, output_rep, output_type,
                      MachineRepresentation::kFloat64);
   }
@@ -319,9 +323,9 @@
   return jsgraph()->Int32Constant(DoubleToInt32(value));
 }
 
-
 Node* RepresentationChanger::GetWord32RepresentationFor(
-    Node* node, MachineRepresentation output_rep, Type* output_type) {
+    Node* node, MachineRepresentation output_rep, Type* output_type,
+    Truncation truncation) {
   // Eagerly fold representation changes for constants.
   switch (node->opcode()) {
     case IrOpcode::kInt32Constant:
@@ -335,43 +339,37 @@
       break;
   }
   // Select the correct X -> Word32 operator.
-  const Operator* op;
-  Type* type = NodeProperties::GetType(node);
-
+  const Operator* op = nullptr;
   if (output_rep == MachineRepresentation::kBit) {
     return node;  // Sloppy comparison -> word32
   } else if (output_rep == MachineRepresentation::kFloat64) {
-    // TODO(jarin) Use only output_type here, once we intersect it with the
-    // type inferred by the typer.
-    if (output_type->Is(Type::Unsigned32()) || type->Is(Type::Unsigned32())) {
+    if (output_type->Is(Type::Unsigned32())) {
       op = machine()->ChangeFloat64ToUint32();
-    } else if (output_type->Is(Type::Signed32()) ||
-               type->Is(Type::Signed32())) {
+    } else if (output_type->Is(Type::Signed32())) {
       op = machine()->ChangeFloat64ToInt32();
-    } else {
+    } else if (truncation.TruncatesToWord32()) {
       op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
     }
   } else if (output_rep == MachineRepresentation::kFloat32) {
     node = InsertChangeFloat32ToFloat64(node);  // float32 -> float64 -> int32
-    if (output_type->Is(Type::Unsigned32()) || type->Is(Type::Unsigned32())) {
+    if (output_type->Is(Type::Unsigned32())) {
       op = machine()->ChangeFloat64ToUint32();
-    } else if (output_type->Is(Type::Signed32()) ||
-               type->Is(Type::Signed32())) {
+    } else if (output_type->Is(Type::Signed32())) {
       op = machine()->ChangeFloat64ToInt32();
-    } else {
+    } else if (truncation.TruncatesToWord32()) {
       op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
     }
   } else if (output_rep == MachineRepresentation::kTagged) {
-    if (output_type->Is(Type::Unsigned32()) || type->Is(Type::Unsigned32())) {
+    if (output_type->Is(Type::Unsigned32())) {
       op = simplified()->ChangeTaggedToUint32();
-    } else if (output_type->Is(Type::Signed32()) ||
-               type->Is(Type::Signed32())) {
+    } else if (output_type->Is(Type::Signed32())) {
       op = simplified()->ChangeTaggedToInt32();
-    } else {
+    } else if (truncation.TruncatesToWord32()) {
       node = InsertChangeTaggedToFloat64(node);
       op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
     }
-  } else {
+  }
+  if (op == nullptr) {
     return TypeError(node, output_rep, output_type,
                      MachineRepresentation::kWord32);
   }
@@ -467,6 +465,10 @@
       return machine()->Uint32LessThan();
     case IrOpcode::kNumberLessThanOrEqual:
       return machine()->Uint32LessThanOrEqual();
+    case IrOpcode::kNumberClz32:
+      return machine()->Word32Clz();
+    case IrOpcode::kNumberImul:
+      return machine()->Int32Mul();
     default:
       UNREACHABLE();
       return nullptr;
diff --git a/src/compiler/representation-change.h b/src/compiler/representation-change.h
index 62ea3b4..24e28f3 100644
--- a/src/compiler/representation-change.h
+++ b/src/compiler/representation-change.h
@@ -121,7 +121,7 @@
                                     MachineRepresentation output_rep,
                                     Type* output_type, Truncation truncation);
   Node* GetWord32RepresentationFor(Node* node, MachineRepresentation output_rep,
-                                   Type* output_type);
+                                   Type* output_type, Truncation truncation);
   Node* GetBitRepresentationFor(Node* node, MachineRepresentation output_rep,
                                 Type* output_type);
   Node* GetWord64RepresentationFor(Node* node, MachineRepresentation output_rep,
diff --git a/src/compiler/s390/OWNERS b/src/compiler/s390/OWNERS
new file mode 100644
index 0000000..eb007cb
--- /dev/null
+++ b/src/compiler/s390/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/src/compiler/s390/code-generator-s390.cc b/src/compiler/s390/code-generator-s390.cc
new file mode 100644
index 0000000..68c1d9d
--- /dev/null
+++ b/src/compiler/s390/code-generator-s390.cc
@@ -0,0 +1,2085 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/ast/scopes.h"
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/osr.h"
+#include "src/s390/macro-assembler-s390.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+#define kScratchReg ip
+
+// Adds S390-specific methods to convert InstructionOperands.
+class S390OperandConverter final : public InstructionOperandConverter {
+ public:
+  S390OperandConverter(CodeGenerator* gen, Instruction* instr)
+      : InstructionOperandConverter(gen, instr) {}
+
+  size_t OutputCount() { return instr_->OutputCount(); }
+
+  bool CompareLogical() const {
+    switch (instr_->flags_condition()) {
+      case kUnsignedLessThan:
+      case kUnsignedGreaterThanOrEqual:
+      case kUnsignedLessThanOrEqual:
+      case kUnsignedGreaterThan:
+        return true;
+      default:
+        return false;
+    }
+    UNREACHABLE();
+    return false;
+  }
+
+  Operand InputImmediate(size_t index) {
+    Constant constant = ToConstant(instr_->InputAt(index));
+    switch (constant.type()) {
+      case Constant::kInt32:
+        return Operand(constant.ToInt32());
+      case Constant::kFloat32:
+        return Operand(
+            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+      case Constant::kFloat64:
+        return Operand(
+            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+      case Constant::kInt64:
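+        // Only representable as an immediate on 64-bit; on 31-bit targets
+        // this falls through to UNREACHABLE below.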
+#if V8_TARGET_ARCH_S390X
+        return Operand(constant.ToInt64());
+#endif
+      case Constant::kExternalReference:
+      case Constant::kHeapObject:
+      case Constant::kRpoNumber:
+        break;
+    }
+    UNREACHABLE();
+    return Operand::Zero();
+  }
+
+  MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
+    const size_t index = *first_index;
+    *mode = AddressingModeField::decode(instr_->opcode());
+    switch (*mode) {
+      case kMode_None:
+        break;
+      case kMode_MRI:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+      case kMode_MRR:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
+    }
+    UNREACHABLE();
+    return MemOperand(r0);
+  }
+
+  MemOperand MemoryOperand(AddressingMode* mode, size_t first_index = 0) {
+    return MemoryOperand(mode, &first_index);
+  }
+
+  MemOperand ToMemOperand(InstructionOperand* op) const {
+    DCHECK_NOT_NULL(op);
+    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
+  }
+
+  MemOperand SlotToMemOperand(int slot) const {
+    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
+    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+  }
+};
+
+static inline bool HasRegisterInput(Instruction* instr, int index) {
+  return instr->InputAt(index)->IsRegister();
+}
+
+namespace {
+
+class OutOfLineLoadNAN32 final : public OutOfLineCode {
+ public:
+  OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() final {
+    __ LoadDoubleLiteral(result_, std::numeric_limits<float>::quiet_NaN(),
+                         kScratchReg);
+  }
+
+ private:
+  DoubleRegister const result_;
+};
+
+class OutOfLineLoadNAN64 final : public OutOfLineCode {
+ public:
+  OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() final {
+    __ LoadDoubleLiteral(result_, std::numeric_limits<double>::quiet_NaN(),
+                         kScratchReg);
+  }
+
+ private:
+  DoubleRegister const result_;
+};
+
+class OutOfLineLoadZero final : public OutOfLineCode {
+ public:
+  OutOfLineLoadZero(CodeGenerator* gen, Register result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() final { __ LoadImmP(result_, Operand::Zero()); }
+
+ private:
+  Register const result_;
+};
+
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
+                       Register value, Register scratch0, Register scratch1,
+                       RecordWriteMode mode)
+      : OutOfLineCode(gen),
+        object_(object),
+        offset_(offset),
+        offset_immediate_(0),
+        value_(value),
+        scratch0_(scratch0),
+        scratch1_(scratch1),
+        mode_(mode),
+        must_save_lr_(!gen->frame_access_state()->has_frame()) {}
+
+  OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
+                       Register value, Register scratch0, Register scratch1,
+                       RecordWriteMode mode)
+      : OutOfLineCode(gen),
+        object_(object),
+        offset_(no_reg),
+        offset_immediate_(offset),
+        value_(value),
+        scratch0_(scratch0),
+        scratch1_(scratch1),
+        mode_(mode),
+        must_save_lr_(!gen->frame_access_state()->has_frame()) {}
+
+  void Generate() final {
+    if (mode_ > RecordWriteMode::kValueIsPointer) {
+      __ JumpIfSmi(value_, exit());
+    }
+    __ CheckPageFlag(value_, scratch0_,
+                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
+                     exit());
+    RememberedSetAction const remembered_set_action =
+        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+                                             : OMIT_REMEMBERED_SET;
+    SaveFPRegsMode const save_fp_mode =
+        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+    if (must_save_lr_) {
+      // We need to save and restore r14 if the frame was elided.
+      __ Push(r14);
+    }
+    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+                         remembered_set_action, save_fp_mode);
+    if (offset_.is(no_reg)) {
+      __ AddP(scratch1_, object_, Operand(offset_immediate_));
+    } else {
+      DCHECK_EQ(0, offset_immediate_);
+      __ AddP(scratch1_, object_, offset_);
+    }
+    __ CallStub(&stub);
+    if (must_save_lr_) {
+      // We need to save and restore r14 if the frame was elided.
+      __ Pop(r14);
+    }
+  }
+
+ private:
+  Register const object_;
+  Register const offset_;
+  int32_t const offset_immediate_;  // Valid if offset_.is(no_reg).
+  Register const value_;
+  Register const scratch0_;
+  Register const scratch1_;
+  RecordWriteMode const mode_;
+  bool must_save_lr_;
+};
+
+Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
+  switch (condition) {
+    case kEqual:
+      return eq;
+    case kNotEqual:
+      return ne;
+    case kSignedLessThan:
+    case kUnsignedLessThan:
+      return lt;
+    case kSignedGreaterThanOrEqual:
+    case kUnsignedGreaterThanOrEqual:
+      return ge;
+    case kSignedLessThanOrEqual:
+    case kUnsignedLessThanOrEqual:
+      return le;
+    case kSignedGreaterThan:
+    case kUnsignedGreaterThan:
+      return gt;
+    case kOverflow:
+      // Overflow checked for AddP/SubP only.
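+      // lt corresponds to the sign-based check in Add/SubAndCheckForOverflow;
+      // ne corresponds to TestIfInt32 in ASSEMBLE_ADD_WITH_OVERFLOW32 below.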
+      switch (op) {
+#if V8_TARGET_ARCH_S390X
+        case kS390_Add:
+        case kS390_Sub:
+          return lt;
+#endif
+        case kS390_AddWithOverflow32:
+        case kS390_SubWithOverflow32:
+#if V8_TARGET_ARCH_S390X
+          return ne;
+#else
+          return lt;
+#endif
+        default:
+          break;
+      }
+      break;
+    case kNotOverflow:
+      switch (op) {
+#if V8_TARGET_ARCH_S390X
+        case kS390_Add:
+        case kS390_Sub:
+          return ge;
+#endif
+        case kS390_AddWithOverflow32:
+        case kS390_SubWithOverflow32:
+#if V8_TARGET_ARCH_S390X
+          return eq;
+#else
+          return ge;
+#endif
+        default:
+          break;
+      }
+      break;
+    default:
+      break;
+  }
+  UNREACHABLE();
+  return kNoCondition;
+}
+
+}  // namespace
+
+#define ASSEMBLE_FLOAT_UNOP(asm_instr)                                \
+  do {                                                                \
+    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+  } while (0)
+
+#define ASSEMBLE_FLOAT_BINOP(asm_instr)                              \
+  do {                                                               \
+    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
+                 i.InputDoubleRegister(1));                          \
+  } while (0)
+
+#define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm)           \
+  do {                                                         \
+    if (HasRegisterInput(instr, 1)) {                          \
+      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
+                       i.InputRegister(1));                    \
+    } else {                                                   \
+      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
+                       i.InputImmediate(1));                   \
+    }                                                          \
+  } while (0)
+
+#define ASSEMBLE_BINOP_INT(asm_instr_reg, asm_instr_imm)       \
+  do {                                                         \
+    if (HasRegisterInput(instr, 1)) {                          \
+      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
+                       i.InputRegister(1));                    \
+    } else {                                                   \
+      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
+                       i.InputInt32(1));                       \
+    }                                                          \
+  } while (0)
+
+#define ASSEMBLE_ADD_WITH_OVERFLOW()                                    \
+  do {                                                                  \
+    if (HasRegisterInput(instr, 1)) {                                   \
+      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+                                i.InputRegister(1), kScratchReg, r0);   \
+    } else {                                                            \
+      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+                                i.InputInt32(1), kScratchReg, r0);      \
+    }                                                                   \
+  } while (0)
+
+#define ASSEMBLE_SUB_WITH_OVERFLOW()                                    \
+  do {                                                                  \
+    if (HasRegisterInput(instr, 1)) {                                   \
+      __ SubAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+                                i.InputRegister(1), kScratchReg, r0);   \
+    } else {                                                            \
+      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
+                                -i.InputInt32(1), kScratchReg, r0);     \
+    }                                                                   \
+  } while (0)
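+// Note: the immediate form above subtracts by adding the negated immediate.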
+
+#if V8_TARGET_ARCH_S390X
+#define ASSEMBLE_ADD_WITH_OVERFLOW32()      \
+  do {                                      \
+    ASSEMBLE_BINOP(AddP, AddP);             \
+    __ TestIfInt32(i.OutputRegister(), r0); \
+  } while (0)
+
+#define ASSEMBLE_SUB_WITH_OVERFLOW32()      \
+  do {                                      \
+    ASSEMBLE_BINOP(SubP, SubP);             \
+    __ TestIfInt32(i.OutputRegister(), r0); \
+  } while (0)
+#else
+#define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW
+#define ASSEMBLE_SUB_WITH_OVERFLOW32 ASSEMBLE_SUB_WITH_OVERFLOW
+#endif
+
+#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr)                 \
+  do {                                                          \
+    if (HasRegisterInput(instr, 1)) {                           \
+      if (i.CompareLogical()) {                                 \
+        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1));  \
+      } else {                                                  \
+        __ cmp_instr(i.InputRegister(0), i.InputRegister(1));   \
+      }                                                         \
+    } else {                                                    \
+      if (i.CompareLogical()) {                                 \
+        __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1)); \
+      } else {                                                  \
+        __ cmp_instr(i.InputRegister(0), i.InputImmediate(1));  \
+      }                                                         \
+    }                                                           \
+  } while (0)
+
+#define ASSEMBLE_FLOAT_COMPARE(cmp_instr)                            \
+  do {                                                               \
+    __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
+  } while (0)
+
+// The divide instruction dr implicitly uses the register pair r0 & r1 below:
+// r0:r1 = r1 / divisor, with the remainder left in r0.
+// Copy the remainder to the output register.
+#define ASSEMBLE_MODULO(div_instr, shift_instr) \
+  do {                                          \
+    __ LoadRR(r0, i.InputRegister(0));          \
+    __ shift_instr(r0, Operand(32));            \
+    __ div_instr(r0, i.InputRegister(1));       \
+    __ ltr(i.OutputRegister(), r0);             \
+  } while (0)
+
+#define ASSEMBLE_FLOAT_MODULO()                                               \
+  do {                                                                        \
+    FrameScope scope(masm(), StackFrame::MANUAL);                             \
+    __ PrepareCallCFunction(0, 2, kScratchReg);                               \
+    __ MovToFloatParameters(i.InputDoubleRegister(0),                         \
+                            i.InputDoubleRegister(1));                        \
+    __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
+                     0, 2);                                                   \
+    __ MovFromFloatResult(i.OutputDoubleRegister());                          \
+  } while (0)
+
+#define ASSEMBLE_FLOAT_MAX(double_scratch_reg, general_scratch_reg) \
+  do {                                                              \
+    Label ge, done;                                                 \
+    __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));    \
+    __ bge(&ge, Label::kNear);                                      \
+    __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(1));    \
+    __ b(&done, Label::kNear);                                      \
+    __ bind(&ge);                                                   \
+    __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));    \
+    __ bind(&done);                                                 \
+  } while (0)
+
+#define ASSEMBLE_FLOAT_MIN(double_scratch_reg, general_scratch_reg) \
+  do {                                                              \
+    Label ge, done;                                                 \
+    __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));    \
+    __ bge(&ge, Label::kNear);                                      \
+    __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));    \
+    __ b(&done, Label::kNear);                                      \
+    __ bind(&ge);                                                   \
+    __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(1));    \
+    __ bind(&done);                                                 \
+  } while (0)
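+// Note: the double_scratch_reg/general_scratch_reg parameters are currently
+// unused by ASSEMBLE_FLOAT_MAX/MIN.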
+
+// Only the MRI addressing mode is available for these instructions.
+#define ASSEMBLE_LOAD_FLOAT(asm_instr)                \
+  do {                                                \
+    DoubleRegister result = i.OutputDoubleRegister(); \
+    AddressingMode mode = kMode_None;                 \
+    MemOperand operand = i.MemoryOperand(&mode);      \
+    __ asm_instr(result, operand);                    \
+  } while (0)
+
+#define ASSEMBLE_LOAD_INTEGER(asm_instr)         \
+  do {                                           \
+    Register result = i.OutputRegister();        \
+    AddressingMode mode = kMode_None;            \
+    MemOperand operand = i.MemoryOperand(&mode); \
+    __ asm_instr(result, operand);               \
+  } while (0)
+
+#define ASSEMBLE_STORE_FLOAT32()                         \
+  do {                                                   \
+    size_t index = 0;                                    \
+    AddressingMode mode = kMode_None;                    \
+    MemOperand operand = i.MemoryOperand(&mode, &index); \
+    DoubleRegister value = i.InputDoubleRegister(index); \
+    __ StoreFloat32(value, operand);                     \
+  } while (0)
+
+#define ASSEMBLE_STORE_DOUBLE()                          \
+  do {                                                   \
+    size_t index = 0;                                    \
+    AddressingMode mode = kMode_None;                    \
+    MemOperand operand = i.MemoryOperand(&mode, &index); \
+    DoubleRegister value = i.InputDoubleRegister(index); \
+    __ StoreDouble(value, operand);                      \
+  } while (0)
+
+#define ASSEMBLE_STORE_INTEGER(asm_instr)                \
+  do {                                                   \
+    size_t index = 0;                                    \
+    AddressingMode mode = kMode_None;                    \
+    MemOperand operand = i.MemoryOperand(&mode, &index); \
+    Register value = i.InputRegister(index);             \
+    __ asm_instr(value, operand);                        \
+  } while (0)
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, width)              \
+  do {                                                             \
+    DoubleRegister result = i.OutputDoubleRegister();              \
+    size_t index = 0;                                              \
+    AddressingMode mode = kMode_None;                              \
+    MemOperand operand = i.MemoryOperand(&mode, index);            \
+    Register offset = operand.rb();                                \
+    __ lgfr(offset, offset);                                       \
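+    /* lgfr sign-extends the 32-bit offset (see the TODO above). */  \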
+    if (HasRegisterInput(instr, 2)) {                              \
+      __ CmpLogical32(offset, i.InputRegister(2));                 \
+    } else {                                                       \
+      __ CmpLogical32(offset, i.InputImmediate(2));                \
+    }                                                              \
+    auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
+    __ bge(ool->entry());                                          \
+    __ asm_instr(result, operand);                                 \
+    __ bind(ool->exit());                                          \
+  } while (0)
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)             \
+  do {                                                       \
+    Register result = i.OutputRegister();                    \
+    size_t index = 0;                                        \
+    AddressingMode mode = kMode_None;                        \
+    MemOperand operand = i.MemoryOperand(&mode, index);      \
+    Register offset = operand.rb();                          \
+    __ lgfr(offset, offset);                                 \
+    if (HasRegisterInput(instr, 2)) {                        \
+      __ CmpLogical32(offset, i.InputRegister(2));           \
+    } else {                                                 \
+      __ CmpLogical32(offset, i.InputImmediate(2));          \
+    }                                                        \
+    auto ool = new (zone()) OutOfLineLoadZero(this, result); \
+    __ bge(ool->entry());                                    \
+    __ asm_instr(result, operand);                           \
+    __ bind(ool->exit());                                    \
+  } while (0)
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_STORE_FLOAT32()                \
+  do {                                                  \
+    Label done;                                         \
+    size_t index = 0;                                   \
+    AddressingMode mode = kMode_None;                   \
+    MemOperand operand = i.MemoryOperand(&mode, index); \
+    Register offset = operand.rb();                     \
+    __ lgfr(offset, offset);                            \
+    if (HasRegisterInput(instr, 2)) {                   \
+      __ CmpLogical32(offset, i.InputRegister(2));      \
+    } else {                                            \
+      __ CmpLogical32(offset, i.InputImmediate(2));     \
+    }                                                   \
+    __ bge(&done);                                      \
+    DoubleRegister value = i.InputDoubleRegister(3);    \
+    __ StoreFloat32(value, operand);                    \
+    __ bind(&done);                                     \
+  } while (0)
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_STORE_DOUBLE()                 \
+  do {                                                  \
+    Label done;                                         \
+    size_t index = 0;                                   \
+    AddressingMode mode = kMode_None;                   \
+    MemOperand operand = i.MemoryOperand(&mode, index); \
+    DCHECK_EQ(kMode_MRR, mode);                         \
+    Register offset = operand.rb();                     \
+    __ lgfr(offset, offset);                            \
+    if (HasRegisterInput(instr, 2)) {                   \
+      __ CmpLogical32(offset, i.InputRegister(2));      \
+    } else {                                            \
+      __ CmpLogical32(offset, i.InputImmediate(2));     \
+    }                                                   \
+    __ bge(&done);                                      \
+    DoubleRegister value = i.InputDoubleRegister(3);    \
+    __ StoreDouble(value, operand);                     \
+    __ bind(&done);                                     \
+  } while (0)
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)       \
+  do {                                                  \
+    Label done;                                         \
+    size_t index = 0;                                   \
+    AddressingMode mode = kMode_None;                   \
+    MemOperand operand = i.MemoryOperand(&mode, index); \
+    Register offset = operand.rb();                     \
+    __ lgfr(offset, offset);                            \
+    if (HasRegisterInput(instr, 2)) {                   \
+      __ CmpLogical32(offset, i.InputRegister(2));      \
+    } else {                                            \
+      __ CmpLogical32(offset, i.InputImmediate(2));     \
+    }                                                   \
+    __ bge(&done);                                      \
+    Register value = i.InputRegister(3);                \
+    __ asm_instr(value, operand);                       \
+    __ bind(&done);                                     \
+  } while (0)
+
+void CodeGenerator::AssembleDeconstructFrame() {
+  __ LeaveFrame(StackFrame::MANUAL);
+}
+
+void CodeGenerator::AssembleSetupStackPointer() {}
+
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+  if (sp_slot_delta > 0) {
+    __ AddP(sp, sp, Operand(sp_slot_delta * kPointerSize));
+  }
+  frame_access_state()->SetFrameAccessToDefault();
+}
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+  if (sp_slot_delta < 0) {
+    __ AddP(sp, sp, Operand(sp_slot_delta * kPointerSize));
+    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+  }
+  if (frame_access_state()->has_frame()) {
+    __ RestoreFrameStateForTailCall();
+  }
+  frame_access_state()->SetFrameAccessToSP();
+}
+
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+                                                     Register scratch1,
+                                                     Register scratch2,
+                                                     Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Label done;
+
+  // Check if current frame is an arguments adaptor frame.
+  __ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ CmpSmiLiteral(scratch1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ bne(&done);
+
+  // Load arguments count from current arguments adaptor frame (note that it
+  // does not include the receiver).
+  Register caller_args_count_reg = scratch1;
+  __ LoadP(caller_args_count_reg,
+           MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3);
+  __ bind(&done);
+}
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+  S390OperandConverter i(this, instr);
+  ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
+
+  switch (opcode) {
+    case kArchCallCodeObject: {
+      EnsureSpaceForLazyDeopt();
+      if (HasRegisterInput(instr, 0)) {
+        __ AddP(ip, i.InputRegister(0),
+                Operand(Code::kHeaderSize - kHeapObjectTag));
+        __ Call(ip);
+      } else {
+        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
+                RelocInfo::CODE_TARGET);
+      }
+      RecordCallPosition(instr);
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
+    case kArchTailCallCodeObjectFromJSFunction:
+    case kArchTailCallCodeObject: {
+      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+      AssembleDeconstructActivationRecord(stack_param_delta);
+      if (opcode == kArchTailCallCodeObjectFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
+      if (HasRegisterInput(instr, 0)) {
+        __ AddP(ip, i.InputRegister(0),
+                Operand(Code::kHeaderSize - kHeapObjectTag));
+        __ Jump(ip);
+      } else {
+        // We cannot use the constant pool to load the target since
+        // we've already restored the caller's frame.
+        ConstantPoolUnavailableScope constant_pool_unavailable(masm());
+        __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
+                RelocInfo::CODE_TARGET);
+      }
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
+    case kArchCallJSFunction: {
+      EnsureSpaceForLazyDeopt();
+      Register func = i.InputRegister(0);
+      if (FLAG_debug_code) {
+        // Check the function's context matches the context argument.
+        __ LoadP(kScratchReg,
+                 FieldMemOperand(func, JSFunction::kContextOffset));
+        __ CmpP(cp, kScratchReg);
+        __ Assert(eq, kWrongFunctionContext);
+      }
+      __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+      __ Call(ip);
+      RecordCallPosition(instr);
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
+    case kArchTailCallJSFunctionFromJSFunction:
+    case kArchTailCallJSFunction: {
+      Register func = i.InputRegister(0);
+      if (FLAG_debug_code) {
+        // Check the function's context matches the context argument.
+        __ LoadP(kScratchReg,
+                 FieldMemOperand(func, JSFunction::kContextOffset));
+        __ CmpP(cp, kScratchReg);
+        __ Assert(eq, kWrongFunctionContext);
+      }
+      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+      AssembleDeconstructActivationRecord(stack_param_delta);
+      if (opcode == kArchTailCallJSFunctionFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
+      __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+      __ Jump(ip);
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
+    case kArchPrepareCallCFunction: {
+      int const num_parameters = MiscField::decode(instr->opcode());
+      __ PrepareCallCFunction(num_parameters, kScratchReg);
+      // Frame alignment requires using FP-relative frame addressing.
+      frame_access_state()->SetFrameAccessToFP();
+      break;
+    }
+    case kArchPrepareTailCall:
+      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+      break;
+    case kArchCallCFunction: {
+      int const num_parameters = MiscField::decode(instr->opcode());
+      if (instr->InputAt(0)->IsImmediate()) {
+        ExternalReference ref = i.InputExternalReference(0);
+        __ CallCFunction(ref, num_parameters);
+      } else {
+        Register func = i.InputRegister(0);
+        __ CallCFunction(func, num_parameters);
+      }
+      frame_access_state()->SetFrameAccessToDefault();
+      frame_access_state()->ClearSPDelta();
+      break;
+    }
+    case kArchJmp:
+      AssembleArchJump(i.InputRpo(0));
+      break;
+    case kArchLookupSwitch:
+      AssembleArchLookupSwitch(instr);
+      break;
+    case kArchTableSwitch:
+      AssembleArchTableSwitch(instr);
+      break;
+    case kArchNop:
+    case kArchThrowTerminator:
+      // don't emit code for nops.
+      break;
+    case kArchDeoptimize: {
+      int deopt_state_id =
+          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
+      Deoptimizer::BailoutType bailout_type =
+          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+      AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+      break;
+    }
+    case kArchRet:
+      AssembleReturn();
+      break;
+    case kArchStackPointer:
+      __ LoadRR(i.OutputRegister(), sp);
+      break;
+    case kArchFramePointer:
+      __ LoadRR(i.OutputRegister(), fp);
+      break;
+    case kArchParentFramePointer:
+      if (frame_access_state()->has_frame()) {
+        __ LoadP(i.OutputRegister(), MemOperand(fp, 0));
+      } else {
+        __ LoadRR(i.OutputRegister(), fp);
+      }
+      break;
+    case kArchTruncateDoubleToI:
+      // TODO(mbrandy): move slow call to stub out of line.
+      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+      break;
+    case kArchStoreWithWriteBarrier: {
+      RecordWriteMode mode =
+          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+      Register object = i.InputRegister(0);
+      Register value = i.InputRegister(2);
+      Register scratch0 = i.TempRegister(0);
+      Register scratch1 = i.TempRegister(1);
+      OutOfLineRecordWrite* ool;
+
+      AddressingMode addressing_mode =
+          AddressingModeField::decode(instr->opcode());
+      if (addressing_mode == kMode_MRI) {
+        int32_t offset = i.InputInt32(1);
+        ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
+                                                scratch0, scratch1, mode);
+        __ StoreP(value, MemOperand(object, offset));
+      } else {
+        DCHECK_EQ(kMode_MRR, addressing_mode);
+        Register offset(i.InputRegister(1));
+        ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
+                                                scratch0, scratch1, mode);
+        __ StoreP(value, MemOperand(object, offset));
+      }
+      __ CheckPageFlag(object, scratch0,
+                       MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+                       ool->entry());
+      __ bind(ool->exit());
+      break;
+    }
+    case kArchStackSlot: {
+      FrameOffset offset =
+          frame_access_state()->GetFrameOffset(i.InputInt32(0));
+      __ AddP(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
+              Operand(offset.offset()));
+      break;
+    }
+    case kS390_And:
+      ASSEMBLE_BINOP(AndP, AndP);
+      break;
+    case kS390_AndComplement:
+      __ NotP(i.InputRegister(1));
+      __ AndP(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      break;
+    case kS390_Or:
+      ASSEMBLE_BINOP(OrP, OrP);
+      break;
+    case kS390_OrComplement:
+      __ NotP(i.InputRegister(1));
+      __ OrP(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      break;
+    case kS390_Xor:
+      ASSEMBLE_BINOP(XorP, XorP);
+      break;
+    case kS390_ShiftLeft32:
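+      // If the shift amount aliases the output register and the
+      // distinct-operands facility is unavailable, copy it to a scratch
+      // register first so the two-operand form does not clobber it.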
+      if (HasRegisterInput(instr, 1)) {
+        if (i.OutputRegister().is(i.InputRegister(1)) &&
+            !CpuFeatures::IsSupported(DISTINCT_OPS)) {
+          __ LoadRR(kScratchReg, i.InputRegister(1));
+          __ ShiftLeft(i.OutputRegister(), i.InputRegister(0), kScratchReg);
+        } else {
+          ASSEMBLE_BINOP(ShiftLeft, ShiftLeft);
+        }
+      } else {
+        ASSEMBLE_BINOP(ShiftLeft, ShiftLeft);
+      }
+      __ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_ShiftLeft64:
+      ASSEMBLE_BINOP(sllg, sllg);
+      break;
+#endif
+    case kS390_ShiftRight32:
+      if (HasRegisterInput(instr, 1)) {
+        if (i.OutputRegister().is(i.InputRegister(1)) &&
+            !CpuFeatures::IsSupported(DISTINCT_OPS)) {
+          __ LoadRR(kScratchReg, i.InputRegister(1));
+          __ ShiftRight(i.OutputRegister(), i.InputRegister(0), kScratchReg);
+        } else {
+          ASSEMBLE_BINOP(ShiftRight, ShiftRight);
+        }
+      } else {
+        ASSEMBLE_BINOP(ShiftRight, ShiftRight);
+      }
+      __ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_ShiftRight64:
+      ASSEMBLE_BINOP(srlg, srlg);
+      break;
+#endif
+    case kS390_ShiftRightArith32:
+      if (HasRegisterInput(instr, 1)) {
+        if (i.OutputRegister().is(i.InputRegister(1)) &&
+            !CpuFeatures::IsSupported(DISTINCT_OPS)) {
+          __ LoadRR(kScratchReg, i.InputRegister(1));
+          __ ShiftRightArith(i.OutputRegister(), i.InputRegister(0),
+                             kScratchReg);
+        } else {
+          ASSEMBLE_BINOP(ShiftRightArith, ShiftRightArith);
+        }
+      } else {
+        ASSEMBLE_BINOP(ShiftRightArith, ShiftRightArith);
+      }
+      __ LoadlW(i.OutputRegister(), i.OutputRegister());
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_ShiftRightArith64:
+      ASSEMBLE_BINOP(srag, srag);
+      break;
+#endif
+#if !V8_TARGET_ARCH_S390X
+    case kS390_AddPair:
+      // i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
+      __ AddLogical32(i.OutputRegister(0), i.InputRegister(0),
+                      i.InputRegister(2));
+      __ AddLogicalWithCarry32(i.OutputRegister(1), i.InputRegister(1),
+                               i.InputRegister(3));
+      break;
+    case kS390_SubPair:
+      // i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
+      __ SubLogical32(i.OutputRegister(0), i.InputRegister(0),
+                      i.InputRegister(2));
+      __ SubLogicalWithBorrow32(i.OutputRegister(1), i.InputRegister(1),
+                                i.InputRegister(3));
+      break;
+    case kS390_MulPair:
+      // i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
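+      // Assemble each 64-bit operand in a register (high word shifted up,
+      // low word inserted), multiply, then split the 64-bit product back
+      // into the (low, high) output pair.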
+      __ sllg(r0, i.InputRegister(1), Operand(32));
+      __ sllg(r1, i.InputRegister(3), Operand(32));
+      __ lr(r0, i.InputRegister(0));
+      __ lr(r1, i.InputRegister(2));
+      __ msgr(r1, r0);
+      __ lr(i.OutputRegister(0), r1);
+      __ srag(i.OutputRegister(1), r1, Operand(32));
+      break;
+    case kS390_ShiftLeftPair:
+      if (instr->InputAt(2)->IsImmediate()) {
+        __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
+                         i.InputRegister(0), i.InputRegister(1),
+                         i.InputInt32(2));
+      } else {
+        __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
+                         i.InputRegister(0), i.InputRegister(1), kScratchReg,
+                         i.InputRegister(2));
+      }
+      break;
+    case kS390_ShiftRightPair:
+      if (instr->InputAt(2)->IsImmediate()) {
+        __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
+                          i.InputRegister(0), i.InputRegister(1),
+                          i.InputInt32(2));
+      } else {
+        __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
+                          i.InputRegister(0), i.InputRegister(1), kScratchReg,
+                          i.InputRegister(2));
+      }
+      break;
+    case kS390_ShiftRightArithPair:
+      if (instr->InputAt(2)->IsImmediate()) {
+        __ ShiftRightArithPair(i.OutputRegister(0), i.OutputRegister(1),
+                               i.InputRegister(0), i.InputRegister(1),
+                               i.InputInt32(2));
+      } else {
+        __ ShiftRightArithPair(i.OutputRegister(0), i.OutputRegister(1),
+                               i.InputRegister(0), i.InputRegister(1),
+                               kScratchReg, i.InputRegister(2));
+      }
+      break;
+#endif
+    case kS390_RotRight32:
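+      // S390 only has a rotate-left instruction (rll), so rotate-right is
+      // done as rotate-left by the negated (or 32 - n) amount.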
+      if (HasRegisterInput(instr, 1)) {
+        __ LoadComplementRR(kScratchReg, i.InputRegister(1));
+        __ rll(i.OutputRegister(), i.InputRegister(0), kScratchReg);
+      } else {
+        __ rll(i.OutputRegister(), i.InputRegister(0),
+               Operand(32 - i.InputInt32(1)));
+      }
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_RotRight64:
+      if (HasRegisterInput(instr, 1)) {
+        __ LoadComplementRR(kScratchReg, i.InputRegister(1));
+        __ rllg(i.OutputRegister(), i.InputRegister(0), kScratchReg);
+      } else {
+        __ rllg(i.OutputRegister(), i.InputRegister(0),
+                Operand(64 - i.InputInt32(1)));
+      }
+      break;
+#endif
+    case kS390_Not:
+      __ LoadRR(i.OutputRegister(), i.InputRegister(0));
+      __ NotP(i.OutputRegister());
+      break;
+    case kS390_RotLeftAndMask32:
+      if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+        int shiftAmount = i.InputInt32(1);
+        int endBit = 63 - i.InputInt32(3);
+        int startBit = 63 - i.InputInt32(2);
+        __ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
+        __ risbg(i.OutputRegister(), i.OutputRegister(), Operand(startBit),
+                 Operand(endBit), Operand::Zero(), true);
+      } else {
+        int shiftAmount = i.InputInt32(1);
+        int clearBitLeft = 63 - i.InputInt32(2);
+        int clearBitRight = i.InputInt32(3);
+        __ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
+        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitLeft));
+        __ srlg(i.OutputRegister(), i.OutputRegister(),
+                Operand((clearBitLeft + clearBitRight)));
+        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitRight));
+      }
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_RotLeftAndClear64:
+      UNIMPLEMENTED();  // Find correct instruction
+      break;
+    case kS390_RotLeftAndClearLeft64:
+      if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+        int shiftAmount = i.InputInt32(1);
+        int endBit = 63;
+        int startBit = 63 - i.InputInt32(2);
+        __ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
+                 Operand(endBit), Operand(shiftAmount), true);
+      } else {
+        int shiftAmount = i.InputInt32(1);
+        int clearBit = 63 - i.InputInt32(2);
+        __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
+        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
+        __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
+      }
+      break;
+    case kS390_RotLeftAndClearRight64:
+      if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+        int shiftAmount = i.InputInt32(1);
+        int endBit = 63 - i.InputInt32(2);
+        int startBit = 0;
+        __ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
+                 Operand(endBit), Operand(shiftAmount), true);
+      } else {
+        int shiftAmount = i.InputInt32(1);
+        int clearBit = i.InputInt32(2);
+        __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
+        __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
+        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
+      }
+      break;
+#endif
+    case kS390_Add:
+#if V8_TARGET_ARCH_S390X
+      if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+        ASSEMBLE_ADD_WITH_OVERFLOW();
+      } else {
+#endif
+        ASSEMBLE_BINOP(AddP, AddP);
+#if V8_TARGET_ARCH_S390X
+      }
+#endif
+      break;
+    case kS390_AddWithOverflow32:
+      ASSEMBLE_ADD_WITH_OVERFLOW32();
+      break;
+    case kS390_AddFloat:
+      // Ensure we don't clobber right/InputReg(1)
+      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+        ASSEMBLE_FLOAT_UNOP(aebr);
+      } else {
+        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
+          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        __ aebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+      }
+      break;
+    case kS390_AddDouble:
+      // Ensure we don't clobber the right operand (InputDoubleRegister(1)).
+      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+        ASSEMBLE_FLOAT_UNOP(adbr);
+      } else {
+        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
+          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        __ adbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+      }
+      break;
+    case kS390_Sub:
+#if V8_TARGET_ARCH_S390X
+      if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+        ASSEMBLE_SUB_WITH_OVERFLOW();
+      } else {
+#endif
+        ASSEMBLE_BINOP(SubP, SubP);
+#if V8_TARGET_ARCH_S390X
+      }
+#endif
+      break;
+    case kS390_SubWithOverflow32:
+      ASSEMBLE_SUB_WITH_OVERFLOW32();
+      break;
+    case kS390_SubFloat:
+      // OutputDoubleRegister() = InputDoubleRegister(0) - InputDoubleRegister(1)
+      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+        __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
+        __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        __ sebr(i.OutputDoubleRegister(), kScratchDoubleReg);
+      } else {
+        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0))) {
+          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        }
+        __ sebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+      }
+      break;
+    case kS390_SubDouble:
+      // OutputDoubleRegister() = InputDoubleRegister(0) - InputDoubleRegister(1)
+      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+        __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
+        __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        __ sdbr(i.OutputDoubleRegister(), kScratchDoubleReg);
+      } else {
+        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0))) {
+          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        }
+        __ sdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+      }
+      break;
+    case kS390_Mul32:
+#if V8_TARGET_ARCH_S390X
+    case kS390_Mul64:
+#endif
+      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      break;
+    case kS390_MulHigh32:
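+      // mr multiplies the even/odd pair r0:r1 by the operand; the 64-bit
+      // product lands in r0:r1 with the high 32 bits in r0.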
+      __ LoadRR(r1, i.InputRegister(0));
+      __ mr_z(r0, i.InputRegister(1));
+      __ LoadW(i.OutputRegister(), r0);
+      break;
+    case kS390_MulHighU32:
+      __ LoadRR(r1, i.InputRegister(0));
+      __ mlr(r0, i.InputRegister(1));
+      __ LoadlW(i.OutputRegister(), r0);
+      break;
+    case kS390_MulFloat:
+      // Ensure we don't clobber right
+      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+        ASSEMBLE_FLOAT_UNOP(meebr);
+      } else {
+        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
+          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        __ meebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+      }
+      break;
+    case kS390_MulDouble:
+      // Ensure we don't clobber right
+      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+        ASSEMBLE_FLOAT_UNOP(mdbr);
+      } else {
+        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
+          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        __ mdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+      }
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_Div64:
+      __ LoadRR(r1, i.InputRegister(0));
+      __ dsgr(r0, i.InputRegister(1));  // R1: Dividend
+      __ ltgr(i.OutputRegister(), r1);  // Copy R1: Quotient to output
+      break;
+#endif
+    case kS390_Div32:
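+      // srda sign-extends the dividend across the r0:r1 pair; dr then
+      // leaves the quotient in r1 and the remainder in r0.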
+      __ LoadRR(r0, i.InputRegister(0));
+      __ srda(r0, Operand(32));
+      __ dr(r0, i.InputRegister(1));
+      __ LoadAndTestP_ExtendSrc(i.OutputRegister(),
+                                r1);  // Copy R1: Quotient to output
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_DivU64:
+      __ LoadRR(r1, i.InputRegister(0));
+      __ LoadImmP(r0, Operand::Zero());
+      __ dlgr(r0, i.InputRegister(1));  // R0:R1: Dividend
+      __ ltgr(i.OutputRegister(), r1);  // Copy R1: Quotient to output
+      break;
+#endif
+    case kS390_DivU32:
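+      // srdl shifts the dividend into r1 and zero-fills r0, forming the
+      // unsigned 64-bit dividend in r0:r1 for dlr.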
+      __ LoadRR(r0, i.InputRegister(0));
+      __ srdl(r0, Operand(32));
+      __ dlr(r0, i.InputRegister(1));  // R0:R1: Dividend
+      __ LoadlW(i.OutputRegister(), r1);  // Copy R1: Quotient to output
+      __ LoadAndTestP_ExtendSrc(r1, r1);
+      break;
+
+    case kS390_DivFloat:
+      // OutputDoubleRegister() = InputDoubleRegister(0) / InputDoubleRegister(1)
+      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+        __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
+        __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        __ debr(i.OutputDoubleRegister(), kScratchDoubleReg);
+      } else {
+        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
+          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        __ debr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+      }
+      break;
+    case kS390_DivDouble:
+      // OutputDoubleRegister() = InputDoubleRegister(0) / InputDoubleRegister(1)
+      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
+        __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
+        __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        __ ddbr(i.OutputDoubleRegister(), kScratchDoubleReg);
+      } else {
+        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
+          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+        __ ddbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+      }
+      break;
+    case kS390_Mod32:
+      ASSEMBLE_MODULO(dr, srda);
+      break;
+    case kS390_ModU32:
+      ASSEMBLE_MODULO(dlr, srdl);
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_Mod64:
+      __ LoadRR(r1, i.InputRegister(0));
+      __ dsgr(r0, i.InputRegister(1));  // R1: Dividend
+      __ ltgr(i.OutputRegister(), r0);  // Copy R0: Remainder to output
+      break;
+    case kS390_ModU64:
+      __ LoadRR(r1, i.InputRegister(0));
+      __ LoadImmP(r0, Operand::Zero());
+      __ dlgr(r0, i.InputRegister(1));  // R0:R1: Dividend
+      __ ltgr(i.OutputRegister(), r0);  // Copy R0: Remainder to output
+      break;
+#endif
+    case kS390_AbsFloat:
+      __ lpebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kS390_SqrtFloat:
+      ASSEMBLE_FLOAT_UNOP(sqebr);
+      break;
+    case kS390_FloorFloat:
+      __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF);
+      break;
+    case kS390_CeilFloat:
+      __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF);
+      break;
+    case kS390_TruncateFloat:
+      __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0);
+      break;
+    // Double operations
+    case kS390_ModDouble:
+      ASSEMBLE_FLOAT_MODULO();
+      break;
+    case kS390_Neg:
+      __ LoadComplementRR(i.OutputRegister(), i.InputRegister(0));
+      break;
+    case kS390_MaxDouble:
+      ASSEMBLE_FLOAT_MAX(kScratchDoubleReg, kScratchReg);
+      break;
+    case kS390_MinDouble:
+      ASSEMBLE_FLOAT_MIN(kScratchDoubleReg, kScratchReg);
+      break;
+    case kS390_AbsDouble:
+      __ lpdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kS390_SqrtDouble:
+      ASSEMBLE_FLOAT_UNOP(sqdbr);
+      break;
+    case kS390_FloorDouble:
+      __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF);
+      break;
+    case kS390_CeilDouble:
+      __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF);
+      break;
+    case kS390_TruncateDouble:
+      __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0);
+      break;
+    case kS390_RoundDouble:
+      __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                v8::internal::Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0);
+      break;
+    case kS390_NegDouble:
+      ASSEMBLE_FLOAT_UNOP(lcdbr);
+      break;
+    case kS390_Cntlz32: {
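+      // flogr counts leading zeros of the full 64-bit register, so
+      // zero-extend the 32-bit input and subtract the 32 extra zeros.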
+      __ llgfr(i.OutputRegister(), i.InputRegister(0));
+      __ flogr(r0, i.OutputRegister());
+      __ LoadRR(i.OutputRegister(), r0);
+      __ SubP(i.OutputRegister(), Operand(32));
+    } break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_Cntlz64: {
+      __ flogr(r0, i.InputRegister(0));
+      __ LoadRR(i.OutputRegister(), r0);
+    } break;
+#endif
+    case kS390_Popcnt32:
+      __ Popcnt32(i.OutputRegister(), i.InputRegister(0));
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_Popcnt64:
+      __ Popcnt64(i.OutputRegister(), i.InputRegister(0));
+      break;
+#endif
+    case kS390_Cmp32:
+      ASSEMBLE_COMPARE(Cmp32, CmpLogical32);
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_Cmp64:
+      ASSEMBLE_COMPARE(CmpP, CmpLogicalP);
+      break;
+#endif
+    case kS390_CmpFloat:
+      __ cebr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kS390_CmpDouble:
+      __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kS390_Tst32:
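+      // AND the operands into r0, then sign-extend-and-test the 32-bit
+      // result so the condition code reflects its value.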
+      if (HasRegisterInput(instr, 1)) {
+        __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
+      } else {
+        __ AndP(r0, i.InputRegister(0), i.InputImmediate(1));
+      }
+      __ LoadAndTestP_ExtendSrc(r0, r0);
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_Tst64:
+      if (HasRegisterInput(instr, 1)) {
+        __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
+      } else {
+        __ AndP(r0, i.InputRegister(0), i.InputImmediate(1));
+      }
+      break;
+#endif
+    case kS390_Push:
+      if (instr->InputAt(0)->IsDoubleRegister()) {
+        __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+        __ lay(sp, MemOperand(sp, -kDoubleSize));
+        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+      } else {
+        __ Push(i.InputRegister(0));
+        frame_access_state()->IncreaseSPDelta(1);
+      }
+      break;
+    case kS390_PushFrame: {
+      int num_slots = i.InputInt32(1);
+      if (instr->InputAt(0)->IsDoubleRegister()) {
+        __ StoreDouble(i.InputDoubleRegister(0),
+                       MemOperand(sp, -num_slots * kPointerSize));
+      } else {
+        __ StoreP(i.InputRegister(0),
+                  MemOperand(sp, -num_slots * kPointerSize));
+      }
+      __ lay(sp, MemOperand(sp, -num_slots * kPointerSize));
+      break;
+    }
+    case kS390_StoreToStackSlot: {
+      int slot = i.InputInt32(1);
+      if (instr->InputAt(0)->IsDoubleRegister()) {
+        __ StoreDouble(i.InputDoubleRegister(0),
+                       MemOperand(sp, slot * kPointerSize));
+      } else {
+        __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
+      }
+      break;
+    }
+    case kS390_ExtendSignWord8:
+#if V8_TARGET_ARCH_S390X
+      __ lgbr(i.OutputRegister(), i.InputRegister(0));
+#else
+      __ lbr(i.OutputRegister(), i.InputRegister(0));
+#endif
+      break;
+    case kS390_ExtendSignWord16:
+#if V8_TARGET_ARCH_S390X
+      __ lghr(i.OutputRegister(), i.InputRegister(0));
+#else
+      __ lhr(i.OutputRegister(), i.InputRegister(0));
+#endif
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_ExtendSignWord32:
+      __ lgfr(i.OutputRegister(), i.InputRegister(0));
+      break;
+    case kS390_Uint32ToUint64:
+      // Zero extend
+      __ llgfr(i.OutputRegister(), i.InputRegister(0));
+      break;
+    case kS390_Int64ToInt32:
+      // Sign extend
+      __ lgfr(i.OutputRegister(), i.InputRegister(0));
+      break;
+    case kS390_Int64ToFloat32:
+      __ ConvertInt64ToFloat(i.InputRegister(0), i.OutputDoubleRegister());
+      break;
+    case kS390_Int64ToDouble:
+      __ ConvertInt64ToDouble(i.InputRegister(0), i.OutputDoubleRegister());
+      break;
+    case kS390_Uint64ToFloat32:
+      __ ConvertUnsignedInt64ToFloat(i.InputRegister(0),
+                                     i.OutputDoubleRegister());
+      break;
+    case kS390_Uint64ToDouble:
+      __ ConvertUnsignedInt64ToDouble(i.InputRegister(0),
+                                      i.OutputDoubleRegister());
+      break;
+#endif
+    case kS390_Int32ToFloat32:
+      __ ConvertIntToFloat(i.InputRegister(0), i.OutputDoubleRegister());
+      break;
+    case kS390_Int32ToDouble:
+      __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
+      break;
+    case kS390_Uint32ToFloat32:
+      __ ConvertUnsignedIntToFloat(i.InputRegister(0),
+                                   i.OutputDoubleRegister());
+      break;
+    case kS390_Uint32ToDouble:
+      __ ConvertUnsignedIntToDouble(i.InputRegister(0),
+                                    i.OutputDoubleRegister());
+      break;
+    case kS390_DoubleToInt32:
+    case kS390_DoubleToUint32:
+    case kS390_DoubleToInt64: {
+#if V8_TARGET_ARCH_S390X
+      bool check_conversion =
+          (opcode == kS390_DoubleToInt64 && i.OutputCount() > 1);
+#endif
+      __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
+#if !V8_TARGET_ARCH_S390X
+                              kScratchReg,
+#endif
+                              i.OutputRegister(0), kScratchDoubleReg);
+#if V8_TARGET_ARCH_S390X
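+      // Materialize the success flag from the condition code: CC3 (branch
+      // mask 1) flags the special case (NaN or out-of-range input), in
+      // which case the flag stays 0.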
+      if (check_conversion) {
+        Label conversion_done;
+        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
+        __ b(Condition(1), &conversion_done);  // special case
+        __ LoadImmP(i.OutputRegister(1), Operand(1));
+        __ bind(&conversion_done);
+      }
+#endif
+      break;
+    }
+    case kS390_Float32ToInt32: {
+      bool check_conversion = (i.OutputCount() > 1);
+      __ ConvertFloat32ToInt32(i.InputDoubleRegister(0), i.OutputRegister(0),
+                               kScratchDoubleReg);
+      if (check_conversion) {
+        Label conversion_done;
+        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
+        __ b(Condition(1), &conversion_done);  // special case
+        __ LoadImmP(i.OutputRegister(1), Operand(1));
+        __ bind(&conversion_done);
+      }
+      break;
+    }
+    case kS390_Float32ToUint32: {
+      bool check_conversion = (i.OutputCount() > 1);
+      __ ConvertFloat32ToUnsignedInt32(i.InputDoubleRegister(0),
+                                       i.OutputRegister(0), kScratchDoubleReg);
+      if (check_conversion) {
+        Label conversion_done;
+        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
+        __ b(Condition(1), &conversion_done);  // special case
+        __ LoadImmP(i.OutputRegister(1), Operand(1));
+        __ bind(&conversion_done);
+      }
+      break;
+    }
+#if V8_TARGET_ARCH_S390X
+    case kS390_Float32ToUint64: {
+      bool check_conversion = (i.OutputCount() > 1);
+      __ ConvertFloat32ToUnsignedInt64(i.InputDoubleRegister(0),
+                                       i.OutputRegister(0), kScratchDoubleReg);
+      if (check_conversion) {
+        Label conversion_done;
+        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
+        __ b(Condition(1), &conversion_done);  // special case
+        __ LoadImmP(i.OutputRegister(1), Operand(1));
+        __ bind(&conversion_done);
+      }
+      break;
+    }
+#endif
+    case kS390_Float32ToInt64: {
+#if V8_TARGET_ARCH_S390X
+      bool check_conversion =
+          (opcode == kS390_Float32ToInt64 && i.OutputCount() > 1);
+#endif
+      __ ConvertFloat32ToInt64(i.InputDoubleRegister(0),
+#if !V8_TARGET_ARCH_S390X
+                               kScratchReg,
+#endif
+                               i.OutputRegister(0), kScratchDoubleReg);
+#if V8_TARGET_ARCH_S390X
+      if (check_conversion) {
+        Label conversion_done;
+        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
+        __ b(Condition(1), &conversion_done);  // special case
+        __ LoadImmP(i.OutputRegister(1), Operand(1));
+        __ bind(&conversion_done);
+      }
+#endif
+      break;
+    }
+#if V8_TARGET_ARCH_S390X
+    case kS390_DoubleToUint64: {
+      bool check_conversion = (i.OutputCount() > 1);
+      __ ConvertDoubleToUnsignedInt64(i.InputDoubleRegister(0),
+                                      i.OutputRegister(0), kScratchDoubleReg);
+      if (check_conversion) {
+        Label conversion_done;
+        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
+        __ b(Condition(1), &conversion_done);  // special case
+        __ LoadImmP(i.OutputRegister(1), Operand(1));
+        __ bind(&conversion_done);
+      }
+      break;
+    }
+#endif
+    case kS390_DoubleToFloat32:
+      __ ledbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kS390_Float32ToDouble:
+      __ ldebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kS390_DoubleExtractLowWord32:
+      // TODO(john.yan): This can cause a problem when interrupted;
+      //                 use an freg->greg move instruction instead.
+      __ stdy(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+      __ LoadlW(i.OutputRegister(),
+                MemOperand(sp, -kDoubleSize + Register::kMantissaOffset));
+      break;
+    case kS390_DoubleExtractHighWord32:
+      // TODO(john.yan): This can cause a problem when interrupted;
+      //                 use an freg->greg move instruction instead.
+      __ stdy(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+      __ LoadlW(i.OutputRegister(),
+                MemOperand(sp, -kDoubleSize + Register::kExponentOffset));
+      break;
+    case kS390_DoubleInsertLowWord32:
+      __ InsertDoubleLow(i.OutputDoubleRegister(), i.InputRegister(1));
+      break;
+    case kS390_DoubleInsertHighWord32:
+      __ InsertDoubleHigh(i.OutputDoubleRegister(), i.InputRegister(1));
+      break;
+    case kS390_DoubleConstruct:
+// TODO(john.yan): This can cause a problem when interrupted;
+//                 use a greg->freg move instruction instead.
+#if V8_TARGET_LITTLE_ENDIAN
+      __ StoreW(i.InputRegister(0), MemOperand(sp, -kDoubleSize / 2));
+      __ StoreW(i.InputRegister(1), MemOperand(sp, -kDoubleSize));
+#else
+      __ StoreW(i.InputRegister(1), MemOperand(sp, -kDoubleSize / 2));
+      __ StoreW(i.InputRegister(0), MemOperand(sp, -kDoubleSize));
+#endif
+      __ ldy(i.OutputDoubleRegister(), MemOperand(sp, -kDoubleSize));
+      break;
+    case kS390_LoadWordS8:
+      ASSEMBLE_LOAD_INTEGER(LoadlB);
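+      // LoadlB zero-extends, so sign-extend the loaded byte explicitly.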
+#if V8_TARGET_ARCH_S390X
+      __ lgbr(i.OutputRegister(), i.OutputRegister());
+#else
+      __ lbr(i.OutputRegister(), i.OutputRegister());
+#endif
+      break;
+    case kS390_BitcastFloat32ToInt32:
+      __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0));
+      break;
+    case kS390_BitcastInt32ToFloat32:
+      __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_BitcastDoubleToInt64:
+      __ MovDoubleToInt64(i.OutputRegister(), i.InputDoubleRegister(0));
+      break;
+    case kS390_BitcastInt64ToDouble:
+      __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
+      break;
+#endif
+    case kS390_LoadWordU8:
+      ASSEMBLE_LOAD_INTEGER(LoadlB);
+      break;
+    case kS390_LoadWordU16:
+      ASSEMBLE_LOAD_INTEGER(LoadLogicalHalfWordP);
+      break;
+    case kS390_LoadWordS16:
+      ASSEMBLE_LOAD_INTEGER(LoadHalfWordP);
+      break;
+    case kS390_LoadWordS32:
+      ASSEMBLE_LOAD_INTEGER(LoadW);
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_LoadWord64:
+      ASSEMBLE_LOAD_INTEGER(lg);
+      break;
+#endif
+    case kS390_LoadFloat32:
+      ASSEMBLE_LOAD_FLOAT(LoadFloat32);
+      break;
+    case kS390_LoadDouble:
+      ASSEMBLE_LOAD_FLOAT(LoadDouble);
+      break;
+    case kS390_StoreWord8:
+      ASSEMBLE_STORE_INTEGER(StoreByte);
+      break;
+    case kS390_StoreWord16:
+      ASSEMBLE_STORE_INTEGER(StoreHalfWord);
+      break;
+    case kS390_StoreWord32:
+      ASSEMBLE_STORE_INTEGER(StoreW);
+      break;
+#if V8_TARGET_ARCH_S390X
+    case kS390_StoreWord64:
+      ASSEMBLE_STORE_INTEGER(StoreP);
+      break;
+#endif
+    case kS390_StoreFloat32:
+      ASSEMBLE_STORE_FLOAT32();
+      break;
+    case kS390_StoreDouble:
+      ASSEMBLE_STORE_DOUBLE();
+      break;
+    case kCheckedLoadInt8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
+#if V8_TARGET_ARCH_S390X
+      __ lgbr(i.OutputRegister(), i.OutputRegister());
+#else
+      __ lbr(i.OutputRegister(), i.OutputRegister());
+#endif
+      break;
+    case kCheckedLoadUint8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
+      break;
+    case kCheckedLoadInt16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadHalfWordP);
+      break;
+    case kCheckedLoadUint16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadLogicalHalfWordP);
+      break;
+    case kCheckedLoadWord32:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadW);
+      break;
+    case kCheckedLoadWord64:
+#if V8_TARGET_ARCH_S390X
+      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadP);
+#else
+      UNREACHABLE();
+#endif
+      break;
+    case kCheckedLoadFloat32:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(LoadFloat32, 32);
+      break;
+    case kCheckedLoadFloat64:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(LoadDouble, 64);
+      break;
+    case kCheckedStoreWord8:
+      ASSEMBLE_CHECKED_STORE_INTEGER(StoreByte);
+      break;
+    case kCheckedStoreWord16:
+      ASSEMBLE_CHECKED_STORE_INTEGER(StoreHalfWord);
+      break;
+    case kCheckedStoreWord32:
+      ASSEMBLE_CHECKED_STORE_INTEGER(StoreW);
+      break;
+    case kCheckedStoreWord64:
+#if V8_TARGET_ARCH_S390X
+      ASSEMBLE_CHECKED_STORE_INTEGER(StoreP);
+#else
+      UNREACHABLE();
+#endif
+      break;
+    case kCheckedStoreFloat32:
+      ASSEMBLE_CHECKED_STORE_FLOAT32();
+      break;
+    case kCheckedStoreFloat64:
+      ASSEMBLE_CHECKED_STORE_DOUBLE();
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+}  // NOLINT(readability/fn_size)
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+  S390OperandConverter i(this, instr);
+  Label* tlabel = branch->true_label;
+  Label* flabel = branch->false_label;
+  ArchOpcode op = instr->arch_opcode();
+  FlagsCondition condition = branch->condition;
+
+  Condition cond = FlagsConditionToCondition(condition, op);
+  if (op == kS390_CmpDouble) {
+    // Check for the unordered case if necessary: a NaN operand sets CC3,
+    // so route unordered compares to flabel/tlabel as the tests expect.
+    if (cond == le || cond == eq || cond == lt) {
+      __ bunordered(flabel);
+    } else if (cond == gt || cond == ne || cond == ge) {
+      __ bunordered(tlabel);
+    }
+  }
+  __ b(cond, tlabel);
+  if (!branch->fallthru) __ b(flabel);  // no fallthru to flabel.
+}
+
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
+  if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
+}
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+                                        FlagsCondition condition) {
+  S390OperandConverter i(this, instr);
+  Label done;
+  ArchOpcode op = instr->arch_opcode();
+  bool check_unordered = (op == kS390_CmpDouble || op == kS390_CmpFloat);
+
+  // Overflow checked for add/sub only.
+  DCHECK((condition != kOverflow && condition != kNotOverflow) ||
+         (op == kS390_AddWithOverflow32 || op == kS390_SubWithOverflow32) ||
+         (op == kS390_Add || op == kS390_Sub));
+
+  // Materialize a full 32-bit 1 or 0 value. The result register is always the
+  // last output of the instruction.
+  DCHECK_NE(0u, instr->OutputCount());
+  Register reg = i.OutputRegister(instr->OutputCount() - 1);
+  Condition cond = FlagsConditionToCondition(condition, op);
+  switch (cond) {
+    case ne:
+    case ge:
+    case gt:
+      if (check_unordered) {
+        __ LoadImmP(reg, Operand(1));
+        __ LoadImmP(kScratchReg, Operand::Zero());
+        __ bunordered(&done);
+        Label cond_true;
+        __ b(cond, &cond_true, Label::kNear);
+        __ LoadRR(reg, kScratchReg);
+        __ bind(&cond_true);
+      } else {
+        Label cond_true;
+        __ LoadImmP(reg, Operand(1));
+        __ b(cond, &cond_true, Label::kNear);
+        __ LoadImmP(reg, Operand::Zero());
+        __ bind(&cond_true);
+      }
+      break;
+    case eq:
+    case lt:
+    case le:
+      if (check_unordered) {
+        __ LoadImmP(reg, Operand::Zero());
+        __ LoadImmP(kScratchReg, Operand(1));
+        __ bunordered(&done);
+        Label cond_false;
+        __ b(NegateCondition(cond), &cond_false, Label::kNear);
+        __ LoadRR(reg, kScratchReg);
+        __ bind(&cond_false);
+      } else {
+        __ LoadImmP(reg, Operand::Zero());
+        Label cond_false;
+        __ b(NegateCondition(cond), &cond_false, Label::kNear);
+        __ LoadImmP(reg, Operand(1));
+        __ bind(&cond_false);
+      }
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  __ bind(&done);
+}
+
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+  S390OperandConverter i(this, instr);
+  Register input = i.InputRegister(0);
+  for (size_t index = 2; index < instr->InputCount(); index += 2) {
+    __ CmpP(input, Operand(i.InputInt32(index + 0)));
+    __ beq(GetLabel(i.InputRpo(index + 1)));
+  }
+  AssembleArchJump(i.InputRpo(1));
+}
+
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+  S390OperandConverter i(this, instr);
+  Register input = i.InputRegister(0);
+  int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
+  Label** cases = zone()->NewArray<Label*>(case_count);
+  for (int32_t index = 0; index < case_count; ++index) {
+    cases[index] = GetLabel(i.InputRpo(index + 2));
+  }
+  Label* const table = AddJumpTable(cases, case_count);
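+  // Bounds-check against the case count, then load the absolute target
+  // address (emitted by AssembleJumpTable) from the table and jump.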
+  __ CmpLogicalP(input, Operand(case_count));
+  __ bge(GetLabel(i.InputRpo(1)));
+  __ larl(kScratchReg, table);
+  __ ShiftLeftP(r1, input, Operand(kPointerSizeLog2));
+  __ LoadP(kScratchReg, MemOperand(kScratchReg, r1));
+  __ Jump(kScratchReg);
+}
+
+void CodeGenerator::AssembleDeoptimizerCall(
+    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
+  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+      isolate(), deoptimization_id, bailout_type);
+  // TODO(turbofan): We should be able to generate better code by sharing the
+  // actual final call site and just bl'ing to it here, similar to what we do
+  // in the lithium backend.
+  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+void CodeGenerator::AssemblePrologue() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+
+  if (frame_access_state()->has_frame()) {
+    if (descriptor->IsCFunctionCall()) {
+      __ Push(r14, fp);
+      __ LoadRR(fp, sp);
+    } else if (descriptor->IsJSFunctionCall()) {
+      __ Prologue(this->info()->GeneratePreagedPrologue(), ip);
+    } else {
+      StackFrame::Type type = info()->GetOutputStackFrameType();
+      // TODO(mbrandy): Detect cases where ip is the entrypoint (for
+      // efficient initialization of the constant pool pointer register).
+      __ StubPrologue(type);
+    }
+  }
+
+  int stack_shrink_slots = frame()->GetSpillSlotCount();
+  if (info()->is_osr()) {
+    // TurboFan OSR-compiled functions cannot be entered directly.
+    __ Abort(kShouldNotDirectlyEnterOsrFunction);
+
+    // Unoptimized code jumps directly to this entrypoint while the unoptimized
+    // frame is still on the stack. Optimized code uses OSR values directly from
+    // the unoptimized frame. Thus, all that needs to be done is to allocate the
+    // remaining stack slots.
+    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+    osr_pc_offset_ = __ pc_offset();
+    stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+  }
+
+  const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+  if (double_saves != 0) {
+    stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
+  }
+  if (stack_shrink_slots > 0) {
+    __ lay(sp, MemOperand(sp, -stack_shrink_slots * kPointerSize));
+  }
+
+  // Save callee-saved Double registers.
+  if (double_saves != 0) {
+    __ MultiPushDoubles(double_saves);
+    DCHECK(kNumCalleeSavedDoubles ==
+           base::bits::CountPopulation32(double_saves));
+    frame()->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
+                                              (kDoubleSize / kPointerSize));
+  }
+
+  // Save callee-saved registers.
+  const RegList saves = descriptor->CalleeSavedRegisters();
+  if (saves != 0) {
+    __ MultiPush(saves);
+    // The register save area does not include the fp or constant pool pointer.
+    const int num_saves =
+        kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
+    DCHECK(num_saves == base::bits::CountPopulation32(saves));
+    frame()->AllocateSavedCalleeRegisterSlots(num_saves);
+  }
+}
+
+void CodeGenerator::AssembleReturn() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  int pop_count = static_cast<int>(descriptor->StackParameterCount());
+
+  // Restore registers.
+  const RegList saves = descriptor->CalleeSavedRegisters();
+  if (saves != 0) {
+    __ MultiPop(saves);
+  }
+
+  // Restore double registers.
+  const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+  if (double_saves != 0) {
+    __ MultiPopDoubles(double_saves);
+  }
+
+  if (descriptor->IsCFunctionCall()) {
+    AssembleDeconstructFrame();
+  } else if (frame_access_state()->has_frame()) {
+    // Canonicalize JSFunction return sites for now.
+    if (return_label_.is_bound()) {
+      __ b(&return_label_);
+      return;
+    } else {
+      __ bind(&return_label_);
+      AssembleDeconstructFrame();
+    }
+  }
+  __ Ret(pop_count);
+}
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  S390OperandConverter g(this, nullptr);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      __ Move(g.ToRegister(destination), src);
+    } else {
+      __ StoreP(src, g.ToMemOperand(destination));
+    }
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    MemOperand src = g.ToMemOperand(source);
+    if (destination->IsRegister()) {
+      __ LoadP(g.ToRegister(destination), src);
+    } else {
+      Register temp = kScratchReg;
+      __ LoadP(temp, src, r0);
+      __ StoreP(temp, g.ToMemOperand(destination));
+    }
+  } else if (source->IsConstant()) {
+    Constant src = g.ToConstant(source);
+    if (destination->IsRegister() || destination->IsStackSlot()) {
+      Register dst =
+          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
+      switch (src.type()) {
+        case Constant::kInt32:
+          __ mov(dst, Operand(src.ToInt32()));
+          break;
+        case Constant::kInt64:
+          __ mov(dst, Operand(src.ToInt64()));
+          break;
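+        // Floating-point constants moved into a general register are
+        // materialized as tagged heap numbers.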
+        case Constant::kFloat32:
+          __ Move(dst,
+                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+          break;
+        case Constant::kFloat64:
+          __ Move(dst,
+                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+          break;
+        case Constant::kExternalReference:
+          __ mov(dst, Operand(src.ToExternalReference()));
+          break;
+        case Constant::kHeapObject: {
+          Handle<HeapObject> src_object = src.ToHeapObject();
+          Heap::RootListIndex index;
+          int slot;
+          if (IsMaterializableFromFrame(src_object, &slot)) {
+            __ LoadP(dst, g.SlotToMemOperand(slot));
+          } else if (IsMaterializableFromRoot(src_object, &index)) {
+            __ LoadRoot(dst, index);
+          } else {
+            __ Move(dst, src_object);
+          }
+          break;
+        }
+        case Constant::kRpoNumber:
+          UNREACHABLE();  // TODO(dcarney): loading RPO constants on S390.
+          break;
+      }
+      if (destination->IsStackSlot()) {
+        __ StoreP(dst, g.ToMemOperand(destination), r0);
+      }
+    } else {
+      DoubleRegister dst = destination->IsDoubleRegister()
+                               ? g.ToDoubleRegister(destination)
+                               : kScratchDoubleReg;
+      if (src.type() == Constant::kFloat32) {
+        __ LoadFloat32Literal(dst, src.ToFloat32(), kScratchReg);
+      } else {
+        __ LoadDoubleLiteral(dst, src.ToFloat64(), kScratchReg);
+      }
+
+      if (destination->IsDoubleStackSlot()) {
+        __ StoreDouble(dst, g.ToMemOperand(destination));
+      }
+    }
+  } else if (source->IsDoubleRegister()) {
+    DoubleRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      DoubleRegister dst = g.ToDoubleRegister(destination);
+      __ Move(dst, src);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      __ StoreDouble(src, g.ToMemOperand(destination));
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+    MemOperand src = g.ToMemOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ LoadDouble(g.ToDoubleRegister(destination), src);
+    } else {
+      DoubleRegister temp = kScratchDoubleReg;
+      __ LoadDouble(temp, src);
+      __ StoreDouble(temp, g.ToMemOperand(destination));
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  S390OperandConverter g(this, nullptr);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    // Register-register.
+    Register temp = kScratchReg;
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      Register dst = g.ToRegister(destination);
+      __ LoadRR(temp, src);
+      __ LoadRR(src, dst);
+      __ LoadRR(dst, temp);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      MemOperand dst = g.ToMemOperand(destination);
+      __ LoadRR(temp, src);
+      __ LoadP(src, dst);
+      __ StoreP(temp, dst);
+    }
+#if V8_TARGET_ARCH_S390X
+  } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+#else
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsStackSlot());
+#endif
+    Register temp_0 = kScratchReg;
+    Register temp_1 = r0;
+    MemOperand src = g.ToMemOperand(source);
+    MemOperand dst = g.ToMemOperand(destination);
+    __ LoadP(temp_0, src);
+    __ LoadP(temp_1, dst);
+    __ StoreP(temp_0, dst);
+    __ StoreP(temp_1, src);
+  } else if (source->IsDoubleRegister()) {
+    DoubleRegister temp = kScratchDoubleReg;
+    DoubleRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      DoubleRegister dst = g.ToDoubleRegister(destination);
+      __ ldr(temp, src);
+      __ ldr(src, dst);
+      __ ldr(dst, temp);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      MemOperand dst = g.ToMemOperand(destination);
+      __ ldr(temp, src);
+      __ LoadDouble(src, dst);
+      __ StoreDouble(temp, dst);
+    }
+#if !V8_TARGET_ARCH_S390X
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleStackSlot());
+    DoubleRegister temp_0 = kScratchDoubleReg;
+    DoubleRegister temp_1 = d0;
+    MemOperand src = g.ToMemOperand(source);
+    MemOperand dst = g.ToMemOperand(destination);
+    // TODO(joransiu): MVC opportunity
+    __ LoadDouble(temp_0, src);
+    __ LoadDouble(temp_1, dst);
+    __ StoreDouble(temp_0, dst);
+    __ StoreDouble(temp_1, src);
+#endif
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+}
+
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+  for (size_t index = 0; index < target_count; ++index) {
+    __ emit_label_addr(targets[index]);
+  }
+}
+
+void CodeGenerator::AddNopForSmiCodeInlining() {
+  // We do not insert nops for inlined Smi code.
+}
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
+    return;
+  }
+
+  int space_needed = Deoptimizer::patch_size();
+  // Ensure that we have enough space after the previous lazy-bailout
+  // instruction for patching the code here.
+  int current_pc = masm()->pc_offset();
+  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+    DCHECK_EQ(0, padding_size % 2);
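+    // s390 instruction lengths are multiples of 2 bytes; each nop emits 2.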
+    while (padding_size > 0) {
+      __ nop();
+      padding_size -= 2;
+    }
+  }
+}
+
+#undef __
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/s390/instruction-codes-s390.h b/src/compiler/s390/instruction-codes-s390.h
new file mode 100644
index 0000000..a32f875
--- /dev/null
+++ b/src/compiler/s390/instruction-codes-s390.h
@@ -0,0 +1,160 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_S390_INSTRUCTION_CODES_S390_H_
+#define V8_COMPILER_S390_INSTRUCTION_CODES_S390_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// S390-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+  V(S390_And)                      \
+  V(S390_AndComplement)            \
+  V(S390_Or)                       \
+  V(S390_OrComplement)             \
+  V(S390_Xor)                      \
+  V(S390_ShiftLeft32)              \
+  V(S390_ShiftLeft64)              \
+  V(S390_ShiftLeftPair)            \
+  V(S390_ShiftRight32)             \
+  V(S390_ShiftRight64)             \
+  V(S390_ShiftRightPair)           \
+  V(S390_ShiftRightArith32)        \
+  V(S390_ShiftRightArith64)        \
+  V(S390_ShiftRightArithPair)      \
+  V(S390_RotRight32)               \
+  V(S390_RotRight64)               \
+  V(S390_Not)                      \
+  V(S390_RotLeftAndMask32)         \
+  V(S390_RotLeftAndClear64)        \
+  V(S390_RotLeftAndClearLeft64)    \
+  V(S390_RotLeftAndClearRight64)   \
+  V(S390_Add)                      \
+  V(S390_AddWithOverflow32)        \
+  V(S390_AddPair)                  \
+  V(S390_AddFloat)                 \
+  V(S390_AddDouble)                \
+  V(S390_Sub)                      \
+  V(S390_SubWithOverflow32)        \
+  V(S390_SubFloat)                 \
+  V(S390_SubDouble)                \
+  V(S390_SubPair)                  \
+  V(S390_MulPair)                  \
+  V(S390_Mul32)                    \
+  V(S390_Mul64)                    \
+  V(S390_MulHigh32)                \
+  V(S390_MulHighU32)               \
+  V(S390_MulFloat)                 \
+  V(S390_MulDouble)                \
+  V(S390_Div32)                    \
+  V(S390_Div64)                    \
+  V(S390_DivU32)                   \
+  V(S390_DivU64)                   \
+  V(S390_DivFloat)                 \
+  V(S390_DivDouble)                \
+  V(S390_Mod32)                    \
+  V(S390_Mod64)                    \
+  V(S390_ModU32)                   \
+  V(S390_ModU64)                   \
+  V(S390_ModDouble)                \
+  V(S390_Neg)                      \
+  V(S390_NegDouble)                \
+  V(S390_SqrtFloat)                \
+  V(S390_FloorFloat)               \
+  V(S390_CeilFloat)                \
+  V(S390_TruncateFloat)            \
+  V(S390_AbsFloat)                 \
+  V(S390_SqrtDouble)               \
+  V(S390_FloorDouble)              \
+  V(S390_CeilDouble)               \
+  V(S390_TruncateDouble)           \
+  V(S390_RoundDouble)              \
+  V(S390_MaxDouble)                \
+  V(S390_MinDouble)                \
+  V(S390_AbsDouble)                \
+  V(S390_Cntlz32)                  \
+  V(S390_Cntlz64)                  \
+  V(S390_Popcnt32)                 \
+  V(S390_Popcnt64)                 \
+  V(S390_Cmp32)                    \
+  V(S390_Cmp64)                    \
+  V(S390_CmpFloat)                 \
+  V(S390_CmpDouble)                \
+  V(S390_Tst32)                    \
+  V(S390_Tst64)                    \
+  V(S390_Push)                     \
+  V(S390_PushFrame)                \
+  V(S390_StoreToStackSlot)         \
+  V(S390_ExtendSignWord8)          \
+  V(S390_ExtendSignWord16)         \
+  V(S390_ExtendSignWord32)         \
+  V(S390_Uint32ToUint64)           \
+  V(S390_Int64ToInt32)             \
+  V(S390_Int64ToFloat32)           \
+  V(S390_Int64ToDouble)            \
+  V(S390_Uint64ToFloat32)          \
+  V(S390_Uint64ToDouble)           \
+  V(S390_Int32ToFloat32)           \
+  V(S390_Int32ToDouble)            \
+  V(S390_Uint32ToFloat32)          \
+  V(S390_Uint32ToDouble)           \
+  V(S390_Float32ToInt64)           \
+  V(S390_Float32ToUint64)          \
+  V(S390_Float32ToInt32)           \
+  V(S390_Float32ToUint32)          \
+  V(S390_Float32ToDouble)          \
+  V(S390_DoubleToInt32)            \
+  V(S390_DoubleToUint32)           \
+  V(S390_DoubleToInt64)            \
+  V(S390_DoubleToUint64)           \
+  V(S390_DoubleToFloat32)          \
+  V(S390_DoubleExtractLowWord32)   \
+  V(S390_DoubleExtractHighWord32)  \
+  V(S390_DoubleInsertLowWord32)    \
+  V(S390_DoubleInsertHighWord32)   \
+  V(S390_DoubleConstruct)          \
+  V(S390_BitcastInt32ToFloat32)    \
+  V(S390_BitcastFloat32ToInt32)    \
+  V(S390_BitcastInt64ToDouble)     \
+  V(S390_BitcastDoubleToInt64)     \
+  V(S390_LoadWordS8)               \
+  V(S390_LoadWordU8)               \
+  V(S390_LoadWordS16)              \
+  V(S390_LoadWordU16)              \
+  V(S390_LoadWordS32)              \
+  V(S390_LoadWord64)               \
+  V(S390_LoadFloat32)              \
+  V(S390_LoadDouble)               \
+  V(S390_StoreWord8)               \
+  V(S390_StoreWord16)              \
+  V(S390_StoreWord32)              \
+  V(S390_StoreWord64)              \
+  V(S390_StoreFloat32)             \
+  V(S390_StoreDouble)
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+  V(MRI) /* [%r0 + K] */               \
+  V(MRR) /* [%r0 + %r1] */
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_S390_INSTRUCTION_CODES_S390_H_
diff --git a/src/compiler/s390/instruction-scheduler-s390.cc b/src/compiler/s390/instruction-scheduler-s390.cc
new file mode 100644
index 0000000..2d98e11
--- /dev/null
+++ b/src/compiler/s390/instruction-scheduler-s390.cc
@@ -0,0 +1,163 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return true; }
+
+int InstructionScheduler::GetTargetInstructionFlags(
+    const Instruction* instr) const {
+  switch (instr->arch_opcode()) {
+    case kS390_And:
+    case kS390_AndComplement:
+    case kS390_Or:
+    case kS390_OrComplement:
+    case kS390_Xor:
+    case kS390_ShiftLeft32:
+    case kS390_ShiftLeft64:
+    case kS390_ShiftLeftPair:
+    case kS390_ShiftRight32:
+    case kS390_ShiftRight64:
+    case kS390_ShiftRightPair:
+    case kS390_ShiftRightArith32:
+    case kS390_ShiftRightArith64:
+    case kS390_ShiftRightArithPair:
+    case kS390_RotRight32:
+    case kS390_RotRight64:
+    case kS390_Not:
+    case kS390_RotLeftAndMask32:
+    case kS390_RotLeftAndClear64:
+    case kS390_RotLeftAndClearLeft64:
+    case kS390_RotLeftAndClearRight64:
+    case kS390_Add:
+    case kS390_AddWithOverflow32:
+    case kS390_AddPair:
+    case kS390_AddFloat:
+    case kS390_AddDouble:
+    case kS390_Sub:
+    case kS390_SubWithOverflow32:
+    case kS390_SubPair:
+    case kS390_MulPair:
+    case kS390_SubFloat:
+    case kS390_SubDouble:
+    case kS390_Mul32:
+    case kS390_Mul64:
+    case kS390_MulHigh32:
+    case kS390_MulHighU32:
+    case kS390_MulFloat:
+    case kS390_MulDouble:
+    case kS390_Div32:
+    case kS390_Div64:
+    case kS390_DivU32:
+    case kS390_DivU64:
+    case kS390_DivFloat:
+    case kS390_DivDouble:
+    case kS390_Mod32:
+    case kS390_Mod64:
+    case kS390_ModU32:
+    case kS390_ModU64:
+    case kS390_ModDouble:
+    case kS390_Neg:
+    case kS390_NegDouble:
+    case kS390_SqrtFloat:
+    case kS390_FloorFloat:
+    case kS390_CeilFloat:
+    case kS390_TruncateFloat:
+    case kS390_AbsFloat:
+    case kS390_SqrtDouble:
+    case kS390_FloorDouble:
+    case kS390_CeilDouble:
+    case kS390_TruncateDouble:
+    case kS390_RoundDouble:
+    case kS390_MaxDouble:
+    case kS390_MinDouble:
+    case kS390_AbsDouble:
+    case kS390_Cntlz32:
+    case kS390_Cntlz64:
+    case kS390_Popcnt32:
+    case kS390_Popcnt64:
+    case kS390_Cmp32:
+    case kS390_Cmp64:
+    case kS390_CmpFloat:
+    case kS390_CmpDouble:
+    case kS390_Tst32:
+    case kS390_Tst64:
+    case kS390_ExtendSignWord8:
+    case kS390_ExtendSignWord16:
+    case kS390_ExtendSignWord32:
+    case kS390_Uint32ToUint64:
+    case kS390_Int64ToInt32:
+    case kS390_Int64ToFloat32:
+    case kS390_Int64ToDouble:
+    case kS390_Uint64ToFloat32:
+    case kS390_Uint64ToDouble:
+    case kS390_Int32ToFloat32:
+    case kS390_Int32ToDouble:
+    case kS390_Uint32ToFloat32:
+    case kS390_Uint32ToDouble:
+    case kS390_Float32ToInt32:
+    case kS390_Float32ToUint32:
+    case kS390_Float32ToUint64:
+    case kS390_Float32ToDouble:
+    case kS390_DoubleToInt32:
+    case kS390_DoubleToUint32:
+    case kS390_Float32ToInt64:
+    case kS390_DoubleToInt64:
+    case kS390_DoubleToUint64:
+    case kS390_DoubleToFloat32:
+    case kS390_DoubleExtractLowWord32:
+    case kS390_DoubleExtractHighWord32:
+    case kS390_DoubleInsertLowWord32:
+    case kS390_DoubleInsertHighWord32:
+    case kS390_DoubleConstruct:
+    case kS390_BitcastInt32ToFloat32:
+    case kS390_BitcastFloat32ToInt32:
+    case kS390_BitcastInt64ToDouble:
+    case kS390_BitcastDoubleToInt64:
+      return kNoOpcodeFlags;
+
+    case kS390_LoadWordS8:
+    case kS390_LoadWordU8:
+    case kS390_LoadWordS16:
+    case kS390_LoadWordU16:
+    case kS390_LoadWordS32:
+    case kS390_LoadWord64:
+    case kS390_LoadFloat32:
+    case kS390_LoadDouble:
+      return kIsLoadOperation;
+
+    case kS390_StoreWord8:
+    case kS390_StoreWord16:
+    case kS390_StoreWord32:
+    case kS390_StoreWord64:
+    case kS390_StoreFloat32:
+    case kS390_StoreDouble:
+    case kS390_Push:
+    case kS390_PushFrame:
+    case kS390_StoreToStackSlot:
+      return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+      COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+      // Already covered in architecture independent code.
+      UNREACHABLE();
+  }
+
+  UNREACHABLE();
+  return kNoOpcodeFlags;
+}
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+  // TODO(all): Add instruction cost modeling.
+  return 1;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/s390/instruction-selector-s390.cc b/src/compiler/s390/instruction-selector-s390.cc
new file mode 100644
index 0000000..8a4af5e
--- /dev/null
+++ b/src/compiler/s390/instruction-selector-s390.cc
@@ -0,0 +1,1769 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/adapters.h"
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/s390/frames-s390.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+enum ImmediateMode {
+  kInt16Imm,
+  kInt16Imm_Unsigned,
+  kInt16Imm_Negate,
+  kInt16Imm_4ByteAligned,
+  kShift32Imm,
+  kShift64Imm,
+  kNoImmediate
+};
+
+// Adds S390-specific methods for generating operands.
+class S390OperandGenerator final : public OperandGenerator {
+ public:
+  explicit S390OperandGenerator(InstructionSelector* selector)
+      : OperandGenerator(selector) {}
+
+  InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
+    if (CanBeImmediate(node, mode)) {
+      return UseImmediate(node);
+    }
+    return UseRegister(node);
+  }
+
+  bool CanBeImmediate(Node* node, ImmediateMode mode) {
+    int64_t value;
+    if (node->opcode() == IrOpcode::kInt32Constant)
+      value = OpParameter<int32_t>(node);
+    else if (node->opcode() == IrOpcode::kInt64Constant)
+      value = OpParameter<int64_t>(node);
+    else
+      return false;
+    return CanBeImmediate(value, mode);
+  }
+
+  bool CanBeImmediate(int64_t value, ImmediateMode mode) {
+    switch (mode) {
+      case kInt16Imm:
+        return is_int16(value);
+      case kInt16Imm_Unsigned:
+        return is_uint16(value);
+      case kInt16Imm_Negate:
+        return is_int16(-value);
+      case kInt16Imm_4ByteAligned:
+        return is_int16(value) && !(value & 3);
+      case kShift32Imm:
+        return 0 <= value && value < 32;
+      case kShift64Imm:
+        return 0 <= value && value < 64;
+      case kNoImmediate:
+        return false;
+    }
+    return false;
+  }
+};
+
+namespace {
+
+void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+  S390OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)));
+}
+
+void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+  S390OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseRegister(node->InputAt(1)));
+}
+
+void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
+              ImmediateMode operand_mode) {
+  S390OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseOperand(node->InputAt(1), operand_mode));
+}
+
+#if V8_TARGET_ARCH_S390X
+void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
+                            Node* node) {
+  S390OperandGenerator g(selector);
+  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+  InstructionOperand outputs[2];
+  size_t output_count = 0;
+  outputs[output_count++] = g.DefineAsRegister(node);
+
+  Node* success_output = NodeProperties::FindProjection(node, 1);
+  if (success_output) {
+    outputs[output_count++] = g.DefineAsRegister(success_output);
+  }
+
+  selector->Emit(opcode, output_count, outputs, 1, inputs);
+}
+#endif
+
+// Shared routine for multiple binary operations.
+template <typename Matcher>
+void VisitBinop(InstructionSelector* selector, Node* node,
+                InstructionCode opcode, ImmediateMode operand_mode,
+                FlagsContinuation* cont) {
+  S390OperandGenerator g(selector);
+  Matcher m(node);
+  InstructionOperand inputs[4];
+  size_t input_count = 0;
+  InstructionOperand outputs[2];
+  size_t output_count = 0;
+
+  inputs[input_count++] = g.UseRegister(m.left().node());
+  inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  }
+
+  outputs[output_count++] = g.DefineAsRegister(node);
+  if (cont->IsSet()) {
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0u, input_count);
+  DCHECK_NE(0u, output_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+
+  opcode = cont->Encode(opcode);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
+}
+
+// Shared routine for multiple binary operations.
+template <typename Matcher>
+void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
+                ImmediateMode operand_mode) {
+  FlagsContinuation cont;
+  VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
+}
+
+}  // namespace
+
+void InstructionSelector::VisitLoad(Node* node) {
+  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  S390OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* offset = node->InputAt(1);
+  ArchOpcode opcode = kArchNop;
+  ImmediateMode mode = kInt16Imm;
+  switch (load_rep.representation()) {
+    case MachineRepresentation::kFloat32:
+      opcode = kS390_LoadFloat32;
+      break;
+    case MachineRepresentation::kFloat64:
+      opcode = kS390_LoadDouble;
+      break;
+    case MachineRepresentation::kBit:  // Fall through.
+    case MachineRepresentation::kWord8:
+      opcode = load_rep.IsSigned() ? kS390_LoadWordS8 : kS390_LoadWordU8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = load_rep.IsSigned() ? kS390_LoadWordS16 : kS390_LoadWordU16;
+      break;
+#if !V8_TARGET_ARCH_S390X
+    case MachineRepresentation::kTagged:  // Fall through.
+#endif
+    case MachineRepresentation::kWord32:
+      opcode = kS390_LoadWordS32;
+#if V8_TARGET_ARCH_S390X
+      // TODO(john.yan): Remove this mode since s390 does not have this
+      // restriction.
+      mode = kInt16Imm_4ByteAligned;
+#endif
+      break;
+#if V8_TARGET_ARCH_S390X
+    case MachineRepresentation::kTagged:  // Fall through.
+    case MachineRepresentation::kWord64:
+      opcode = kS390_LoadWord64;
+      mode = kInt16Imm_4ByteAligned;
+      break;
+#else
+    case MachineRepresentation::kWord64:    // Fall through.
+#endif
+    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kNone:
+      UNREACHABLE();
+      return;
+  }
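+  // Fold whichever of base/offset fits as an immediate into MRI mode;
+  // otherwise fall back to the register + register (MRR) form.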
+  if (g.CanBeImmediate(offset, mode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(offset));
+  } else if (g.CanBeImmediate(base, mode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(offset), g.UseImmediate(base));
+  } else {
+    Emit(opcode | AddressingModeField::encode(kMode_MRR),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset));
+  }
+}
+
+void InstructionSelector::VisitStore(Node* node) {
+  S390OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* offset = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+  MachineRepresentation rep = store_rep.representation();
+
+  if (write_barrier_kind != kNoWriteBarrier) {
+    DCHECK_EQ(MachineRepresentation::kTagged, rep);
+    AddressingMode addressing_mode;
+    InstructionOperand inputs[3];
+    size_t input_count = 0;
+    inputs[input_count++] = g.UseUniqueRegister(base);
+    // OutOfLineRecordWrite uses the offset in an 'AddP' instruction as well as
+    // for the store itself, so we must check compatibility with both.
+    if (g.CanBeImmediate(offset, kInt16Imm)
+#if V8_TARGET_ARCH_S390X
+        && g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)
+#endif
+            ) {
+      inputs[input_count++] = g.UseImmediate(offset);
+      addressing_mode = kMode_MRI;
+    } else {
+      inputs[input_count++] = g.UseUniqueRegister(offset);
+      addressing_mode = kMode_MRR;
+    }
+    inputs[input_count++] = g.UseUniqueRegister(value);
+    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+    switch (write_barrier_kind) {
+      case kNoWriteBarrier:
+        UNREACHABLE();
+        break;
+      case kMapWriteBarrier:
+        record_write_mode = RecordWriteMode::kValueIsMap;
+        break;
+      case kPointerWriteBarrier:
+        record_write_mode = RecordWriteMode::kValueIsPointer;
+        break;
+      case kFullWriteBarrier:
+        record_write_mode = RecordWriteMode::kValueIsAny;
+        break;
+    }
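+    // The record write code needs two scratch registers.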
+    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+    size_t const temp_count = arraysize(temps);
+    InstructionCode code = kArchStoreWithWriteBarrier;
+    code |= AddressingModeField::encode(addressing_mode);
+    code |= MiscField::encode(static_cast<int>(record_write_mode));
+    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+  } else {
+    ArchOpcode opcode = kArchNop;
+    ImmediateMode mode = kInt16Imm;
+    switch (rep) {
+      case MachineRepresentation::kFloat32:
+        opcode = kS390_StoreFloat32;
+        break;
+      case MachineRepresentation::kFloat64:
+        opcode = kS390_StoreDouble;
+        break;
+      case MachineRepresentation::kBit:  // Fall through.
+      case MachineRepresentation::kWord8:
+        opcode = kS390_StoreWord8;
+        break;
+      case MachineRepresentation::kWord16:
+        opcode = kS390_StoreWord16;
+        break;
+#if !V8_TARGET_ARCH_S390X
+      case MachineRepresentation::kTagged:  // Fall through.
+#endif
+      case MachineRepresentation::kWord32:
+        opcode = kS390_StoreWord32;
+        break;
+#if V8_TARGET_ARCH_S390X
+      case MachineRepresentation::kTagged:  // Fall through.
+      case MachineRepresentation::kWord64:
+        opcode = kS390_StoreWord64;
+        mode = kInt16Imm_4ByteAligned;
+        break;
+#else
+      case MachineRepresentation::kWord64:  // Fall through.
+#endif
+      case MachineRepresentation::kSimd128:  // Fall through.
+      case MachineRepresentation::kNone:
+        UNREACHABLE();
+        return;
+    }
+    if (g.CanBeImmediate(offset, mode)) {
+      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+           g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value));
+    } else if (g.CanBeImmediate(base, mode)) {
+      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+           g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value));
+    } else {
+      Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
+           g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value));
+    }
+  }
+}
+
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+  CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
+  S390OperandGenerator g(this);
+  Node* const base = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  ArchOpcode opcode = kArchNop;
+  switch (load_rep.representation()) {
+    case MachineRepresentation::kWord8:
+      opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kCheckedLoadWord32;
+      break;
+#if V8_TARGET_ARCH_S390X
+    case MachineRepresentation::kWord64:
+      opcode = kCheckedLoadWord64;
+      break;
+#endif
+    case MachineRepresentation::kFloat32:
+      opcode = kCheckedLoadFloat32;
+      break;
+    case MachineRepresentation::kFloat64:
+      opcode = kCheckedLoadFloat64;
+      break;
+    case MachineRepresentation::kBit:     // Fall through.
+    case MachineRepresentation::kTagged:  // Fall through.
+#if !V8_TARGET_ARCH_S390X
+    case MachineRepresentation::kWord64:  // Fall through.
+#endif
+    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kNone:
+      UNREACHABLE();
+      return;
+  }
+  AddressingMode addressingMode = kMode_MRR;
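+  // The bounds-check length is encoded as an unsigned immediate when it fits,
+  // and kept in a register otherwise.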
+  Emit(opcode | AddressingModeField::encode(addressingMode),
+       g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
+       g.UseOperand(length, kInt16Imm_Unsigned));
+}
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+  MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
+  S390OperandGenerator g(this);
+  Node* const base = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  Node* const value = node->InputAt(3);
+  ArchOpcode opcode = kArchNop;
+  switch (rep) {
+    case MachineRepresentation::kWord8:
+      opcode = kCheckedStoreWord8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = kCheckedStoreWord16;
+      break;
+    case MachineRepresentation::kWord32:
+      opcode = kCheckedStoreWord32;
+      break;
+#if V8_TARGET_ARCH_S390X
+    case MachineRepresentation::kWord64:
+      opcode = kCheckedStoreWord64;
+      break;
+#endif
+    case MachineRepresentation::kFloat32:
+      opcode = kCheckedStoreFloat32;
+      break;
+    case MachineRepresentation::kFloat64:
+      opcode = kCheckedStoreFloat64;
+      break;
+    case MachineRepresentation::kBit:     // Fall through.
+    case MachineRepresentation::kTagged:  // Fall through.
+#if !V8_TARGET_ARCH_S390X
+    case MachineRepresentation::kWord64:  // Fall through.
+#endif
+    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kNone:
+      UNREACHABLE();
+      return;
+  }
+  AddressingMode addressingMode = kMode_MRR;
+  Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(),
+       g.UseRegister(base), g.UseRegister(offset),
+       g.UseOperand(length, kInt16Imm_Unsigned), g.UseRegister(value));
+}
+
+template <typename Matcher>
+static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
+                         ArchOpcode opcode, bool left_can_cover,
+                         bool right_can_cover, ImmediateMode imm_mode) {
+  S390OperandGenerator g(selector);
+
+  // Map instruction to equivalent operation with inverted right input.
+  ArchOpcode inv_opcode = opcode;
+  switch (opcode) {
+    case kS390_And:
+      inv_opcode = kS390_AndComplement;
+      break;
+    case kS390_Or:
+      inv_opcode = kS390_OrComplement;
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
+  if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
+    Matcher mleft(m->left().node());
+    if (mleft.right().Is(-1)) {
+      selector->Emit(inv_opcode, g.DefineAsRegister(node),
+                     g.UseRegister(m->right().node()),
+                     g.UseRegister(mleft.left().node()));
+      return;
+    }
+  }
+
+  // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
+  if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
+      right_can_cover) {
+    Matcher mright(m->right().node());
+    if (mright.right().Is(-1)) {
+      // TODO(all): support shifted operand on right.
+      selector->Emit(inv_opcode, g.DefineAsRegister(node),
+                     g.UseRegister(m->left().node()),
+                     g.UseRegister(mright.left().node()));
+      return;
+    }
+  }
+
+  VisitBinop<Matcher>(selector, node, opcode, imm_mode);
+}
+
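+// Returns true if |value| consists of a single contiguous run of set bits.
+// On success, *mb and *me receive the bit indices (0 = least significant bit)
+// of the highest and lowest set bits, matching the mask-begin/mask-end
+// operands of the rotate-and-mask instructions emitted below.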
+static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
+  int mask_width = base::bits::CountPopulation32(value);
+  int mask_msb = base::bits::CountLeadingZeros32(value);
+  int mask_lsb = base::bits::CountTrailingZeros32(value);
+  if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 32))
+    return false;
+  *mb = mask_lsb + mask_width - 1;
+  *me = mask_lsb;
+  return true;
+}
+
+#if V8_TARGET_ARCH_S390X
+static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
+  int mask_width = base::bits::CountPopulation64(value);
+  int mask_msb = base::bits::CountLeadingZeros64(value);
+  int mask_lsb = base::bits::CountTrailingZeros64(value);
+  if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 64))
+    return false;
+  *mb = mask_lsb + mask_width - 1;
+  *me = mask_lsb;
+  return true;
+}
+#endif
+
+void InstructionSelector::VisitWord32And(Node* node) {
+  S390OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  int mb = 0;
+  int me = 0;
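+  // Try to fold a covered left/right shift feeding this And with a contiguous
+  // mask into a single rotate-left-and-mask instruction, adjusting the mask
+  // so that none of the bits rotated around the word are selected.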
+  if (m.right().HasValue() && IsContiguousMask32(m.right().Value(), &mb, &me)) {
+    int sh = 0;
+    Node* left = m.left().node();
+    if ((m.left().IsWord32Shr() || m.left().IsWord32Shl()) &&
+        CanCover(node, left)) {
+      Int32BinopMatcher mleft(m.left().node());
+      if (mleft.right().IsInRange(0, 31)) {
+        left = mleft.left().node();
+        sh = mleft.right().Value();
+        if (m.left().IsWord32Shr()) {
+          // Adjust the mask such that it doesn't include any rotated bits.
+          if (mb > 31 - sh) mb = 31 - sh;
+          sh = (32 - sh) & 0x1f;
+        } else {
+          // Adjust the mask such that it doesn't include any rotated bits.
+          if (me < sh) me = sh;
+        }
+      }
+    }
+    if (mb >= me) {
+      Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
+           g.UseRegister(left), g.TempImmediate(sh), g.TempImmediate(mb),
+           g.TempImmediate(me));
+      return;
+    }
+  }
+  VisitLogical<Int32BinopMatcher>(
+      this, node, &m, kS390_And, CanCover(node, m.left().node()),
+      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64And(Node* node) {
+  S390OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  int mb = 0;
+  int me = 0;
+  if (m.right().HasValue() && IsContiguousMask64(m.right().Value(), &mb, &me)) {
+    int sh = 0;
+    Node* left = m.left().node();
+    if ((m.left().IsWord64Shr() || m.left().IsWord64Shl()) &&
+        CanCover(node, left)) {
+      Int64BinopMatcher mleft(m.left().node());
+      if (mleft.right().IsInRange(0, 63)) {
+        left = mleft.left().node();
+        sh = mleft.right().Value();
+        if (m.left().IsWord64Shr()) {
+          // Adjust the mask such that it doesn't include any rotated bits.
+          if (mb > 63 - sh) mb = 63 - sh;
+          sh = (64 - sh) & 0x3f;
+        } else {
+          // Adjust the mask such that it doesn't include any rotated bits.
+          if (me < sh) me = sh;
+        }
+      }
+    }
+    if (mb >= me) {
+      bool match = false;
+      ArchOpcode opcode;
+      int mask;
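+      // Pick the rotate-and-clear variant whose implicit mask matches: clear
+      // left when the mask reaches down to bit 0, clear right when it reaches
+      // up to bit 63, or clear both sides for an interior mask produced by a
+      // left shift.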
+      if (me == 0) {
+        match = true;
+        opcode = kS390_RotLeftAndClearLeft64;
+        mask = mb;
+      } else if (mb == 63) {
+        match = true;
+        opcode = kS390_RotLeftAndClearRight64;
+        mask = me;
+      } else if (sh && me <= sh && m.left().IsWord64Shl()) {
+        match = true;
+        opcode = kS390_RotLeftAndClear64;
+        mask = mb;
+      }
+      if (match) {
+        Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
+             g.TempImmediate(sh), g.TempImmediate(mask));
+        return;
+      }
+    }
+  }
+  VisitLogical<Int64BinopMatcher>(
+      this, node, &m, kS390_And, CanCover(node, m.left().node()),
+      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+#endif
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+  Int32BinopMatcher m(node);
+  VisitLogical<Int32BinopMatcher>(
+      this, node, &m, kS390_Or, CanCover(node, m.left().node()),
+      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Or(Node* node) {
+  Int64BinopMatcher m(node);
+  VisitLogical<Int64BinopMatcher>(
+      this, node, &m, kS390_Or, CanCover(node, m.left().node()),
+      CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+}
+#endif
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+  S390OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.right().Is(-1)) {
+    Emit(kS390_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+  } else {
+    VisitBinop<Int32BinopMatcher>(this, node, kS390_Xor, kInt16Imm_Unsigned);
+  }
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Xor(Node* node) {
+  S390OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if (m.right().Is(-1)) {
+    Emit(kS390_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+  } else {
+    VisitBinop<Int64BinopMatcher>(this, node, kS390_Xor, kInt16Imm_Unsigned);
+  }
+}
+#endif
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+  S390OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
+    Int32BinopMatcher mleft(m.left().node());
+    int sh = m.right().Value();
+    int mb;
+    int me;
+    if (mleft.right().HasValue() &&
+        IsContiguousMask32(mleft.right().Value() << sh, &mb, &me)) {
+      // Adjust the mask such that it doesn't include any rotated bits.
+      if (me < sh) me = sh;
+      if (mb >= me) {
+        Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+             g.TempImmediate(mb), g.TempImmediate(me));
+        return;
+      }
+    }
+  }
+  VisitRRO(this, kS390_ShiftLeft32, node, kShift32Imm);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Shl(Node* node) {
+  S390OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  // TODO(mbrandy): eliminate left sign extension if right >= 32
+  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
+    Int64BinopMatcher mleft(m.left().node());
+    int sh = m.right().Value();
+    int mb;
+    int me;
+    if (mleft.right().HasValue() &&
+        IsContiguousMask64(mleft.right().Value() << sh, &mb, &me)) {
+      // Adjust the mask such that it doesn't include any rotated bits.
+      if (me < sh) me = sh;
+      if (mb >= me) {
+        bool match = false;
+        ArchOpcode opcode;
+        int mask;
+        if (me == 0) {
+          match = true;
+          opcode = kS390_RotLeftAndClearLeft64;
+          mask = mb;
+        } else if (mb == 63) {
+          match = true;
+          opcode = kS390_RotLeftAndClearRight64;
+          mask = me;
+        } else if (sh && me <= sh) {
+          match = true;
+          opcode = kS390_RotLeftAndClear64;
+          mask = mb;
+        }
+        if (match) {
+          Emit(opcode, g.DefineAsRegister(node),
+               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+               g.TempImmediate(mask));
+          return;
+        }
+      }
+    }
+  }
+  VisitRRO(this, kS390_ShiftLeft64, node, kShift64Imm);
+}
+#endif
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+  S390OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
+    Int32BinopMatcher mleft(m.left().node());
+    int sh = m.right().Value();
+    int mb;
+    int me;
+    if (mleft.right().HasValue() &&
+        IsContiguousMask32((uint32_t)(mleft.right().Value()) >> sh, &mb, &me)) {
+      // Adjust the mask such that it doesn't include any rotated bits.
+      if (mb > 31 - sh) mb = 31 - sh;
+      sh = (32 - sh) & 0x1f;
+      if (mb >= me) {
+        Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+             g.TempImmediate(mb), g.TempImmediate(me));
+        return;
+      }
+    }
+  }
+  VisitRRO(this, kS390_ShiftRight32, node, kShift32Imm);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Shr(Node* node) {
+  S390OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
+    Int64BinopMatcher mleft(m.left().node());
+    int sh = m.right().Value();
+    int mb;
+    int me;
+    if (mleft.right().HasValue() &&
+        IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
+      // Adjust the mask such that it doesn't include any rotated bits.
+      if (mb > 63 - sh) mb = 63 - sh;
+      sh = (64 - sh) & 0x3f;
+      if (mb >= me) {
+        bool match = false;
+        ArchOpcode opcode;
+        int mask;
+        if (me == 0) {
+          match = true;
+          opcode = kS390_RotLeftAndClearLeft64;
+          mask = mb;
+        } else if (mb == 63) {
+          match = true;
+          opcode = kS390_RotLeftAndClearRight64;
+          mask = me;
+        }
+        if (match) {
+          Emit(opcode, g.DefineAsRegister(node),
+               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
+               g.TempImmediate(mask));
+          return;
+        }
+      }
+    }
+  }
+  VisitRRO(this, kS390_ShiftRight64, node, kShift64Imm);
+}
+#endif
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+  S390OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  // Replace with sign extension for (x << K) >> K where K is 16 or 24.
+  if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().Is(16) && m.right().Is(16)) {
+      Emit(kS390_ExtendSignWord16, g.DefineAsRegister(node),
+           g.UseRegister(mleft.left().node()));
+      return;
+    } else if (mleft.right().Is(24) && m.right().Is(24)) {
+      Emit(kS390_ExtendSignWord8, g.DefineAsRegister(node),
+           g.UseRegister(mleft.left().node()));
+      return;
+    }
+  }
+  VisitRRO(this, kS390_ShiftRightArith32, node, kShift32Imm);
+}
+
+#if !V8_TARGET_ARCH_S390X
+void VisitPairBinop(InstructionSelector* selector, ArchOpcode opcode,
+                    Node* node) {
+  S390OperandGenerator g(selector);
+
+  // We use UseUniqueRegister here to avoid register sharing with the output
+  // registers.
+  InstructionOperand inputs[] = {
+      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsRegister(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  selector->Emit(opcode, 2, outputs, 4, inputs);
+}
+
+void InstructionSelector::VisitInt32PairAdd(Node* node) {
+  VisitPairBinop(this, kS390_AddPair, node);
+}
+
+void InstructionSelector::VisitInt32PairSub(Node* node) {
+  VisitPairBinop(this, kS390_SubPair, node);
+}
+
+void InstructionSelector::VisitInt32PairMul(Node* node) {
+  S390OperandGenerator g(this);
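+  // As in VisitPairBinop above, use unique registers for the inputs to avoid
+  // sharing with the two output registers.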
+  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+                                 g.UseUniqueRegister(node->InputAt(1)),
+                                 g.UseUniqueRegister(node->InputAt(2)),
+                                 g.UseUniqueRegister(node->InputAt(3))};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsRegister(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  Emit(kS390_MulPair, 2, outputs, 4, inputs);
+}
+
+void VisitPairShift(InstructionSelector* selector, ArchOpcode opcode,
+                    Node* node) {
+  S390OperandGenerator g(selector);
+  Int32Matcher m(node->InputAt(2));
+  InstructionOperand shift_operand;
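+  // A constant shift amount is encoded as an immediate; otherwise it lives in
+  // a register that must not alias the output pair.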
+  if (m.HasValue()) {
+    shift_operand = g.UseImmediate(m.node());
+  } else {
+    shift_operand = g.UseUniqueRegister(m.node());
+  }
+
+  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
+                                 g.UseRegister(node->InputAt(1)),
+                                 shift_operand};
+
+  InstructionOperand outputs[] = {
+      g.DefineSameAsFirst(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  selector->Emit(opcode, 2, outputs, 3, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShl(Node* node) {
+  VisitPairShift(this, kS390_ShiftLeftPair, node);
+}
+
+void InstructionSelector::VisitWord32PairShr(Node* node) {
+  VisitPairShift(this, kS390_ShiftRightPair, node);
+}
+
+void InstructionSelector::VisitWord32PairSar(Node* node) {
+  VisitPairShift(this, kS390_ShiftRightArithPair, node);
+}
+#endif
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Sar(Node* node) {
+  VisitRRO(this, kS390_ShiftRightArith64, node, kShift64Imm);
+}
+#endif
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+  VisitRRO(this, kS390_RotRight32, node, kShift32Imm);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Ror(Node* node) {
+  VisitRRO(this, kS390_RotRight64, node, kShift64Imm);
+}
+#endif
+
+void InstructionSelector::VisitWord32Clz(Node* node) {
+  S390OperandGenerator g(this);
+  Emit(kS390_Cntlz32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Clz(Node* node) {
+  S390OperandGenerator g(this);
+  Emit(kS390_Cntlz64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+#endif
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+  S390OperandGenerator g(this);
+  Emit(kS390_Popcnt32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Popcnt(Node* node) {
+  S390OperandGenerator g(this);
+  Emit(kS390_Popcnt64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+#endif
+
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
+#endif
+
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
+#endif
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+  VisitBinop<Int32BinopMatcher>(this, node, kS390_Add, kInt16Imm);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitInt64Add(Node* node) {
+  VisitBinop<Int64BinopMatcher>(this, node, kS390_Add, kInt16Imm);
+}
+#endif
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+  S390OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().Is(0)) {
+    Emit(kS390_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
+  } else {
+    VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub, kInt16Imm_Negate);
+  }
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitInt64Sub(Node* node) {
+  S390OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if (m.left().Is(0)) {
+    Emit(kS390_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
+  } else {
+    VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub, kInt16Imm_Negate);
+  }
+}
+#endif
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+  VisitRRR(this, kS390_Mul32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitInt64Mul(Node* node) {
+  VisitRRR(this, kS390_Mul64, node);
+}
+#endif
+
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+  S390OperandGenerator g(this);
+  Emit(kS390_MulHigh32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+  S390OperandGenerator g(this);
+  Emit(kS390_MulHighU32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+  VisitRRR(this, kS390_Div32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitInt64Div(Node* node) {
+  VisitRRR(this, kS390_Div64, node);
+}
+#endif
+
+void InstructionSelector::VisitUint32Div(Node* node) {
+  VisitRRR(this, kS390_DivU32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitUint64Div(Node* node) {
+  VisitRRR(this, kS390_DivU64, node);
+}
+#endif
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+  VisitRRR(this, kS390_Mod32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitInt64Mod(Node* node) {
+  VisitRRR(this, kS390_Mod64, node);
+}
+#endif
+
+void InstructionSelector::VisitUint32Mod(Node* node) {
+  VisitRRR(this, kS390_ModU32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitUint64Mod(Node* node) {
+  VisitRRR(this, kS390_ModU64, node);
+}
+#endif
+
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+  VisitRR(this, kS390_Float32ToDouble, node);
+}
+
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+  VisitRR(this, kS390_Int32ToFloat32, node);
+}
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+  VisitRR(this, kS390_Uint32ToFloat32, node);
+}
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+  VisitRR(this, kS390_Int32ToDouble, node);
+}
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+  VisitRR(this, kS390_Uint32ToDouble, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+  VisitRR(this, kS390_DoubleToInt32, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+  VisitRR(this, kS390_DoubleToUint32, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+  VisitRR(this, kS390_DoubleToUint32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+  VisitTryTruncateDouble(this, kS390_Float32ToInt64, node);
+}
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+  VisitTryTruncateDouble(this, kS390_DoubleToInt64, node);
+}
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+  VisitTryTruncateDouble(this, kS390_Float32ToUint64, node);
+}
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+  VisitTryTruncateDouble(this, kS390_DoubleToUint64, node);
+}
+
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+  // TODO(mbrandy): inspect input to see if nop is appropriate.
+  VisitRR(this, kS390_ExtendSignWord32, node);
+}
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+  // TODO(mbrandy): inspect input to see if nop is appropriate.
+  VisitRR(this, kS390_Uint32ToUint64, node);
+}
+#endif
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+  VisitRR(this, kS390_DoubleToFloat32, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+  switch (TruncationModeOf(node->op())) {
+    case TruncationMode::kJavaScript:
+      return VisitRR(this, kArchTruncateDoubleToI, node);
+    case TruncationMode::kRoundToZero:
+      return VisitRR(this, kS390_DoubleToInt32, node);
+  }
+  UNREACHABLE();
+}
+
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+  VisitRR(this, kS390_Float32ToInt32, node);
+}
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+  VisitRR(this, kS390_Float32ToUint32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+  // TODO(mbrandy): inspect input to see if nop is appropriate.
+  VisitRR(this, kS390_Int64ToInt32, node);
+}
+
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+  VisitRR(this, kS390_Int64ToFloat32, node);
+}
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+  VisitRR(this, kS390_Int64ToDouble, node);
+}
+
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+  VisitRR(this, kS390_Uint64ToFloat32, node);
+}
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+  VisitRR(this, kS390_Uint64ToDouble, node);
+}
+#endif
+
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+  VisitRR(this, kS390_BitcastFloat32ToInt32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+  VisitRR(this, kS390_BitcastDoubleToInt64, node);
+}
+#endif
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+  VisitRR(this, kS390_BitcastInt32ToFloat32, node);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+  VisitRR(this, kS390_BitcastInt64ToDouble, node);
+}
+#endif
+
+void InstructionSelector::VisitFloat32Add(Node* node) {
+  VisitRRR(this, kS390_AddFloat, node);
+}
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+  // TODO(mbrandy): detect multiply-add
+  VisitRRR(this, kS390_AddDouble, node);
+}
+
+void InstructionSelector::VisitFloat32Sub(Node* node) {
+  S390OperandGenerator g(this);
+  Float32BinopMatcher m(node);
+  if (m.left().IsMinusZero()) {
+    Emit(kS390_NegDouble, g.DefineAsRegister(node),
+         g.UseRegister(m.right().node()));
+    return;
+  }
+  VisitRRR(this, kS390_SubFloat, node);
+}
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+  // TODO(mbrandy): detect multiply-subtract
+  S390OperandGenerator g(this);
+  Float64BinopMatcher m(node);
+  if (m.left().IsMinusZero()) {
+    if (m.right().IsFloat64RoundDown() &&
+        CanCover(m.node(), m.right().node())) {
+      if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+          CanCover(m.right().node(), m.right().InputAt(0))) {
+        Float64BinopMatcher mright0(m.right().InputAt(0));
+        if (mright0.left().IsMinusZero()) {
+          // -floor(-x) = ceil(x)
+          Emit(kS390_CeilDouble, g.DefineAsRegister(node),
+               g.UseRegister(mright0.right().node()));
+          return;
+        }
+      }
+    }
+    Emit(kS390_NegDouble, g.DefineAsRegister(node),
+         g.UseRegister(m.right().node()));
+    return;
+  }
+  VisitRRR(this, kS390_SubDouble, node);
+}
+
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+  VisitRRR(this, kS390_MulFloat, node);
+}
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+  // TODO(mbrandy): detect negate
+  VisitRRR(this, kS390_MulDouble, node);
+}
+
+void InstructionSelector::VisitFloat32Div(Node* node) {
+  VisitRRR(this, kS390_DivFloat, node);
+}
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+  VisitRRR(this, kS390_DivDouble, node);
+}
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+  S390OperandGenerator g(this);
+  Emit(kS390_ModDouble, g.DefineAsFixed(node, d1),
+       g.UseFixed(node->InputAt(0), d1), g.UseFixed(node->InputAt(1), d2))
+      ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+  VisitRR(this, kS390_AbsFloat, node);
+}
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+  VisitRR(this, kS390_AbsDouble, node);
+}
+
+void InstructionSelector::VisitFloat32Sqrt(Node* node) {
+  VisitRR(this, kS390_SqrtFloat, node);
+}
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+  VisitRR(this, kS390_SqrtDouble, node);
+}
+
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+  VisitRR(this, kS390_FloorFloat, node);
+}
+
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+  VisitRR(this, kS390_FloorDouble, node);
+}
+
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+  VisitRR(this, kS390_CeilFloat, node);
+}
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+  VisitRR(this, kS390_CeilDouble, node);
+}
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+  VisitRR(this, kS390_TruncateFloat, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+  VisitRR(this, kS390_TruncateDouble, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+  VisitRR(this, kS390_RoundDouble, node);
+}
+
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+  UNREACHABLE();
+}
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+  UNREACHABLE();
+}
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+    return VisitBinop<Int32BinopMatcher>(this, node, kS390_AddWithOverflow32,
+                                         kInt16Imm, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop<Int32BinopMatcher>(this, node, kS390_AddWithOverflow32, kInt16Imm,
+                                &cont);
+}
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+    return VisitBinop<Int32BinopMatcher>(this, node, kS390_SubWithOverflow32,
+                                         kInt16Imm_Negate, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop<Int32BinopMatcher>(this, node, kS390_SubWithOverflow32,
+                                kInt16Imm_Negate, &cont);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+    return VisitBinop<Int64BinopMatcher>(this, node, kS390_Add, kInt16Imm,
+                                         &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop<Int64BinopMatcher>(this, node, kS390_Add, kInt16Imm, &cont);
+}
+
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+    return VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub,
+                                         kInt16Imm_Negate, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub, kInt16Imm_Negate, &cont);
+}
+#endif
+
+static bool CompareLogical(FlagsContinuation* cont) {
+  switch (cont->condition()) {
+    case kUnsignedLessThan:
+    case kUnsignedGreaterThanOrEqual:
+    case kUnsignedLessThanOrEqual:
+    case kUnsignedGreaterThan:
+      return true;
+    default:
+      return false;
+  }
+}
+
+namespace {
+
+// Shared routine for multiple compare operations.
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+                  InstructionOperand left, InstructionOperand right,
+                  FlagsContinuation* cont) {
+  S390OperandGenerator g(selector);
+  opcode = cont->Encode(opcode);
+  if (cont->IsBranch()) {
+    selector->Emit(opcode, g.NoOutput(), left, right,
+                   g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+                             cont->frame_state());
+  } else {
+    DCHECK(cont->IsSet());
+    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  }
+}
+
+// Shared routine for multiple word compare operations.
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+                      InstructionCode opcode, FlagsContinuation* cont,
+                      bool commutative, ImmediateMode immediate_mode) {
+  S390OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // Match immediates on left or right side of comparison.
+  if (g.CanBeImmediate(right, immediate_mode)) {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+                 cont);
+  } else if (g.CanBeImmediate(left, immediate_mode)) {
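+    // The operands are swapped, so a non-commutative condition must be
+    // commuted to preserve the comparison's meaning.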
+    if (!commutative) cont->Commute();
+    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+                 cont);
+  } else {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+                 cont);
+  }
+}
+
+void VisitWord32Compare(InstructionSelector* selector, Node* node,
+                        FlagsContinuation* cont) {
+  ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
+  VisitWordCompare(selector, node, kS390_Cmp32, cont, false, mode);
+}
+
+#if V8_TARGET_ARCH_S390X
+void VisitWord64Compare(InstructionSelector* selector, Node* node,
+                        FlagsContinuation* cont) {
+  ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
+  VisitWordCompare(selector, node, kS390_Cmp64, cont, false, mode);
+}
+#endif
+
+// Shared routine for multiple float32 compare operations.
+void VisitFloat32Compare(InstructionSelector* selector, Node* node,
+                         FlagsContinuation* cont) {
+  S390OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  VisitCompare(selector, kS390_CmpFloat, g.UseRegister(left),
+               g.UseRegister(right), cont);
+}
+
+// Shared routine for multiple float64 compare operations.
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+                         FlagsContinuation* cont) {
+  S390OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  VisitCompare(selector, kS390_CmpDouble, g.UseRegister(left),
+               g.UseRegister(right), cont);
+}
+
+// Shared routine for word comparisons against zero.
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+                          Node* value, InstructionCode opcode,
+                          FlagsContinuation* cont) {
+  while (selector->CanCover(user, value)) {
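+    // As long as |value| can be covered (this is its only effective use), fold
+    // the comparison that defines it into the continuation instead of
+    // materializing a boolean and comparing that against zero.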
+    switch (value->opcode()) {
+      case IrOpcode::kWord32Equal: {
+        // Combine with comparisons against 0 by simply inverting the
+        // continuation.
+        Int32BinopMatcher m(value);
+        if (m.right().Is(0)) {
+          user = value;
+          value = m.left().node();
+          cont->Negate();
+          continue;
+        }
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitWord32Compare(selector, value, cont);
+      }
+      case IrOpcode::kInt32LessThan:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWord32Compare(selector, value, cont);
+      case IrOpcode::kInt32LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWord32Compare(selector, value, cont);
+      case IrOpcode::kUint32LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWord32Compare(selector, value, cont);
+      case IrOpcode::kUint32LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWord32Compare(selector, value, cont);
+#if V8_TARGET_ARCH_S390X
+      case IrOpcode::kWord64Equal:
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitWord64Compare(selector, value, cont);
+      case IrOpcode::kInt64LessThan:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWord64Compare(selector, value, cont);
+      case IrOpcode::kInt64LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWord64Compare(selector, value, cont);
+      case IrOpcode::kUint64LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWord64Compare(selector, value, cont);
+      case IrOpcode::kUint64LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWord64Compare(selector, value, cont);
+#endif
+      case IrOpcode::kFloat32Equal:
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitFloat32Compare(selector, value, cont);
+      case IrOpcode::kFloat32LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitFloat32Compare(selector, value, cont);
+      case IrOpcode::kFloat32LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitFloat32Compare(selector, value, cont);
+      case IrOpcode::kFloat64Equal:
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kFloat64LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kFloat64LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kProjection:
+        // Check if this is the overflow output projection of an
+        // <Operation>WithOverflow node.
+        if (ProjectionIndexOf(value->op()) == 1u) {
+          // We cannot combine the <Operation>WithOverflow with this branch
+          // unless the 0th projection (the use of the actual value of the
+          // <Operation>) is either nullptr, which means there's no use of the
+          // actual value, or was already defined, which means it is scheduled
+          // *AFTER* this branch.
+          Node* const node = value->InputAt(0);
+          Node* const result = NodeProperties::FindProjection(node, 0);
+          if (result == nullptr || selector->IsDefined(result)) {
+            switch (node->opcode()) {
+              case IrOpcode::kInt32AddWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop<Int32BinopMatcher>(
+                    selector, node, kS390_AddWithOverflow32, kInt16Imm, cont);
+              case IrOpcode::kInt32SubWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop<Int32BinopMatcher>(selector, node,
+                                                     kS390_SubWithOverflow32,
+                                                     kInt16Imm_Negate, cont);
+#if V8_TARGET_ARCH_S390X
+              case IrOpcode::kInt64AddWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop<Int64BinopMatcher>(selector, node, kS390_Add,
+                                                     kInt16Imm, cont);
+              case IrOpcode::kInt64SubWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop<Int64BinopMatcher>(selector, node, kS390_Sub,
+                                                     kInt16Imm_Negate, cont);
+#endif
+              default:
+                break;
+            }
+          }
+        }
+        break;
+      case IrOpcode::kInt32Sub:
+        return VisitWord32Compare(selector, value, cont);
+      case IrOpcode::kWord32And:
+        return VisitWordCompare(selector, value, kS390_Tst32, cont, true,
+                                kInt16Imm_Unsigned);
+// TODO(mbrandy): Handle?
+// case IrOpcode::kInt32Add:
+// case IrOpcode::kWord32Or:
+// case IrOpcode::kWord32Xor:
+// case IrOpcode::kWord32Sar:
+// case IrOpcode::kWord32Shl:
+// case IrOpcode::kWord32Shr:
+// case IrOpcode::kWord32Ror:
+#if V8_TARGET_ARCH_S390X
+      case IrOpcode::kInt64Sub:
+        return VisitWord64Compare(selector, value, cont);
+      case IrOpcode::kWord64And:
+        return VisitWordCompare(selector, value, kS390_Tst64, cont, true,
+                                kInt16Imm_Unsigned);
+// TODO(mbrandy): Handle?
+// case IrOpcode::kInt64Add:
+// case IrOpcode::kWord64Or:
+// case IrOpcode::kWord64Xor:
+// case IrOpcode::kWord64Sar:
+// case IrOpcode::kWord64Shl:
+// case IrOpcode::kWord64Shr:
+// case IrOpcode::kWord64Ror:
+#endif
+      default:
+        break;
+    }
+    break;
+  }
+
+  // Branch could not be combined with a compare, emit compare against 0.
+  S390OperandGenerator g(selector);
+  VisitCompare(selector, opcode, g.UseRegister(value), g.TempImmediate(0),
+               cont);
+}
+
+void VisitWord32CompareZero(InstructionSelector* selector, Node* user,
+                            Node* value, FlagsContinuation* cont) {
+  VisitWordCompareZero(selector, user, value, kS390_Cmp32, cont);
+}
+
+#if V8_TARGET_ARCH_S390X
+void VisitWord64CompareZero(InstructionSelector* selector, Node* user,
+                            Node* value, FlagsContinuation* cont) {
+  VisitWordCompareZero(selector, user, value, kS390_Cmp64, cont);
+}
+#endif
+
+}  // namespace
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+                                      BasicBlock* fbranch) {
+  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+  VisitWord32CompareZero(this, branch, branch->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
+  S390OperandGenerator g(this);
+  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+
+  // Emit either ArchTableSwitch or ArchLookupSwitch.
+  size_t table_space_cost = 4 + sw.value_range;
+  size_t table_time_cost = 3;
+  size_t lookup_space_cost = 3 + 2 * sw.case_count;
+  size_t lookup_time_cost = sw.case_count;
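+  // Prefer a table switch when its estimated cost (with time weighted 3x
+  // against space) does not exceed that of a compare-and-branch chain and the
+  // case range can be rebased to a zero-origin index without overflow.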
+  if (sw.case_count > 0 &&
+      table_space_cost + 3 * table_time_cost <=
+          lookup_space_cost + 3 * lookup_time_cost &&
+      sw.min_value > std::numeric_limits<int32_t>::min()) {
+    InstructionOperand index_operand = value_operand;
+    if (sw.min_value) {
+      index_operand = g.TempRegister();
+      Emit(kS390_Sub, index_operand, value_operand,
+           g.TempImmediate(sw.min_value));
+    }
+    // Generate a table lookup.
+    return EmitTableSwitch(sw, index_operand);
+  }
+
+  // Generate a sequence of conditional jumps.
+  return EmitLookupSwitch(sw, value_operand);
+}
+
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+  Int32BinopMatcher m(node);
+  if (m.right().Is(0)) {
+    return VisitWord32CompareZero(this, m.node(), m.left().node(), &cont);
+  }
+  VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+#if V8_TARGET_ARCH_S390X
+void InstructionSelector::VisitWord64Equal(Node* const node) {
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+  Int64BinopMatcher m(node);
+  if (m.right().Is(0)) {
+    return VisitWord64CompareZero(this, m.node(), m.left().node(), &cont);
+  }
+  VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
+  VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
+  VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint64LessThan(Node* node) {
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+  VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+  VisitWord64Compare(this, node, &cont);
+}
+#endif
+
+void InstructionSelector::VisitFloat32Equal(Node* node) {
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+  VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32LessThan(Node* node) {
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+  VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+  VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::EmitPrepareArguments(
+    ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+    Node* node) {
+  S390OperandGenerator g(this);
+
+  // Prepare for C function call.
+  if (descriptor->IsCFunctionCall()) {
+    Emit(kArchPrepareCallCFunction |
+             MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+         0, nullptr, 0, nullptr);
+
+    // Poke any stack arguments.
+    int slot = kStackFrameExtraParamSlot;
+    for (PushParameter input : (*arguments)) {
+      Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+           g.TempImmediate(slot));
+      ++slot;
+    }
+  } else {
+    // Push any stack arguments.
+    int num_slots = static_cast<int>(descriptor->StackParameterCount());
+    int slot = 0;
+    for (PushParameter input : (*arguments)) {
+      if (slot == 0) {
+        DCHECK(input.node());
+        Emit(kS390_PushFrame, g.NoOutput(), g.UseRegister(input.node()),
+             g.TempImmediate(num_slots));
+      } else {
+        // Skip any alignment holes in pushed nodes.
+        if (input.node()) {
+          Emit(kS390_StoreToStackSlot, g.NoOutput(),
+               g.UseRegister(input.node()), g.TempImmediate(slot));
+        }
+      }
+      ++slot;
+    }
+  }
+}
+
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
+
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
+
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+  S390OperandGenerator g(this);
+  Emit(kS390_DoubleExtractLowWord32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+  S390OperandGenerator g(this);
+  Emit(kS390_DoubleExtractHighWord32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+  S390OperandGenerator g(this);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
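+  // Fuse a covered Float64InsertHighWord32 feeding this node into a single
+  // kS390_DoubleConstruct that builds the double from both 32-bit halves.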
+  if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
+      CanCover(node, left)) {
+    left = left->InputAt(1);
+    Emit(kS390_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(left),
+         g.UseRegister(right));
+    return;
+  }
+  Emit(kS390_DoubleInsertLowWord32, g.DefineSameAsFirst(node),
+       g.UseRegister(left), g.UseRegister(right));
+}
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+  S390OperandGenerator g(this);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
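+  // Mirror of the low-word case: fuse a covered Float64InsertLowWord32 into a
+  // single kS390_DoubleConstruct.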
+  if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
+      CanCover(node, left)) {
+    left = left->InputAt(1);
+    Emit(kS390_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(right),
+         g.UseRegister(left));
+    return;
+  }
+  Emit(kS390_DoubleInsertHighWord32, g.DefineSameAsFirst(node),
+       g.UseRegister(left), g.UseRegister(right));
+}
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+  return MachineOperatorBuilder::kFloat32RoundDown |
+         MachineOperatorBuilder::kFloat64RoundDown |
+         MachineOperatorBuilder::kFloat32RoundUp |
+         MachineOperatorBuilder::kFloat64RoundUp |
+         MachineOperatorBuilder::kFloat32RoundTruncate |
+         MachineOperatorBuilder::kFloat64RoundTruncate |
+         MachineOperatorBuilder::kFloat64RoundTiesAway |
+         MachineOperatorBuilder::kWord32Popcnt |
+         MachineOperatorBuilder::kWord64Popcnt;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/schedule.cc b/src/compiler/schedule.cc
index 455fcd1..4ac65e5 100644
--- a/src/compiler/schedule.cc
+++ b/src/compiler/schedule.cc
@@ -298,6 +298,64 @@
   SetControlInput(block, sw);
 }
 
+void Schedule::EnsureSplitEdgeForm() {
+  // Make a copy of all the blocks for the iteration, since adding the split
+  // edges will allocate new blocks.
+  BasicBlockVector all_blocks_copy(all_blocks_);
+
+  // Insert missing split edge blocks.
+  for (auto block : all_blocks_copy) {
+    if (block->PredecessorCount() > 1 && block != end_) {
+      for (auto current_pred = block->predecessors().begin();
+           current_pred != block->predecessors().end(); ++current_pred) {
+        BasicBlock* pred = *current_pred;
+        if (pred->SuccessorCount() > 1) {
+          // Found a predecessor block with multiple successors.
+          BasicBlock* split_edge_block = NewBasicBlock();
+          split_edge_block->set_control(BasicBlock::kGoto);
+          split_edge_block->successors().push_back(block);
+          split_edge_block->predecessors().push_back(pred);
+          split_edge_block->set_deferred(pred->deferred());
+          *current_pred = split_edge_block;
+          // Find the corresponding successor in the predecessor block and
+          // replace it with the split edge block... but only do it once,
+          // since we replace the current block's predecessors one at a time.
+          for (auto successor = pred->successors().begin();
+               successor != pred->successors().end(); ++successor) {
+            if (*successor == block) {
+              *successor = split_edge_block;
+              break;
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
+void Schedule::PropagateDeferredMark() {
+  // Push forward the deferred block marks through newly inserted blocks and
+  // other improperly marked blocks until a fixed point is reached.
+  // TODO(danno): optimize the propagation
+  bool done = false;
+  while (!done) {
+    done = true;
+    for (auto block : all_blocks_) {
+      if (!block->deferred()) {
+        bool deferred = block->PredecessorCount() > 0;
+        for (auto pred : block->predecessors()) {
+          if (!pred->deferred()) {
+            deferred = false;
+          }
+        }
+        if (deferred) {
+          block->set_deferred(true);
+          done = false;
+        }
+      }
+    }
+  }
+}
 
 void Schedule::AddSuccessor(BasicBlock* block, BasicBlock* succ) {
   block->AddSuccessor(succ);
@@ -331,15 +389,24 @@
 
 
 std::ostream& operator<<(std::ostream& os, const Schedule& s) {
-  for (BasicBlock* block : *s.rpo_order()) {
-    os << "--- BLOCK B" << block->rpo_number();
+  for (BasicBlock* block :
+       ((s.RpoBlockCount() == 0) ? *s.all_blocks() : *s.rpo_order())) {
+    if (block->rpo_number() == -1) {
+      os << "--- BLOCK id:" << block->id().ToInt();
+    } else {
+      os << "--- BLOCK B" << block->rpo_number();
+    }
     if (block->deferred()) os << " (deferred)";
     if (block->PredecessorCount() != 0) os << " <- ";
     bool comma = false;
     for (BasicBlock const* predecessor : block->predecessors()) {
       if (comma) os << ", ";
       comma = true;
-      os << "B" << predecessor->rpo_number();
+      if (predecessor->rpo_number() == -1) {
+        os << "id:" << predecessor->id().ToInt();
+      } else {
+        os << "B" << predecessor->rpo_number();
+      }
     }
     os << " ---\n";
     for (Node* node : *block) {
@@ -364,7 +431,11 @@
       for (BasicBlock const* successor : block->successors()) {
         if (comma) os << ", ";
         comma = true;
-        os << "B" << successor->rpo_number();
+        if (successor->rpo_number() == -1) {
+          os << "id:" << successor->id().ToInt();
+        } else {
+          os << "B" << successor->rpo_number();
+        }
       }
       os << "\n";
     }
diff --git a/src/compiler/schedule.h b/src/compiler/schedule.h
index 9624ff5..c99a0fc 100644
--- a/src/compiler/schedule.h
+++ b/src/compiler/schedule.h
@@ -243,6 +243,7 @@
     return AddSuccessor(block, succ);
   }
 
+  const BasicBlockVector* all_blocks() const { return &all_blocks_; }
   BasicBlockVector* rpo_order() { return &rpo_order_; }
   const BasicBlockVector* rpo_order() const { return &rpo_order_; }
 
@@ -254,6 +255,12 @@
  private:
   friend class Scheduler;
   friend class BasicBlockInstrumentor;
+  friend class RawMachineAssembler;
+
+  // Ensure split-edge form for a hand-assembled schedule.
+  void EnsureSplitEdgeForm();
+  // Copy deferred block markers down as far as possible.
+  void PropagateDeferredMark();
 
   void AddSuccessor(BasicBlock* block, BasicBlock* succ);
   void MoveSuccessors(BasicBlock* from, BasicBlock* to);
diff --git a/src/compiler/scheduler.cc b/src/compiler/scheduler.cc
index 80ce8b1..b04ba6f 100644
--- a/src/compiler/scheduler.cc
+++ b/src/compiler/scheduler.cc
@@ -1538,6 +1538,8 @@
   }
 
   BasicBlock* GetBlockForUse(Edge edge) {
+    // TODO(titzer): ignore uses from dead nodes (not visited in PrepareUses()).
+    // Dead uses only occur if the graph is not trimmed before scheduling.
     Node* use = edge.from();
     if (IrOpcode::IsPhiOpcode(use->opcode())) {
       // If the use is from a coupled (i.e. floating) phi, compute the common
@@ -1545,7 +1547,8 @@
       if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
         TRACE("  inspecting uses of coupled #%d:%s\n", use->id(),
               use->op()->mnemonic());
-        DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
+        // TODO(titzer): re-enable once the above TODO is addressed.
+        // DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
         return GetCommonDominatorOfUses(use);
       }
       // If the use is from a fixed (i.e. non-floating) phi, we use the
diff --git a/src/compiler/select-lowering.cc b/src/compiler/select-lowering.cc
index 0e8b36f..b1a2309 100644
--- a/src/compiler/select-lowering.cc
+++ b/src/compiler/select-lowering.cc
@@ -15,10 +15,7 @@
 namespace compiler {
 
 SelectLowering::SelectLowering(Graph* graph, CommonOperatorBuilder* common)
-    : common_(common),
-      graph_(graph),
-      merges_(Merges::key_compare(), Merges::allocator_type(graph->zone())) {}
-
+    : common_(common), graph_(graph) {}
 
 SelectLowering::~SelectLowering() {}
 
@@ -30,58 +27,16 @@
   Node* cond = node->InputAt(0);
   Node* vthen = node->InputAt(1);
   Node* velse = node->InputAt(2);
-  Node* merge = nullptr;
 
-  // Check if we already have a diamond for this condition.
-  auto range = merges_.equal_range(cond);
-  for (auto i = range.first;; ++i) {
-    if (i == range.second) {
-      // Create a new diamond for this condition and remember its merge node.
-      Diamond d(graph(), common(), cond, p.hint());
-      merges_.insert(std::make_pair(cond, d.merge));
-      merge = d.merge;
-      break;
-    }
-
-    // If the diamond is reachable from the Select, merging them would result in
-    // an unschedulable graph, so we cannot reuse the diamond in that case.
-    merge = i->second;
-    if (!ReachableFrom(merge, node)) {
-      break;
-    }
-  }
-
-  // Create a Phi hanging off the previously determined merge.
+  // Create a diamond and a phi.
+  Diamond d(graph(), common(), cond, p.hint());
   node->ReplaceInput(0, vthen);
   node->ReplaceInput(1, velse);
-  node->ReplaceInput(2, merge);
+  node->ReplaceInput(2, d.merge);
   NodeProperties::ChangeOp(node, common()->Phi(p.representation(), 2));
   return Changed(node);
 }
 
-
-bool SelectLowering::ReachableFrom(Node* const sink, Node* const source) {
-  // TODO(turbofan): This is probably horribly expensive, and it should be moved
-  // into node.h or somewhere else?!
-  Zone zone;
-  std::queue<Node*, NodeDeque> queue((NodeDeque(&zone)));
-  BoolVector visited(graph()->NodeCount(), false, &zone);
-  queue.push(source);
-  visited[source->id()] = true;
-  while (!queue.empty()) {
-    Node* current = queue.front();
-    if (current == sink) return true;
-    queue.pop();
-    for (auto input : current->inputs()) {
-      if (!visited[input->id()]) {
-        queue.push(input);
-        visited[input->id()] = true;
-      }
-    }
-  }
-  return false;
-}
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
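Net effect of the select-lowering change: every Select now gets a private diamond, so the per-condition multimap cache and the expensive ReachableFrom graph walk are no longer needed. The rewritten node shape, sketched as comments (shapes inferred from the diff above, not actual V8 API calls):

// Before:  result = Select(cond, vthen, velse)
//
// After:          Branch(cond, start)
//                  /            \
//             IfTrue          IfFalse
//                  \            /
//                     Merge(2)
//
//          result = Phi(rep, 2)(vthen, velse, Merge)
//
// The Select node is mutated in place: its inputs become (vthen, velse,
// d.merge) and NodeProperties::ChangeOp swaps its operator for Phi.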
diff --git a/src/compiler/select-lowering.h b/src/compiler/select-lowering.h
index 5894d35..b882a31 100644
--- a/src/compiler/select-lowering.h
+++ b/src/compiler/select-lowering.h
@@ -5,10 +5,7 @@
 #ifndef V8_COMPILER_SELECT_LOWERING_H_
 #define V8_COMPILER_SELECT_LOWERING_H_
 
-#include <map>
-
 #include "src/compiler/graph-reducer.h"
-#include "src/zone-allocator.h"
 
 namespace v8 {
 namespace internal {
@@ -28,17 +25,11 @@
   Reduction Reduce(Node* node) override;
 
  private:
-  typedef std::multimap<Node*, Node*, std::less<Node*>,
-                        zone_allocator<std::pair<Node* const, Node*>>> Merges;
-
-  bool ReachableFrom(Node* const sink, Node* const source);
-
   CommonOperatorBuilder* common() const { return common_; }
   Graph* graph() const { return graph_; }
 
   CommonOperatorBuilder* common_;
   Graph* graph_;
-  Merges merges_;
 };
 
 }  // namespace compiler
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
index ed7fe9d..88931f5 100644
--- a/src/compiler/simplified-lowering.cc
+++ b/src/compiler/simplified-lowering.cc
@@ -104,15 +104,6 @@
   static UseInfo None() {
     return UseInfo(MachineRepresentation::kNone, Truncation::None());
   }
-
-  // Truncation to a representation that is smaller than the preferred
-  // one.
-  static UseInfo Float64TruncatingToWord32() {
-    return UseInfo(MachineRepresentation::kFloat64, Truncation::Word32());
-  }
-  static UseInfo Word64TruncatingToWord32() {
-    return UseInfo(MachineRepresentation::kWord64, Truncation::Word32());
-  }
   static UseInfo AnyTruncatingToBool() {
     return UseInfo(MachineRepresentation::kNone, Truncation::Bool());
   }
@@ -242,71 +233,6 @@
 class RepresentationSelector {
  public:
   // Information for each node tracked during the fixpoint.
-  class NodeOutputInfo {
-   public:
-    NodeOutputInfo(MachineRepresentation representation, Type* type)
-        : type_(type), representation_(representation) {}
-    NodeOutputInfo()
-        : type_(Type::None()), representation_(MachineRepresentation::kNone) {}
-
-    MachineRepresentation representation() const { return representation_; }
-    Type* type() const { return type_; }
-
-    static NodeOutputInfo None() {
-      return NodeOutputInfo(MachineRepresentation::kNone, Type::None());
-    }
-
-    static NodeOutputInfo Float32() {
-      return NodeOutputInfo(MachineRepresentation::kFloat32, Type::Number());
-    }
-
-    static NodeOutputInfo Float64() {
-      return NodeOutputInfo(MachineRepresentation::kFloat64, Type::Number());
-    }
-
-    static NodeOutputInfo NumberTruncatedToWord32() {
-      return NodeOutputInfo(MachineRepresentation::kWord32, Type::Number());
-    }
-
-    static NodeOutputInfo Int32() {
-      return NodeOutputInfo(MachineRepresentation::kWord32, Type::Signed32());
-    }
-
-    static NodeOutputInfo Uint32() {
-      return NodeOutputInfo(MachineRepresentation::kWord32, Type::Unsigned32());
-    }
-
-    static NodeOutputInfo Bool() {
-      return NodeOutputInfo(MachineRepresentation::kBit, Type::Boolean());
-    }
-
-    static NodeOutputInfo Int64() {
-      // TODO(jarin) Fix once we have a real int64 type.
-      return NodeOutputInfo(MachineRepresentation::kWord64, Type::Internal());
-    }
-
-    static NodeOutputInfo Uint64() {
-      // TODO(jarin) Fix once we have a real uint64 type.
-      return NodeOutputInfo(MachineRepresentation::kWord64, Type::Internal());
-    }
-
-    static NodeOutputInfo AnyTagged() {
-      return NodeOutputInfo(MachineRepresentation::kTagged, Type::Any());
-    }
-
-    static NodeOutputInfo NumberTagged() {
-      return NodeOutputInfo(MachineRepresentation::kTagged, Type::Number());
-    }
-
-    static NodeOutputInfo Pointer() {
-      return NodeOutputInfo(MachineType::PointerRepresentation(), Type::Any());
-    }
-
-   private:
-    Type* type_;
-    MachineRepresentation representation_;
-  };
-
   class NodeInfo {
    public:
     // Adds new use to the node. Returns true if something has changed
@@ -322,17 +248,15 @@
     void set_visited() { visited_ = true; }
     bool visited() const { return visited_; }
     Truncation truncation() const { return truncation_; }
-    void set_output_type(NodeOutputInfo output) { output_ = output; }
+    void set_output(MachineRepresentation output) { representation_ = output; }
 
-    Type* output_type() const { return output_.type(); }
-    MachineRepresentation representation() const {
-      return output_.representation();
-    }
+    MachineRepresentation representation() const { return representation_; }
 
    private:
-    bool queued_ = false;                  // Bookkeeping for the traversal.
-    bool visited_ = false;                 // Bookkeeping for the traversal.
-    NodeOutputInfo output_;                // Output type and representation.
+    bool queued_ = false;   // Bookkeeping for the traversal.
+    bool visited_ = false;  // Bookkeeping for the traversal.
+    MachineRepresentation representation_ =
+        MachineRepresentation::kNone;             // Output representation.
     Truncation truncation_ = Truncation::None();  // Information about uses.
   };
 
@@ -467,76 +391,31 @@
     }
   }
 
-  void SetOutputFromMachineType(Node* node, MachineType machine_type) {
-    Type* type = Type::None();
-    switch (machine_type.semantic()) {
-      case MachineSemantic::kNone:
-        type = Type::None();
-        break;
-      case MachineSemantic::kBool:
-        type = Type::Boolean();
-        break;
-      case MachineSemantic::kInt32:
-        type = Type::Signed32();
-        break;
-      case MachineSemantic::kUint32:
-        type = Type::Unsigned32();
-        break;
-      case MachineSemantic::kInt64:
-        // TODO(jarin) Fix once we have proper int64.
-        type = Type::Internal();
-        break;
-      case MachineSemantic::kUint64:
-        // TODO(jarin) Fix once we have proper uint64.
-        type = Type::Internal();
-        break;
-      case MachineSemantic::kNumber:
-        type = Type::Number();
-        break;
-      case MachineSemantic::kAny:
-        type = Type::Any();
-        break;
-    }
-    return SetOutput(node, NodeOutputInfo(machine_type.representation(), type));
+  void SetOutput(Node* node, MachineRepresentation representation) {
+    NodeInfo* info = GetInfo(node);
+    DCHECK(
+        MachineRepresentationIsSubtype(info->representation(), representation));
+    info->set_output(representation);
   }
 
-  void SetOutput(Node* node, NodeOutputInfo output_info) {
-    // Every node should have at most one output representation. Note that
-    // phis can have 0, if they have not been used in a representation-inducing
-    // instruction.
-    Type* output_type = output_info.type();
-    if (NodeProperties::IsTyped(node)) {
-      output_type = Type::Intersect(NodeProperties::GetType(node),
-                                    output_info.type(), jsgraph_->zone());
-    }
-    NodeInfo* info = GetInfo(node);
-    DCHECK(info->output_type()->Is(output_type));
-    DCHECK(MachineRepresentationIsSubtype(info->representation(),
-                                          output_info.representation()));
-    if (!output_type->Is(info->output_type()) ||
-        output_info.representation() != info->representation()) {
-      EnqueueUses(node);
-    }
-    info->set_output_type(
-        NodeOutputInfo(output_info.representation(), output_type));
-  }
+  Type* GetUpperBound(Node* node) { return NodeProperties::GetType(node); }
 
   bool BothInputsAreSigned32(Node* node) {
     DCHECK_EQ(2, node->InputCount());
-    return GetInfo(node->InputAt(0))->output_type()->Is(Type::Signed32()) &&
-           GetInfo(node->InputAt(1))->output_type()->Is(Type::Signed32());
+    return GetUpperBound(node->InputAt(0))->Is(Type::Signed32()) &&
+           GetUpperBound(node->InputAt(1))->Is(Type::Signed32());
   }
 
   bool BothInputsAreUnsigned32(Node* node) {
     DCHECK_EQ(2, node->InputCount());
-    return GetInfo(node->InputAt(0))->output_type()->Is(Type::Unsigned32()) &&
-           GetInfo(node->InputAt(1))->output_type()->Is(Type::Unsigned32());
+    return GetUpperBound(node->InputAt(0))->Is(Type::Unsigned32()) &&
+           GetUpperBound(node->InputAt(1))->Is(Type::Unsigned32());
   }
 
   bool BothInputsAre(Node* node, Type* type) {
     DCHECK_EQ(2, node->InputCount());
-    return GetInfo(node->InputAt(0))->output_type()->Is(type) &&
-           GetInfo(node->InputAt(1))->output_type()->Is(type);
+    return GetUpperBound(node->InputAt(0))->Is(type) &&
+           GetUpperBound(node->InputAt(1))->Is(type);
   }
 
   void ConvertInput(Node* node, int index, UseInfo use) {
@@ -556,7 +435,7 @@
       PrintUseInfo(use);
       TRACE("\n");
       Node* n = changer_->GetRepresentationFor(
-          input, input_info->representation(), input_info->output_type(),
+          input, input_info->representation(), GetUpperBound(input),
           use.preferred(), use.truncation());
       node->ReplaceInput(index, n);
     }
@@ -602,7 +481,7 @@
 
   // Helper for binops of the R x L -> O variety.
   void VisitBinop(Node* node, UseInfo left_use, UseInfo right_use,
-                  NodeOutputInfo output) {
+                  MachineRepresentation output) {
     DCHECK_EQ(2, node->op()->ValueInputCount());
     ProcessInput(node, 0, left_use);
     ProcessInput(node, 1, right_use);
@@ -613,80 +492,77 @@
   }
 
   // Helper for binops of the I x I -> O variety.
-  void VisitBinop(Node* node, UseInfo input_use, NodeOutputInfo output) {
+  void VisitBinop(Node* node, UseInfo input_use, MachineRepresentation output) {
     VisitBinop(node, input_use, input_use, output);
   }
 
   // Helper for unops of the I -> O variety.
-  void VisitUnop(Node* node, UseInfo input_use, NodeOutputInfo output) {
+  void VisitUnop(Node* node, UseInfo input_use, MachineRepresentation output) {
     DCHECK_EQ(1, node->InputCount());
     ProcessInput(node, 0, input_use);
     SetOutput(node, output);
   }
 
   // Helper for leaf nodes.
-  void VisitLeaf(Node* node, NodeOutputInfo output) {
+  void VisitLeaf(Node* node, MachineRepresentation output) {
     DCHECK_EQ(0, node->InputCount());
     SetOutput(node, output);
   }
 
   // Helpers for specific types of binops.
   void VisitFloat64Binop(Node* node) {
-    VisitBinop(node, UseInfo::Float64(), NodeOutputInfo::Float64());
+    VisitBinop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
   }
   void VisitInt32Binop(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+    VisitBinop(node, UseInfo::TruncatingWord32(),
+               MachineRepresentation::kWord32);
   }
   void VisitWord32TruncatingBinop(Node* node) {
     VisitBinop(node, UseInfo::TruncatingWord32(),
-               NodeOutputInfo::NumberTruncatedToWord32());
+               MachineRepresentation::kWord32);
   }
   void VisitUint32Binop(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Uint32());
+    VisitBinop(node, UseInfo::TruncatingWord32(),
+               MachineRepresentation::kWord32);
   }
   void VisitInt64Binop(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Int64());
+    VisitBinop(node, UseInfo::TruncatingWord64(),
+               MachineRepresentation::kWord64);
   }
   void VisitUint64Binop(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Uint64());
+    VisitBinop(node, UseInfo::TruncatingWord64(),
+               MachineRepresentation::kWord64);
   }
   void VisitFloat64Cmp(Node* node) {
-    VisitBinop(node, UseInfo::Float64(), NodeOutputInfo::Bool());
+    VisitBinop(node, UseInfo::Float64(), MachineRepresentation::kBit);
   }
   void VisitInt32Cmp(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Bool());
+    VisitBinop(node, UseInfo::TruncatingWord32(), MachineRepresentation::kBit);
   }
   void VisitUint32Cmp(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Bool());
+    VisitBinop(node, UseInfo::TruncatingWord32(), MachineRepresentation::kBit);
   }
   void VisitInt64Cmp(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Bool());
+    VisitBinop(node, UseInfo::TruncatingWord64(), MachineRepresentation::kBit);
   }
   void VisitUint64Cmp(Node* node) {
-    VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Bool());
+    VisitBinop(node, UseInfo::TruncatingWord64(), MachineRepresentation::kBit);
   }
 
   // Infer representation for phi-like nodes.
-  NodeOutputInfo GetOutputInfoForPhi(Node* node, Truncation use) {
-    // Compute the type.
-    Type* type = GetInfo(node->InputAt(0))->output_type();
-    for (int i = 1; i < node->op()->ValueInputCount(); ++i) {
-      type = Type::Union(type, GetInfo(node->InputAt(i))->output_type(),
-                         jsgraph_->zone());
-    }
-
+  MachineRepresentation GetOutputInfoForPhi(Node* node, Truncation use) {
     // Compute the representation.
-    MachineRepresentation rep = MachineRepresentation::kTagged;
+    Type* type = GetUpperBound(node);
     if (type->Is(Type::None())) {
-      rep = MachineRepresentation::kNone;
+      return MachineRepresentation::kNone;
     } else if (type->Is(Type::Signed32()) || type->Is(Type::Unsigned32())) {
-      rep = MachineRepresentation::kWord32;
+      return MachineRepresentation::kWord32;
     } else if (use.TruncatesToWord32()) {
-      rep = MachineRepresentation::kWord32;
+      return MachineRepresentation::kWord32;
     } else if (type->Is(Type::Boolean())) {
-      rep = MachineRepresentation::kBit;
+      return MachineRepresentation::kBit;
     } else if (type->Is(Type::Number())) {
-      rep = MachineRepresentation::kFloat64;
+      return MachineRepresentation::kFloat64;
     } else if (type->Is(Type::Internal())) {
       // We mark (u)int64 as Type::Internal.
       // TODO(jarin) This is a workaround for our lack of (u)int64
@@ -702,10 +578,10 @@
                                  MachineRepresentation::kWord64);
       }
 #endif
-      rep = is_word64 ? MachineRepresentation::kWord64
-                      : MachineRepresentation::kTagged;
+      return is_word64 ? MachineRepresentation::kWord64
+                       : MachineRepresentation::kTagged;
     }
-    return NodeOutputInfo(rep, type);
+    return MachineRepresentation::kTagged;
   }
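In short, a phi's representation is now derived from its static type bound plus the truncation demanded by its uses, instead of re-unioning the input types on every visit. Summarizing the ladder above (illustrative, not an exhaustive spec):

// Type::None()                           -> kNone
// Type::Signed32() / Type::Unsigned32()  -> kWord32
// any type, if uses truncate to word32   -> kWord32
// Type::Boolean()                        -> kBit
// Type::Number()                         -> kFloat64
// Type::Internal() ((u)int64 stand-in)   -> kWord64 or kTagged
// anything else                          -> kTagged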
 
   // Helper for handling selects.
@@ -713,20 +589,20 @@
                    SimplifiedLowering* lowering) {
     ProcessInput(node, 0, UseInfo::Bool());
 
-    NodeOutputInfo output = GetOutputInfoForPhi(node, truncation);
+    MachineRepresentation output = GetOutputInfoForPhi(node, truncation);
     SetOutput(node, output);
 
     if (lower()) {
       // Update the select operator.
       SelectParameters p = SelectParametersOf(node->op());
-      if (output.representation() != p.representation()) {
-        NodeProperties::ChangeOp(node, lowering->common()->Select(
-                                           output.representation(), p.hint()));
+      if (output != p.representation()) {
+        NodeProperties::ChangeOp(node,
+                                 lowering->common()->Select(output, p.hint()));
       }
     }
     // Convert inputs to the output representation of this phi, and pass
     // the truncation along.
-    UseInfo input_use(output.representation(), truncation);
+    UseInfo input_use(output, truncation);
     ProcessInput(node, 1, input_use);
     ProcessInput(node, 2, input_use);
   }
@@ -734,21 +610,20 @@
   // Helper for handling phis.
   void VisitPhi(Node* node, Truncation truncation,
                 SimplifiedLowering* lowering) {
-    NodeOutputInfo output = GetOutputInfoForPhi(node, truncation);
+    MachineRepresentation output = GetOutputInfoForPhi(node, truncation);
     SetOutput(node, output);
 
     int values = node->op()->ValueInputCount();
     if (lower()) {
       // Update the phi operator.
-      if (output.representation() != PhiRepresentationOf(node->op())) {
-        NodeProperties::ChangeOp(
-            node, lowering->common()->Phi(output.representation(), values));
+      if (output != PhiRepresentationOf(node->op())) {
+        NodeProperties::ChangeOp(node, lowering->common()->Phi(output, values));
       }
     }
 
     // Convert inputs to the output representation of this phi, and pass
     // the truncation along.
-    UseInfo input_use(output.representation(), truncation);
+    UseInfo input_use(output, truncation);
     for (int i = 0; i < node->InputCount(); i++) {
       ProcessInput(node, i, i < values ? input_use : UseInfo::None());
     }
@@ -772,9 +647,10 @@
     }
 
     if (sig->return_count() > 0) {
-      SetOutputFromMachineType(node, desc->GetMachineSignature()->GetReturn());
+      SetOutput(node,
+                desc->GetMachineSignature()->GetReturn().representation());
     } else {
-      SetOutput(node, NodeOutputInfo::AnyTagged());
+      SetOutput(node, MachineRepresentation::kTagged);
     }
   }
 
@@ -801,10 +677,10 @@
           new (zone->New(sizeof(ZoneVector<MachineType>)))
               ZoneVector<MachineType>(node->InputCount(), zone);
       for (int i = 0; i < node->InputCount(); i++) {
-        NodeInfo* input_info = GetInfo(node->InputAt(i));
-        MachineType machine_type(
-            input_info->representation(),
-            DeoptValueSemanticOf(input_info->output_type()));
+        Node* input = node->InputAt(i);
+        NodeInfo* input_info = GetInfo(input);
+        MachineType machine_type(input_info->representation(),
+                                 DeoptValueSemanticOf(GetUpperBound(input)));
         DCHECK(machine_type.representation() !=
                    MachineRepresentation::kWord32 ||
                machine_type.semantic() == MachineSemantic::kInt32 ||
@@ -814,7 +690,7 @@
       NodeProperties::ChangeOp(node,
                                jsgraph_->common()->TypedStateValues(types));
     }
-    SetOutput(node, NodeOutputInfo::AnyTagged());
+    SetOutput(node, MachineRepresentation::kTagged);
   }
 
   const Operator* Int32Op(Node* node) {
@@ -839,29 +715,34 @@
       //------------------------------------------------------------------
       case IrOpcode::kStart:
       case IrOpcode::kDead:
-        return VisitLeaf(node, NodeOutputInfo::None());
+        return VisitLeaf(node, MachineRepresentation::kNone);
       case IrOpcode::kParameter: {
         // TODO(titzer): use representation from linkage.
-        Type* type = NodeProperties::GetType(node);
         ProcessInput(node, 0, UseInfo::None());
-        SetOutput(node, NodeOutputInfo(MachineRepresentation::kTagged, type));
+        SetOutput(node, MachineRepresentation::kTagged);
         return;
       }
       case IrOpcode::kInt32Constant:
-        return VisitLeaf(node, NodeOutputInfo::Int32());
+        return VisitLeaf(node, MachineRepresentation::kWord32);
       case IrOpcode::kInt64Constant:
-        return VisitLeaf(node, NodeOutputInfo::Int64());
+        return VisitLeaf(node, MachineRepresentation::kWord64);
       case IrOpcode::kFloat32Constant:
-        return VisitLeaf(node, NodeOutputInfo::Float32());
+        return VisitLeaf(node, MachineRepresentation::kFloat32);
       case IrOpcode::kFloat64Constant:
-        return VisitLeaf(node, NodeOutputInfo::Float64());
+        return VisitLeaf(node, MachineRepresentation::kFloat64);
       case IrOpcode::kExternalConstant:
-        return VisitLeaf(node, NodeOutputInfo::Pointer());
+        return VisitLeaf(node, MachineType::PointerRepresentation());
       case IrOpcode::kNumberConstant:
-        return VisitLeaf(node, NodeOutputInfo::NumberTagged());
+        return VisitLeaf(node, MachineRepresentation::kTagged);
       case IrOpcode::kHeapConstant:
-        return VisitLeaf(node, NodeOutputInfo::AnyTagged());
+        return VisitLeaf(node, MachineRepresentation::kTagged);
 
+      case IrOpcode::kDeoptimizeIf:
+      case IrOpcode::kDeoptimizeUnless:
+        ProcessInput(node, 0, UseInfo::Bool());
+        ProcessInput(node, 1, UseInfo::AnyTagged());
+        ProcessRemainingInputs(node, 2);
+        break;
       case IrOpcode::kBranch:
         ProcessInput(node, 0, UseInfo::Bool());
         EnqueueInput(node, NodeProperties::FirstControlIndex(node));
@@ -889,7 +770,7 @@
         JS_OP_LIST(DEFINE_JS_CASE)
 #undef DEFINE_JS_CASE
         VisitInputs(node);
-        return SetOutput(node, NodeOutputInfo::AnyTagged());
+        return SetOutput(node, MachineRepresentation::kTagged);
 
       //------------------------------------------------------------------
       // Simplified operators.
@@ -909,7 +790,7 @@
         } else {
           // No input representation requirement; adapt during lowering.
           ProcessInput(node, 0, UseInfo::AnyTruncatingToBool());
-          SetOutput(node, NodeOutputInfo::Bool());
+          SetOutput(node, MachineRepresentation::kBit);
         }
         break;
       }
@@ -927,7 +808,7 @@
         } else {
           // No input representation requirement; adapt during lowering.
           ProcessInput(node, 0, UseInfo::AnyTruncatingToBool());
-          SetOutput(node, NodeOutputInfo::Int32());
+          SetOutput(node, MachineRepresentation::kWord32);
         }
         break;
       }
@@ -1055,46 +936,80 @@
         break;
       }
       case IrOpcode::kNumberShiftLeft: {
-        Type* rhs_type = GetInfo(node->InputAt(1))->output_type();
+        Type* rhs_type = GetUpperBound(node->InputAt(1));
         VisitBinop(node, UseInfo::TruncatingWord32(),
-                   UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+                   UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
         if (lower()) {
           lowering->DoShift(node, lowering->machine()->Word32Shl(), rhs_type);
         }
         break;
       }
       case IrOpcode::kNumberShiftRight: {
-        Type* rhs_type = GetInfo(node->InputAt(1))->output_type();
+        Type* rhs_type = GetUpperBound(node->InputAt(1));
         VisitBinop(node, UseInfo::TruncatingWord32(),
-                   UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+                   UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
         if (lower()) {
           lowering->DoShift(node, lowering->machine()->Word32Sar(), rhs_type);
         }
         break;
       }
       case IrOpcode::kNumberShiftRightLogical: {
-        Type* rhs_type = GetInfo(node->InputAt(1))->output_type();
+        Type* rhs_type = GetUpperBound(node->InputAt(1));
         VisitBinop(node, UseInfo::TruncatingWord32(),
-                   UseInfo::TruncatingWord32(), NodeOutputInfo::Uint32());
+                   UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
         if (lower()) {
           lowering->DoShift(node, lowering->machine()->Word32Shr(), rhs_type);
         }
         break;
       }
+      case IrOpcode::kNumberImul: {
+        VisitBinop(node, UseInfo::TruncatingWord32(),
+                   UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
+        if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
+        break;
+      }
+      case IrOpcode::kNumberClz32: {
+        VisitUnop(node, UseInfo::TruncatingWord32(),
+                  MachineRepresentation::kWord32);
+        if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
+        break;
+      }
+      case IrOpcode::kNumberCeil: {
+        VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+        if (lower()) DeferReplacement(node, lowering->Float64Ceil(node));
+        break;
+      }
+      case IrOpcode::kNumberFloor: {
+        VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+        if (lower()) DeferReplacement(node, lowering->Float64Floor(node));
+        break;
+      }
+      case IrOpcode::kNumberRound: {
+        VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+        if (lower()) DeferReplacement(node, lowering->Float64Round(node));
+        break;
+      }
+      case IrOpcode::kNumberTrunc: {
+        VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+        if (lower()) DeferReplacement(node, lowering->Float64Trunc(node));
+        break;
+      }
       case IrOpcode::kNumberToInt32: {
         // Just change representation if necessary.
-        VisitUnop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+        VisitUnop(node, UseInfo::TruncatingWord32(),
+                  MachineRepresentation::kWord32);
         if (lower()) DeferReplacement(node, node->InputAt(0));
         break;
       }
       case IrOpcode::kNumberToUint32: {
         // Just change representation if necessary.
-        VisitUnop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Uint32());
+        VisitUnop(node, UseInfo::TruncatingWord32(),
+                  MachineRepresentation::kWord32);
         if (lower()) DeferReplacement(node, node->InputAt(0));
         break;
       }
       case IrOpcode::kNumberIsHoleNaN: {
-        VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Bool());
+        VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kBit);
         if (lower()) {
           // NumberIsHoleNaN(x) => Word32Equal(Float64ExtractLowWord32(x),
           //                                   #HoleNaNLower32)
@@ -1109,7 +1024,7 @@
         break;
       }
       case IrOpcode::kPlainPrimitiveToNumber: {
-        VisitUnop(node, UseInfo::AnyTagged(), NodeOutputInfo::NumberTagged());
+        VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
         if (lower()) {
           // PlainPrimitiveToNumber(x) => Call(ToNumberStub, x, no-context)
           Operator::Properties properties = node->op()->properties();
@@ -1126,38 +1041,93 @@
         break;
       }
       case IrOpcode::kReferenceEqual: {
-        VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
+        VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
         if (lower()) {
           NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
         }
         break;
       }
       case IrOpcode::kStringEqual: {
-        VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
-        if (lower()) lowering->DoStringEqual(node);
+        VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+        if (lower()) {
+          // StringEqual(x, y) => Call(StringEqualStub, x, y, no-context)
+          Operator::Properties properties = node->op()->properties();
+          Callable callable = CodeFactory::StringEqual(jsgraph_->isolate());
+          CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+          CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+              jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
+              flags, properties);
+          node->InsertInput(jsgraph_->zone(), 0,
+                            jsgraph_->HeapConstant(callable.code()));
+          node->InsertInput(jsgraph_->zone(), 3, jsgraph_->NoContextConstant());
+          NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
+        }
         break;
       }
       case IrOpcode::kStringLessThan: {
-        VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
-        if (lower()) lowering->DoStringLessThan(node);
+        VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+        if (lower()) {
+          // StringLessThan(x, y) => Call(StringLessThanStub, x, y, no-context)
+          Operator::Properties properties = node->op()->properties();
+          Callable callable = CodeFactory::StringLessThan(jsgraph_->isolate());
+          CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+          CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+              jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
+              flags, properties);
+          node->InsertInput(jsgraph_->zone(), 0,
+                            jsgraph_->HeapConstant(callable.code()));
+          node->InsertInput(jsgraph_->zone(), 3, jsgraph_->NoContextConstant());
+          NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
+        }
         break;
       }
       case IrOpcode::kStringLessThanOrEqual: {
-        VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
-        if (lower()) lowering->DoStringLessThanOrEqual(node);
+        VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+        if (lower()) {
+          // StringLessThanOrEqual(x, y)
+          //   => Call(StringLessThanOrEqualStub, x, y, no-context)
+          Operator::Properties properties = node->op()->properties();
+          Callable callable =
+              CodeFactory::StringLessThanOrEqual(jsgraph_->isolate());
+          CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+          CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+              jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
+              flags, properties);
+          node->InsertInput(jsgraph_->zone(), 0,
+                            jsgraph_->HeapConstant(callable.code()));
+          node->InsertInput(jsgraph_->zone(), 3, jsgraph_->NoContextConstant());
+          NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
+        }
+        break;
+      }
+      case IrOpcode::kStringToNumber: {
+        VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+        if (lower()) {
+          // StringToNumber(x) => Call(StringToNumberStub, x, no-context)
+          Operator::Properties properties = node->op()->properties();
+          Callable callable = CodeFactory::StringToNumber(jsgraph_->isolate());
+          CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+          CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+              jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
+              flags, properties);
+          node->InsertInput(jsgraph_->zone(), 0,
+                            jsgraph_->HeapConstant(callable.code()));
+          node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
+          NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
+        }
         break;
       }
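All four string operators above lower to the same stub-call pattern, mutating the node in place. Schematically (per the diff; the exact input order comes from InsertInput/AppendInput):

// Before:  StringEqual(x, y, effect, control)
// After:   Call(desc)(StringEqualStub, x, y, NoContextConstant,
//                     effect, control)
//
// InsertInput(zone, 0, HeapConstant(code)) prepends the code target, the
// context constant is placed after the value inputs, and
// ChangeOp(common()->Call(desc)) swaps the operator while reusing the
// node's existing effect and control edges.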
       case IrOpcode::kAllocate: {
         ProcessInput(node, 0, UseInfo::AnyTagged());
         ProcessRemainingInputs(node, 1);
-        SetOutput(node, NodeOutputInfo::AnyTagged());
+        SetOutput(node, MachineRepresentation::kTagged);
         break;
       }
       case IrOpcode::kLoadField: {
         FieldAccess access = FieldAccessOf(node->op());
         ProcessInput(node, 0, UseInfoForBasePointer(access));
         ProcessRemainingInputs(node, 1);
-        SetOutputFromMachineType(node, access.machine_type);
+        SetOutput(node, access.machine_type.representation());
         break;
       }
       case IrOpcode::kStoreField: {
@@ -1166,7 +1136,7 @@
         ProcessInput(node, 1, TruncatingUseInfoFromRepresentation(
                                   access.machine_type.representation()));
         ProcessRemainingInputs(node, 2);
-        SetOutput(node, NodeOutputInfo::None());
+        SetOutput(node, MachineRepresentation::kNone);
         break;
       }
       case IrOpcode::kLoadBuffer: {
@@ -1176,29 +1146,26 @@
         ProcessInput(node, 2, UseInfo::TruncatingWord32());  // length
         ProcessRemainingInputs(node, 3);
 
-        NodeOutputInfo output_info;
+        MachineRepresentation output;
         if (truncation.TruncatesUndefinedToZeroOrNaN()) {
           if (truncation.TruncatesNaNToZero()) {
             // If undefined is truncated to a non-NaN number, we can use
             // the load's representation.
-            output_info = NodeOutputInfo(access.machine_type().representation(),
-                                         NodeProperties::GetType(node));
+            output = access.machine_type().representation();
           } else {
             // If undefined is truncated to a number, but the use can
             // observe NaN, we need to output at least the float32
             // representation.
             if (access.machine_type().representation() ==
                 MachineRepresentation::kFloat32) {
-              output_info =
-                  NodeOutputInfo(access.machine_type().representation(),
-                                 NodeProperties::GetType(node));
+              output = access.machine_type().representation();
             } else {
               if (access.machine_type().representation() !=
                   MachineRepresentation::kFloat64) {
                 // TODO(bmeurer): See comment on abort_compilation_.
                 if (lower()) lowering->abort_compilation_ = true;
               }
-              output_info = NodeOutputInfo::Float64();
+              output = MachineRepresentation::kFloat64;
             }
           }
         } else {
@@ -1207,11 +1174,10 @@
 
           // If undefined is not truncated away, we need to have the tagged
           // representation.
-          output_info = NodeOutputInfo::AnyTagged();
+          output = MachineRepresentation::kTagged;
         }
-        SetOutput(node, output_info);
-        if (lower())
-          lowering->DoLoadBuffer(node, output_info.representation(), changer_);
+        SetOutput(node, output);
+        if (lower()) lowering->DoLoadBuffer(node, output, changer_);
         break;
       }
       case IrOpcode::kStoreBuffer: {
@@ -1223,7 +1189,7 @@
                      TruncatingUseInfoFromRepresentation(
                          access.machine_type().representation()));  // value
         ProcessRemainingInputs(node, 4);
-        SetOutput(node, NodeOutputInfo::None());
+        SetOutput(node, MachineRepresentation::kNone);
         if (lower()) lowering->DoStoreBuffer(node);
         break;
       }
@@ -1232,7 +1198,7 @@
         ProcessInput(node, 0, UseInfoForBasePointer(access));  // base
         ProcessInput(node, 1, UseInfo::TruncatingWord32());    // index
         ProcessRemainingInputs(node, 2);
-        SetOutputFromMachineType(node, access.machine_type);
+        SetOutput(node, access.machine_type.representation());
         break;
       }
       case IrOpcode::kStoreElement: {
@@ -1243,22 +1209,15 @@
                      TruncatingUseInfoFromRepresentation(
                          access.machine_type.representation()));  // value
         ProcessRemainingInputs(node, 3);
-        SetOutput(node, NodeOutputInfo::None());
+        SetOutput(node, MachineRepresentation::kNone);
         break;
       }
-      case IrOpcode::kObjectIsNumber: {
+      case IrOpcode::kObjectIsNumber:
+      case IrOpcode::kObjectIsReceiver:
+      case IrOpcode::kObjectIsSmi:
+      case IrOpcode::kObjectIsUndetectable: {
         ProcessInput(node, 0, UseInfo::AnyTagged());
-        SetOutput(node, NodeOutputInfo::Bool());
-        break;
-      }
-      case IrOpcode::kObjectIsReceiver: {
-        ProcessInput(node, 0, UseInfo::AnyTagged());
-        SetOutput(node, NodeOutputInfo::Bool());
-        break;
-      }
-      case IrOpcode::kObjectIsSmi: {
-        ProcessInput(node, 0, UseInfo::AnyTagged());
-        SetOutput(node, NodeOutputInfo::Bool());
+        SetOutput(node, MachineRepresentation::kBit);
         break;
       }
 
@@ -1272,7 +1231,7 @@
         ProcessInput(node, 0, UseInfo::AnyTagged());   // tagged pointer
         ProcessInput(node, 1, UseInfo::PointerInt());  // index
         ProcessRemainingInputs(node, 2);
-        SetOutputFromMachineType(node, rep);
+        SetOutput(node, rep.representation());
         break;
       }
       case IrOpcode::kStore: {
@@ -1284,13 +1243,13 @@
         ProcessInput(node, 2,
                      TruncatingUseInfoFromRepresentation(rep.representation()));
         ProcessRemainingInputs(node, 3);
-        SetOutput(node, NodeOutputInfo::None());
+        SetOutput(node, MachineRepresentation::kNone);
         break;
       }
       case IrOpcode::kWord32Shr:
         // We output unsigned int32 for shift right because JavaScript.
         return VisitBinop(node, UseInfo::TruncatingWord32(),
-                          NodeOutputInfo::Uint32());
+                          MachineRepresentation::kWord32);
       case IrOpcode::kWord32And:
       case IrOpcode::kWord32Or:
       case IrOpcode::kWord32Xor:
@@ -1300,14 +1259,14 @@
         // though the machine bits are the same for either signed or unsigned,
         // because JavaScript considers the result from these operations signed.
         return VisitBinop(node, UseInfo::TruncatingWord32(),
-                          NodeOutputInfo::Int32());
+                          MachineRepresentation::kWord32);
       case IrOpcode::kWord32Equal:
         return VisitBinop(node, UseInfo::TruncatingWord32(),
-                          NodeOutputInfo::Bool());
+                          MachineRepresentation::kBit);
 
       case IrOpcode::kWord32Clz:
         return VisitUnop(node, UseInfo::TruncatingWord32(),
-                         NodeOutputInfo::Uint32());
+                         MachineRepresentation::kWord32);
 
       case IrOpcode::kInt32Add:
       case IrOpcode::kInt32Sub:
@@ -1352,45 +1311,33 @@
       case IrOpcode::kWord64Shr:
       case IrOpcode::kWord64Sar:
         return VisitBinop(node, UseInfo::TruncatingWord64(),
-                          NodeOutputInfo::Int64());
+                          MachineRepresentation::kWord64);
       case IrOpcode::kWord64Equal:
         return VisitBinop(node, UseInfo::TruncatingWord64(),
-                          NodeOutputInfo::Bool());
+                          MachineRepresentation::kBit);
 
       case IrOpcode::kChangeInt32ToInt64:
-        return VisitUnop(
-            node, UseInfo::TruncatingWord32(),
-            NodeOutputInfo(MachineRepresentation::kWord64, Type::Signed32()));
+        return VisitUnop(node, UseInfo::TruncatingWord32(),
+                         MachineRepresentation::kWord64);
       case IrOpcode::kChangeUint32ToUint64:
-        return VisitUnop(
-            node, UseInfo::TruncatingWord32(),
-            NodeOutputInfo(MachineRepresentation::kWord64, Type::Unsigned32()));
+        return VisitUnop(node, UseInfo::TruncatingWord32(),
+                         MachineRepresentation::kWord64);
       case IrOpcode::kTruncateFloat64ToFloat32:
-        return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Float32());
+        return VisitUnop(node, UseInfo::Float64(),
+                         MachineRepresentation::kFloat32);
       case IrOpcode::kTruncateFloat64ToInt32:
-        return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Int32());
-      case IrOpcode::kTruncateInt64ToInt32:
-        // TODO(titzer): Is kTypeInt32 correct here?
-        return VisitUnop(node, UseInfo::Word64TruncatingToWord32(),
-                         NodeOutputInfo::Int32());
+        return VisitUnop(node, UseInfo::Float64(),
+                         MachineRepresentation::kWord32);
 
       case IrOpcode::kChangeFloat32ToFloat64:
-        return VisitUnop(node, UseInfo::Float32(), NodeOutputInfo::Float64());
+        return VisitUnop(node, UseInfo::Float32(),
+                         MachineRepresentation::kFloat64);
       case IrOpcode::kChangeInt32ToFloat64:
-        return VisitUnop(
-            node, UseInfo::TruncatingWord32(),
-            NodeOutputInfo(MachineRepresentation::kFloat64, Type::Signed32()));
+        return VisitUnop(node, UseInfo::TruncatingWord32(),
+                         MachineRepresentation::kFloat64);
       case IrOpcode::kChangeUint32ToFloat64:
         return VisitUnop(node, UseInfo::TruncatingWord32(),
-                         NodeOutputInfo(MachineRepresentation::kFloat64,
-                                        Type::Unsigned32()));
-      case IrOpcode::kChangeFloat64ToInt32:
-        return VisitUnop(node, UseInfo::Float64TruncatingToWord32(),
-                         NodeOutputInfo::Int32());
-      case IrOpcode::kChangeFloat64ToUint32:
-        return VisitUnop(node, UseInfo::Float64TruncatingToWord32(),
-                         NodeOutputInfo::Uint32());
-
+                         MachineRepresentation::kFloat64);
       case IrOpcode::kFloat64Add:
       case IrOpcode::kFloat64Sub:
       case IrOpcode::kFloat64Mul:
@@ -1404,29 +1351,40 @@
       case IrOpcode::kFloat64RoundTruncate:
       case IrOpcode::kFloat64RoundTiesAway:
       case IrOpcode::kFloat64RoundUp:
-        return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Float64());
+        return VisitUnop(node, UseInfo::Float64(),
+                         MachineRepresentation::kFloat64);
       case IrOpcode::kFloat64Equal:
       case IrOpcode::kFloat64LessThan:
       case IrOpcode::kFloat64LessThanOrEqual:
         return VisitFloat64Cmp(node);
       case IrOpcode::kFloat64ExtractLowWord32:
       case IrOpcode::kFloat64ExtractHighWord32:
-        return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Int32());
+        return VisitUnop(node, UseInfo::Float64(),
+                         MachineRepresentation::kWord32);
       case IrOpcode::kFloat64InsertLowWord32:
       case IrOpcode::kFloat64InsertHighWord32:
         return VisitBinop(node, UseInfo::Float64(), UseInfo::TruncatingWord32(),
-                          NodeOutputInfo::Float64());
+                          MachineRepresentation::kFloat64);
       case IrOpcode::kLoadStackPointer:
       case IrOpcode::kLoadFramePointer:
       case IrOpcode::kLoadParentFramePointer:
-        return VisitLeaf(node, NodeOutputInfo::Pointer());
+        return VisitLeaf(node, MachineType::PointerRepresentation());
       case IrOpcode::kStateValues:
         VisitStateValues(node);
         break;
+
+      // The following opcodes are not produced before representation
+      // inference runs, so we do not have any real test coverage.
+      // Simply fail here.
+      case IrOpcode::kChangeFloat64ToInt32:
+      case IrOpcode::kChangeFloat64ToUint32:
+      case IrOpcode::kTruncateInt64ToInt32:
+        FATAL("Representation inference: unsupported opcodes.");
+
       default:
         VisitInputs(node);
         // Assume the output is tagged.
-        SetOutput(node, NodeOutputInfo::AnyTagged());
+        SetOutput(node, MachineRepresentation::kTagged);
         break;
     }
   }
@@ -1437,7 +1395,7 @@
           replacement->op()->mnemonic());
 
     if (replacement->id() < count_ &&
-        GetInfo(node)->output_type()->Is(GetInfo(replacement)->output_type())) {
+        GetUpperBound(node)->Is(GetUpperBound(replacement))) {
       // Replace with a previously existing node eagerly only if the type is the
       // same.
       node->ReplaceUses(replacement);
@@ -1455,9 +1413,7 @@
   void PrintOutputInfo(NodeInfo* info) {
     if (FLAG_trace_representation) {
       OFStream os(stdout);
-      os << info->representation() << " (";
-      info->output_type()->PrintTo(os, Type::SEMANTIC_DIM);
-      os << ")";
+      os << info->representation();
     }
   }
 
@@ -1595,21 +1551,392 @@
   NodeProperties::ChangeOp(node, machine()->CheckedStore(rep));
 }
 
+Node* SimplifiedLowering::Float64Ceil(Node* const node) {
+  Node* const one = jsgraph()->Float64Constant(1.0);
+  Node* const zero = jsgraph()->Float64Constant(0.0);
+  Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
+  Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
+  Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
+  Node* const input = node->InputAt(0);
 
-Node* SimplifiedLowering::StringComparison(Node* node) {
-  Operator::Properties properties = node->op()->properties();
-  Callable callable = CodeFactory::StringCompare(isolate());
-  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
-  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), callable.descriptor(), 0, flags, properties);
-  return graph()->NewNode(
-      common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
-      NodeProperties::GetValueInput(node, 0),
-      NodeProperties::GetValueInput(node, 1), jsgraph()->NoContextConstant(),
-      NodeProperties::GetEffectInput(node),
-      NodeProperties::GetControlInput(node));
+  // Use fast hardware instruction if available.
+  if (machine()->Float64RoundUp().IsSupported()) {
+    return graph()->NewNode(machine()->Float64RoundUp().op(), input);
+  }
+
+  // General case for ceil.
+  //
+  //   if 0.0 < input then
+  //     if 2^52 <= input then
+  //       input
+  //     else
+  //       let temp1 = (2^52 + input) - 2^52 in
+  //       if temp1 < input then
+  //         temp1 + 1
+  //       else
+  //         temp1
+  //   else
+  //     if input == 0 then
+  //       input
+  //     else
+  //       if input <= -2^52 then
+  //         input
+  //       else
+  //         let temp1 = -0 - input in
+  //         let temp2 = (2^52 + temp1) - 2^52 in
+  //         let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
+  //         -0 - temp3
+  //
+  // Note: We do not use the Diamond helper class here, because it really hurts
+  // readability with nested diamonds.
+
+  Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
+  Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
+                                   graph()->start());
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* vtrue0;
+  {
+    Node* check1 =
+        graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
+    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* vtrue1 = input;
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* vfalse1;
+    {
+      Node* temp1 = graph()->NewNode(
+          machine()->Float64Sub(),
+          graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
+      vfalse1 = graph()->NewNode(
+          common()->Select(MachineRepresentation::kFloat64),
+          graph()->NewNode(machine()->Float64LessThan(), temp1, input),
+          graph()->NewNode(machine()->Float64Add(), temp1, one), temp1);
+    }
+
+    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                              vtrue1, vfalse1, if_true0);
+  }
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* vfalse0;
+  {
+    Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
+    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                     check1, if_false0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* vtrue1 = input;
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* vfalse1;
+    {
+      Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
+                                      input, minus_two_52);
+      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                       check2, if_false1);
+
+      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+      Node* vtrue2 = input;
+
+      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+      Node* vfalse2;
+      {
+        Node* temp1 =
+            graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
+        Node* temp2 = graph()->NewNode(
+            machine()->Float64Sub(),
+            graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
+        Node* temp3 = graph()->NewNode(
+            common()->Select(MachineRepresentation::kFloat64),
+            graph()->NewNode(machine()->Float64LessThan(), temp1, temp2),
+            graph()->NewNode(machine()->Float64Sub(), temp2, one), temp2);
+        vfalse2 = graph()->NewNode(machine()->Float64Sub(), minus_zero, temp3);
+      }
+
+      if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+      vfalse1 =
+          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                           vtrue2, vfalse2, if_false1);
+    }
+
+    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    vfalse0 =
+        graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                         vtrue1, vfalse1, if_false0);
+  }
+
+  Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+  return graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                          vtrue0, vfalse0, merge0);
 }
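The general case relies on the classic 2^52 trick: for 0 < x < 2^52, the ULP of (2^52 + x) is exactly 1.0, so the addition rounds x to the nearest integer and subtracting 2^52 recovers it. A standalone check in plain C++ (my sketch, not V8 code; assumes the default round-to-nearest FP mode):

#include <cstdio>

// Round to the nearest integer via the 2^52 trick, valid for 0 < x < 2^52.
double RoundViaTwo52(double x) {
  const double two_52 = 4503599627370496.0;  // 2^52
  return (two_52 + x) - two_52;              // x rounded to nearest integer
}

int main() {
  double x = 3.2;
  double t = RoundViaTwo52(x);           // temp1 in the comment above: 3.0
  double ceil = (t < x) ? t + 1.0 : t;   // the Select above: 4.0
  std::printf("temp1=%g ceil=%g\n", t, ceil);
  return 0;
}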
 
+Node* SimplifiedLowering::Float64Floor(Node* const node) {
+  Node* const one = jsgraph()->Float64Constant(1.0);
+  Node* const zero = jsgraph()->Float64Constant(0.0);
+  Node* const minus_one = jsgraph()->Float64Constant(-1.0);
+  Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
+  Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
+  Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
+  Node* const input = node->InputAt(0);
+
+  // Use fast hardware instruction if available.
+  if (machine()->Float64RoundDown().IsSupported()) {
+    return graph()->NewNode(machine()->Float64RoundDown().op(), input);
+  }
+
+  // General case for floor.
+  //
+  //   if 0.0 < input then
+  //     if 2^52 <= input then
+  //       input
+  //     else
+  //       let temp1 = (2^52 + input) - 2^52 in
+  //       if input < temp1 then
+  //         temp1 - 1
+  //       else
+  //         temp1
+  //   else
+  //     if input == 0 then
+  //       input
+  //     else
+  //       if input <= -2^52 then
+  //         input
+  //       else
+  //         let temp1 = -0 - input in
+  //         let temp2 = (2^52 + temp1) - 2^52 in
+  //         if temp2 < temp1 then
+  //           -1 - temp2
+  //         else
+  //           -0 - temp2
+  //
+  // Note: We do not use the Diamond helper class here, because it really hurts
+  // readability with nested diamonds.
+
+  Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
+  Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
+                                   graph()->start());
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* vtrue0;
+  {
+    Node* check1 =
+        graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
+    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* vtrue1 = input;
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* vfalse1;
+    {
+      Node* temp1 = graph()->NewNode(
+          machine()->Float64Sub(),
+          graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
+      vfalse1 = graph()->NewNode(
+          common()->Select(MachineRepresentation::kFloat64),
+          graph()->NewNode(machine()->Float64LessThan(), input, temp1),
+          graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
+    }
+
+    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                              vtrue1, vfalse1, if_true0);
+  }
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* vfalse0;
+  {
+    Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
+    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                     check1, if_false0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* vtrue1 = input;
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* vfalse1;
+    {
+      Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
+                                      input, minus_two_52);
+      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                       check2, if_false1);
+
+      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+      Node* vtrue2 = input;
+
+      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+      Node* vfalse2;
+      {
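+        // Negation is written as (-0.0 - x), which yields -x exactly for
+        // every x, including x == +0.0.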
+        Node* temp1 =
+            graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
+        Node* temp2 = graph()->NewNode(
+            machine()->Float64Sub(),
+            graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
+        vfalse2 = graph()->NewNode(
+            common()->Select(MachineRepresentation::kFloat64),
+            graph()->NewNode(machine()->Float64LessThan(), temp2, temp1),
+            graph()->NewNode(machine()->Float64Sub(), minus_one, temp2),
+            graph()->NewNode(machine()->Float64Sub(), minus_zero, temp2));
+      }
+
+      if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+      vfalse1 =
+          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                           vtrue2, vfalse2, if_false1);
+    }
+
+    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    vfalse0 =
+        graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                         vtrue1, vfalse1, if_false0);
+  }
+
+  Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+  return graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                          vtrue0, vfalse0, merge0);
+}
+
+Node* SimplifiedLowering::Float64Round(Node* const node) {
+  Node* const one = jsgraph()->Float64Constant(1.0);
+  Node* const one_half = jsgraph()->Float64Constant(0.5);
+  Node* const input = node->InputAt(0);
+
+  // Round up towards Infinity, and adjust if the difference exceeds 0.5.
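+  // Ties round towards +Infinity, matching JS Math.round: for 0.5 the ceil
+  // is 1.0 and 1.0 - 0.5 <= 0.5 holds, so 1.0 is kept; for -0.5 the ceil is
+  // -0.0 and -0.5 <= -0.5 holds, so -0.0 is kept.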
+  Node* result = Float64Ceil(node);
+  return graph()->NewNode(
+      common()->Select(MachineRepresentation::kFloat64),
+      graph()->NewNode(
+          machine()->Float64LessThanOrEqual(),
+          graph()->NewNode(machine()->Float64Sub(), result, one_half), input),
+      result, graph()->NewNode(machine()->Float64Sub(), result, one));
+}
+
+Node* SimplifiedLowering::Float64Trunc(Node* const node) {
+  Node* const one = jsgraph()->Float64Constant(1.0);
+  Node* const zero = jsgraph()->Float64Constant(0.0);
+  Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
+  Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
+  Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
+  Node* const input = node->InputAt(0);
+
+  // Use fast hardware instruction if available.
+  if (machine()->Float64RoundTruncate().IsSupported()) {
+    return graph()->NewNode(machine()->Float64RoundTruncate().op(), input);
+  }
+
+  // General case for trunc.
+  //
+  //   if 0.0 < input then
+  //     if 2^52 <= input then
+  //       input
+  //     else
+  //       let temp1 = (2^52 + input) - 2^52 in
+  //       if input < temp1 then
+  //         temp1 - 1
+  //       else
+  //         temp1
+  //   else
+  //     if input == 0 then
+  //       input
+  //     else
+  //       if input <= -2^52 then
+  //         input
+  //       else
+  //         let temp1 = -0 - input in
+  //         let temp2 = (2^52 + temp1) - 2^52 in
+  //         let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
+  //         -0 - temp3
+  //
+  // Note: We do not use the Diamond helper class here, because it really hurts
+  // readability with nested diamonds.
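+  //
+  // For example, trunc(-2.7): temp1 = 2.7, temp2 = (2^52 + 2.7) - 2^52 = 3.0,
+  // temp1 < temp2, so temp3 = 3.0 - 1.0 = 2.0 and the result is
+  // -0 - 2.0 = -2.0.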
+
+  Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
+  Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
+                                   graph()->start());
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* vtrue0;
+  {
+    Node* check1 =
+        graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
+    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* vtrue1 = input;
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* vfalse1;
+    {
+      Node* temp1 = graph()->NewNode(
+          machine()->Float64Sub(),
+          graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
+      vfalse1 = graph()->NewNode(
+          common()->Select(MachineRepresentation::kFloat64),
+          graph()->NewNode(machine()->Float64LessThan(), input, temp1),
+          graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
+    }
+
+    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                              vtrue1, vfalse1, if_true0);
+  }
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* vfalse0;
+  {
+    Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
+    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                     check1, if_false0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* vtrue1 = input;
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* vfalse1;
+    {
+      Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
+                                      input, minus_two_52);
+      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                       check2, if_false1);
+
+      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+      Node* vtrue2 = input;
+
+      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+      Node* vfalse2;
+      {
+        Node* temp1 =
+            graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
+        Node* temp2 = graph()->NewNode(
+            machine()->Float64Sub(),
+            graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
+        Node* temp3 = graph()->NewNode(
+            common()->Select(MachineRepresentation::kFloat64),
+            graph()->NewNode(machine()->Float64LessThan(), temp1, temp2),
+            graph()->NewNode(machine()->Float64Sub(), temp2, one), temp2);
+        vfalse2 = graph()->NewNode(machine()->Float64Sub(), minus_zero, temp3);
+      }
+
+      if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+      vfalse1 =
+          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                           vtrue2, vfalse2, if_false1);
+    }
+
+    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    vfalse0 =
+        graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                         vtrue1, vfalse1, if_false0);
+  }
+
+  Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+  return graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+                          vtrue0, vfalse0, merge0);
+}
 
 Node* SimplifiedLowering::Int32Div(Node* const node) {
   Int32BinopMatcher m(node);
@@ -1873,53 +2200,6 @@
   NodeProperties::ChangeOp(node, op);
 }
 
-
-namespace {
-
-void ReplaceEffectUses(Node* node, Node* replacement) {
-  // Requires distinguishing between value and effect edges.
-  DCHECK(replacement->op()->EffectOutputCount() > 0);
-  for (Edge edge : node->use_edges()) {
-    if (NodeProperties::IsEffectEdge(edge)) {
-      edge.UpdateTo(replacement);
-    } else {
-      DCHECK(NodeProperties::IsValueEdge(edge));
-    }
-  }
-}
-
-}  // namespace
-
-
-void SimplifiedLowering::DoStringEqual(Node* node) {
-  Node* comparison = StringComparison(node);
-  ReplaceEffectUses(node, comparison);
-  node->ReplaceInput(0, comparison);
-  node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
-  node->TrimInputCount(2);
-  NodeProperties::ChangeOp(node, machine()->WordEqual());
-}
-
-
-void SimplifiedLowering::DoStringLessThan(Node* node) {
-  Node* comparison = StringComparison(node);
-  ReplaceEffectUses(node, comparison);
-  node->ReplaceInput(0, comparison);
-  node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
-  node->TrimInputCount(2);
-  NodeProperties::ChangeOp(node, machine()->IntLessThan());
-}
-
-
-void SimplifiedLowering::DoStringLessThanOrEqual(Node* node) {
-  Node* comparison = StringComparison(node);
-  ReplaceEffectUses(node, comparison);
-  node->ReplaceInput(0, comparison);
-  node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
-  node->TrimInputCount(2);
-  NodeProperties::ChangeOp(node, machine()->IntLessThanOrEqual());
-}
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/simplified-lowering.h b/src/compiler/simplified-lowering.h
index 358bd97..8b711a9 100644
--- a/src/compiler/simplified-lowering.h
+++ b/src/compiler/simplified-lowering.h
@@ -37,9 +37,6 @@
                     RepresentationChanger* changer);
   void DoStoreBuffer(Node* node);
   void DoShift(Node* node, Operator const* op, Type* rhs_type);
-  void DoStringEqual(Node* node);
-  void DoStringLessThan(Node* node);
-  void DoStringLessThanOrEqual(Node* node);
 
   // TODO(bmeurer): This is a gigantic hack to support the gigantic LoadBuffer
   // typing hack to support the gigantic "asm.js should be fast without proper
@@ -58,7 +55,10 @@
   // position information via the SourcePositionWrapper like all other reducers.
   SourcePositionTable* source_positions_;
 
-  Node* StringComparison(Node* node);
+  Node* Float64Ceil(Node* const node);
+  Node* Float64Floor(Node* const node);
+  Node* Float64Round(Node* const node);
+  Node* Float64Trunc(Node* const node);
   Node* Int32Div(Node* const node);
   Node* Int32Mod(Node* const node);
   Node* Uint32Div(Node* const node);
diff --git a/src/compiler/simplified-operator-reducer.cc b/src/compiler/simplified-operator-reducer.cc
index 120d792..012004a 100644
--- a/src/compiler/simplified-operator-reducer.cc
+++ b/src/compiler/simplified-operator-reducer.cc
@@ -9,14 +9,14 @@
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/operator-properties.h"
 #include "src/conversions-inl.h"
+#include "src/type-cache.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
 SimplifiedOperatorReducer::SimplifiedOperatorReducer(JSGraph* jsgraph)
-    : jsgraph_(jsgraph) {}
-
+    : jsgraph_(jsgraph), type_cache_(TypeCache::Get()) {}
 
 SimplifiedOperatorReducer::~SimplifiedOperatorReducer() {}
 
@@ -89,6 +89,17 @@
       if (m.HasValue()) return ReplaceNumber(FastUI2D(m.Value()));
       break;
     }
+    case IrOpcode::kNumberCeil:
+    case IrOpcode::kNumberFloor:
+    case IrOpcode::kNumberRound:
+    case IrOpcode::kNumberTrunc: {
+      Node* const input = NodeProperties::GetValueInput(node, 0);
+      Type* const input_type = NodeProperties::GetType(input);
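+      // Ceil, Floor, Round and Trunc are the identity on inputs that are
+      // already integral (including -0 and NaN), so the node can be elided.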
+      if (input_type->Is(type_cache_.kIntegerOrMinusZeroOrNaN)) {
+        return Replace(input);
+      }
+      break;
+    }
     case IrOpcode::kReferenceEqual:
       return ReduceReferenceEqual(node);
     default:
@@ -97,7 +108,6 @@
   return NoChange();
 }
 
-
 Reduction SimplifiedOperatorReducer::ReduceReferenceEqual(Node* node) {
   DCHECK_EQ(IrOpcode::kReferenceEqual, node->opcode());
   Node* const left = NodeProperties::GetValueInput(node, 0);
diff --git a/src/compiler/simplified-operator-reducer.h b/src/compiler/simplified-operator-reducer.h
index 979a3d0..13301c2 100644
--- a/src/compiler/simplified-operator-reducer.h
+++ b/src/compiler/simplified-operator-reducer.h
@@ -9,6 +9,10 @@
 
 namespace v8 {
 namespace internal {
+
+// Forward declarations.
+class TypeCache;
+
 namespace compiler {
 
 // Forward declarations.
@@ -42,6 +46,7 @@
   SimplifiedOperatorBuilder* simplified() const;
 
   JSGraph* const jsgraph_;
+  TypeCache const& type_cache_;
 
   DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorReducer);
 };
diff --git a/src/compiler/simplified-operator.cc b/src/compiler/simplified-operator.cc
index c7abe9c..daa9501 100644
--- a/src/compiler/simplified-operator.cc
+++ b/src/compiler/simplified-operator.cc
@@ -156,7 +156,6 @@
   return OpParameter<ElementAccess>(op);
 }
 
-
 #define PURE_OP_LIST(V)                                  \
   V(BooleanNot, Operator::kNoProperties, 1)              \
   V(BooleanToNumber, Operator::kNoProperties, 1)         \
@@ -174,10 +173,17 @@
   V(NumberShiftLeft, Operator::kNoProperties, 2)         \
   V(NumberShiftRight, Operator::kNoProperties, 2)        \
   V(NumberShiftRightLogical, Operator::kNoProperties, 2) \
+  V(NumberImul, Operator::kNoProperties, 2)              \
+  V(NumberClz32, Operator::kNoProperties, 1)             \
+  V(NumberCeil, Operator::kNoProperties, 1)              \
+  V(NumberFloor, Operator::kNoProperties, 1)             \
+  V(NumberRound, Operator::kNoProperties, 1)             \
+  V(NumberTrunc, Operator::kNoProperties, 1)             \
   V(NumberToInt32, Operator::kNoProperties, 1)           \
   V(NumberToUint32, Operator::kNoProperties, 1)          \
   V(NumberIsHoleNaN, Operator::kNoProperties, 1)         \
   V(PlainPrimitiveToNumber, Operator::kNoProperties, 1)  \
+  V(StringToNumber, Operator::kNoProperties, 1)          \
   V(ChangeTaggedToInt32, Operator::kNoProperties, 1)     \
   V(ChangeTaggedToUint32, Operator::kNoProperties, 1)    \
   V(ChangeTaggedToFloat64, Operator::kNoProperties, 1)   \
@@ -188,7 +194,8 @@
   V(ChangeBitToBool, Operator::kNoProperties, 1)         \
   V(ObjectIsNumber, Operator::kNoProperties, 1)          \
   V(ObjectIsReceiver, Operator::kNoProperties, 1)        \
-  V(ObjectIsSmi, Operator::kNoProperties, 1)
+  V(ObjectIsSmi, Operator::kNoProperties, 1)             \
+  V(ObjectIsUndetectable, Operator::kNoProperties, 1)
 
 #define NO_THROW_OP_LIST(V)                 \
   V(StringEqual, Operator::kCommutative, 2) \
diff --git a/src/compiler/simplified-operator.h b/src/compiler/simplified-operator.h
index 2ed4b5f..a39d864 100644
--- a/src/compiler/simplified-operator.h
+++ b/src/compiler/simplified-operator.h
@@ -143,6 +143,12 @@
   const Operator* NumberShiftLeft();
   const Operator* NumberShiftRight();
   const Operator* NumberShiftRightLogical();
+  const Operator* NumberImul();
+  const Operator* NumberClz32();
+  const Operator* NumberCeil();
+  const Operator* NumberFloor();
+  const Operator* NumberRound();
+  const Operator* NumberTrunc();
   const Operator* NumberToInt32();
   const Operator* NumberToUint32();
   const Operator* NumberIsHoleNaN();
@@ -154,6 +160,7 @@
   const Operator* StringEqual();
   const Operator* StringLessThan();
   const Operator* StringLessThanOrEqual();
+  const Operator* StringToNumber();
 
   const Operator* ChangeTaggedToInt32();
   const Operator* ChangeTaggedToUint32();
@@ -167,6 +174,7 @@
   const Operator* ObjectIsNumber();
   const Operator* ObjectIsReceiver();
   const Operator* ObjectIsSmi();
+  const Operator* ObjectIsUndetectable();
 
   const Operator* Allocate(PretenureFlag pretenure = NOT_TENURED);
 
diff --git a/src/compiler/type-hint-analyzer.cc b/src/compiler/type-hint-analyzer.cc
index 42c4627..da4f268 100644
--- a/src/compiler/type-hint-analyzer.cc
+++ b/src/compiler/type-hint-analyzer.cc
@@ -48,10 +48,10 @@
   if (i == infos_.end()) return false;
   Handle<Code> code = i->second;
   DCHECK_EQ(Code::TO_BOOLEAN_IC, code->kind());
-  ToBooleanStub stub(code->GetIsolate(), code->extra_ic_state());
-// TODO(bmeurer): Replace ToBooleanStub::Types with ToBooleanHints.
-#define ASSERT_COMPATIBLE(NAME, Name)       \
-  STATIC_ASSERT(1 << ToBooleanStub::NAME == \
+  ToBooleanICStub stub(code->GetIsolate(), code->extra_ic_state());
+// TODO(bmeurer): Replace ToBooleanICStub::Types with ToBooleanHints.
+#define ASSERT_COMPATIBLE(NAME, Name)         \
+  STATIC_ASSERT(1 << ToBooleanICStub::NAME == \
                 static_cast<int>(ToBooleanHint::k##Name))
   ASSERT_COMPATIBLE(UNDEFINED, Undefined);
   ASSERT_COMPATIBLE(BOOLEAN, Boolean);
diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc
index 9679513..81c3d3d 100644
--- a/src/compiler/typer.cc
+++ b/src/compiler/typer.cc
@@ -57,10 +57,8 @@
   unsigned32ish_ = Type::Union(Type::Unsigned32(), truncating_to_zero, zone);
   falsish_ = Type::Union(
       Type::Undetectable(),
-      Type::Union(
-          Type::Union(Type::Union(singleton_false_, cache_.kZeroish, zone),
-                      Type::NullOrUndefined(), zone),
-          singleton_the_hole_, zone),
+      Type::Union(Type::Union(singleton_false_, cache_.kZeroish, zone),
+                  singleton_the_hole_, zone),
       zone);
   truish_ = Type::Union(
       singleton_true_,
@@ -116,6 +114,8 @@
       DECLARE_CASE(IfDefault)
       DECLARE_CASE(Merge)
       DECLARE_CASE(Deoptimize)
+      DECLARE_CASE(DeoptimizeIf)
+      DECLARE_CASE(DeoptimizeUnless)
       DECLARE_CASE(Return)
       DECLARE_CASE(TailCall)
       DECLARE_CASE(Terminate)
@@ -160,6 +160,8 @@
       DECLARE_CASE(IfDefault)
       DECLARE_CASE(Merge)
       DECLARE_CASE(Deoptimize)
+      DECLARE_CASE(DeoptimizeIf)
+      DECLARE_CASE(DeoptimizeUnless)
       DECLARE_CASE(Return)
       DECLARE_CASE(TailCall)
       DECLARE_CASE(Terminate)
@@ -238,12 +240,17 @@
   static Type* ToNumber(Type*, Typer*);
   static Type* ToObject(Type*, Typer*);
   static Type* ToString(Type*, Typer*);
+  static Type* NumberCeil(Type*, Typer*);
+  static Type* NumberFloor(Type*, Typer*);
+  static Type* NumberRound(Type*, Typer*);
+  static Type* NumberTrunc(Type*, Typer*);
   static Type* NumberToInt32(Type*, Typer*);
   static Type* NumberToUint32(Type*, Typer*);
 
   static Type* ObjectIsNumber(Type*, Typer*);
   static Type* ObjectIsReceiver(Type*, Typer*);
   static Type* ObjectIsSmi(Type*, Typer*);
+  static Type* ObjectIsUndetectable(Type*, Typer*);
 
   static Type* JSAddRanger(RangeType*, RangeType*, Typer*);
   static Type* JSSubtractRanger(RangeType*, RangeType*, Typer*);
@@ -414,6 +421,11 @@
   // ES6 section 7.1.4 ToInteger ( argument )
   type = ToNumber(type, t);
   if (type->Is(t->cache_.kIntegerOrMinusZero)) return type;
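+  // ToInteger maps NaN to +0, so if the input is integral except possibly
+  // for NaN, replace the NaN part of the type by the zero singleton.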
+  if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) {
+    return Type::Union(
+        Type::Intersect(type, t->cache_.kIntegerOrMinusZero, t->zone()),
+        t->cache_.kSingletonZero, t->zone());
+  }
   return t->cache_.kIntegerOrMinusZero;
 }
 
@@ -469,7 +481,9 @@
   // ES6 section 7.1.13 ToObject ( argument )
   if (type->Is(Type::Receiver())) return type;
   if (type->Is(Type::Primitive())) return Type::OtherObject();
-  if (!type->Maybe(Type::Undetectable())) return Type::DetectableReceiver();
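+  // Null and undefined are undetectable but throw in ToObject, so only
+  // other undetectable receivers (e.g. document.all) prevent the result
+  // from being DetectableReceiver.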
+  if (!type->Maybe(Type::OtherUndetectable())) {
+    return Type::DetectableReceiver();
+  }
   return Type::Receiver();
 }
 
@@ -482,6 +496,37 @@
   return Type::String();
 }
 
+// static
+Type* Typer::Visitor::NumberCeil(Type* type, Typer* t) {
+  DCHECK(type->Is(Type::Number()));
+  if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) return type;
+  // TODO(bmeurer): We could infer a more precise type here.
+  return t->cache_.kIntegerOrMinusZeroOrNaN;
+}
+
+// static
+Type* Typer::Visitor::NumberFloor(Type* type, Typer* t) {
+  DCHECK(type->Is(Type::Number()));
+  if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) return type;
+  // TODO(bmeurer): We could infer a more precise type here.
+  return t->cache_.kIntegerOrMinusZeroOrNaN;
+}
+
+// static
+Type* Typer::Visitor::NumberRound(Type* type, Typer* t) {
+  DCHECK(type->Is(Type::Number()));
+  if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) return type;
+  // TODO(bmeurer): We could infer a more precise type here.
+  return t->cache_.kIntegerOrMinusZeroOrNaN;
+}
+
+// static
+Type* Typer::Visitor::NumberTrunc(Type* type, Typer* t) {
+  DCHECK(type->Is(Type::Number()));
+  if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) return type;
+  // TODO(bmeurer): We could infer a more precise type here.
+  return t->cache_.kIntegerOrMinusZeroOrNaN;
+}
 
 Type* Typer::Visitor::NumberToInt32(Type* type, Typer* t) {
   // TODO(neis): DCHECK(type->Is(Type::Number()));
@@ -533,6 +578,13 @@
 }
 
 
+Type* Typer::Visitor::ObjectIsUndetectable(Type* type, Typer* t) {
+  if (type->Is(Type::Undetectable())) return t->singleton_true_;
+  if (!type->Maybe(Type::Undetectable())) return t->singleton_false_;
+  return Type::Boolean();
+}
+
+
 // -----------------------------------------------------------------------------
 
 
@@ -1171,7 +1223,7 @@
     return Type::Constant(f->string_string(), t->zone());
   } else if (type->Is(Type::Symbol())) {
     return Type::Constant(f->symbol_string(), t->zone());
-  } else if (type->Is(Type::Union(Type::Undefined(), Type::Undetectable(),
+  } else if (type->Is(Type::Union(Type::Undefined(), Type::OtherUndetectable(),
                                   t->zone()))) {
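+    // This includes undetectable receivers such as document.all, for which
+    // typeof also yields "undefined".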
     return Type::Constant(f->undefined_string(), t->zone());
   } else if (type->Is(Type::Null())) {
@@ -1198,26 +1250,29 @@
   return TypeUnaryOp(node, ToBoolean);
 }
 
-
-Type* Typer::Visitor::TypeJSToNumber(Node* node) {
-  return TypeUnaryOp(node, ToNumber);
+Type* Typer::Visitor::TypeJSToInteger(Node* node) {
+  return TypeUnaryOp(node, ToInteger);
 }
 
-
-Type* Typer::Visitor::TypeJSToString(Node* node) {
-  return TypeUnaryOp(node, ToString);
+Type* Typer::Visitor::TypeJSToLength(Node* node) {
+  return TypeUnaryOp(node, ToLength);
 }
 
-
 Type* Typer::Visitor::TypeJSToName(Node* node) {
   return TypeUnaryOp(node, ToName);
 }
 
+Type* Typer::Visitor::TypeJSToNumber(Node* node) {
+  return TypeUnaryOp(node, ToNumber);
+}
 
 Type* Typer::Visitor::TypeJSToObject(Node* node) {
   return TypeUnaryOp(node, ToObject);
 }
 
+Type* Typer::Visitor::TypeJSToString(Node* node) {
+  return TypeUnaryOp(node, ToString);
+}
 
 // JS object operators.
 
@@ -1502,8 +1557,9 @@
         case kMathRandom:
           return Type::OrderedNumber();
         case kMathFloor:
-        case kMathRound:
         case kMathCeil:
+        case kMathRound:
+        case kMathTrunc:
           return t->cache_.kIntegerOrMinusZeroOrNaN;
         // Unary math functions.
         case kMathAbs:
@@ -1573,15 +1629,8 @@
     case Runtime::kInlineDoubleHi:
       return Type::Signed32();
     case Runtime::kInlineConstructDouble:
-    case Runtime::kInlineMathFloor:
-    case Runtime::kInlineMathSqrt:
-    case Runtime::kInlineMathAcos:
-    case Runtime::kInlineMathAsin:
-    case Runtime::kInlineMathAtan:
     case Runtime::kInlineMathAtan2:
       return Type::Number();
-    case Runtime::kInlineMathClz32:
-      return Type::Range(0, 32, zone());
     case Runtime::kInlineCreateIterResultObject:
     case Runtime::kInlineRegExpConstructResult:
       return Type::OtherObject();
@@ -1708,6 +1757,27 @@
   return Type::Unsigned32();
 }
 
+Type* Typer::Visitor::TypeNumberImul(Node* node) { return Type::Signed32(); }
+
+Type* Typer::Visitor::TypeNumberClz32(Node* node) {
+  return typer_->cache_.kZeroToThirtyTwo;
+}
+
+Type* Typer::Visitor::TypeNumberCeil(Node* node) {
+  return TypeUnaryOp(node, NumberCeil);
+}
+
+Type* Typer::Visitor::TypeNumberFloor(Node* node) {
+  return TypeUnaryOp(node, NumberFloor);
+}
+
+Type* Typer::Visitor::TypeNumberRound(Node* node) {
+  return TypeUnaryOp(node, NumberRound);
+}
+
+Type* Typer::Visitor::TypeNumberTrunc(Node* node) {
+  return TypeUnaryOp(node, NumberTrunc);
+}
 
 Type* Typer::Visitor::TypeNumberToInt32(Node* node) {
   return TypeUnaryOp(node, NumberToInt32);
@@ -1750,6 +1820,9 @@
   return Type::Boolean();
 }
 
+Type* Typer::Visitor::TypeStringToNumber(Node* node) {
+  return TypeUnaryOp(node, ToNumber);
+}
 
 namespace {
 
@@ -1925,6 +1998,11 @@
 }
 
 
+Type* Typer::Visitor::TypeObjectIsUndetectable(Node* node) {
+  return TypeUnaryOp(node, ObjectIsUndetectable);
+}
+
+
 // Machine operators.
 
 Type* Typer::Visitor::TypeLoad(Node* node) { return Type::Any(); }
@@ -2132,6 +2210,10 @@
                          zone());
 }
 
+Type* Typer::Visitor::TypeTruncateFloat64ToUint32(Node* node) {
+  return Type::Intersect(Type::Unsigned32(), Type::UntaggedIntegral32(),
+                         zone());
+}
 
 Type* Typer::Visitor::TypeTruncateFloat32ToInt32(Node* node) {
   return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
@@ -2427,6 +2509,17 @@
   return nullptr;
 }
 
+Type* Typer::Visitor::TypeInt32PairAdd(Node* node) { return Type::Internal(); }
+
+Type* Typer::Visitor::TypeInt32PairSub(Node* node) { return Type::Internal(); }
+
+Type* Typer::Visitor::TypeInt32PairMul(Node* node) { return Type::Internal(); }
+
+Type* Typer::Visitor::TypeWord32PairShl(Node* node) { return Type::Internal(); }
+
+Type* Typer::Visitor::TypeWord32PairShr(Node* node) { return Type::Internal(); }
+
+Type* Typer::Visitor::TypeWord32PairSar(Node* node) { return Type::Internal(); }
 
 // Heap constants.
 
diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc
index 99480ca..a69ace9 100644
--- a/src/compiler/verifier.cc
+++ b/src/compiler/verifier.cc
@@ -29,13 +29,13 @@
 
 
 static bool IsDefUseChainLinkPresent(Node* def, Node* use) {
-  auto const uses = def->uses();
+  const Node::Uses uses = def->uses();
   return std::find(uses.begin(), uses.end(), use) != uses.end();
 }
 
 
 static bool IsUseDefChainLinkPresent(Node* def, Node* use) {
-  auto const inputs = use->inputs();
+  const Node::Inputs inputs = use->inputs();
   return std::find(inputs.begin(), inputs.end(), def) != inputs.end();
 }
 
@@ -135,6 +135,11 @@
     CheckOutput(value, node, value->op()->ValueOutputCount(), "value");
     CHECK(IsDefUseChainLinkPresent(value, node));
     CHECK(IsUseDefChainLinkPresent(value, node));
+    // Verify that only parameters and projections can have input nodes with
+    // multiple outputs.
+    CHECK(node->opcode() == IrOpcode::kParameter ||
+          node->opcode() == IrOpcode::kProjection ||
+          value->op()->ValueOutputCount() <= 1);
   }
 
   // Verify all context inputs are value nodes.
@@ -161,16 +166,6 @@
     CHECK(IsUseDefChainLinkPresent(control, node));
   }
 
-  // Verify all successors are projections if multiple value outputs exist.
-  if (node->op()->ValueOutputCount() > 1) {
-    for (Edge edge : node->use_edges()) {
-      Node* use = edge.from();
-      CHECK(!NodeProperties::IsValueEdge(edge) ||
-            use->opcode() == IrOpcode::kProjection ||
-            use->opcode() == IrOpcode::kParameter);
-    }
-  }
-
   switch (node->opcode()) {
     case IrOpcode::kStart:
       // Start has no inputs.
@@ -194,7 +189,7 @@
     case IrOpcode::kBranch: {
       // Branch uses are IfTrue and IfFalse.
       int count_true = 0, count_false = 0;
-      for (auto use : node->uses()) {
+      for (const Node* use : node->uses()) {
         CHECK(use->opcode() == IrOpcode::kIfTrue ||
               use->opcode() == IrOpcode::kIfFalse);
         if (use->opcode() == IrOpcode::kIfTrue) ++count_true;
@@ -232,10 +227,10 @@
     case IrOpcode::kSwitch: {
       // Switch uses are Case and Default.
       int count_case = 0, count_default = 0;
-      for (auto use : node->uses()) {
+      for (const Node* use : node->uses()) {
         switch (use->opcode()) {
           case IrOpcode::kIfValue: {
-            for (auto user : node->uses()) {
+            for (const Node* user : node->uses()) {
               if (user != use && user->opcode() == IrOpcode::kIfValue) {
                 CHECK_NE(OpParameter<int32_t>(use->op()),
                          OpParameter<int32_t>(user->op()));
@@ -274,11 +269,16 @@
       // Type is empty.
       CheckNotTyped(node);
       break;
+    case IrOpcode::kDeoptimizeIf:
+    case IrOpcode::kDeoptimizeUnless:
+      // Type is empty.
+      CheckNotTyped(node);
+      break;
     case IrOpcode::kDeoptimize:
     case IrOpcode::kReturn:
     case IrOpcode::kThrow:
       // Deoptimize, Return and Throw uses are End.
-      for (auto use : node->uses()) {
+      for (const Node* use : node->uses()) {
         CHECK_EQ(IrOpcode::kEnd, use->opcode());
       }
       // Type is empty.
@@ -292,7 +292,7 @@
       CHECK_EQ(IrOpcode::kLoop,
                NodeProperties::GetControlInput(node)->opcode());
       // Terminate uses are End.
-      for (auto use : node->uses()) {
+      for (const Node* use : node->uses()) {
         CHECK_EQ(IrOpcode::kEnd, use->opcode());
       }
       // Type is empty.
@@ -492,6 +492,18 @@
       // Type is Boolean.
       CheckUpperIs(node, Type::Boolean());
       break;
+    case IrOpcode::kJSToInteger:
+      // Type is OrderedNumber.
+      CheckUpperIs(node, Type::OrderedNumber());
+      break;
+    case IrOpcode::kJSToLength:
+      // Type is OrderedNumber.
+      CheckUpperIs(node, Type::OrderedNumber());
+      break;
+    case IrOpcode::kJSToName:
+      // Type is Name.
+      CheckUpperIs(node, Type::Name());
+      break;
     case IrOpcode::kJSToNumber:
       // Type is Number.
       CheckUpperIs(node, Type::Number());
@@ -500,10 +512,6 @@
       // Type is String.
       CheckUpperIs(node, Type::String());
       break;
-    case IrOpcode::kJSToName:
-      // Type is Name.
-      CheckUpperIs(node, Type::Name());
-      break;
     case IrOpcode::kJSToObject:
       // Type is Receiver.
       CheckUpperIs(node, Type::Receiver());
@@ -677,6 +685,25 @@
       CheckValueInputIs(node, 1, Type::Unsigned32());
       CheckUpperIs(node, Type::Unsigned32());
       break;
+    case IrOpcode::kNumberImul:
+      // (Unsigned32, Unsigned32) -> Signed32
+      CheckValueInputIs(node, 0, Type::Unsigned32());
+      CheckValueInputIs(node, 1, Type::Unsigned32());
+      CheckUpperIs(node, Type::Signed32());
+      break;
+    case IrOpcode::kNumberClz32:
+      // Unsigned32 -> Unsigned32
+      CheckValueInputIs(node, 0, Type::Unsigned32());
+      CheckUpperIs(node, Type::Unsigned32());
+      break;
+    case IrOpcode::kNumberCeil:
+    case IrOpcode::kNumberFloor:
+    case IrOpcode::kNumberRound:
+    case IrOpcode::kNumberTrunc:
+      // Number -> Number
+      CheckValueInputIs(node, 0, Type::Number());
+      CheckUpperIs(node, Type::Number());
+      break;
     case IrOpcode::kNumberToInt32:
       // Number -> Signed32
       CheckValueInputIs(node, 0, Type::Number());
@@ -705,6 +732,11 @@
       CheckValueInputIs(node, 1, Type::String());
       CheckUpperIs(node, Type::Boolean());
       break;
+    case IrOpcode::kStringToNumber:
+      // String -> Number
+      CheckValueInputIs(node, 0, Type::String());
+      CheckUpperIs(node, Type::Number());
+      break;
     case IrOpcode::kReferenceEqual: {
       // (Unique, Any) -> Boolean  and
       // (Any, Unique) -> Boolean
@@ -714,6 +746,7 @@
     case IrOpcode::kObjectIsNumber:
     case IrOpcode::kObjectIsReceiver:
     case IrOpcode::kObjectIsSmi:
+    case IrOpcode::kObjectIsUndetectable:
       CheckValueInputIs(node, 0, Type::Any());
       CheckUpperIs(node, Type::Boolean());
       break;
@@ -935,6 +968,7 @@
     case IrOpcode::kChangeFloat32ToFloat64:
     case IrOpcode::kChangeFloat64ToInt32:
     case IrOpcode::kChangeFloat64ToUint32:
+    case IrOpcode::kTruncateFloat64ToUint32:
     case IrOpcode::kTruncateFloat32ToInt32:
     case IrOpcode::kTruncateFloat32ToUint32:
     case IrOpcode::kTryTruncateFloat32ToInt64:
@@ -945,6 +979,12 @@
     case IrOpcode::kFloat64ExtractHighWord32:
     case IrOpcode::kFloat64InsertLowWord32:
     case IrOpcode::kFloat64InsertHighWord32:
+    case IrOpcode::kInt32PairAdd:
+    case IrOpcode::kInt32PairSub:
+    case IrOpcode::kInt32PairMul:
+    case IrOpcode::kWord32PairShl:
+    case IrOpcode::kWord32PairShr:
+    case IrOpcode::kWord32PairSar:
     case IrOpcode::kLoadStackPointer:
     case IrOpcode::kLoadFramePointer:
     case IrOpcode::kLoadParentFramePointer:
@@ -959,7 +999,7 @@
 void Verifier::Run(Graph* graph, Typing typing) {
   CHECK_NOT_NULL(graph->start());
   CHECK_NOT_NULL(graph->end());
-  Zone zone;
+  Zone zone(graph->zone()->allocator());
   Visitor visitor(&zone, typing);
   AllNodes all(&zone, graph);
   for (Node* node : all.live) visitor.Check(node);
@@ -1049,7 +1089,7 @@
 
 void ScheduleVerifier::Run(Schedule* schedule) {
   const size_t count = schedule->BasicBlockCount();
-  Zone tmp_zone;
+  Zone tmp_zone(schedule->zone()->allocator());
   Zone* zone = &tmp_zone;
   BasicBlock* start = schedule->start();
   BasicBlockVector* rpo_order = schedule->rpo_order();
diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc
index 9c3858d..93d5a08 100644
--- a/src/compiler/wasm-compiler.cc
+++ b/src/compiler/wasm-compiler.cc
@@ -6,6 +6,7 @@
 
 #include "src/isolate-inl.h"
 
+#include "src/base/platform/elapsed-timer.h"
 #include "src/base/platform/platform.h"
 
 #include "src/compiler/access-builder.h"
@@ -71,28 +72,8 @@
   }
 }
 
-
-enum TrapReason {
-  kTrapUnreachable,
-  kTrapMemOutOfBounds,
-  kTrapDivByZero,
-  kTrapDivUnrepresentable,
-  kTrapRemByZero,
-  kTrapFloatUnrepresentable,
-  kTrapFuncInvalid,
-  kTrapFuncSigMismatch,
-  kTrapCount
-};
-
-
-static const char* kTrapMessages[] = {
-    "unreachable",       "memory access out of bounds",
-    "divide by zero",    "divide result unrepresentable",
-    "remainder by zero", "integer result unrepresentable",
-    "invalid function",  "function signature mismatch"};
 }  // namespace
 
-
 // A helper that handles building graph fragments for trapping.
 // To avoid generating a ton of redundant code that just calls the runtime
 // to trap, we generate a per-trap-reason block of code that all trap sites
@@ -103,17 +84,17 @@
       : builder_(builder),
         jsgraph_(builder->jsgraph()),
         graph_(builder->jsgraph() ? builder->jsgraph()->graph() : nullptr) {
-    for (int i = 0; i < kTrapCount; i++) traps_[i] = nullptr;
+    for (int i = 0; i < wasm::kTrapCount; i++) traps_[i] = nullptr;
   }
 
   // Make the current control path trap to unreachable.
-  void Unreachable() { ConnectTrap(kTrapUnreachable); }
+  void Unreachable() { ConnectTrap(wasm::kTrapUnreachable); }
 
   // Always trap with the given reason.
-  void TrapAlways(TrapReason reason) { ConnectTrap(reason); }
+  void TrapAlways(wasm::TrapReason reason) { ConnectTrap(reason); }
 
   // Add a check that traps if {node} is equal to {val}.
-  Node* TrapIfEq32(TrapReason reason, Node* node, int32_t val) {
+  Node* TrapIfEq32(wasm::TrapReason reason, Node* node, int32_t val) {
     Int32Matcher m(node);
     if (m.HasValue() && !m.Is(val)) return graph()->start();
     if (val == 0) {
@@ -127,12 +108,12 @@
   }
 
   // Add a check that traps if {node} is zero.
-  Node* ZeroCheck32(TrapReason reason, Node* node) {
+  Node* ZeroCheck32(wasm::TrapReason reason, Node* node) {
     return TrapIfEq32(reason, node, 0);
   }
 
   // Add a check that traps if {node} is equal to {val}.
-  Node* TrapIfEq64(TrapReason reason, Node* node, int64_t val) {
+  Node* TrapIfEq64(wasm::TrapReason reason, Node* node, int64_t val) {
     Int64Matcher m(node);
     if (m.HasValue() && !m.Is(val)) return graph()->start();
     AddTrapIfTrue(reason,
@@ -142,22 +123,22 @@
   }
 
   // Add a check that traps if {node} is zero.
-  Node* ZeroCheck64(TrapReason reason, Node* node) {
+  Node* ZeroCheck64(wasm::TrapReason reason, Node* node) {
     return TrapIfEq64(reason, node, 0);
   }
 
   // Add a trap if {cond} is true.
-  void AddTrapIfTrue(TrapReason reason, Node* cond) {
+  void AddTrapIfTrue(wasm::TrapReason reason, Node* cond) {
     AddTrapIf(reason, cond, true);
   }
 
   // Add a trap if {cond} is false.
-  void AddTrapIfFalse(TrapReason reason, Node* cond) {
+  void AddTrapIfFalse(wasm::TrapReason reason, Node* cond) {
     AddTrapIf(reason, cond, false);
   }
 
   // Add a trap if {cond} is true or false according to {iftrue}.
-  void AddTrapIf(TrapReason reason, Node* cond, bool iftrue) {
+  void AddTrapIf(wasm::TrapReason reason, Node* cond, bool iftrue) {
     Node** effect_ptr = builder_->effect_;
     Node** control_ptr = builder_->control_;
     Node* before = *effect_ptr;
@@ -198,14 +179,14 @@
   WasmGraphBuilder* builder_;
   JSGraph* jsgraph_;
   Graph* graph_;
-  Node* traps_[kTrapCount];
-  Node* effects_[kTrapCount];
+  Node* traps_[wasm::kTrapCount];
+  Node* effects_[wasm::kTrapCount];
 
   JSGraph* jsgraph() { return jsgraph_; }
   Graph* graph() { return jsgraph_->graph(); }
   CommonOperatorBuilder* common() { return jsgraph()->common(); }
 
-  void ConnectTrap(TrapReason reason) {
+  void ConnectTrap(wasm::TrapReason reason) {
     if (traps_[reason] == nullptr) {
       // Create trap code for the first time this trap is used.
       return BuildTrapCode(reason);
@@ -215,8 +196,9 @@
     builder_->AppendToPhi(traps_[reason], effects_[reason], builder_->Effect());
   }
 
-  void BuildTrapCode(TrapReason reason) {
-    Node* exception = builder_->String(kTrapMessages[reason]);
+  void BuildTrapCode(wasm::TrapReason reason) {
+    Node* exception =
+        builder_->String(wasm::WasmOpcodes::TrapReasonName(reason));
     Node* end;
     Node** control_ptr = builder_->control_;
     Node** effect_ptr = builder_->effect_;
@@ -265,7 +247,6 @@
   }
 };
 
-
 WasmGraphBuilder::WasmGraphBuilder(Zone* zone, JSGraph* jsgraph,
                                    wasm::FunctionSig* function_signature)
     : zone_(zone),
@@ -351,8 +332,7 @@
 Node* WasmGraphBuilder::Phi(wasm::LocalType type, unsigned count, Node** vals,
                             Node* control) {
   DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
-  Node** buf = Realloc(vals, count);
-  buf = Realloc(buf, count + 1);
+  Node** buf = Realloc(vals, count, count + 1);
   buf[count] = control;
   return graph()->NewNode(jsgraph()->common()->Phi(type, count), count + 1,
                           buf);
@@ -362,8 +342,7 @@
 Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects,
                                   Node* control) {
   DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
-  Node** buf = Realloc(effects, count);
-  buf = Realloc(buf, count + 1);
+  Node** buf = Realloc(effects, count, count + 1);
   buf[count] = control;
   return graph()->NewNode(jsgraph()->common()->EffectPhi(count), count + 1,
                           buf);
@@ -394,43 +373,14 @@
     case wasm::kExprI32Mul:
       op = m->Int32Mul();
       break;
-    case wasm::kExprI32DivS: {
-      trap_->ZeroCheck32(kTrapDivByZero, right);
-      Node* before = *control_;
-      Node* denom_is_m1;
-      Node* denom_is_not_m1;
-      Branch(graph()->NewNode(jsgraph()->machine()->Word32Equal(), right,
-                              jsgraph()->Int32Constant(-1)),
-             &denom_is_m1, &denom_is_not_m1);
-      *control_ = denom_is_m1;
-      trap_->TrapIfEq32(kTrapDivUnrepresentable, left, kMinInt);
-      if (*control_ != denom_is_m1) {
-        *control_ = graph()->NewNode(jsgraph()->common()->Merge(2),
-                                     denom_is_not_m1, *control_);
-      } else {
-        *control_ = before;
-      }
-      return graph()->NewNode(m->Int32Div(), left, right, *control_);
-    }
+    case wasm::kExprI32DivS:
+      return BuildI32DivS(left, right);
     case wasm::kExprI32DivU:
-      op = m->Uint32Div();
-      return graph()->NewNode(op, left, right,
-                              trap_->ZeroCheck32(kTrapDivByZero, right));
-    case wasm::kExprI32RemS: {
-      trap_->ZeroCheck32(kTrapRemByZero, right);
-      Diamond d(graph(), jsgraph()->common(),
-                graph()->NewNode(jsgraph()->machine()->Word32Equal(), right,
-                                 jsgraph()->Int32Constant(-1)));
-
-      Node* rem = graph()->NewNode(m->Int32Mod(), left, right, d.if_false);
-
-      return d.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
-                   rem);
-    }
+      return BuildI32DivU(left, right);
+    case wasm::kExprI32RemS:
+      return BuildI32RemS(left, right);
     case wasm::kExprI32RemU:
-      op = m->Uint32Mod();
-      return graph()->NewNode(op, left, right,
-                              trap_->ZeroCheck32(kTrapRemByZero, right));
+      return BuildI32RemU(left, right);
     case wasm::kExprI32And:
       op = m->Word32And();
       break;
@@ -442,13 +392,23 @@
       break;
     case wasm::kExprI32Shl:
       op = m->Word32Shl();
+      right = MaskShiftCount32(right);
       break;
     case wasm::kExprI32ShrU:
       op = m->Word32Shr();
+      right = MaskShiftCount32(right);
       break;
     case wasm::kExprI32ShrS:
       op = m->Word32Sar();
+      right = MaskShiftCount32(right);
       break;
+    case wasm::kExprI32Ror:
+      op = m->Word32Ror();
+      right = MaskShiftCount32(right);
+      break;
+    case wasm::kExprI32Rol:
+      right = MaskShiftCount32(right);
+      return BuildI32Rol(left, right);
     case wasm::kExprI32Eq:
       op = m->Word32Equal();
       break;
@@ -485,76 +445,62 @@
     case wasm::kExprI64And:
       op = m->Word64And();
       break;
-#if WASM_64
-    // Opcodes only supported on 64-bit platforms.
-    // TODO(titzer): query the machine operator builder here instead of #ifdef.
+    // TODO(ahaas): The kExpr markers below list the still-missing
+    // instructions, to make merging easier as they are done one by one.
+    // kExprI64Add:
     case wasm::kExprI64Add:
       op = m->Int64Add();
       break;
+    // kExprI64Sub:
     case wasm::kExprI64Sub:
       op = m->Int64Sub();
       break;
+    // kExprI64Mul:
     case wasm::kExprI64Mul:
       op = m->Int64Mul();
       break;
-    case wasm::kExprI64DivS: {
-      trap_->ZeroCheck64(kTrapDivByZero, right);
-      Node* before = *control_;
-      Node* denom_is_m1;
-      Node* denom_is_not_m1;
-      Branch(graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
-                              jsgraph()->Int64Constant(-1)),
-             &denom_is_m1, &denom_is_not_m1);
-      *control_ = denom_is_m1;
-      trap_->TrapIfEq64(kTrapDivUnrepresentable, left,
-                        std::numeric_limits<int64_t>::min());
-      if (*control_ != denom_is_m1) {
-        *control_ = graph()->NewNode(jsgraph()->common()->Merge(2),
-                                     denom_is_not_m1, *control_);
-      } else {
-        *control_ = before;
-      }
-      return graph()->NewNode(m->Int64Div(), left, right, *control_);
-    }
+    // kExprI64DivS:
+    case wasm::kExprI64DivS:
+      return BuildI64DivS(left, right);
+    // kExprI64DivU:
     case wasm::kExprI64DivU:
-      op = m->Uint64Div();
-      return graph()->NewNode(op, left, right,
-                              trap_->ZeroCheck64(kTrapDivByZero, right));
-    case wasm::kExprI64RemS: {
-      trap_->ZeroCheck64(kTrapRemByZero, right);
-      Diamond d(jsgraph()->graph(), jsgraph()->common(),
-                graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
-                                 jsgraph()->Int64Constant(-1)));
-
-      Node* rem = graph()->NewNode(m->Int64Mod(), left, right, d.if_false);
-
-      return d.Phi(MachineRepresentation::kWord64, jsgraph()->Int64Constant(0),
-                   rem);
-    }
+      return BuildI64DivU(left, right);
+    // kExprI64RemS:
+    case wasm::kExprI64RemS:
+      return BuildI64RemS(left, right);
+    // kExprI64RemU:
     case wasm::kExprI64RemU:
-      op = m->Uint64Mod();
-      return graph()->NewNode(op, left, right,
-                              trap_->ZeroCheck64(kTrapRemByZero, right));
+      return BuildI64RemU(left, right);
     case wasm::kExprI64Ior:
       op = m->Word64Or();
       break;
+    // kExprI64Xor:
     case wasm::kExprI64Xor:
       op = m->Word64Xor();
       break;
+    // kExprI64Shl:
     case wasm::kExprI64Shl:
       op = m->Word64Shl();
+      right = MaskShiftCount64(right);
       break;
+    // kExprI64ShrU:
     case wasm::kExprI64ShrU:
       op = m->Word64Shr();
+      right = MaskShiftCount64(right);
       break;
+    // kExprI64ShrS:
     case wasm::kExprI64ShrS:
       op = m->Word64Sar();
+      right = MaskShiftCount64(right);
       break;
+    // kExprI64Eq:
     case wasm::kExprI64Eq:
       op = m->Word64Equal();
       break;
+    // kExprI64Ne:
     case wasm::kExprI64Ne:
       return Invert(Binop(wasm::kExprI64Eq, left, right));
+    // kExprI64LtS:
     case wasm::kExprI64LtS:
       op = m->Int64LessThan();
       break;
@@ -583,8 +529,12 @@
       op = m->Uint64LessThanOrEqual();
       std::swap(left, right);
       break;
-#endif
-
+    case wasm::kExprI64Ror:
+      op = m->Word64Ror();
+      right = MaskShiftCount64(right);
+      break;
+    case wasm::kExprI64Rol:
+      return BuildI64Rol(left, right);
     case wasm::kExprF32CopySign:
       return BuildF32CopySign(left, right);
     case wasm::kExprF64CopySign:
@@ -659,6 +609,15 @@
       return BuildF32Max(left, right);
     case wasm::kExprF64Max:
       return BuildF64Max(left, right);
+    case wasm::kExprF64Pow: {
+      return BuildF64Pow(left, right);
+    }
+    case wasm::kExprF64Atan2: {
+      return BuildF64Atan2(left, right);
+    }
+    case wasm::kExprF64Mod: {
+      return BuildF64Mod(left, right);
+    }
     default:
       op = UnsupportedOpcode(opcode);
   }
@@ -670,7 +629,7 @@
   const Operator* op;
   MachineOperatorBuilder* m = jsgraph()->machine();
   switch (opcode) {
-    case wasm::kExprBoolNot:
+    case wasm::kExprI32Eqz:
       op = m->Word32Equal();
       return graph()->NewNode(op, input, jsgraph()->Int32Constant(0));
     case wasm::kExprF32Abs:
@@ -786,79 +745,62 @@
       op = m->Float64RoundTiesEven().op();
       break;
     }
+    case wasm::kExprF64Acos: {
+      return BuildF64Acos(input);
+    }
+    case wasm::kExprF64Asin: {
+      return BuildF64Asin(input);
+    }
+    case wasm::kExprF64Atan: {
+      return BuildF64Atan(input);
+    }
+    case wasm::kExprF64Cos: {
+      return BuildF64Cos(input);
+    }
+    case wasm::kExprF64Sin: {
+      return BuildF64Sin(input);
+    }
+    case wasm::kExprF64Tan: {
+      return BuildF64Tan(input);
+    }
+    case wasm::kExprF64Exp: {
+      return BuildF64Exp(input);
+    }
+    case wasm::kExprF64Log: {
+      return BuildF64Log(input);
+    }
+    // kExprI32ConvertI64:
     case wasm::kExprI32ConvertI64:
       op = m->TruncateInt64ToInt32();
       break;
-#if WASM_64
-    // Opcodes only supported on 64-bit platforms.
-    // TODO(titzer): query the machine operator builder here instead of #ifdef.
+    // kExprI64SConvertI32:
     case wasm::kExprI64SConvertI32:
       op = m->ChangeInt32ToInt64();
       break;
+    // kExprI64UConvertI32:
     case wasm::kExprI64UConvertI32:
       op = m->ChangeUint32ToUint64();
       break;
-    case wasm::kExprF32SConvertI64:
-      op = m->RoundInt64ToFloat32();
-      break;
-    case wasm::kExprF32UConvertI64:
-      op = m->RoundUint64ToFloat32();
-      break;
-    case wasm::kExprF64SConvertI64:
-      op = m->RoundInt64ToFloat64();
-      break;
-    case wasm::kExprF64UConvertI64:
-      op = m->RoundUint64ToFloat64();
-      break;
-    case wasm::kExprI64SConvertF32: {
-      Node* trunc = graph()->NewNode(m->TryTruncateFloat32ToInt64(), input);
-      Node* result =
-          graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
-      Node* overflow =
-          graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
-      trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
-      return result;
-    }
-    case wasm::kExprI64SConvertF64: {
-      Node* trunc = graph()->NewNode(m->TryTruncateFloat64ToInt64(), input);
-      Node* result =
-          graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
-      Node* overflow =
-          graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
-      trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
-      return result;
-    }
-    case wasm::kExprI64UConvertF32: {
-      Node* trunc = graph()->NewNode(m->TryTruncateFloat32ToUint64(), input);
-      Node* result =
-          graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
-      Node* overflow =
-          graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
-      trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
-      return result;
-    }
-    case wasm::kExprI64UConvertF64: {
-      Node* trunc = graph()->NewNode(m->TryTruncateFloat64ToUint64(), input);
-      Node* result =
-          graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
-      Node* overflow =
-          graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
-      trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
-      return result;
-    }
+    // kExprF64ReinterpretI64:
     case wasm::kExprF64ReinterpretI64:
       op = m->BitcastInt64ToFloat64();
       break;
+    // kExprI64ReinterpretF64:
     case wasm::kExprI64ReinterpretF64:
       op = m->BitcastFloat64ToInt64();
       break;
+    // kExprI64Clz:
     case wasm::kExprI64Clz:
       op = m->Word64Clz();
       break;
+    // kExprI64Ctz:
     case wasm::kExprI64Ctz: {
       if (m->Word64Ctz().IsSupported()) {
         op = m->Word64Ctz().op();
         break;
+      } else if (m->Is32() && m->Word32Ctz().IsSupported()) {
+        op = m->Word64CtzPlaceholder();
+        break;
       } else if (m->Word64ReverseBits().IsSupported()) {
         Node* reversed = graph()->NewNode(m->Word64ReverseBits().op(), input);
         Node* result = graph()->NewNode(m->Word64Clz(), reversed);
@@ -867,15 +809,64 @@
         return BuildI64Ctz(input);
       }
     }
+    // kExprI64Popcnt:
     case wasm::kExprI64Popcnt: {
       if (m->Word64Popcnt().IsSupported()) {
         op = m->Word64Popcnt().op();
-        break;
+      } else if (m->Is32() && m->Word32Popcnt().IsSupported()) {
+        op = m->Word64PopcntPlaceholder();
       } else {
         return BuildI64Popcnt(input);
       }
+      break;
     }
-#endif
+    case wasm::kExprI64Eqz:
+      op = m->Word64Equal();
+      return graph()->NewNode(op, input, jsgraph()->Int64Constant(0));
+    // kExprF32SConvertI64:
+    case wasm::kExprF32SConvertI64:
+      if (m->Is32()) {
+        return BuildF32SConvertI64(input);
+      }
+      op = m->RoundInt64ToFloat32();
+      break;
+    // kExprF32UConvertI64:
+    case wasm::kExprF32UConvertI64:
+      if (m->Is32()) {
+        return BuildF32UConvertI64(input);
+      }
+      op = m->RoundUint64ToFloat32();
+      break;
+    // kExprF64SConvertI64:
+    case wasm::kExprF64SConvertI64:
+      if (m->Is32()) {
+        return BuildF64SConvertI64(input);
+      }
+      op = m->RoundInt64ToFloat64();
+      break;
+    // kExprF64UConvertI64:
+    case wasm::kExprF64UConvertI64:
+      if (m->Is32()) {
+        return BuildF64UConvertI64(input);
+      }
+      op = m->RoundUint64ToFloat64();
+      break;
+    // kExprI64SConvertF32:
+    case wasm::kExprI64SConvertF32: {
+      return BuildI64SConvertF32(input);
+    }
+    // kExprI64SConvertF64:
+    case wasm::kExprI64SConvertF64: {
+      return BuildI64SConvertF64(input);
+    }
+    // kExprI64UConvertF32:
+    case wasm::kExprI64UConvertF32: {
+      return BuildI64UConvertF32(input);
+    }
+    // kExprI64UConvertF64:
+    case wasm::kExprI64UConvertF64: {
+      return BuildI64UConvertF64(input);
+    }
     default:
       op = UnsupportedOpcode(opcode);
   }
@@ -937,8 +928,7 @@
     count = 1;
   }
 
-  Node** buf = Realloc(vals, count);
-  buf = Realloc(buf, count + 2);
+  Node** buf = Realloc(vals, count, count + 2);
   buf[count] = *effect_;
   buf[count + 1] = *control_;
   Node* ret = graph()->NewNode(jsgraph()->common()->Return(), count + 2, vals);
@@ -956,6 +946,37 @@
   return nullptr;
 }
 
+Node* WasmGraphBuilder::MaskShiftCount32(Node* node) {
+  static const int32_t kMask32 = 0x1f;
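+  // Wasm requires shift counts to be taken modulo the operand width (here
+  // count & 0x1f); mask explicitly unless the machine's shift instructions
+  // already ignore the high bits of the count.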
+  if (!jsgraph()->machine()->Word32ShiftIsSafe()) {
+    // Shifts by constants are so common we pattern-match them here.
+    Int32Matcher match(node);
+    if (match.HasValue()) {
+      int32_t masked = (match.Value() & kMask32);
+      if (match.Value() != masked) node = jsgraph()->Int32Constant(masked);
+    } else {
+      node = graph()->NewNode(jsgraph()->machine()->Word32And(), node,
+                              jsgraph()->Int32Constant(kMask32));
+    }
+  }
+  return node;
+}
+
+Node* WasmGraphBuilder::MaskShiftCount64(Node* node) {
+  static const int64_t kMask64 = 0x3f;
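+  // As above, 64-bit shift counts are taken modulo 64 (count & 0x3f).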
+  if (!jsgraph()->machine()->Word32ShiftIsSafe()) {
+    // Shifts by constants are so common we pattern-match them here.
+    Int64Matcher match(node);
+    if (match.HasValue()) {
+      int64_t masked = (match.Value() & kMask64);
+      if (match.Value() != masked) node = jsgraph()->Int64Constant(masked);
+    } else {
+      node = graph()->NewNode(jsgraph()->machine()->Word64And(), node,
+                              jsgraph()->Int64Constant(kMask64));
+    }
+  }
+  return node;
+}
 
 Node* WasmGraphBuilder::BuildF32Neg(Node* input) {
   Node* result =
@@ -1115,6 +1136,13 @@
 
 Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input) {
   MachineOperatorBuilder* m = jsgraph()->machine();
+  if (module_ && module_->asm_js()) {
+    // asm.js must use the wacky JS semantics.
+    input = graph()->NewNode(m->ChangeFloat32ToFloat64(), input);
+    return graph()->NewNode(
+        m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
+  }
+
   // Truncation of the input value is needed for the overflow check later.
   Node* trunc = Unop(wasm::kExprF32Trunc, input);
   Node* result = graph()->NewNode(m->TruncateFloat32ToInt32(), trunc);
@@ -1123,7 +1151,7 @@
   // truncated input value, then there has been an overflow and we trap.
   Node* check = Unop(wasm::kExprF32SConvertI32, result);
   Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
-  trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+  trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
 
   return result;
 }
@@ -1131,7 +1159,8 @@
 
 Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input) {
   MachineOperatorBuilder* m = jsgraph()->machine();
-  if (module_ && module_->asm_js) {
+  if (module_ && module_->asm_js()) {
+    // asm.js must use the wacky JS semantics.
     return graph()->NewNode(
         m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
   }
@@ -1143,7 +1172,7 @@
   // truncated input value, then there has been an overflow and we trap.
   Node* check = Unop(wasm::kExprF64SConvertI32, result);
   Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
-  trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+  trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
 
   return result;
 }
@@ -1151,6 +1180,13 @@
 
 Node* WasmGraphBuilder::BuildI32UConvertF32(Node* input) {
   MachineOperatorBuilder* m = jsgraph()->machine();
+  if (module_ && module_->asm_js()) {
+    // asm.js must use the wacky JS semantics.
+    input = graph()->NewNode(m->ChangeFloat32ToFloat64(), input);
+    return graph()->NewNode(
+        m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
+  }
+
   // Truncation of the input value is needed for the overflow check later.
   Node* trunc = Unop(wasm::kExprF32Trunc, input);
   Node* result = graph()->NewNode(m->TruncateFloat32ToUint32(), trunc);
@@ -1159,7 +1195,7 @@
   // truncated input value, then there has been an overflow and we trap.
   Node* check = Unop(wasm::kExprF32UConvertI32, result);
   Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
-  trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+  trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
 
   return result;
 }
@@ -1167,19 +1203,20 @@
 
 Node* WasmGraphBuilder::BuildI32UConvertF64(Node* input) {
   MachineOperatorBuilder* m = jsgraph()->machine();
-  if (module_ && module_->asm_js) {
+  if (module_ && module_->asm_js()) {
+    // asm.js must use the wacky JS semantics.
     return graph()->NewNode(
         m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
   }
   // Truncation of the input value is needed for the overflow check later.
   Node* trunc = Unop(wasm::kExprF64Trunc, input);
-  Node* result = graph()->NewNode(m->ChangeFloat64ToUint32(), trunc);
+  Node* result = graph()->NewNode(m->TruncateFloat64ToUint32(), trunc);
 
   // Convert the result back to f64. If we end up at a different value than the
   // truncated input value, then there has been an overflow and we trap.
   Node* check = Unop(wasm::kExprF64UConvertI32, result);
   Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
-  trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+  trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
 
   return result;
 }
@@ -1363,89 +1400,557 @@
 Node* WasmGraphBuilder::BuildF32Trunc(Node* input) {
   MachineType type = MachineType::Float32();
   ExternalReference ref =
-      ExternalReference::f32_trunc_wrapper_function(jsgraph()->isolate());
-  return BuildRoundingInstruction(input, ref, type);
+      ExternalReference::wasm_f32_trunc(jsgraph()->isolate());
+
+  return BuildCFuncInstruction(ref, type, input);
 }
 
 Node* WasmGraphBuilder::BuildF32Floor(Node* input) {
   MachineType type = MachineType::Float32();
   ExternalReference ref =
-      ExternalReference::f32_floor_wrapper_function(jsgraph()->isolate());
-  return BuildRoundingInstruction(input, ref, type);
+      ExternalReference::wasm_f32_floor(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
 }
 
 Node* WasmGraphBuilder::BuildF32Ceil(Node* input) {
   MachineType type = MachineType::Float32();
   ExternalReference ref =
-      ExternalReference::f32_ceil_wrapper_function(jsgraph()->isolate());
-  return BuildRoundingInstruction(input, ref, type);
+      ExternalReference::wasm_f32_ceil(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
 }
 
 Node* WasmGraphBuilder::BuildF32NearestInt(Node* input) {
   MachineType type = MachineType::Float32();
   ExternalReference ref =
-      ExternalReference::f32_nearest_int_wrapper_function(jsgraph()->isolate());
-  return BuildRoundingInstruction(input, ref, type);
+      ExternalReference::wasm_f32_nearest_int(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
 }
 
 Node* WasmGraphBuilder::BuildF64Trunc(Node* input) {
   MachineType type = MachineType::Float64();
   ExternalReference ref =
-      ExternalReference::f64_trunc_wrapper_function(jsgraph()->isolate());
-  return BuildRoundingInstruction(input, ref, type);
+      ExternalReference::wasm_f64_trunc(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
 }
 
 Node* WasmGraphBuilder::BuildF64Floor(Node* input) {
   MachineType type = MachineType::Float64();
   ExternalReference ref =
-      ExternalReference::f64_floor_wrapper_function(jsgraph()->isolate());
-  return BuildRoundingInstruction(input, ref, type);
+      ExternalReference::wasm_f64_floor(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
 }
 
 Node* WasmGraphBuilder::BuildF64Ceil(Node* input) {
   MachineType type = MachineType::Float64();
   ExternalReference ref =
-      ExternalReference::f64_ceil_wrapper_function(jsgraph()->isolate());
-  return BuildRoundingInstruction(input, ref, type);
+      ExternalReference::wasm_f64_ceil(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
 }
 
 Node* WasmGraphBuilder::BuildF64NearestInt(Node* input) {
   MachineType type = MachineType::Float64();
   ExternalReference ref =
-      ExternalReference::f64_nearest_int_wrapper_function(jsgraph()->isolate());
-  return BuildRoundingInstruction(input, ref, type);
+      ExternalReference::wasm_f64_nearest_int(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
 }
 
-Node* WasmGraphBuilder::BuildRoundingInstruction(Node* input,
-                                                 ExternalReference ref,
-                                                 MachineType type) {
-  // We do truncation by calling a C function which calculates the truncation
-  // for us. The input is passed to the C function as a double* to avoid double
-  // parameters. For this we reserve a slot on the stack, store the parameter in
-  // that slot, pass a pointer to the slot to the C function, and after calling
-  // the C function we collect the return value from the stack slot.
+Node* WasmGraphBuilder::BuildF64Acos(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_acos_wrapper_function(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
+}
 
-  Node* stack_slot_param =
+Node* WasmGraphBuilder::BuildF64Asin(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_asin_wrapper_function(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF64Atan(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_atan_wrapper_function(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF64Cos(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_cos_wrapper_function(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF64Sin(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_sin_wrapper_function(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF64Tan(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_tan_wrapper_function(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF64Exp(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_exp_wrapper_function(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF64Log(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_log_wrapper_function(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, input);
+}
+
+Node* WasmGraphBuilder::BuildF64Atan2(Node* left, Node* right) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_atan2_wrapper_function(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, left, right);
+}
+
+Node* WasmGraphBuilder::BuildF64Pow(Node* left, Node* right) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_pow_wrapper_function(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, left, right);
+}
+
+Node* WasmGraphBuilder::BuildF64Mod(Node* left, Node* right) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_mod_wrapper_function(jsgraph()->isolate());
+  return BuildCFuncInstruction(ref, type, left, right);
+}
+
+Node* WasmGraphBuilder::BuildCFuncInstruction(ExternalReference ref,
+                                              MachineType type, Node* input0,
+                                              Node* input1) {
+  // We compute the result by calling a C function. The inputs are passed to
+  // the C function as double* pointers to avoid passing doubles as direct
+  // parameters. For this we reserve slots on the stack, store the parameters
+  // in those slots, pass pointers to the slots to the C function, and after
+  // the call we read the return value back from the stack slot.
+
+  Node* stack_slot_param0 =
       graph()->NewNode(jsgraph()->machine()->StackSlot(type.representation()));
 
-  const Operator* store_op = jsgraph()->machine()->Store(
+  const Operator* store_op0 = jsgraph()->machine()->Store(
       StoreRepresentation(type.representation(), kNoWriteBarrier));
-  *effect_ =
-      graph()->NewNode(store_op, stack_slot_param, jsgraph()->Int32Constant(0),
-                       input, *effect_, *control_);
+  *effect_ = graph()->NewNode(store_op0, stack_slot_param0,
+                              jsgraph()->Int32Constant(0), input0, *effect_,
+                              *control_);
 
-  Signature<MachineType>::Builder sig_builder(jsgraph()->zone(), 0, 1);
-  sig_builder.AddParam(MachineType::Pointer());
   Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
+  Node** args = Buffer(5);
+  args[0] = function;
+  args[1] = stack_slot_param0;
+  int input_count = 1;
 
-  Node* args[] = {function, stack_slot_param};
+  if (input1 != nullptr) {
+    Node* stack_slot_param1 = graph()->NewNode(
+        jsgraph()->machine()->StackSlot(type.representation()));
+    const Operator* store_op1 = jsgraph()->machine()->Store(
+        StoreRepresentation(type.representation(), kNoWriteBarrier));
+    *effect_ = graph()->NewNode(store_op1, stack_slot_param1,
+                                jsgraph()->Int32Constant(0), input1, *effect_,
+                                *control_);
+    args[2] = stack_slot_param1;
+    ++input_count;
+  }
 
+  Signature<MachineType>::Builder sig_builder(jsgraph()->zone(), 0,
+                                              input_count);
+  sig_builder.AddParam(MachineType::Pointer());
+  if (input1 != nullptr) {
+    sig_builder.AddParam(MachineType::Pointer());
+  }
   BuildCCall(sig_builder.Build(), args);
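+  // The C function writes its result over the first parameter's stack slot,
+  // so the result is read back from stack_slot_param0 below.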
 
   const Operator* load_op = jsgraph()->machine()->Load(type);
 
   Node* load =
-      graph()->NewNode(load_op, stack_slot_param, jsgraph()->Int32Constant(0),
+      graph()->NewNode(load_op, stack_slot_param0, jsgraph()->Int32Constant(0),
+                       *effect_, *control_);
+  *effect_ = load;
+  return load;
+}
+
+Node* WasmGraphBuilder::BuildF32SConvertI64(Node* input) {
+  // TODO(titzer/bradnelson): Check handling of the asm.js case.
+  return BuildIntToFloatConversionInstruction(
+      input, ExternalReference::wasm_int64_to_float32(jsgraph()->isolate()),
+      MachineRepresentation::kWord64, MachineType::Float32());
+}
+Node* WasmGraphBuilder::BuildF32UConvertI64(Node* input) {
+  // TODO(titzer/bradnelson): Check handling of the asm.js case.
+  return BuildIntToFloatConversionInstruction(
+      input, ExternalReference::wasm_uint64_to_float32(jsgraph()->isolate()),
+      MachineRepresentation::kWord64, MachineType::Float32());
+}
+Node* WasmGraphBuilder::BuildF64SConvertI64(Node* input) {
+  return BuildIntToFloatConversionInstruction(
+      input, ExternalReference::wasm_int64_to_float64(jsgraph()->isolate()),
+      MachineRepresentation::kWord64, MachineType::Float64());
+}
+Node* WasmGraphBuilder::BuildF64UConvertI64(Node* input) {
+  return BuildIntToFloatConversionInstruction(
+      input, ExternalReference::wasm_uint64_to_float64(jsgraph()->isolate()),
+      MachineRepresentation::kWord64, MachineType::Float64());
+}
+
+Node* WasmGraphBuilder::BuildIntToFloatConversionInstruction(
+    Node* input, ExternalReference ref,
+    MachineRepresentation parameter_representation,
+    const MachineType result_type) {
+  Node* stack_slot_param = graph()->NewNode(
+      jsgraph()->machine()->StackSlot(parameter_representation));
+  Node* stack_slot_result = graph()->NewNode(
+      jsgraph()->machine()->StackSlot(result_type.representation()));
+  const Operator* store_op = jsgraph()->machine()->Store(
+      StoreRepresentation(parameter_representation, kNoWriteBarrier));
+  *effect_ =
+      graph()->NewNode(store_op, stack_slot_param, jsgraph()->Int32Constant(0),
+                       input, *effect_, *control_);
+  MachineSignature::Builder sig_builder(jsgraph()->zone(), 0, 2);
+  sig_builder.AddParam(MachineType::Pointer());
+  sig_builder.AddParam(MachineType::Pointer());
+  Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
+  Node* args[] = {function, stack_slot_param, stack_slot_result};
+  BuildCCall(sig_builder.Build(), args);
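+  // The callee reads the integer from stack_slot_param and writes the
+  // converted value into stack_slot_result, which is loaded below.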
+  const Operator* load_op = jsgraph()->machine()->Load(result_type);
+  Node* load =
+      graph()->NewNode(load_op, stack_slot_result, jsgraph()->Int32Constant(0),
+                       *effect_, *control_);
+  *effect_ = load;
+  return load;
+}
+
+Node* WasmGraphBuilder::BuildI64SConvertF32(Node* input) {
+  if (jsgraph()->machine()->Is32()) {
+    return BuildFloatToIntConversionInstruction(
+        input, ExternalReference::wasm_float32_to_int64(jsgraph()->isolate()),
+        MachineRepresentation::kFloat32, MachineType::Int64());
+  } else {
+    Node* trunc = graph()->NewNode(
+        jsgraph()->machine()->TryTruncateFloat32ToInt64(), input);
+    Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
+    Node* overflow =
+        graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+    trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+    return result;
+  }
+}
+
+Node* WasmGraphBuilder::BuildI64UConvertF32(Node* input) {
+  if (jsgraph()->machine()->Is32()) {
+    return BuildFloatToIntConversionInstruction(
+        input, ExternalReference::wasm_float32_to_uint64(jsgraph()->isolate()),
+        MachineRepresentation::kFloat32, MachineType::Int64());
+  } else {
+    Node* trunc = graph()->NewNode(
+        jsgraph()->machine()->TryTruncateFloat32ToUint64(), input);
+    Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
+    Node* overflow =
+        graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+    trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+    return result;
+  }
+}
+
+Node* WasmGraphBuilder::BuildI64SConvertF64(Node* input) {
+  if (jsgraph()->machine()->Is32()) {
+    return BuildFloatToIntConversionInstruction(
+        input, ExternalReference::wasm_float64_to_int64(jsgraph()->isolate()),
+        MachineRepresentation::kFloat64, MachineType::Int64());
+  } else {
+    Node* trunc = graph()->NewNode(
+        jsgraph()->machine()->TryTruncateFloat64ToInt64(), input);
+    Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
+    Node* overflow =
+        graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+    trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+    return result;
+  }
+}
+
+Node* WasmGraphBuilder::BuildI64UConvertF64(Node* input) {
+  if (jsgraph()->machine()->Is32()) {
+    return BuildFloatToIntConversionInstruction(
+        input, ExternalReference::wasm_float64_to_uint64(jsgraph()->isolate()),
+        MachineRepresentation::kFloat64, MachineType::Int64());
+  } else {
+    Node* trunc = graph()->NewNode(
+        jsgraph()->machine()->TryTruncateFloat64ToUint64(), input);
+    Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
+    Node* overflow =
+        graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+    trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+    return result;
+  }
+}
+
+Node* WasmGraphBuilder::BuildFloatToIntConversionInstruction(
+    Node* input, ExternalReference ref,
+    MachineRepresentation parameter_representation,
+    const MachineType result_type) {
+  Node* stack_slot_param = graph()->NewNode(
+      jsgraph()->machine()->StackSlot(parameter_representation));
+  Node* stack_slot_result = graph()->NewNode(
+      jsgraph()->machine()->StackSlot(result_type.representation()));
+  const Operator* store_op = jsgraph()->machine()->Store(
+      StoreRepresentation(parameter_representation, kNoWriteBarrier));
+  *effect_ =
+      graph()->NewNode(store_op, stack_slot_param, jsgraph()->Int32Constant(0),
+                       input, *effect_, *control_);
+  MachineSignature::Builder sig_builder(jsgraph()->zone(), 1, 2);
+  sig_builder.AddReturn(MachineType::Int32());
+  sig_builder.AddParam(MachineType::Pointer());
+  sig_builder.AddParam(MachineType::Pointer());
+  Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
+  Node* args[] = {function, stack_slot_param, stack_slot_result};
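+  // The callee's return value signals success; ZeroCheck32 below traps with
+  // kTrapFloatUnrepresentable when it returns zero.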
+  trap_->ZeroCheck32(wasm::kTrapFloatUnrepresentable,
+                     BuildCCall(sig_builder.Build(), args));
+  const Operator* load_op = jsgraph()->machine()->Load(result_type);
+  Node* load =
+      graph()->NewNode(load_op, stack_slot_result, jsgraph()->Int32Constant(0),
+                       *effect_, *control_);
+  *effect_ = load;
+  return load;
+}
+
+Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right) {
+  MachineOperatorBuilder* m = jsgraph()->machine();
+  if (module_ && module_->asm_js()) {
+    // asm.js semantics return 0 on divide or mod by zero.
+    if (m->Int32DivIsSafe()) {
+      // The hardware instruction does the right thing (e.g. arm).
+      return graph()->NewNode(m->Int32Div(), left, right, graph()->start());
+    }
+
+    // Check denominator for zero.
+    Diamond z(
+        graph(), jsgraph()->common(),
+        graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+        BranchHint::kFalse);
+
+    // Check the denominator for -1 (to avoid the kMinInt / -1 case).
+    Diamond n(
+        graph(), jsgraph()->common(),
+        graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
+        BranchHint::kFalse);
+
+    Node* div = graph()->NewNode(m->Int32Div(), left, right, z.if_false);
+    Node* neg =
+        graph()->NewNode(m->Int32Sub(), jsgraph()->Int32Constant(0), left);
+
+    return n.Phi(MachineRepresentation::kWord32, neg,
+                 z.Phi(MachineRepresentation::kWord32,
+                       jsgraph()->Int32Constant(0), div));
+  }
+
+  trap_->ZeroCheck32(wasm::kTrapDivByZero, right);
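+  // The only other trapping case is kMinInt / -1, which overflows, so branch
+  // on a -1 denominator and trap if the numerator is kMinInt.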
+  Node* before = *control_;
+  Node* denom_is_m1;
+  Node* denom_is_not_m1;
+  Branch(
+      graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
+      &denom_is_m1, &denom_is_not_m1);
+  *control_ = denom_is_m1;
+  trap_->TrapIfEq32(wasm::kTrapDivUnrepresentable, left, kMinInt);
+  if (*control_ != denom_is_m1) {
+    *control_ = graph()->NewNode(jsgraph()->common()->Merge(2), denom_is_not_m1,
+                                 *control_);
+  } else {
+    *control_ = before;
+  }
+  return graph()->NewNode(m->Int32Div(), left, right, *control_);
+}
+
+Node* WasmGraphBuilder::BuildI32RemS(Node* left, Node* right) {
+  MachineOperatorBuilder* m = jsgraph()->machine();
+  if (module_ && module_->asm_js()) {
+    // asm.js semantics return 0 on divide or mod by zero.
+    // Explicit check for x % 0.
+    Diamond z(
+        graph(), jsgraph()->common(),
+        graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+        BranchHint::kFalse);
+
+    // Explicit check for x % -1.
+    Diamond d(
+        graph(), jsgraph()->common(),
+        graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
+        BranchHint::kFalse);
+    d.Chain(z.if_false);
+
+    return z.Phi(
+        MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+        d.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+              graph()->NewNode(m->Int32Mod(), left, right, d.if_false)));
+  }
+
+  trap_->ZeroCheck32(wasm::kTrapRemByZero, right);
+
+  Diamond d(
+      graph(), jsgraph()->common(),
+      graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
+      BranchHint::kFalse);
+  d.Chain(*control_);
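+  // x % -1 is always 0, and on some architectures the division instruction
+  // faults on kMinInt % -1, so the -1 case is resolved to 0 explicitly.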
+
+  return d.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+               graph()->NewNode(m->Int32Mod(), left, right, d.if_false));
+}
+
+Node* WasmGraphBuilder::BuildI32DivU(Node* left, Node* right) {
+  MachineOperatorBuilder* m = jsgraph()->machine();
+  if (module_ && module_->asm_js()) {
+    // asm.js semantics return 0 on divide or mod by zero.
+    if (m->Uint32DivIsSafe()) {
+      // The hardware instruction does the right thing (e.g. arm).
+      return graph()->NewNode(m->Uint32Div(), left, right, graph()->start());
+    }
+
+    // Explicit check for x / 0.
+    Diamond z(
+        graph(), jsgraph()->common(),
+        graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+        BranchHint::kFalse);
+
+    return z.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+                 graph()->NewNode(jsgraph()->machine()->Uint32Div(), left,
+                                  right, z.if_false));
+  }
+  return graph()->NewNode(m->Uint32Div(), left, right,
+                          trap_->ZeroCheck32(wasm::kTrapDivByZero, right));
+}
+
+Node* WasmGraphBuilder::BuildI32RemU(Node* left, Node* right) {
+  MachineOperatorBuilder* m = jsgraph()->machine();
+  if (module_ && module_->asm_js()) {
+    // asm.js semantics return 0 on divide or mod by zero.
+    // Explicit check for x % 0.
+    Diamond z(
+        graph(), jsgraph()->common(),
+        graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+        BranchHint::kFalse);
+
+    Node* rem = graph()->NewNode(jsgraph()->machine()->Uint32Mod(), left, right,
+                                 z.if_false);
+    return z.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+                 rem);
+  }
+
+  return graph()->NewNode(m->Uint32Mod(), left, right,
+                          trap_->ZeroCheck32(wasm::kTrapRemByZero, right));
+}
+
+Node* WasmGraphBuilder::BuildI64DivS(Node* left, Node* right) {
+  if (jsgraph()->machine()->Is32()) {
+    return BuildDiv64Call(
+        left, right, ExternalReference::wasm_int64_div(jsgraph()->isolate()),
+        MachineType::Int64(), wasm::kTrapDivByZero);
+  }
+  trap_->ZeroCheck64(wasm::kTrapDivByZero, right);
+  Node* before = *control_;
+  Node* denom_is_m1;
+  Node* denom_is_not_m1;
+  Branch(graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
+                          jsgraph()->Int64Constant(-1)),
+         &denom_is_m1, &denom_is_not_m1);
+  *control_ = denom_is_m1;
+  trap_->TrapIfEq64(wasm::kTrapDivUnrepresentable, left,
+                    std::numeric_limits<int64_t>::min());
+  if (*control_ != denom_is_m1) {
+    *control_ = graph()->NewNode(jsgraph()->common()->Merge(2), denom_is_not_m1,
+                                 *control_);
+  } else {
+    *control_ = before;
+  }
+  return graph()->NewNode(jsgraph()->machine()->Int64Div(), left, right,
+                          *control_);
+}
+
+Node* WasmGraphBuilder::BuildI64RemS(Node* left, Node* right) {
+  if (jsgraph()->machine()->Is32()) {
+    return BuildDiv64Call(
+        left, right, ExternalReference::wasm_int64_mod(jsgraph()->isolate()),
+        MachineType::Int64(), wasm::kTrapRemByZero);
+  }
+  trap_->ZeroCheck64(wasm::kTrapRemByZero, right);
+  Diamond d(jsgraph()->graph(), jsgraph()->common(),
+            graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
+                             jsgraph()->Int64Constant(-1)));
+
+  Node* rem = graph()->NewNode(jsgraph()->machine()->Int64Mod(), left, right,
+                               d.if_false);
+
+  return d.Phi(MachineRepresentation::kWord64, jsgraph()->Int64Constant(0),
+               rem);
+}
+
+Node* WasmGraphBuilder::BuildI64DivU(Node* left, Node* right) {
+  if (jsgraph()->machine()->Is32()) {
+    return BuildDiv64Call(
+        left, right, ExternalReference::wasm_uint64_div(jsgraph()->isolate()),
+        MachineType::Int64(), wasm::kTrapDivByZero);
+  }
+  return graph()->NewNode(jsgraph()->machine()->Uint64Div(), left, right,
+                          trap_->ZeroCheck64(wasm::kTrapDivByZero, right));
+}
+Node* WasmGraphBuilder::BuildI64RemU(Node* left, Node* right) {
+  if (jsgraph()->machine()->Is32()) {
+    return BuildDiv64Call(
+        left, right, ExternalReference::wasm_uint64_mod(jsgraph()->isolate()),
+        MachineType::Int64(), wasm::kTrapRemByZero);
+  }
+  return graph()->NewNode(jsgraph()->machine()->Uint64Mod(), left, right,
+                          trap_->ZeroCheck64(wasm::kTrapRemByZero, right));
+}
+
+Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
+                                       ExternalReference ref,
+                                       MachineType result_type, int trap_zero) {
+  Node* stack_slot_dst = graph()->NewNode(
+      jsgraph()->machine()->StackSlot(MachineRepresentation::kWord64));
+  Node* stack_slot_src = graph()->NewNode(
+      jsgraph()->machine()->StackSlot(MachineRepresentation::kWord64));
+
+  const Operator* store_op = jsgraph()->machine()->Store(
+      StoreRepresentation(MachineRepresentation::kWord64, kNoWriteBarrier));
+  *effect_ =
+      graph()->NewNode(store_op, stack_slot_dst, jsgraph()->Int32Constant(0),
+                       left, *effect_, *control_);
+  *effect_ =
+      graph()->NewNode(store_op, stack_slot_src, jsgraph()->Int32Constant(0),
+                       right, *effect_, *control_);
+
+  MachineSignature::Builder sig_builder(jsgraph()->zone(), 1, 2);
+  sig_builder.AddReturn(MachineType::Int32());
+  sig_builder.AddParam(MachineType::Pointer());
+  sig_builder.AddParam(MachineType::Pointer());
+
+  Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
+  Node* args[] = {function, stack_slot_dst, stack_slot_src};
+
+  Node* call = BuildCCall(sig_builder.Build(), args);
+
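+  // The callee writes the quotient (or remainder) back into stack_slot_dst
+  // and returns 0 to signal division by zero or -1 to signal an
+  // unrepresentable result; both cases are trapped on below.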
+  // TODO(wasm): This can get simpler if we have a specialized runtime call to
+  // throw WASM exceptions by trap code instead of by string.
+  trap_->ZeroCheck32(static_cast<wasm::TrapReason>(trap_zero), call);
+  trap_->TrapIfEq32(wasm::kTrapDivUnrepresentable, call, -1);
+  const Operator* load_op = jsgraph()->machine()->Load(result_type);
+  Node* load =
+      graph()->NewNode(load_op, stack_slot_dst, jsgraph()->Int32Constant(0),
                        *effect_, *control_);
   *effect_ = load;
   return load;
@@ -1457,7 +1962,7 @@
   const size_t count = 1 + params + extra;
 
   // Reallocate the buffer to make space for extra inputs.
-  args = Realloc(args, count);
+  args = Realloc(args, 1 + params, count);
 
   // Add effect and control inputs.
   args[params + 1] = *effect_;
@@ -1478,7 +1983,7 @@
   const size_t count = 1 + params + extra;
 
   // Reallocate the buffer to make space for extra inputs.
-  args = Realloc(args, count);
+  args = Realloc(args, 1 + params, count);
 
   // Add effect and control inputs.
   args[params + 1] = *effect_;
@@ -1493,7 +1998,6 @@
   return call;
 }
 
-
 Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args) {
   DCHECK_NULL(args[0]);
 
@@ -1529,10 +2033,10 @@
     // Bounds check against the table size.
     Node* size = Int32Constant(static_cast<int>(table_size));
     Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
-    trap_->AddTrapIfFalse(kTrapFuncInvalid, in_bounds);
+    trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, in_bounds);
   } else {
     // No function table. Generate a trap and return a constant.
-    trap_->AddTrapIfFalse(kTrapFuncInvalid, Int32Constant(0));
+    trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, Int32Constant(0));
     return trap_->GetTrapValue(module_->GetSignature(index));
   }
   Node* table = FunctionTable();
@@ -1552,7 +2056,7 @@
         *effect_, *control_);
     Node* sig_match = graph()->NewNode(machine->WordEqual(), load_sig,
                                        jsgraph()->SmiConstant(index));
-    trap_->AddTrapIfFalse(kTrapFuncSigMismatch, sig_match);
+    trap_->AddTrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match);
   }
 
   // Load code object from the table.
@@ -1640,9 +2144,34 @@
   return num;
 }
 
+Node* WasmGraphBuilder::BuildI32Rol(Node* left, Node* right) {
+  // Implement Rol via Ror, using the identity rol(x, n) == ror(x, 32 - n),
+  // since TurboFan does not have a Rol opcode.
+  // TODO(weiliang): support Word32Rol opcode in TurboFan.
+  Int32Matcher m(right);
+  if (m.HasValue()) {
+    return Binop(wasm::kExprI32Ror, left,
+                 jsgraph()->Int32Constant(32 - m.Value()));
+  } else {
+    return Binop(wasm::kExprI32Ror, left,
+                 Binop(wasm::kExprI32Sub, jsgraph()->Int32Constant(32), right));
+  }
+}
+
+Node* WasmGraphBuilder::BuildI64Rol(Node* left, Node* right) {
+  // Implement Rol via Ror, using the identity rol(x, n) == ror(x, 64 - n),
+  // since TurboFan does not have a Rol opcode.
+  // TODO(weiliang): support Word64Rol opcode in TurboFan.
+  Int64Matcher m(right);
+  if (m.HasValue()) {
+    return Binop(wasm::kExprI64Ror, left,
+                 jsgraph()->Int64Constant(64 - m.Value()));
+  } else {
+    return Binop(wasm::kExprI64Ror, left,
+                 Binop(wasm::kExprI64Sub, jsgraph()->Int64Constant(64), right));
+  }
+}
 
 Node* WasmGraphBuilder::Invert(Node* node) {
-  return Unop(wasm::kExprBoolNot, node);
+  return Unop(wasm::kExprI32Eqz, node);
 }
 
 
@@ -1653,19 +2182,22 @@
   Node** args = Buffer(count);
 
   // Build the start and the JS parameter nodes.
-  Node* start = Start(params + 3);
+  Node* start = Start(params + 5);
   *control_ = start;
   *effect_ = start;
-  // JS context is the last parameter.
+  // Create the context parameter.
   Node* context = graph()->NewNode(
-      jsgraph()->common()->Parameter(params + 1, "context"), start);
+      jsgraph()->common()->Parameter(
+          Linkage::GetJSCallContextParamIndex(params + 1), "%context"),
+      graph()->start());
 
   int pos = 0;
   args[pos++] = Constant(wasm_code);
 
   // Convert JS parameters to WASM numbers.
   for (int i = 0; i < params; i++) {
-    Node* param = graph()->NewNode(jsgraph()->common()->Parameter(i), start);
+    Node* param =
+        graph()->NewNode(jsgraph()->common()->Parameter(i + 1), start);
     args[pos++] = FromJS(param, context, sig->GetParam(i));
   }
 
@@ -1800,7 +2332,7 @@
   MachineType mem_type = module_->GetGlobalType(index);
   Node* addr = jsgraph()->IntPtrConstant(
       reinterpret_cast<uintptr_t>(module_->instance->globals_start +
-                                  module_->module->globals->at(index).offset));
+                                  module_->module->globals[index].offset));
   const Operator* op = jsgraph()->machine()->Load(mem_type);
   Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), *effect_,
                                 *control_);
@@ -1814,7 +2346,7 @@
   MachineType mem_type = module_->GetGlobalType(index);
   Node* addr = jsgraph()->IntPtrConstant(
       reinterpret_cast<uintptr_t>(module_->instance->globals_start +
-                                  module_->module->globals->at(index).offset));
+                                  module_->module->globals[index].offset));
   const Operator* op = jsgraph()->machine()->Store(
       StoreRepresentation(mem_type.representation(), kNoWriteBarrier));
   Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), val,
@@ -1843,7 +2375,7 @@
         jsgraph()->Int32Constant(static_cast<uint32_t>(limit)));
   }
 
-  trap_->AddTrapIfFalse(kTrapMemOutOfBounds, cond);
+  trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond);
 }
 
 
@@ -1851,7 +2383,7 @@
                                 Node* index, uint32_t offset) {
   Node* load;
 
-  if (module_ && module_->asm_js) {
+  if (module_ && module_->asm_js()) {
     // asm.js semantics use CheckedLoad (i.e. OOB reads return 0ish).
     DCHECK_EQ(0, offset);
     const Operator* op = jsgraph()->machine()->CheckedLoad(memtype);
@@ -1886,7 +2418,7 @@
 Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
                                  uint32_t offset, Node* val) {
   Node* store;
-  if (module_ && module_->asm_js) {
+  if (module_ && module_->asm_js()) {
     // asm.js semantics use CheckedStore (i.e. ignore OOB writes).
     DCHECK_EQ(0, offset);
     const Operator* op =
@@ -1920,7 +2452,7 @@
 Graph* WasmGraphBuilder::graph() { return jsgraph()->graph(); }
 
 void WasmGraphBuilder::Int64LoweringForTesting() {
-  if (kPointerSize == 4) {
+  if (jsgraph()->machine()->Is32()) {
     Int64Lowering r(jsgraph()->graph(), jsgraph()->machine(),
                     jsgraph()->common(), jsgraph()->zone(),
                     function_signature_);
@@ -1931,12 +2463,13 @@
 static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
                                       CompilationInfo* info,
                                       const char* message, uint32_t index,
-                                      const char* func_name) {
+                                      wasm::WasmName func_name) {
   Isolate* isolate = info->isolate();
   if (isolate->logger()->is_logging_code_events() ||
       isolate->cpu_profiler()->is_profiling()) {
     ScopedVector<char> buffer(128);
-    SNPrintF(buffer, "%s#%d:%s", message, index, func_name);
+    SNPrintF(buffer, "%s#%d:%.*s", message, index, func_name.length,
+             func_name.name);
     Handle<String> name_str =
         isolate->factory()->NewStringFromAsciiChecked(buffer.start());
     Handle<String> script_str =
@@ -1944,15 +2477,15 @@
     Handle<Code> code = info->code();
     Handle<SharedFunctionInfo> shared =
         isolate->factory()->NewSharedFunctionInfo(name_str, code, false);
-    PROFILE(isolate,
-            CodeCreateEvent(tag, *code, *shared, info, *script_str, 0, 0));
+    PROFILE(isolate, CodeCreateEvent(tag, AbstractCode::cast(*code), *shared,
+                                     info, *script_str, 0, 0));
   }
 }
 
 Handle<JSFunction> CompileJSToWasmWrapper(
     Isolate* isolate, wasm::ModuleEnv* module, Handle<String> name,
     Handle<Code> wasm_code, Handle<JSObject> module_object, uint32_t index) {
-  wasm::WasmFunction* func = &module->module->functions->at(index);
+  wasm::WasmFunction* func = &module->module->functions[index];
 
   //----------------------------------------------------------------------------
   // Create the JSFunction object.
@@ -1961,7 +2494,7 @@
       isolate->factory()->NewSharedFunctionInfo(name, wasm_code, false);
   int params = static_cast<int>(func->sig->parameter_count());
   shared->set_length(params);
-  shared->set_internal_formal_parameter_count(1 + params);
+  shared->set_internal_formal_parameter_count(params);
   Handle<JSFunction> function = isolate->factory()->NewFunction(
       isolate->wasm_function_map(), name, MaybeHandle<Code>());
   function->SetInternalField(0, *module_object);
@@ -1970,7 +2503,7 @@
   //----------------------------------------------------------------------------
   // Create the Graph
   //----------------------------------------------------------------------------
-  Zone zone;
+  Zone zone(isolate->allocator());
   Graph graph(&zone);
   CommonOperatorBuilder common(&zone);
   JSOperatorBuilder javascript(&zone);
@@ -2015,8 +2548,7 @@
         module->GetFunctionSignature(index)->parameter_count());
     CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
         &zone, false, params + 1, CallDescriptor::kNoFlags);
-    // TODO(titzer): this is technically a WASM wrapper, not a wasm function.
-    Code::Flags flags = Code::ComputeFlags(Code::WASM_FUNCTION);
+    Code::Flags flags = Code::ComputeFlags(Code::JS_TO_WASM_FUNCTION);
     bool debugging =
 #if DEBUG
         true;
@@ -2036,12 +2568,19 @@
     CompilationInfo info(func_name, isolate, &zone, flags);
     Handle<Code> code =
         Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+#ifdef ENABLE_DISASSEMBLER
+    if (FLAG_print_opt_code && !code.is_null()) {
+      OFStream os(stdout);
+      code->Disassemble(buffer.start(), os);
+    }
+#endif
     if (debugging) {
       buffer.Dispose();
     }
 
-    RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, "js-to-wasm", index,
-                              module->module->GetName(func->name_offset));
+    RecordFunctionCompilation(
+        Logger::FUNCTION_TAG, &info, "js-to-wasm", index,
+        module->module->GetName(func->name_offset, func->name_length));
     // Set the JSFunction's machine code.
     function->set_code(*code);
   }
@@ -2050,11 +2589,13 @@
 
 Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
                                     Handle<JSFunction> function,
-                                    wasm::FunctionSig* sig, const char* name) {
+                                    wasm::FunctionSig* sig,
+                                    wasm::WasmName module_name,
+                                    wasm::WasmName function_name) {
   //----------------------------------------------------------------------------
   // Create the Graph
   //----------------------------------------------------------------------------
-  Zone zone;
+  Zone zone(isolate->allocator());
   Graph graph(&zone);
   CommonOperatorBuilder common(&zone);
   JSOperatorBuilder javascript(&zone);
@@ -2095,8 +2636,7 @@
     // Schedule and compile to machine code.
     CallDescriptor* incoming =
         wasm::ModuleEnv::GetWasmCallDescriptor(&zone, sig);
-    // TODO(titzer): this is technically a WASM wrapper, not a wasm function.
-    Code::Flags flags = Code::ComputeFlags(Code::WASM_FUNCTION);
+    Code::Flags flags = Code::ComputeFlags(Code::WASM_TO_JS_FUNCTION);
     bool debugging =
 #if DEBUG
         true;
@@ -2114,12 +2654,18 @@
 
     CompilationInfo info(func_name, isolate, &zone, flags);
     code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+#ifdef ENABLE_DISASSEMBLER
+    if (FLAG_print_opt_code && !code.is_null()) {
+      OFStream os(stdout);
+      code->Disassemble(buffer.start(), os);
+    }
+#endif
     if (debugging) {
       buffer.Dispose();
     }
 
     RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, "wasm-to-js", 0,
-                              name);
+                              module_name);
   }
   return code;
 }
@@ -2129,24 +2675,21 @@
 Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
                                  wasm::ModuleEnv* module_env,
                                  const wasm::WasmFunction& function) {
-  if (FLAG_trace_wasm_compiler || FLAG_trace_wasm_decode_time) {
+  if (FLAG_trace_wasm_compiler) {
     OFStream os(stdout);
     os << "Compiling WASM function "
        << wasm::WasmFunctionName(&function, module_env) << std::endl;
     os << std::endl;
   }
-  // Initialize the function environment for decoding.
-  wasm::FunctionEnv env;
-  env.module = module_env;
-  env.sig = function.sig;
-  env.local_i32_count = function.local_i32_count;
-  env.local_i64_count = function.local_i64_count;
-  env.local_f32_count = function.local_f32_count;
-  env.local_f64_count = function.local_f64_count;
-  env.SumLocals();
+
+  double decode_ms = 0;
+  base::ElapsedTimer decode_timer;
+  if (FLAG_trace_wasm_decode_time) {
+    decode_timer.Start();
+  }
 
   // Create a TF graph during decoding.
-  Zone zone;
+  Zone zone(isolate->allocator());
   Graph graph(&zone);
   CommonOperatorBuilder common(&zone);
   MachineOperatorBuilder machine(
@@ -2154,11 +2697,12 @@
       InstructionSelector::SupportedMachineOperatorFlags());
   JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
   WasmGraphBuilder builder(&zone, &jsgraph, function.sig);
-  wasm::TreeResult result = wasm::BuildTFGraph(
-      &builder, &env,                                                 // --
-      module_env->module->module_start,                               // --
-      module_env->module->module_start + function.code_start_offset,  // --
-      module_env->module->module_start + function.code_end_offset);   // --
+  wasm::FunctionBody body = {
+      module_env, function.sig, module_env->module->module_start,
+      module_env->module->module_start + function.code_start_offset,
+      module_env->module->module_start + function.code_end_offset};
+  wasm::TreeResult result =
+      wasm::BuildTFGraph(isolate->allocator(), &builder, body);
 
   if (result.failed()) {
     if (FLAG_trace_wasm_compiler) {
@@ -2167,17 +2711,31 @@
     }
     // Add the function as another context for the exception
     ScopedVector<char> buffer(128);
-    SNPrintF(buffer, "Compiling WASM function #%d:%s failed:",
-             function.func_index,
-             module_env->module->GetName(function.name_offset));
+    wasm::WasmName name =
+        module_env->module->GetName(function.name_offset, function.name_length);
+    SNPrintF(buffer, "Compiling WASM function #%d:%.*s failed:",
+             function.func_index, name.length, name.name);
     thrower.Failed(buffer.start(), result);
     return Handle<Code>::null();
   }
 
+  int index = static_cast<int>(function.func_index);
+  if (index >= FLAG_trace_wasm_ast_start && index < FLAG_trace_wasm_ast_end) {
+    PrintAst(isolate->allocator(), body);
+  }
+
+  if (FLAG_trace_wasm_decode_time) {
+    decode_ms = decode_timer.Elapsed().InMillisecondsF();
+  }
+
+  base::ElapsedTimer compile_timer;
+  if (FLAG_trace_wasm_decode_time) {
+    compile_timer.Start();
+  }
   // Run the compiler pipeline to generate machine code.
   CallDescriptor* descriptor =
       wasm::ModuleEnv::GetWasmCallDescriptor(&zone, function.sig);
-  if (kPointerSize == 4) {
+  if (machine.Is32()) {
     descriptor = module_env->GetI32WasmCallDescriptor(&zone, descriptor);
   }
   Code::Flags flags = Code::ComputeFlags(Code::WASM_FUNCTION);
@@ -2192,8 +2750,10 @@
   Vector<char> buffer;
   if (debugging) {
     buffer = Vector<char>::New(128);
-    SNPrintF(buffer, "WASM_function_#%d:%s", function.func_index,
-             module_env->module->GetName(function.name_offset));
+    wasm::WasmName name =
+        module_env->module->GetName(function.name_offset, function.name_length);
+    SNPrintF(buffer, "WASM_function_#%d:%.*s", function.func_index, name.length,
+             name.name);
     func_name = buffer.start();
   }
   CompilationInfo info(func_name, isolate, &zone, flags);
@@ -2204,11 +2764,20 @@
     buffer.Dispose();
   }
   if (!code.is_null()) {
-    RecordFunctionCompilation(
-        Logger::FUNCTION_TAG, &info, "WASM_function", function.func_index,
-        module_env->module->GetName(function.name_offset));
+    RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, "WASM_function",
+                              function.func_index,
+                              module_env->module->GetName(
+                                  function.name_offset, function.name_length));
   }
 
+  if (FLAG_trace_wasm_decode_time) {
+    double compile_ms = compile_timer.Elapsed().InMillisecondsF();
+    PrintF(
+        "wasm-compile ok: %d bytes, %0.3f ms decode, %d nodes, %0.3f ms "
+        "compile\n",
+        static_cast<int>(function.code_end_offset - function.code_start_offset),
+        decode_ms, static_cast<int>(graph.NodeCount()), compile_ms);
+  }
   return code;
 }
 
diff --git a/src/compiler/wasm-compiler.h b/src/compiler/wasm-compiler.h
index 2e86b56..bbcafa7 100644
--- a/src/compiler/wasm-compiler.h
+++ b/src/compiler/wasm-compiler.h
@@ -40,7 +40,9 @@
 // Wraps a JS function, producing a code object that can be called from WASM.
 Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
                                     Handle<JSFunction> function,
-                                    wasm::FunctionSig* sig, const char* name);
+                                    wasm::FunctionSig* sig,
+                                    wasm::WasmName module_name,
+                                    wasm::WasmName function_name);
 
 // Wraps a given wasm code object, producing a JSFunction that can be called
 // from JavaScript.
@@ -162,8 +164,12 @@
   Node* MemBuffer(uint32_t offset);
   void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset);
 
+  Node* MaskShiftCount32(Node* node);
+  Node* MaskShiftCount64(Node* node);
+
   Node* BuildCCall(MachineSignature* sig, Node** args);
   Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args);
+
   Node* BuildF32Neg(Node* input);
   Node* BuildF64Neg(Node* input);
   Node* BuildF32CopySign(Node* left, Node* right);
@@ -180,8 +186,8 @@
   Node* BuildI32Popcnt(Node* input);
   Node* BuildI64Ctz(Node* input);
   Node* BuildI64Popcnt(Node* input);
-  Node* BuildRoundingInstruction(Node* input, ExternalReference ref,
-                                 MachineType type);
+  Node* BuildCFuncInstruction(ExternalReference ref, MachineType type,
+                              Node* input0, Node* input1 = nullptr);
   Node* BuildF32Trunc(Node* input);
   Node* BuildF32Floor(Node* input);
   Node* BuildF32Ceil(Node* input);
@@ -190,10 +196,54 @@
   Node* BuildF64Floor(Node* input);
   Node* BuildF64Ceil(Node* input);
   Node* BuildF64NearestInt(Node* input);
+  Node* BuildI32Rol(Node* left, Node* right);
+  Node* BuildI64Rol(Node* left, Node* right);
 
-  Node** Realloc(Node** buffer, size_t count) {
-    Node** buf = Buffer(count);
-    if (buf != buffer) memcpy(buf, buffer, count * sizeof(Node*));
+  Node* BuildF64Acos(Node* input);
+  Node* BuildF64Asin(Node* input);
+  Node* BuildF64Atan(Node* input);
+  Node* BuildF64Cos(Node* input);
+  Node* BuildF64Sin(Node* input);
+  Node* BuildF64Tan(Node* input);
+  Node* BuildF64Exp(Node* input);
+  Node* BuildF64Log(Node* input);
+  Node* BuildF64Pow(Node* left, Node* right);
+  Node* BuildF64Atan2(Node* left, Node* right);
+  Node* BuildF64Mod(Node* left, Node* right);
+
+  Node* BuildIntToFloatConversionInstruction(
+      Node* input, ExternalReference ref,
+      MachineRepresentation parameter_representation,
+      const MachineType result_type);
+  Node* BuildF32SConvertI64(Node* input);
+  Node* BuildF32UConvertI64(Node* input);
+  Node* BuildF64SConvertI64(Node* input);
+  Node* BuildF64UConvertI64(Node* input);
+
+  Node* BuildFloatToIntConversionInstruction(
+      Node* input, ExternalReference ref,
+      MachineRepresentation parameter_representation,
+      const MachineType result_type);
+  Node* BuildI64SConvertF32(Node* input);
+  Node* BuildI64UConvertF32(Node* input);
+  Node* BuildI64SConvertF64(Node* input);
+  Node* BuildI64UConvertF64(Node* input);
+
+  Node* BuildI32DivS(Node* left, Node* right);
+  Node* BuildI32RemS(Node* left, Node* right);
+  Node* BuildI32DivU(Node* left, Node* right);
+  Node* BuildI32RemU(Node* left, Node* right);
+
+  Node* BuildI64DivS(Node* left, Node* right);
+  Node* BuildI64RemS(Node* left, Node* right);
+  Node* BuildI64DivU(Node* left, Node* right);
+  Node* BuildI64RemU(Node* left, Node* right);
+  Node* BuildDiv64Call(Node* left, Node* right, ExternalReference ref,
+                       MachineType result_type, int trap_zero);
+
+  Node** Realloc(Node** buffer, size_t old_count, size_t new_count) {
+    Node** buf = Buffer(new_count);
+    if (buf != buffer) memcpy(buf, buffer, old_count * sizeof(Node*));
     return buf;
   }
 };
diff --git a/src/compiler/wasm-linkage.cc b/src/compiler/wasm-linkage.cc
index 3176fd3..f0e14ce 100644
--- a/src/compiler/wasm-linkage.cc
+++ b/src/compiler/wasm-linkage.cc
@@ -58,7 +58,7 @@
 // ===========================================================================
 // == ia32 ===================================================================
 // ===========================================================================
-#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi
+#define GP_PARAM_REGISTERS eax, edx, ecx, ebx
 #define GP_RETURN_REGISTERS eax, edx
 #define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
 #define FP_RETURN_REGISTERS xmm1, xmm2
@@ -125,6 +125,24 @@
 #define FP_PARAM_REGISTERS d1, d2, d3, d4, d5, d6, d7, d8
 #define FP_RETURN_REGISTERS d1, d2
 
+#elif V8_TARGET_ARCH_S390X
+// ===========================================================================
+// == s390x ==================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS r2, r3, r4, r5, r6
+#define GP_RETURN_REGISTERS r2
+#define FP_PARAM_REGISTERS d0, d2, d4, d6
+#define FP_RETURN_REGISTERS d0, d2, d4, d6
+
+#elif V8_TARGET_ARCH_S390
+// ===========================================================================
+// == s390 ===================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS r2, r3, r4, r5, r6
+#define GP_RETURN_REGISTERS r2, r3
+#define FP_PARAM_REGISTERS d0, d2
+#define FP_RETURN_REGISTERS d0, d2
+
 #else
 // ===========================================================================
 // == unknown ================================================================
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
index 510c0c6..2e4eccb 100644
--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -49,8 +49,11 @@
 
   Operand ToOperand(InstructionOperand* op, int extra = 0) {
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    FrameOffset offset = frame_access_state()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index());
+    return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
+  }
+
+  Operand SlotToOperand(int slot_index, int extra = 0) {
+    FrameOffset offset = frame_access_state()->GetFrameOffset(slot_index);
     return Operand(offset.from_stack_pointer() ? rsp : rbp,
                    offset.offset() + extra);
   }
@@ -599,6 +602,12 @@
     }                                                            \
   } while (false)
 
+void CodeGenerator::AssembleDeconstructFrame() {
+  __ movq(rsp, rbp);
+  __ popq(rbp);
+}
+
+void CodeGenerator::AssembleSetupStackPointer() {}
 
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
@@ -615,18 +624,43 @@
     __ subq(rsp, Immediate(-sp_slot_delta * kPointerSize));
     frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
   }
-  if (frame()->needs_frame()) {
+  if (frame_access_state()->has_frame()) {
     __ movq(rbp, MemOperand(rbp, 0));
   }
   frame_access_state()->SetFrameAccessToSP();
 }
 
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+                                                     Register scratch1,
+                                                     Register scratch2,
+                                                     Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Label done;
+
+  // Check if current frame is an arguments adaptor frame.
+  __ Cmp(Operand(rbp, StandardFrameConstants::kContextOffset),
+         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ j(not_equal, &done, Label::kNear);
+
+  // Load the arguments count from the current arguments adaptor frame
+  // (note that it does not include the receiver).
+  Register caller_args_count_reg = scratch1;
+  __ SmiToInteger32(
+      caller_args_count_reg,
+      Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3, ReturnAddressState::kOnStack);
+  __ bind(&done);
+}
 
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   X64OperandConverter i(this, instr);
-
-  switch (ArchOpcodeField::decode(instr->opcode())) {
+  InstructionCode opcode = instr->opcode();
+  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+  switch (arch_opcode) {
     case kArchCallCodeObject: {
       EnsureSpaceForLazyDeopt();
       if (HasImmediateInput(instr, 0)) {
@@ -641,9 +675,15 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       if (HasImmediateInput(instr, 0)) {
         Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
         __ jmp(code, RelocInfo::CODE_TARGET);
@@ -668,6 +708,7 @@
       RecordCallPosition(instr);
       break;
     }
+    case kArchTailCallJSFunctionFromJSFunction:
     case kArchTailCallJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
@@ -677,6 +718,11 @@
       }
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         i.TempRegister(0), i.TempRegister(1),
+                                         i.TempRegister(2));
+      }
       __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
       frame_access_state()->ClearSPDelta();
       break;
@@ -735,7 +781,7 @@
       __ movq(i.OutputRegister(), rbp);
       break;
     case kArchParentFramePointer:
-      if (frame_access_state()->frame()->needs_frame()) {
+      if (frame_access_state()->has_frame()) {
         __ movq(i.OutputRegister(), Operand(rbp, 0));
       } else {
         __ movq(i.OutputRegister(), rbp);
@@ -799,12 +845,24 @@
     case kX64And:
       ASSEMBLE_BINOP(andq);
       break;
+    case kX64Cmp8:
+      ASSEMBLE_COMPARE(cmpb);
+      break;
+    case kX64Cmp16:
+      ASSEMBLE_COMPARE(cmpw);
+      break;
     case kX64Cmp32:
       ASSEMBLE_COMPARE(cmpl);
       break;
     case kX64Cmp:
       ASSEMBLE_COMPARE(cmpq);
       break;
+    case kX64Test8:
+      ASSEMBLE_COMPARE(testb);
+      break;
+    case kX64Test16:
+      ASSEMBLE_COMPARE(testw);
+      break;
     case kX64Test32:
       ASSEMBLE_COMPARE(testl);
       break;
@@ -1001,7 +1059,6 @@
       } else {
         __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
       }
-      __ AssertZeroExtended(i.OutputRegister());
       break;
     }
     case kSSEFloat64Cmp:
@@ -1100,7 +1157,9 @@
       } else {
         __ Cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
       }
-      __ AssertZeroExtended(i.OutputRegister());
+      if (MiscField::decode(instr->opcode())) {
+        __ AssertZeroExtended(i.OutputRegister());
+      }
       break;
     }
     case kSSEFloat32ToInt64:
@@ -1877,18 +1936,16 @@
 
 void CodeGenerator::AssemblePrologue() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-  if (descriptor->IsCFunctionCall()) {
-    __ pushq(rbp);
-    __ movq(rbp, rsp);
-  } else if (descriptor->IsJSFunctionCall()) {
-    __ Prologue(this->info()->GeneratePreagedPrologue());
-  } else if (frame()->needs_frame()) {
-    __ StubPrologue();
-  } else {
-    frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
+  if (frame_access_state()->has_frame()) {
+    if (descriptor->IsCFunctionCall()) {
+      __ pushq(rbp);
+      __ movq(rbp, rsp);
+    } else if (descriptor->IsJSFunctionCall()) {
+      __ Prologue(this->info()->GeneratePreagedPrologue());
+    } else {
+      __ StubPrologue(info()->GetOutputStackFrameType());
+    }
   }
-  frame_access_state()->SetFrameAccessToDefault();
-
   int stack_shrink_slots = frame()->GetSpillSlotCount();
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
@@ -1968,17 +2025,15 @@
   }
 
   if (descriptor->IsCFunctionCall()) {
-    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
-    __ popq(rbp);       // Pop caller's frame pointer.
-  } else if (frame()->needs_frame()) {
+    AssembleDeconstructFrame();
+  } else if (frame_access_state()->has_frame()) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ jmp(&return_label_);
       return;
     } else {
       __ bind(&return_label_);
-      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
-      __ popq(rbp);       // Pop caller's frame pointer.
+      AssembleDeconstructFrame();
     }
   }
   size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
@@ -2043,9 +2098,9 @@
         case Constant::kHeapObject: {
           Handle<HeapObject> src_object = src.ToHeapObject();
           Heap::RootListIndex index;
-          int offset;
-          if (IsMaterializableFromFrame(src_object, &offset)) {
-            __ movp(dst, Operand(rbp, offset));
+          int slot;
+          if (IsMaterializableFromFrame(src_object, &slot)) {
+            __ movp(dst, g.SlotToOperand(slot));
           } else if (IsMaterializableFromRoot(src_object, &index)) {
             __ LoadRoot(dst, index);
           } else {
diff --git a/src/compiler/x64/instruction-codes-x64.h b/src/compiler/x64/instruction-codes-x64.h
index 6d5e77c..bd19386 100644
--- a/src/compiler/x64/instruction-codes-x64.h
+++ b/src/compiler/x64/instruction-codes-x64.h
@@ -18,8 +18,12 @@
   V(X64And32)                      \
   V(X64Cmp)                        \
   V(X64Cmp32)                      \
+  V(X64Cmp16)                      \
+  V(X64Cmp8)                       \
   V(X64Test)                       \
   V(X64Test32)                     \
+  V(X64Test16)                     \
+  V(X64Test8)                      \
   V(X64Or)                         \
   V(X64Or32)                       \
   V(X64Xor)                        \
@@ -139,7 +143,6 @@
   V(X64Poke)                       \
   V(X64StackCheck)
 
-
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
 // are encoded into the InstructionCode of the instruction and tell the
diff --git a/src/compiler/x64/instruction-scheduler-x64.cc b/src/compiler/x64/instruction-scheduler-x64.cc
index 1f10b51..3c31965 100644
--- a/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/src/compiler/x64/instruction-scheduler-x64.cc
@@ -20,8 +20,12 @@
     case kX64And32:
     case kX64Cmp:
     case kX64Cmp32:
+    case kX64Cmp16:
+    case kX64Cmp8:
     case kX64Test:
     case kX64Test32:
+    case kX64Test16:
+    case kX64Test8:
     case kX64Or:
     case kX64Or32:
     case kX64Xor:
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index d3a2a8e..ea1d48b 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -36,6 +36,33 @@
     }
   }
 
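+  // Determines whether {input} of {node} can be replaced by a memory operand
+  // of the width implied by {opcode}.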
+  bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input) {
+    if (input->opcode() != IrOpcode::kLoad ||
+        !selector()->CanCover(node, input)) {
+      return false;
+    }
+    MachineRepresentation rep =
+        LoadRepresentationOf(input->op()).representation();
+    switch (opcode) {
+      case kX64Cmp:
+      case kX64Test:
+        return rep == MachineRepresentation::kWord64 ||
+               rep == MachineRepresentation::kTagged;
+      case kX64Cmp32:
+      case kX64Test32:
+        return rep == MachineRepresentation::kWord32;
+      case kX64Cmp16:
+      case kX64Test16:
+        return rep == MachineRepresentation::kWord16;
+      case kX64Cmp8:
+      case kX64Test8:
+        return rep == MachineRepresentation::kWord8;
+      default:
+        break;
+    }
+    return false;
+  }
+
   AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent,
                                              Node* base, Node* displacement,
                                              InstructionOperand inputs[],
@@ -173,9 +200,7 @@
       inputs[input_count++] = g.UseUniqueRegister(index);
       addressing_mode = kMode_MR1;
     }
-    inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
-                                ? g.UseRegister(value)
-                                : g.UseUniqueRegister(value);
+    inputs[input_count++] = g.UseUniqueRegister(value);
     RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
     switch (write_barrier_kind) {
       case kNoWriteBarrier:
@@ -399,8 +424,13 @@
   DCHECK_GE(arraysize(inputs), input_count);
   DCHECK_GE(arraysize(outputs), output_count);
 
-  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
-                 inputs);
+  opcode = cont->Encode(opcode);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
 }
 
 
@@ -588,6 +618,75 @@
 
 
 void InstructionSelector::VisitWord64Sar(Node* node) {
+  X64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if (CanCover(m.node(), m.left().node()) && m.left().IsLoad() &&
+      m.right().Is(32)) {
+    // Just load and sign-extend the interesting 4 bytes instead. This happens,
+    // for example, when we're loading and untagging SMIs.
+    BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(), true);
+    if (mleft.matches() && (mleft.displacement() == nullptr ||
+                            g.CanBeImmediate(mleft.displacement()))) {
+      size_t input_count = 0;
+      InstructionOperand inputs[3];
+      AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
+          m.left().node(), inputs, &input_count);
+      if (mleft.displacement() == nullptr) {
+        // Make sure that the addressing mode indicates the presence of an
+        // immediate displacement. It seems that we never use M1 and M2, but we
+        // handle them here anyway.
+        switch (mode) {
+          case kMode_MR:
+            mode = kMode_MRI;
+            break;
+          case kMode_MR1:
+            mode = kMode_MR1I;
+            break;
+          case kMode_MR2:
+            mode = kMode_MR2I;
+            break;
+          case kMode_MR4:
+            mode = kMode_MR4I;
+            break;
+          case kMode_MR8:
+            mode = kMode_MR8I;
+            break;
+          case kMode_M1:
+            mode = kMode_M1I;
+            break;
+          case kMode_M2:
+            mode = kMode_M2I;
+            break;
+          case kMode_M4:
+            mode = kMode_M4I;
+            break;
+          case kMode_M8:
+            mode = kMode_M8I;
+            break;
+          case kMode_None:
+          case kMode_MRI:
+          case kMode_MR1I:
+          case kMode_MR2I:
+          case kMode_MR4I:
+          case kMode_MR8I:
+          case kMode_M1I:
+          case kMode_M2I:
+          case kMode_M4I:
+          case kMode_M8I:
+            UNREACHABLE();
+        }
+        inputs[input_count++] = ImmediateOperand(ImmediateOperand::INLINE, 4);
+      } else {
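+        // Bump the displacement by 4 so the load reads the upper half of the
+        // 64-bit value; sign-extending it yields the value shifted by 32.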
+        ImmediateOperand* op = ImmediateOperand::cast(&inputs[input_count - 1]);
+        int32_t displacement = sequence()->GetImmediate(op).ToInt32();
+        *op = ImmediateOperand(ImmediateOperand::INLINE, displacement + 4);
+      }
+      InstructionOperand outputs[] = {g.DefineAsRegister(node)};
+      InstructionCode code = kX64Movsxlq | AddressingModeField::encode(mode);
+      Emit(code, 1, outputs, input_count, inputs);
+      return;
+    }
+  }
   VisitWord64Shift(this, node, kX64Sar);
 }
 
@@ -668,8 +767,8 @@
 
 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
-    VisitBinop(this, node, kX64Add, &cont);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+    return VisitBinop(this, node, kX64Add, &cont);
   }
   FlagsContinuation cont;
   VisitBinop(this, node, kX64Add, &cont);
@@ -708,7 +807,7 @@
 
 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kX64Sub, &cont);
   }
   FlagsContinuation cont;
@@ -865,9 +964,15 @@
 
 void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+  Emit(kSSEFloat64ToUint32 | MiscField::encode(1), g.DefineAsRegister(node),
+       g.Use(node->InputAt(0)));
 }
 
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64ToUint32 | MiscField::encode(0), g.DefineAsRegister(node),
+       g.Use(node->InputAt(0)));
+}
 
 void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
   X64OperandGenerator g(this);
@@ -1336,6 +1441,7 @@
 
 bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
 
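+// Tail calls from a JSFunction need three temp registers on x64 for the
+// arguments-adaptor frame pop.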
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
 
 namespace {
 
@@ -1357,6 +1463,9 @@
     inputs[input_count++] = g.Label(cont->true_block());
     inputs[input_count++] = g.Label(cont->false_block());
     selector->Emit(opcode, 0, nullptr, input_count, inputs);
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     InstructionOperand output = g.DefineAsRegister(cont->result());
@@ -1364,23 +1473,6 @@
   }
 }
 
-// Determines if {input} of {node} can be replaced by a memory operand.
-bool CanUseMemoryOperand(InstructionSelector* selector, InstructionCode opcode,
-                         Node* node, Node* input) {
-  if (input->opcode() != IrOpcode::kLoad || !selector->CanCover(node, input)) {
-    return false;
-  }
-  MachineRepresentation rep =
-      LoadRepresentationOf(input->op()).representation();
-  if (rep == MachineRepresentation::kWord64 ||
-      rep == MachineRepresentation::kTagged) {
-    return opcode == kX64Cmp || opcode == kX64Test;
-  } else if (rep == MachineRepresentation::kWord32) {
-    return opcode == kX64Cmp32 || opcode == kX64Test32;
-  }
-  return false;
-}
-
 // Shared routine for multiple compare operations.
 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                   InstructionOperand left, InstructionOperand right,
@@ -1390,6 +1482,9 @@
   if (cont->IsBranch()) {
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -1408,6 +1503,37 @@
   VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
 }
 
+// Tries to match the size of the given opcode to that of the operands, if
+// possible.
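+// For example, a 32-bit compare of two kWord8 loads is narrowed to kX64Cmp8,
+// which can then fold one of the loads into a byte memory operand.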
+InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
+                                    Node* right) {
+  if (opcode != kX64Cmp32 && opcode != kX64Test32) {
+    return opcode;
+  }
+  // Currently, if one of the two operands is not a Load, we don't know what its
+  // machine representation is, so we bail out.
+  // TODO(epertoso): we can probably get some size information out of immediates
+  // and phi nodes.
+  if (left->opcode() != IrOpcode::kLoad || right->opcode() != IrOpcode::kLoad) {
+    return opcode;
+  }
+  // If the load representations don't match, both operands will be
+  // zero/sign-extended to 32 bits.
+  LoadRepresentation left_representation = LoadRepresentationOf(left->op());
+  if (left_representation != LoadRepresentationOf(right->op())) {
+    return opcode;
+  }
+  switch (left_representation.representation()) {
+    case MachineRepresentation::kBit:
+    case MachineRepresentation::kWord8:
+      return opcode == kX64Cmp32 ? kX64Cmp8 : kX64Test8;
+    case MachineRepresentation::kWord16:
+      return opcode == kX64Cmp32 ? kX64Cmp16 : kX64Test16;
+    default:
+      return opcode;
+  }
+}
+
 // Shared routine for multiple word compare operations.
 void VisitWordCompare(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
@@ -1415,15 +1541,20 @@
   Node* left = node->InputAt(0);
   Node* right = node->InputAt(1);
 
-  // If one of the two inputs is an immediate, make sure it's on the right.
-  if (!g.CanBeImmediate(right) && g.CanBeImmediate(left)) {
+  opcode = TryNarrowOpcodeSize(opcode, left, right);
+
+  // If one of the two inputs is an immediate, make sure it's on the right, or
+  // if one of the two inputs is a memory operand, make sure it's on the left.
+  if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
+      (g.CanBeMemoryOperand(opcode, node, right) &&
+       !g.CanBeMemoryOperand(opcode, node, left))) {
     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
     std::swap(left, right);
   }
 
   // Match immediates on right side of comparison.
   if (g.CanBeImmediate(right)) {
-    if (CanUseMemoryOperand(selector, opcode, node, left)) {
+    if (g.CanBeMemoryOperand(opcode, node, left)) {
       return VisitCompareWithMemoryOperand(selector, opcode, left,
                                            g.UseImmediate(right), cont);
     }
@@ -1431,15 +1562,17 @@
                         cont);
   }
 
+  // Match memory operands on left side of comparison.
+  if (g.CanBeMemoryOperand(opcode, node, left)) {
+    return VisitCompareWithMemoryOperand(selector, opcode, left,
+                                         g.UseRegister(right), cont);
+  }
+
   if (g.CanBeBetterLeftOperand(right)) {
     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
     std::swap(left, right);
   }
 
-  if (CanUseMemoryOperand(selector, opcode, node, left)) {
-    return VisitCompareWithMemoryOperand(selector, opcode, left,
-                                         g.UseRegister(right), cont);
-  }
   return VisitCompare(selector, opcode, left, right, cont,
                       node->op()->HasProperty(Operator::kCommutative));
 }
@@ -1460,6 +1593,9 @@
       if (cont->IsBranch()) {
         selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
                        g.Label(cont->false_block()));
+      } else if (cont->IsDeoptimize()) {
+        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr,
+                                 cont->frame_state());
       } else {
         DCHECK(cont->IsSet());
         selector->Emit(opcode, g.DefineAsRegister(cont->result()));
@@ -1500,98 +1636,87 @@
   VisitCompare(selector, opcode, right, left, cont, false);
 }
 
-}  // namespace
-
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
-                                      BasicBlock* fbranch) {
-  X64OperandGenerator g(this);
-  Node* user = branch;
-  Node* value = branch->InputAt(0);
-
-  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
-
-  // Try to combine with comparisons against 0 by simply inverting the branch.
-  while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
-    Int32BinopMatcher m(value);
-    if (m.right().Is(0)) {
-      user = value;
-      value = m.left().node();
-      cont.Negate();
-    } else {
-      break;
-    }
-  }
-
-  // Try to combine the branch with a comparison.
-  if (CanCover(user, value)) {
+// Shared routine for word comparison against zero.
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+                          Node* value, FlagsContinuation* cont) {
+  while (selector->CanCover(user, value)) {
     switch (value->opcode()) {
-      case IrOpcode::kWord32Equal:
-        cont.OverwriteAndNegateIfEqual(kEqual);
-        return VisitWordCompare(this, value, kX64Cmp32, &cont);
+      case IrOpcode::kWord32Equal: {
+        // Combine with comparisons against 0 by simply inverting the
+        // continuation.
+        Int32BinopMatcher m(value);
+        if (m.right().Is(0)) {
+          user = value;
+          value = m.left().node();
+          cont->Negate();
+          continue;
+        }
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitWordCompare(selector, value, kX64Cmp32, cont);
+      }
       case IrOpcode::kInt32LessThan:
-        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
-        return VisitWordCompare(this, value, kX64Cmp32, &cont);
+        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWordCompare(selector, value, kX64Cmp32, cont);
       case IrOpcode::kInt32LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
-        return VisitWordCompare(this, value, kX64Cmp32, &cont);
+        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWordCompare(selector, value, kX64Cmp32, cont);
       case IrOpcode::kUint32LessThan:
-        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
-        return VisitWordCompare(this, value, kX64Cmp32, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWordCompare(selector, value, kX64Cmp32, cont);
       case IrOpcode::kUint32LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
-        return VisitWordCompare(this, value, kX64Cmp32, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWordCompare(selector, value, kX64Cmp32, cont);
       case IrOpcode::kWord64Equal: {
-        cont.OverwriteAndNegateIfEqual(kEqual);
+        cont->OverwriteAndNegateIfEqual(kEqual);
         Int64BinopMatcher m(value);
         if (m.right().Is(0)) {
           // Try to combine the branch with a comparison.
           Node* const user = m.node();
           Node* const value = m.left().node();
-          if (CanCover(user, value)) {
+          if (selector->CanCover(user, value)) {
             switch (value->opcode()) {
               case IrOpcode::kInt64Sub:
-                return VisitWord64Compare(this, value, &cont);
+                return VisitWord64Compare(selector, value, cont);
               case IrOpcode::kWord64And:
-                return VisitWordCompare(this, value, kX64Test, &cont);
+                return VisitWordCompare(selector, value, kX64Test, cont);
               default:
                 break;
             }
           }
-          return VisitCompareZero(this, value, kX64Cmp, &cont);
+          return VisitCompareZero(selector, value, kX64Cmp, cont);
         }
-        return VisitWord64Compare(this, value, &cont);
+        return VisitWord64Compare(selector, value, cont);
       }
       case IrOpcode::kInt64LessThan:
-        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
-        return VisitWord64Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWord64Compare(selector, value, cont);
       case IrOpcode::kInt64LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
-        return VisitWord64Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWord64Compare(selector, value, cont);
       case IrOpcode::kUint64LessThan:
-        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
-        return VisitWord64Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWord64Compare(selector, value, cont);
       case IrOpcode::kUint64LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
-        return VisitWord64Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWord64Compare(selector, value, cont);
       case IrOpcode::kFloat32Equal:
-        cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
-        return VisitFloat32Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+        return VisitFloat32Compare(selector, value, cont);
       case IrOpcode::kFloat32LessThan:
-        cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
-        return VisitFloat32Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
+        return VisitFloat32Compare(selector, value, cont);
       case IrOpcode::kFloat32LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
-        return VisitFloat32Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
+        return VisitFloat32Compare(selector, value, cont);
       case IrOpcode::kFloat64Equal:
-        cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
-        return VisitFloat64Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+        return VisitFloat64Compare(selector, value, cont);
       case IrOpcode::kFloat64LessThan:
-        cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
-        return VisitFloat64Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
+        return VisitFloat64Compare(selector, value, cont);
       case IrOpcode::kFloat64LessThanOrEqual:
-        cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
-        return VisitFloat64Compare(this, value, &cont);
+        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
+        return VisitFloat64Compare(selector, value, cont);
       case IrOpcode::kProjection:
         // Check if this is the overflow output projection of an
         // <Operation>WithOverflow node.
@@ -1603,20 +1728,20 @@
           // *AFTER* this branch).
           Node* const node = value->InputAt(0);
           Node* const result = NodeProperties::FindProjection(node, 0);
-          if (result == nullptr || IsDefined(result)) {
+          if (result == nullptr || selector->IsDefined(result)) {
             switch (node->opcode()) {
               case IrOpcode::kInt32AddWithOverflow:
-                cont.OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop(this, node, kX64Add32, &cont);
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kX64Add32, cont);
               case IrOpcode::kInt32SubWithOverflow:
-                cont.OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop(this, node, kX64Sub32, &cont);
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kX64Sub32, cont);
               case IrOpcode::kInt64AddWithOverflow:
-                cont.OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop(this, node, kX64Add, &cont);
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kX64Add, cont);
               case IrOpcode::kInt64SubWithOverflow:
-                cont.OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop(this, node, kX64Sub, &cont);
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kX64Sub, cont);
               default:
                 break;
             }
@@ -1624,22 +1749,42 @@
         }
         break;
       case IrOpcode::kInt32Sub:
-        return VisitWordCompare(this, value, kX64Cmp32, &cont);
+        return VisitWordCompare(selector, value, kX64Cmp32, cont);
       case IrOpcode::kInt64Sub:
-        return VisitWord64Compare(this, value, &cont);
+        return VisitWord64Compare(selector, value, cont);
       case IrOpcode::kWord32And:
-        return VisitWordCompare(this, value, kX64Test32, &cont);
+        return VisitWordCompare(selector, value, kX64Test32, cont);
       case IrOpcode::kWord64And:
-        return VisitWordCompare(this, value, kX64Test, &cont);
+        return VisitWordCompare(selector, value, kX64Test, cont);
       default:
         break;
     }
+    break;
   }
 
   // Branch could not be combined with a compare, emit compare against 0.
-  VisitCompareZero(this, value, kX64Cmp32, &cont);
+  VisitCompareZero(selector, value, kX64Cmp32, cont);
 }
 
+}  // namespace
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+                                      BasicBlock* fbranch) {
+  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
 
 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   X64OperandGenerator g(this);
@@ -1674,7 +1819,7 @@
 
 void InstructionSelector::VisitWord32Equal(Node* const node) {
   Node* user = node;
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int32BinopMatcher m(user);
   if (m.right().Is(0)) {
     Node* value = m.left().node();
@@ -1709,31 +1854,33 @@
 
 
 void InstructionSelector::VisitInt32LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWordCompare(this, node, kX64Cmp32, &cont);
 }
 
 
 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWordCompare(this, node, kX64Cmp32, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWordCompare(this, node, kX64Cmp32, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWordCompare(this, node, kX64Cmp32, &cont);
 }
 
 
 void InstructionSelector::VisitWord64Equal(Node* const node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int64BinopMatcher m(node);
   if (m.right().Is(0)) {
     // Try to combine the equality check with a comparison.
@@ -1756,8 +1903,8 @@
 
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
-    VisitBinop(this, node, kX64Add32, &cont);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+    return VisitBinop(this, node, kX64Add32, &cont);
   }
   FlagsContinuation cont;
   VisitBinop(this, node, kX64Add32, &cont);
@@ -1766,7 +1913,7 @@
 
 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kX64Sub32, &cont);
   }
   FlagsContinuation cont;
@@ -1775,61 +1922,67 @@
 
 
 void InstructionSelector::VisitInt64LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWord64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWord64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint64LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWord64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWord64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32Equal(Node* node) {
-  FlagsContinuation cont(kUnorderedEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThan, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64Equal(Node* node) {
-  FlagsContinuation cont(kUnorderedEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThan, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
diff --git a/src/compiler/x87/code-generator-x87.cc b/src/compiler/x87/code-generator-x87.cc
index 1575570..da7fdb4 100644
--- a/src/compiler/x87/code-generator-x87.cc
+++ b/src/compiler/x87/code-generator-x87.cc
@@ -43,16 +43,13 @@
       return Operand(ToRegister(op));
     }
     DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
-    FrameOffset offset = frame_access_state()->GetFrameOffset(
-        AllocatedOperand::cast(op)->index());
-    return Operand(offset.from_stack_pointer() ? esp : ebp,
-                   offset.offset() + extra);
+    return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
   }
 
-  Operand ToMaterializableOperand(int materializable_offset) {
-    FrameOffset offset = frame_access_state()->GetFrameOffset(
-        FPOffsetToFrameSlot(materializable_offset));
-    return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
+  Operand SlotToOperand(int slot, int extra = 0) {
+    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
+    return Operand(offset.from_stack_pointer() ? esp : ebp,
+                   offset.offset() + extra);
   }
 
   Operand HighOperand(InstructionOperand* op) {
@@ -340,6 +337,42 @@
     __ bind(&done);                                          \
   } while (false)
 
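+// Emits a cmp/test-style instruction, placing a memory operand on the left
+// whenever the instruction carries an addressing mode.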
+#define ASSEMBLE_COMPARE(asm_instr)                                   \
+  do {                                                                \
+    if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
+      size_t index = 0;                                               \
+      Operand left = i.MemoryOperand(&index);                         \
+      if (HasImmediateInput(instr, index)) {                          \
+        __ asm_instr(left, i.InputImmediate(index));                  \
+      } else {                                                        \
+        __ asm_instr(left, i.InputRegister(index));                   \
+      }                                                               \
+    } else {                                                          \
+      if (HasImmediateInput(instr, 1)) {                              \
+        if (instr->InputAt(0)->IsRegister()) {                        \
+          __ asm_instr(i.InputRegister(0), i.InputImmediate(1));      \
+        } else {                                                      \
+          __ asm_instr(i.InputOperand(0), i.InputImmediate(1));       \
+        }                                                             \
+      } else {                                                        \
+        if (instr->InputAt(1)->IsRegister()) {                        \
+          __ asm_instr(i.InputRegister(0), i.InputRegister(1));       \
+        } else {                                                      \
+          __ asm_instr(i.InputRegister(0), i.InputOperand(1));        \
+        }                                                             \
+      }                                                               \
+    }                                                                 \
+  } while (0)
+
+void CodeGenerator::AssembleDeconstructFrame() {
+  __ mov(esp, ebp);
+  __ pop(ebp);
+}
+
+// Used to insert fninit/fld1 instructions after the prologue; see the
+// CL #35139 workaround in AssembleArchInstruction below.
+thread_local bool is_block_0 = false;
+
+void CodeGenerator::AssembleSetupStackPointer() { is_block_0 = true; }
 
 void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
   int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
@@ -356,18 +389,64 @@
     __ sub(esp, Immediate(-sp_slot_delta * kPointerSize));
     frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
   }
-  if (frame()->needs_frame()) {
+  if (frame_access_state()->has_frame()) {
     __ mov(ebp, MemOperand(ebp, 0));
   }
   frame_access_state()->SetFrameAccessToSP();
 }
 
+void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
+                                                     Register, Register,
+                                                     Register) {
+  // There are not enough temp registers left on ia32 for a call instruction,
+  // so we pick some scratch registers and save/restore them manually here.
+  int scratch_count = 3;
+  Register scratch1 = ebx;
+  Register scratch2 = ecx;
+  Register scratch3 = edx;
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Label done;
+
+  // Check if current frame is an arguments adaptor frame.
+  __ cmp(Operand(ebp, StandardFrameConstants::kContextOffset),
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &done, Label::kNear);
+
+  __ push(scratch1);
+  __ push(scratch2);
+  __ push(scratch3);
+
+  // Load the arguments count from the current arguments adaptor frame (note
+  // that it does not include the receiver).
+  Register caller_args_count_reg = scratch1;
+  __ mov(caller_args_count_reg,
+         Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(caller_args_count_reg);
+
+  ParameterCount callee_args_count(args_reg);
+  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
+                        scratch3, ReturnAddressState::kOnStack, scratch_count);
+  __ pop(scratch3);
+  __ pop(scratch2);
+  __ pop(scratch1);
+
+  __ bind(&done);
+}
 
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   X87OperandConverter i(this, instr);
+  InstructionCode opcode = instr->opcode();
+  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
 
-  switch (ArchOpcodeField::decode(instr->opcode())) {
+  // Workaround for CL #35139 (https://codereview.chromium.org/1775323002)
+  if (is_block_0) {
+    __ fninit();
+    __ fld1();
+    is_block_0 = false;
+  }
+
+  switch (arch_opcode) {
     case kArchCallCodeObject: {
       if (FLAG_debug_code && FLAG_enable_slow_asserts) {
         __ VerifyX87StackDepth(1);
@@ -399,6 +478,7 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
       if (FLAG_debug_code && FLAG_enable_slow_asserts) {
         __ VerifyX87StackDepth(1);
@@ -406,6 +486,10 @@
       __ fstp(0);
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         no_reg, no_reg, no_reg);
+      }
       if (HasImmediateInput(instr, 0)) {
         Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
         __ jmp(code, RelocInfo::CODE_TARGET);
@@ -447,6 +531,7 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
+    case kArchTailCallJSFunctionFromJSFunction:
     case kArchTailCallJSFunction: {
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
@@ -460,6 +545,10 @@
       __ fstp(0);
       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
       AssembleDeconstructActivationRecord(stack_param_delta);
+      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
+        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+                                         no_reg, no_reg, no_reg);
+      }
       __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
       frame_access_state()->ClearSPDelta();
       break;
@@ -554,7 +643,7 @@
       __ mov(i.OutputRegister(), esp);
       break;
     case kArchParentFramePointer:
-      if (frame_access_state()->frame()->needs_frame()) {
+      if (frame_access_state()->has_frame()) {
         __ mov(i.OutputRegister(), Operand(ebp, 0));
       } else {
         __ mov(i.OutputRegister(), ebp);
@@ -615,38 +704,22 @@
       }
       break;
     case kX87Cmp:
-      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
-        size_t index = 0;
-        Operand operand = i.MemoryOperand(&index);
-        if (HasImmediateInput(instr, index)) {
-          __ cmp(operand, i.InputImmediate(index));
-        } else {
-          __ cmp(operand, i.InputRegister(index));
-        }
-      } else {
-        if (HasImmediateInput(instr, 1)) {
-          __ cmp(i.InputOperand(0), i.InputImmediate(1));
-        } else {
-          __ cmp(i.InputRegister(0), i.InputOperand(1));
-        }
-      }
+      ASSEMBLE_COMPARE(cmp);
+      break;
+    case kX87Cmp16:
+      ASSEMBLE_COMPARE(cmpw);
+      break;
+    case kX87Cmp8:
+      ASSEMBLE_COMPARE(cmpb);
       break;
     case kX87Test:
-      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
-        size_t index = 0;
-        Operand operand = i.MemoryOperand(&index);
-        if (HasImmediateInput(instr, index)) {
-          __ test(operand, i.InputImmediate(index));
-        } else {
-          __ test(i.InputRegister(index), operand);
-        }
-      } else {
-        if (HasImmediateInput(instr, 1)) {
-          __ test(i.InputOperand(0), i.InputImmediate(1));
-        } else {
-          __ test(i.InputRegister(0), i.InputOperand(1));
-        }
-      }
+      ASSEMBLE_COMPARE(test);
+      break;
+    case kX87Test16:
+      ASSEMBLE_COMPARE(test_w);
+      break;
+    case kX87Test8:
+      ASSEMBLE_COMPARE(test_b);
       break;
     case kX87Imul:
       if (HasImmediateInput(instr, 1)) {
@@ -717,6 +790,92 @@
         __ sar_cl(i.OutputOperand());
       }
       break;
+    case kX87AddPair: {
+      // i.OutputRegister(0) == i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
+      bool use_temp = false;
+      if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
+          i.OutputRegister(0).code() == i.InputRegister(3).code()) {
+        // We cannot write to the output register directly, because it would
+        // overwrite an input for adc. We have to use the temp register.
+        use_temp = true;
+        __ Move(i.TempRegister(0), i.InputRegister(0));
+        __ add(i.TempRegister(0), i.InputRegister(2));
+      } else {
+        __ add(i.OutputRegister(0), i.InputRegister(2));
+      }
+      __ adc(i.InputRegister(1), Operand(i.InputRegister(3)));
+      if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
+        __ Move(i.OutputRegister(1), i.InputRegister(1));
+      }
+      if (use_temp) {
+        __ Move(i.OutputRegister(0), i.TempRegister(0));
+      }
+      break;
+    }
+    case kX87SubPair: {
+      // i.OutputRegister(0) == i.InputRegister(0) ... left low word.
+      // i.InputRegister(1) ... left high word.
+      // i.InputRegister(2) ... right low word.
+      // i.InputRegister(3) ... right high word.
+      bool use_temp = false;
+      if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
+          i.OutputRegister(0).code() == i.InputRegister(3).code()) {
+        // We cannot write to the output register directly, because it would
+        // overwrite an input for sbb. We have to use the temp register.
+        use_temp = true;
+        __ Move(i.TempRegister(0), i.InputRegister(0));
+        __ sub(i.TempRegister(0), i.InputRegister(2));
+      } else {
+        __ sub(i.OutputRegister(0), i.InputRegister(2));
+      }
+      __ sbb(i.InputRegister(1), Operand(i.InputRegister(3)));
+      if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
+        __ Move(i.OutputRegister(1), i.InputRegister(1));
+      }
+      if (use_temp) {
+        __ Move(i.OutputRegister(0), i.TempRegister(0));
+      }
+      break;
+    }
+    case kX87MulPair: {
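+      // Computes the product from 32-bit halves:
+      //   high = left_low * right_high + left_high * right_low
+      //          + high word of (left_low * right_low)
+      //   low  = low word of (left_low * right_low)
+      // OutputRegister(1) aliases InputRegister(3), so the right high word is
+      // already in place for the first imul.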
+      __ imul(i.OutputRegister(1), i.InputOperand(0));
+      __ mov(i.TempRegister(0), i.InputOperand(1));
+      __ imul(i.TempRegister(0), i.InputOperand(2));
+      __ add(i.OutputRegister(1), i.TempRegister(0));
+      __ mov(i.OutputRegister(0), i.InputOperand(0));
+      // Multiply the low words; mul leaves the 64-bit product in edx:eax.
+      __ mul(i.InputRegister(2));
+      __ add(i.OutputRegister(1), i.TempRegister(0));
+
+      break;
+    }
+    case kX87ShlPair:
+      if (HasImmediateInput(instr, 2)) {
+        __ ShlPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
+      } else {
+        // Shift has been loaded into CL by the register allocator.
+        __ ShlPair_cl(i.InputRegister(1), i.InputRegister(0));
+      }
+      break;
+    case kX87ShrPair:
+      if (HasImmediateInput(instr, 2)) {
+        __ ShrPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
+      } else {
+        // Shift has been loaded into CL by the register allocator.
+        __ ShrPair_cl(i.InputRegister(1), i.InputRegister(0));
+      }
+      break;
+    case kX87SarPair:
+      if (HasImmediateInput(instr, 2)) {
+        __ SarPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
+      } else {
+        // Shift has been loaded into CL by the register allocator.
+        __ SarPair_cl(i.InputRegister(1), i.InputRegister(0));
+      }
+      break;
     case kX87Ror:
       if (HasImmediateInput(instr, 1)) {
         __ ror(i.OutputOperand(), i.InputInt5(1));
@@ -1176,8 +1335,8 @@
       InstructionOperand* input = instr->InputAt(0);
       if (input->IsDoubleRegister()) {
         __ sub(esp, Immediate(kDoubleSize));
-        __ fstp_d(MemOperand(esp, 0));
-        __ fld_d(MemOperand(esp, 0));
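+        // Spill through a single-precision slot so the value is rounded to
+        // float32 by the store.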
+        __ fstp_s(MemOperand(esp, 0));
+        __ fld_s(MemOperand(esp, 0));
         __ add(esp, Immediate(kDoubleSize));
       } else {
         DCHECK(input->IsDoubleStackSlot());
@@ -1264,11 +1423,12 @@
         __ fld_d(i.InputOperand(0));
       }
       __ fild_s(Operand(esp, 0));
-      __ fadd(1);
-      __ fstp(0);
+      __ fld(1);
+      __ faddp();
       __ TruncateX87TOSToI(i.OutputRegister(0));
       __ add(esp, Immediate(kInt32Size));
       __ add(i.OutputRegister(), Immediate(0x80000000));
+      __ fstp(0);
       if (!instr->InputAt(0)->IsDoubleRegister()) {
         __ fstp(0);
       }
@@ -1610,8 +1770,16 @@
   X87OperandConverter i(this, instr);
   Label::Distance flabel_distance =
       branch->fallthru ? Label::kNear : Label::kFar;
-  Label* tlabel = branch->true_label;
-  Label* flabel = branch->false_label;
+
+  Label done;
+  Label tlabel_tmp;
+  Label flabel_tmp;
+  Label* tlabel = &tlabel_tmp;
+  Label* flabel = &flabel_tmp;
+
+  Label* tlabel_dst = branch->true_label;
+  Label* flabel_dst = branch->false_label;
+
   switch (branch->condition) {
     case kUnorderedEqual:
       __ j(parity_even, flabel, flabel_distance);
@@ -1661,6 +1829,34 @@
   }
   // Add a jump if not falling through to the next block.
   if (!branch->fallthru) __ jmp(flabel);
+
+  __ jmp(&done);
+  __ bind(&tlabel_tmp);
+  FlagsMode mode = FlagsModeField::decode(instr->opcode());
+  if (mode == kFlags_deoptimize) {
+    int double_register_param_count = 0;
+    int x87_layout = 0;
+    for (size_t i = 0; i < instr->InputCount(); i++) {
+      if (instr->InputAt(i)->IsDoubleRegister()) {
+        double_register_param_count++;
+      }
+    }
+    // Currently we use only one X87 register. If double_register_param_count
+    // is bigger than 1, it means a duplicated double register has been added
+    // as an input to this instruction.
+    if (double_register_param_count > 0) {
+      x87_layout = (0 << 3) | 1;
+    }
+    // The layout of the x87 register stack is loaded onto the top of the FPU
+    // register stack for deoptimization.
+    __ push(Immediate(x87_layout));
+    __ fild_s(MemOperand(esp, 0));
+    __ lea(esp, Operand(esp, kPointerSize));
+  }
+  __ jmp(tlabel_dst);
+  __ bind(&flabel_tmp);
+  __ jmp(flabel_dst);
+  __ bind(&done);
 }
 
 
@@ -1914,21 +2110,16 @@
 
 void CodeGenerator::AssemblePrologue() {
   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-  if (descriptor->IsCFunctionCall()) {
-    // Assemble a prologue similar the to cdecl calling convention.
-    __ push(ebp);
-    __ mov(ebp, esp);
-  } else if (descriptor->IsJSFunctionCall()) {
-    // TODO(turbofan): this prologue is redundant with OSR, but needed for
-    // code aging.
-    __ Prologue(this->info()->GeneratePreagedPrologue());
-  } else if (frame()->needs_frame()) {
-    __ StubPrologue();
-  } else {
-    frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
+  if (frame_access_state()->has_frame()) {
+    if (descriptor->IsCFunctionCall()) {
+      __ push(ebp);
+      __ mov(ebp, esp);
+    } else if (descriptor->IsJSFunctionCall()) {
+      __ Prologue(this->info()->GeneratePreagedPrologue());
+    } else {
+      __ StubPrologue(info()->GetOutputStackFrameType());
+    }
   }
-  frame_access_state()->SetFrameAccessToDefault();
-
   int stack_shrink_slots = frame()->GetSpillSlotCount();
   if (info()->is_osr()) {
     // TurboFan OSR-compiled functions cannot be entered directly.
@@ -1941,6 +2132,10 @@
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
     stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+
+    // Initialize FPU state.
+    __ fninit();
+    __ fld1();
   }
 
   const RegList saves = descriptor->CalleeSavedRegisters();
@@ -1958,10 +2153,6 @@
     }
     frame()->AllocateSavedCalleeRegisterSlots(pushed);
   }
-
-  // Initailize FPU state.
-  __ fninit();
-  __ fld1();
 }
 
 
@@ -1994,17 +2185,15 @@
   }
 
   if (descriptor->IsCFunctionCall()) {
-    __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
-    __ pop(ebp);       // Pop caller's frame pointer.
-  } else if (frame()->needs_frame()) {
+    AssembleDeconstructFrame();
+  } else if (frame_access_state()->has_frame()) {
     // Canonicalize JSFunction return sites for now.
     if (return_label_.is_bound()) {
       __ jmp(&return_label_);
       return;
     } else {
       __ bind(&return_label_);
-      __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
-      __ pop(ebp);       // Pop caller's frame pointer.
+      AssembleDeconstructFrame();
     }
   }
   if (pop_count == 0) {
@@ -2040,15 +2229,15 @@
     Constant src_constant = g.ToConstant(source);
     if (src_constant.type() == Constant::kHeapObject) {
       Handle<HeapObject> src = src_constant.ToHeapObject();
-      int offset;
-      if (IsMaterializableFromFrame(src, &offset)) {
+      int slot;
+      if (IsMaterializableFromFrame(src, &slot)) {
         if (destination->IsRegister()) {
           Register dst = g.ToRegister(destination);
-          __ mov(dst, g.ToMaterializableOperand(offset));
+          __ mov(dst, g.SlotToOperand(slot));
         } else {
           DCHECK(destination->IsStackSlot());
           Operand dst = g.ToOperand(destination);
-          __ push(g.ToMaterializableOperand(offset));
+          __ push(g.SlotToOperand(slot));
           __ pop(dst);
         }
       } else if (destination->IsRegister()) {
diff --git a/src/compiler/x87/instruction-codes-x87.h b/src/compiler/x87/instruction-codes-x87.h
index e5d0912..d70a737 100644
--- a/src/compiler/x87/instruction-codes-x87.h
+++ b/src/compiler/x87/instruction-codes-x87.h
@@ -17,7 +17,11 @@
   V(X87Add)                        \
   V(X87And)                        \
   V(X87Cmp)                        \
+  V(X87Cmp16)                      \
+  V(X87Cmp8)                       \
   V(X87Test)                       \
+  V(X87Test16)                     \
+  V(X87Test8)                      \
   V(X87Or)                         \
   V(X87Xor)                        \
   V(X87Sub)                        \
@@ -31,6 +35,12 @@
   V(X87Shl)                        \
   V(X87Shr)                        \
   V(X87Sar)                        \
+  V(X87AddPair)                    \
+  V(X87SubPair)                    \
+  V(X87MulPair)                    \
+  V(X87ShlPair)                    \
+  V(X87ShrPair)                    \
+  V(X87SarPair)                    \
   V(X87Ror)                        \
   V(X87Lzcnt)                      \
   V(X87Popcnt)                     \
diff --git a/src/compiler/x87/instruction-selector-x87.cc b/src/compiler/x87/instruction-selector-x87.cc
index 079d5d2..e4d085e 100644
--- a/src/compiler/x87/instruction-selector-x87.cc
+++ b/src/compiler/x87/instruction-selector-x87.cc
@@ -27,6 +27,30 @@
     return DefineAsRegister(node);
   }
 
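+  // Determines whether {input} of {node} can be replaced by a memory operand
+  // of the width implied by {opcode}.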
+  bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input) {
+    if (input->opcode() != IrOpcode::kLoad ||
+        !selector()->CanCover(node, input)) {
+      return false;
+    }
+    MachineRepresentation rep =
+        LoadRepresentationOf(input->op()).representation();
+    switch (opcode) {
+      case kX87Cmp:
+      case kX87Test:
+        return rep == MachineRepresentation::kWord32 ||
+               rep == MachineRepresentation::kTagged;
+      case kX87Cmp16:
+      case kX87Test16:
+        return rep == MachineRepresentation::kWord16;
+      case kX87Cmp8:
+      case kX87Test8:
+        return rep == MachineRepresentation::kWord8;
+      default:
+        break;
+    }
+    return false;
+  }
+
   InstructionOperand CreateImmediate(int imm) {
     return sequence()->AddImmediate(Constant(imm));
   }
@@ -193,9 +217,7 @@
       inputs[input_count++] = g.UseUniqueRegister(index);
       addressing_mode = kMode_MR1;
     }
-    inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
-                                ? g.UseRegister(value)
-                                : g.UseUniqueRegister(value);
+    inputs[input_count++] = g.UseUniqueRegister(value);
     RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
     switch (write_barrier_kind) {
       case kNoWriteBarrier:
@@ -365,10 +387,11 @@
   }
 }
 
+namespace {
 
 // Shared routine for multiple binary operations.
-static void VisitBinop(InstructionSelector* selector, Node* node,
-                       InstructionCode opcode, FlagsContinuation* cont) {
+void VisitBinop(InstructionSelector* selector, Node* node,
+                InstructionCode opcode, FlagsContinuation* cont) {
   X87OperandGenerator g(selector);
   Int32BinopMatcher m(node);
   Node* left = m.left().node();
@@ -417,18 +440,24 @@
   DCHECK_GE(arraysize(inputs), input_count);
   DCHECK_GE(arraysize(outputs), output_count);
 
-  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
-                 inputs);
+  opcode = cont->Encode(opcode);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
 }
 
 
 // Shared routine for multiple binary operations.
-static void VisitBinop(InstructionSelector* selector, Node* node,
-                       InstructionCode opcode) {
+void VisitBinop(InstructionSelector* selector, Node* node,
+                InstructionCode opcode) {
   FlagsContinuation cont;
   VisitBinop(selector, node, opcode, &cont);
 }
 
+}  // namespace
 
 void InstructionSelector::VisitWord32And(Node* node) {
   VisitBinop(this, node, kX87And);
@@ -491,9 +520,10 @@
 
 void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
   X87OperandGenerator g(selector);
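+  // The div sequence implicitly clobbers eax, so reserve it as a temp in
+  // addition to fixing the dividend to it.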
+  InstructionOperand temps[] = {g.TempRegister(eax)};
   selector->Emit(opcode, g.DefineAsFixed(node, edx),
                  g.UseFixed(node->InputAt(0), eax),
-                 g.UseUnique(node->InputAt(1)));
+                 g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
 }
 
 void EmitLea(InstructionSelector* selector, Node* result, Node* index,
@@ -539,6 +569,93 @@
   VisitShift(this, node, kX87Sar);
 }
 
+void InstructionSelector::VisitInt32PairAdd(Node* node) {
+  X87OperandGenerator g(this);
+
+  // We use UseUniqueRegister here to avoid register sharing with the temp
+  // register.
+  InstructionOperand inputs[] = {
+      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+  InstructionOperand outputs[] = {
+      g.DefineSameAsFirst(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  InstructionOperand temps[] = {g.TempRegister()};
+
+  Emit(kX87AddPair, 2, outputs, 4, inputs, 1, temps);
+}
+
+void InstructionSelector::VisitInt32PairSub(Node* node) {
+  X87OperandGenerator g(this);
+
+  // We use UseUniqueRegister here to avoid register sharing with the temp
+  // register.
+  InstructionOperand inputs[] = {
+      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+  InstructionOperand outputs[] = {
+      g.DefineSameAsFirst(node),
+      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+  InstructionOperand temps[] = {g.TempRegister()};
+
+  Emit(kX87SubPair, 2, outputs, 4, inputs, 1, temps);
+}
+
+void InstructionSelector::VisitInt32PairMul(Node* node) {
+  X87OperandGenerator g(this);
+
+  // InputAt(3) explicitly shares ecx with OutputRegister(1) to save one
+  // register and one mov instruction.
+  InstructionOperand inputs[] = {
+      g.UseUnique(node->InputAt(0)), g.UseUnique(node->InputAt(1)),
+      g.UseUniqueRegister(node->InputAt(2)), g.UseFixed(node->InputAt(3), ecx)};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsFixed(node, eax),
+      g.DefineAsFixed(NodeProperties::FindProjection(node, 1), ecx)};
+
+  InstructionOperand temps[] = {g.TempRegister(edx)};
+
+  Emit(kX87MulPair, 2, outputs, 4, inputs, 1, temps);
+}
+
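+// Shared routine for the 32-bit-pair shift operations.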
+void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
+                          Node* node) {
+  X87OperandGenerator g(selector);
+
+  Node* shift = node->InputAt(2);
+  InstructionOperand shift_operand;
+  if (g.CanBeImmediate(shift)) {
+    shift_operand = g.UseImmediate(shift);
+  } else {
+    shift_operand = g.UseFixed(shift, ecx);
+  }
+  InstructionOperand inputs[] = {g.UseFixed(node->InputAt(0), eax),
+                                 g.UseFixed(node->InputAt(1), edx),
+                                 shift_operand};
+
+  InstructionOperand outputs[] = {
+      g.DefineAsFixed(node, eax),
+      g.DefineAsFixed(NodeProperties::FindProjection(node, 1), edx)};
+
+  selector->Emit(opcode, 2, outputs, 3, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShl(Node* node) {
+  VisitWord32PairShift(this, kX87ShlPair, node);
+}
+
+void InstructionSelector::VisitWord32PairShr(Node* node) {
+  VisitWord32PairShift(this, kX87ShrPair, node);
+}
+
+void InstructionSelector::VisitWord32PairSar(Node* node) {
+  VisitWord32PairShift(this, kX87SarPair, node);
+}
 
 void InstructionSelector::VisitWord32Ror(Node* node) {
   VisitShift(this, node, kX87Ror);
@@ -714,6 +831,10 @@
   Emit(kX87Float64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
 }
 
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+  X87OperandGenerator g(this);
+  Emit(kX87Float64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
 
 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
   X87OperandGenerator g(this);
@@ -990,6 +1111,7 @@
 
 bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
 
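+// No temps are requested here: on x87, AssemblePopArgumentsAdaptorFrame saves
+// and restores its scratch registers manually.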
+int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 0; }
 
 namespace {
 
@@ -1011,6 +1133,9 @@
     inputs[input_count++] = g.Label(cont->true_block());
     inputs[input_count++] = g.Label(cont->false_block());
     selector->Emit(opcode, 0, nullptr, input_count, inputs);
+  } else if (cont->IsDeoptimize()) {
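+    // EmitDeoptimize appends the frame state inputs, so the instruction can
+    // trigger a deoptimization instead of branching or materializing a
+    // boolean.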
+    selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     InstructionOperand output = g.DefineAsRegister(cont->result());
@@ -1018,33 +1143,21 @@
   }
 }
 
-// Determines if {input} of {node} can be replaced by a memory operand.
-bool CanUseMemoryOperand(InstructionSelector* selector, InstructionCode opcode,
-                         Node* node, Node* input) {
-  if (input->opcode() != IrOpcode::kLoad || !selector->CanCover(node, input)) {
-    return false;
-  }
-  MachineRepresentation load_representation =
-      LoadRepresentationOf(input->op()).representation();
-  if (load_representation == MachineRepresentation::kWord32 ||
-      load_representation == MachineRepresentation::kTagged) {
-    return opcode == kX87Cmp || opcode == kX87Test;
-  }
-  return false;
-}
-
 // Shared routine for multiple compare operations.
 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                   InstructionOperand left, InstructionOperand right,
                   FlagsContinuation* cont) {
   X87OperandGenerator g(selector);
+  opcode = cont->Encode(opcode);
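+  // The flags mode and condition are folded into the opcode once here,
+  // rather than separately in each branch below.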
   if (cont->IsBranch()) {
-    selector->Emit(cont->Encode(opcode), g.NoOutput(), left, right,
+    selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
-    selector->Emit(cont->Encode(opcode), g.DefineAsByteRegister(cont->result()),
-                   left, right);
+    selector->Emit(opcode, g.DefineAsByteRegister(cont->result()), left, right);
   }
 }
 
@@ -1060,6 +1173,36 @@
   VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
 }
 
+// Tries to match the size of the given opcode to that of the operands, if
+// possible.
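+// For example, a comparison of two kWord8 loads can use a byte-sized cmp
+// against memory instead of loading and extending both operands to 32 bits.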
+InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
+                                    Node* right) {
+  if (opcode != kX87Cmp && opcode != kX87Test) {
+    return opcode;
+  }
+  // Currently, if one of the two operands is not a Load, we don't know what its
+  // machine representation is, so we bail out.
+  // TODO(epertoso): we can probably get some size information out of immediates
+  // and phi nodes.
+  if (left->opcode() != IrOpcode::kLoad || right->opcode() != IrOpcode::kLoad) {
+    return opcode;
+  }
+  // If the load representations don't match, both operands will be
+  // zero/sign-extended to 32 bits.
+  LoadRepresentation left_representation = LoadRepresentationOf(left->op());
+  if (left_representation != LoadRepresentationOf(right->op())) {
+    return opcode;
+  }
+  switch (left_representation.representation()) {
+    case MachineRepresentation::kBit:
+    case MachineRepresentation::kWord8:
+      return opcode == kX87Cmp ? kX87Cmp8 : kX87Test8;
+    case MachineRepresentation::kWord16:
+      return opcode == kX87Cmp ? kX87Cmp16 : kX87Test16;
+    default:
+      return opcode;
+  }
+}
 
 // Shared routine for multiple float32 compare operations (inputs commuted).
 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
@@ -1070,6 +1213,10 @@
   if (cont->IsBranch()) {
     selector->Emit(cont->Encode(kX87Float32Cmp), g.NoOutput(),
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(cont->Encode(kX87Float32Cmp), g.NoOutput(),
+                             g.Use(node->InputAt(0)), g.Use(node->InputAt(1)),
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     selector->Emit(cont->Encode(kX87Float32Cmp),
@@ -1087,6 +1234,10 @@
   if (cont->IsBranch()) {
     selector->Emit(cont->Encode(kX87Float64Cmp), g.NoOutput(),
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
+  } else if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(cont->Encode(kX87Float64Cmp), g.NoOutput(),
+                             g.Use(node->InputAt(0)), g.Use(node->InputAt(1)),
+                             cont->frame_state());
   } else {
     DCHECK(cont->IsSet());
     selector->Emit(cont->Encode(kX87Float64Cmp),
@@ -1101,15 +1252,22 @@
   Node* left = node->InputAt(0);
   Node* right = node->InputAt(1);
 
-  // If one of the two inputs is an immediate, make sure it's on the right.
-  if (!g.CanBeImmediate(right) && g.CanBeImmediate(left)) {
+  InstructionCode narrowed_opcode = TryNarrowOpcodeSize(opcode, left, right);
+
+  // If one of the two inputs is an immediate, make sure it's on the right, or
+  // if one of the two inputs is a memory operand, make sure it's on the left.
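+  // For non-commutative operations the continuation's condition is commuted
+  // along with the operands (e.g. "imm < x" becomes "x > imm"), preserving
+  // the result.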
+  if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
+      (g.CanBeMemoryOperand(narrowed_opcode, node, right) &&
+       !g.CanBeMemoryOperand(narrowed_opcode, node, left))) {
     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
     std::swap(left, right);
   }
 
   // Match immediates on right side of comparison.
   if (g.CanBeImmediate(right)) {
-    if (CanUseMemoryOperand(selector, opcode, node, left)) {
+    if (g.CanBeMemoryOperand(opcode, node, left)) {
+      // TODO(epertoso): we should use `narrowed_opcode` here once we match
+      // immediates too.
       return VisitCompareWithMemoryOperand(selector, opcode, left,
                                            g.UseImmediate(right), cont);
     }
@@ -1117,15 +1275,21 @@
                         cont);
   }
 
+  // Match memory operands on left side of comparison.
+  if (g.CanBeMemoryOperand(narrowed_opcode, node, left)) {
+    bool needs_byte_register =
+        narrowed_opcode == kX87Test8 || narrowed_opcode == kX87Cmp8;
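+    // Only eax, ebx, ecx and edx have byte-addressable low halves on ia32,
+    // so byte-sized compares need the register operand in a byte register.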
+    return VisitCompareWithMemoryOperand(
+        selector, narrowed_opcode, left,
+        needs_byte_register ? g.UseByteRegister(right) : g.UseRegister(right),
+        cont);
+  }
+
   if (g.CanBeBetterLeftOperand(right)) {
     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
     std::swap(left, right);
   }
 
-  if (CanUseMemoryOperand(selector, opcode, node, left)) {
-    return VisitCompareWithMemoryOperand(selector, opcode, left,
-                                         g.UseRegister(right), cont);
-  }
   return VisitCompare(selector, opcode, left, right, cont,
                       node->op()->HasProperty(Operator::kCommutative));
 }
@@ -1145,6 +1309,9 @@
       if (cont->IsBranch()) {
         selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
                        g.Label(cont->false_block()));
+      } else if (cont->IsDeoptimize()) {
+        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr,
+                                 cont->frame_state());
       } else {
         DCHECK(cont->IsSet());
         selector->Emit(opcode, g.DefineAsRegister(cont->result()));
@@ -1254,6 +1421,17 @@
   VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
 }
 
+void InstructionSelector::VisitDeoptimizeIf(Node* node) {
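+  // DeoptimizeIf bails out when its input is non-zero, hence the kNotEqual
+  // test against the compare-with-zero; DeoptimizeUnless mirrors this with
+  // kEqual.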
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
 
 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
   X87OperandGenerator g(this);
@@ -1284,7 +1462,7 @@
 
 
 void InstructionSelector::VisitWord32Equal(Node* const node) {
-  FlagsContinuation cont(kEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int32BinopMatcher m(node);
   if (m.right().Is(0)) {
     return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
@@ -1294,32 +1472,34 @@
 
 
 void InstructionSelector::VisitInt32LessThan(Node* node) {
-  FlagsContinuation cont(kSignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThan, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
   VisitWordCompare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kX87Add, &cont);
   }
   FlagsContinuation cont;
@@ -1329,7 +1509,7 @@
 
 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
-    FlagsContinuation cont(kOverflow, ovf);
+    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop(this, node, kX87Sub, &cont);
   }
   FlagsContinuation cont;
@@ -1338,37 +1518,41 @@
 
 
 void InstructionSelector::VisitFloat32Equal(Node* node) {
-  FlagsContinuation cont(kUnorderedEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThan, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
   VisitFloat32Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64Equal(Node* node) {
-  FlagsContinuation cont(kUnorderedEqual, node);
+  FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThan(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThan, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
 
 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
-  FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+  FlagsContinuation cont =
+      FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
   VisitFloat64Compare(this, node, &cont);
 }
 
diff --git a/src/compiler/zone-pool.cc b/src/compiler/zone-pool.cc
index 2006a79..13fec35 100644
--- a/src/compiler/zone-pool.cc
+++ b/src/compiler/zone-pool.cc
@@ -13,7 +13,7 @@
       total_allocated_bytes_at_start_(zone_pool->GetTotalAllocatedBytes()),
       max_allocated_bytes_(0) {
   zone_pool_->stats_.push_back(this);
-  for (auto zone : zone_pool_->used_) {
+  for (Zone* zone : zone_pool_->used_) {
     size_t size = static_cast<size_t>(zone->allocation_size());
     std::pair<InitialValues::iterator, bool> res =
         initial_values_.insert(std::make_pair(zone, size));
@@ -64,9 +64,8 @@
   }
 }
 
-
-ZonePool::ZonePool() : max_allocated_bytes_(0), total_deleted_bytes_(0) {}
-
+ZonePool::ZonePool(base::AccountingAllocator* allocator)
+    : max_allocated_bytes_(0), total_deleted_bytes_(0), allocator_(allocator) {}
 
 ZonePool::~ZonePool() {
   DCHECK(used_.empty());
@@ -103,7 +102,7 @@
     zone = unused_.back();
     unused_.pop_back();
   } else {
-    zone = new Zone();
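+    // New zones draw from the injected allocator, presumably so that zone
+    // memory can be accounted per isolate rather than via a global default.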
+    zone = new Zone(allocator_);
   }
   used_.push_back(zone);
   DCHECK_EQ(0u, zone->allocation_size());
@@ -116,7 +115,7 @@
   // Update max.
   max_allocated_bytes_ = std::max(max_allocated_bytes_, current_total);
   // Update stats.
-  for (auto stat_scope : stats_) {
+  for (StatsScope* stat_scope : stats_) {
     stat_scope->ZoneReturned(zone);
   }
   // Remove from used.
diff --git a/src/compiler/zone-pool.h b/src/compiler/zone-pool.h
index aaf9daa..44a649f 100644
--- a/src/compiler/zone-pool.h
+++ b/src/compiler/zone-pool.h
@@ -61,7 +61,7 @@
     DISALLOW_COPY_AND_ASSIGN(StatsScope);
   };
 
-  ZonePool();
+  explicit ZonePool(base::AccountingAllocator* allocator);
   ~ZonePool();
 
   size_t GetMaxAllocatedBytes();
@@ -82,6 +82,7 @@
   Stats stats_;
   size_t max_allocated_bytes_;
   size_t total_deleted_bytes_;
+  base::AccountingAllocator* allocator_;
 
   DISALLOW_COPY_AND_ASSIGN(ZonePool);
 };