Revert "Revert "Upgrade to 5.0.71.48"" DO NOT MERGE

This reverts commit f2e3994fa5148cc3d9946666f0b0596290192b0e,
and updates the x64 makefile properly so it doesn't break that
build.

FPIIM-449

Change-Id: Ib83e35bfbae6af627451c926a9650ec57c045605
(cherry picked from commit 109988c7ccb6f3fd1a58574fa3dfb88beaef6632)
diff --git a/src/interpreter/DEPS b/src/interpreter/DEPS
deleted file mode 100644
index f8d6b98..0000000
--- a/src/interpreter/DEPS
+++ /dev/null
@@ -1,3 +0,0 @@
-include_rules = [
-  "+src/compiler/interpreter-assembler.h",
-]
diff --git a/src/interpreter/bytecode-array-builder.cc b/src/interpreter/bytecode-array-builder.cc
index 1b15fc6..7103c72 100644
--- a/src/interpreter/bytecode-array-builder.cc
+++ b/src/interpreter/bytecode-array-builder.cc
@@ -3,12 +3,13 @@
 // found in the LICENSE file.
 
 #include "src/interpreter/bytecode-array-builder.h"
+#include "src/compiler.h"
 
 namespace v8 {
 namespace internal {
 namespace interpreter {
 
-class BytecodeArrayBuilder::PreviousBytecodeHelper {
+class BytecodeArrayBuilder::PreviousBytecodeHelper BASE_EMBEDDED {
  public:
   explicit PreviousBytecodeHelper(const BytecodeArrayBuilder& array_builder)
       : array_builder_(array_builder),
@@ -37,9 +38,9 @@
         Bytecodes::GetOperandOffset(bytecode, operand_index);
     OperandSize size = Bytecodes::GetOperandSize(bytecode, operand_index);
     switch (size) {
-      default:
       case OperandSize::kNone:
         UNREACHABLE();
+        break;
       case OperandSize::kByte:
         return static_cast<uint32_t>(
             array_builder_.bytecodes()->at(operand_offset));
@@ -49,6 +50,7 @@
             array_builder_.bytecodes()->at(operand_offset + 1);
         return static_cast<uint32_t>(operand);
     }
+    return 0;
   }
 
   Handle<Object> GetConstantForIndexOperand(int operand_index) const {
@@ -63,43 +65,31 @@
   DISALLOW_COPY_AND_ASSIGN(PreviousBytecodeHelper);
 };
 
-
-BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate, Zone* zone)
+BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate, Zone* zone,
+                                           int parameter_count,
+                                           int context_count, int locals_count)
     : isolate_(isolate),
       zone_(zone),
       bytecodes_(zone),
       bytecode_generated_(false),
       constant_array_builder_(isolate, zone),
+      handler_table_builder_(isolate, zone),
+      source_position_table_builder_(isolate, zone),
       last_block_end_(0),
       last_bytecode_start_(~0),
       exit_seen_in_block_(false),
       unbound_jumps_(0),
-      parameter_count_(-1),
-      local_register_count_(-1),
-      context_register_count_(-1),
-      temporary_register_count_(0),
-      free_temporaries_(zone) {}
-
-
-BytecodeArrayBuilder::~BytecodeArrayBuilder() { DCHECK_EQ(0, unbound_jumps_); }
-
-
-void BytecodeArrayBuilder::set_locals_count(int number_of_locals) {
-  local_register_count_ = number_of_locals;
-  DCHECK_LE(context_register_count_, 0);
-}
-
-
-void BytecodeArrayBuilder::set_parameter_count(int number_of_parameters) {
-  parameter_count_ = number_of_parameters;
-}
-
-
-void BytecodeArrayBuilder::set_context_count(int number_of_contexts) {
-  context_register_count_ = number_of_contexts;
+      parameter_count_(parameter_count),
+      local_register_count_(locals_count),
+      context_register_count_(context_count),
+      temporary_allocator_(zone, fixed_register_count()),
+      register_translator_(this) {
+  DCHECK_GE(parameter_count_, 0);
+  DCHECK_GE(context_register_count_, 0);
   DCHECK_GE(local_register_count_, 0);
 }
 
+BytecodeArrayBuilder::~BytecodeArrayBuilder() { DCHECK_EQ(0, unbound_jumps_); }
 
 Register BytecodeArrayBuilder::first_context_register() const {
   DCHECK_GT(context_register_count_, 0);
@@ -113,18 +103,6 @@
 }
 
 
-Register BytecodeArrayBuilder::first_temporary_register() const {
-  DCHECK_GT(temporary_register_count_, 0);
-  return Register(fixed_register_count());
-}
-
-
-Register BytecodeArrayBuilder::last_temporary_register() const {
-  DCHECK_GT(temporary_register_count_, 0);
-  return Register(fixed_register_count() + temporary_register_count_ - 1);
-}
-
-
 Register BytecodeArrayBuilder::Parameter(int parameter_index) const {
   DCHECK_GE(parameter_index, 0);
   return Register::FromParameterIndex(parameter_index, parameter_count());
@@ -136,25 +114,23 @@
 }
 
 
-bool BytecodeArrayBuilder::RegisterIsTemporary(Register reg) const {
-  return temporary_register_count_ > 0 && first_temporary_register() <= reg &&
-         reg <= last_temporary_register();
-}
-
-
 Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray() {
   DCHECK_EQ(bytecode_generated_, false);
-  EnsureReturn();
+  DCHECK(exit_seen_in_block_);
 
   int bytecode_size = static_cast<int>(bytecodes_.size());
-  int register_count = fixed_register_count() + temporary_register_count_;
+  int register_count =
+      fixed_and_temporary_register_count() + translation_register_count();
   int frame_size = register_count * kPointerSize;
-  Factory* factory = isolate_->factory();
-  Handle<FixedArray> constant_pool =
-      constant_array_builder()->ToFixedArray(factory);
-  Handle<BytecodeArray> output =
-      factory->NewBytecodeArray(bytecode_size, &bytecodes_.front(), frame_size,
-                                parameter_count(), constant_pool);
+  Handle<FixedArray> constant_pool = constant_array_builder()->ToFixedArray();
+  Handle<FixedArray> handler_table = handler_table_builder()->ToHandlerTable();
+  Handle<FixedArray> source_position_table =
+      source_position_table_builder()->ToFixedArray();
+  Handle<BytecodeArray> output = isolate_->factory()->NewBytecodeArray(
+      bytecode_size, &bytecodes_.front(), frame_size, parameter_count(),
+      constant_pool);
+  output->set_handler_table(*handler_table);
+  output->set_source_position_table(*source_position_table);
   bytecode_generated_ = true;
   return output;
 }
@@ -163,16 +139,28 @@
 template <size_t N>
 void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t(&operands)[N]) {
   // Don't output dead code.
-  if (exit_seen_in_block_) return;
+  if (exit_seen_in_block_) {
+    source_position_table_builder_.RevertPosition(bytecodes()->size());
+    return;
+  }
 
-  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), static_cast<int>(N));
+  int operand_count = static_cast<int>(N);
+  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count);
+
+  int register_operand_count = Bytecodes::NumberOfRegisterOperands(bytecode);
+  if (register_operand_count > 0) {
+    register_translator()->TranslateInputRegisters(bytecode, operands,
+                                                   operand_count);
+  }
+
   last_bytecode_start_ = bytecodes()->size();
   bytecodes()->push_back(Bytecodes::ToByte(bytecode));
-  for (int i = 0; i < static_cast<int>(N); i++) {
+  for (int i = 0; i < operand_count; i++) {
     DCHECK(OperandIsValid(bytecode, i, operands[i]));
     switch (Bytecodes::GetOperandSize(bytecode, i)) {
       case OperandSize::kNone:
         UNREACHABLE();
+        break;
       case OperandSize::kByte:
         bytecodes()->push_back(static_cast<uint8_t>(operands[i]));
         break;
@@ -185,6 +173,10 @@
       }
     }
   }
+
+  if (register_operand_count > 0) {
+    register_translator()->TranslateOutputRegisters();
+  }
 }
 
 
@@ -218,32 +210,23 @@
 
 void BytecodeArrayBuilder::Output(Bytecode bytecode) {
   // Don't output dead code.
-  if (exit_seen_in_block_) return;
+  if (exit_seen_in_block_) {
+    source_position_table_builder_.RevertPosition(bytecodes()->size());
+    return;
+  }
 
   DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
   last_bytecode_start_ = bytecodes()->size();
   bytecodes()->push_back(Bytecodes::ToByte(bytecode));
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
-                                                            Register reg,
-                                                            Strength strength) {
-  if (is_strong(strength)) {
-    UNIMPLEMENTED();
-  }
-
-  Output(BytecodeForBinaryOperation(op), reg.ToOperand());
+                                                            Register reg) {
+  Output(BytecodeForBinaryOperation(op), reg.ToRawOperand());
   return *this;
 }
 
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::CountOperation(Token::Value op,
-                                                           Strength strength) {
-  if (is_strong(strength)) {
-    UNIMPLEMENTED();
-  }
-
+BytecodeArrayBuilder& BytecodeArrayBuilder::CountOperation(Token::Value op) {
   Output(BytecodeForCountOperation(op));
   return *this;
 }
@@ -260,14 +243,9 @@
   return *this;
 }
 
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(
-    Token::Value op, Register reg, Strength strength) {
-  if (is_strong(strength)) {
-    UNIMPLEMENTED();
-  }
-
-  Output(BytecodeForCompareOperation(op), reg.ToOperand());
+BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(Token::Value op,
+                                                             Register reg) {
+  Output(BytecodeForCompareOperation(op), reg.ToRawOperand());
   return *this;
 }
 
@@ -338,11 +316,10 @@
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
     Register reg) {
   if (!IsRegisterInAccumulator(reg)) {
-    Output(Bytecode::kLdar, reg.ToOperand());
+    Output(Bytecode::kLdar, reg.ToRawOperand());
   }
   return *this;
 }
@@ -350,15 +327,8 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
     Register reg) {
-  // TODO(oth): Avoid storing the accumulator in the register if the
-  // previous bytecode loaded the accumulator with the same register.
-  //
-  // TODO(oth): If the previous bytecode is a MOV into this register,
-  // the previous instruction can be removed. The logic for determining
-  // these redundant MOVs appears complex.
-  Output(Bytecode::kStar, reg.ToOperand());
   if (!IsRegisterInAccumulator(reg)) {
-    Output(Bytecode::kStar, reg.ToOperand());
+    Output(Bytecode::kStar, reg.ToRawOperand());
   }
   return *this;
 }
@@ -367,31 +337,37 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::MoveRegister(Register from,
                                                          Register to) {
   DCHECK(from != to);
-  Output(Bytecode::kMov, from.ToOperand(), to.ToOperand());
-  return *this;
-}
-
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::ExchangeRegisters(Register reg0,
-                                                              Register reg1) {
-  DCHECK(reg0 != reg1);
-  if (FitsInReg8Operand(reg0)) {
-    Output(Bytecode::kExchange, reg0.ToOperand(), reg1.ToWideOperand());
-  } else if (FitsInReg8Operand(reg1)) {
-    Output(Bytecode::kExchange, reg1.ToOperand(), reg0.ToWideOperand());
+  if (FitsInReg8Operand(from) && FitsInReg8Operand(to)) {
+    Output(Bytecode::kMov, from.ToRawOperand(), to.ToRawOperand());
+  } else if (FitsInReg16Operand(from) && FitsInReg16Operand(to)) {
+    Output(Bytecode::kMovWide, from.ToRawOperand(), to.ToRawOperand());
   } else {
-    Output(Bytecode::kExchangeWide, reg0.ToWideOperand(), reg1.ToWideOperand());
+    UNIMPLEMENTED();
   }
   return *this;
 }
 
+void BytecodeArrayBuilder::MoveRegisterUntranslated(Register from,
+                                                    Register to) {
+  // Move bytecodes modify the stack. Checking validity is an
+  // essential mitigation against corrupting the stack.
+  if (FitsInReg8OperandUntranslated(from)) {
+    CHECK(RegisterIsValid(from, OperandType::kReg8) &&
+          RegisterIsValid(to, OperandType::kReg16));
+  } else if (FitsInReg8OperandUntranslated(to)) {
+    CHECK(RegisterIsValid(from, OperandType::kReg16) &&
+          RegisterIsValid(to, OperandType::kReg8));
+  } else {
+    UNIMPLEMENTED();
+  }
+  Output(Bytecode::kMovWide, from.ToRawOperand(), to.ToRawOperand());
+}
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(
-    const Handle<String> name, int feedback_slot, LanguageMode language_mode,
-    TypeofMode typeof_mode) {
-  // TODO(rmcilroy): Potentially store language and typeof information in an
+    const Handle<String> name, int feedback_slot, TypeofMode typeof_mode) {
+  // TODO(rmcilroy): Potentially store typeof information in an
   // operand rather than having extra bytecodes.
-  Bytecode bytecode = BytecodeForLoadGlobal(language_mode, typeof_mode);
+  Bytecode bytecode = BytecodeForLoadGlobal(typeof_mode);
   size_t name_index = GetConstantPoolEntry(name);
   if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
     Output(bytecode, static_cast<uint8_t>(name_index),
@@ -429,10 +405,10 @@
                                                             int slot_index) {
   DCHECK(slot_index >= 0);
   if (FitsInIdx8Operand(slot_index)) {
-    Output(Bytecode::kLdaContextSlot, context.ToOperand(),
+    Output(Bytecode::kLdaContextSlot, context.ToRawOperand(),
            static_cast<uint8_t>(slot_index));
   } else if (FitsInIdx16Operand(slot_index)) {
-    Output(Bytecode::kLdaContextSlotWide, context.ToOperand(),
+    Output(Bytecode::kLdaContextSlotWide, context.ToRawOperand(),
            static_cast<uint16_t>(slot_index));
   } else {
     UNIMPLEMENTED();
@@ -445,10 +421,10 @@
                                                              int slot_index) {
   DCHECK(slot_index >= 0);
   if (FitsInIdx8Operand(slot_index)) {
-    Output(Bytecode::kStaContextSlot, context.ToOperand(),
+    Output(Bytecode::kStaContextSlot, context.ToRawOperand(),
            static_cast<uint8_t>(slot_index));
   } else if (FitsInIdx16Operand(slot_index)) {
-    Output(Bytecode::kStaContextSlotWide, context.ToOperand(),
+    Output(Bytecode::kStaContextSlotWide, context.ToRawOperand(),
            static_cast<uint16_t>(slot_index));
   } else {
     UNIMPLEMENTED();
@@ -490,18 +466,16 @@
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
-    Register object, const Handle<String> name, int feedback_slot,
-    LanguageMode language_mode) {
-  Bytecode bytecode = BytecodeForLoadIC(language_mode);
+    Register object, const Handle<Name> name, int feedback_slot) {
   size_t name_index = GetConstantPoolEntry(name);
   if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
-    Output(bytecode, object.ToOperand(), static_cast<uint8_t>(name_index),
+    Output(Bytecode::kLoadIC, object.ToRawOperand(),
+           static_cast<uint8_t>(name_index),
            static_cast<uint8_t>(feedback_slot));
   } else if (FitsInIdx16Operand(name_index) &&
              FitsInIdx16Operand(feedback_slot)) {
-    Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
+    Output(Bytecode::kLoadICWide, object.ToRawOperand(),
            static_cast<uint16_t>(name_index),
            static_cast<uint16_t>(feedback_slot));
   } else {
@@ -510,14 +484,13 @@
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
-    Register object, int feedback_slot, LanguageMode language_mode) {
-  Bytecode bytecode = BytecodeForKeyedLoadIC(language_mode);
+    Register object, int feedback_slot) {
   if (FitsInIdx8Operand(feedback_slot)) {
-    Output(bytecode, object.ToOperand(), static_cast<uint8_t>(feedback_slot));
+    Output(Bytecode::kKeyedLoadIC, object.ToRawOperand(),
+           static_cast<uint8_t>(feedback_slot));
   } else if (FitsInIdx16Operand(feedback_slot)) {
-    Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
+    Output(Bytecode::kKeyedLoadICWide, object.ToRawOperand(),
            static_cast<uint16_t>(feedback_slot));
   } else {
     UNIMPLEMENTED();
@@ -525,18 +498,17 @@
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
-    Register object, const Handle<String> name, int feedback_slot,
+    Register object, const Handle<Name> name, int feedback_slot,
     LanguageMode language_mode) {
   Bytecode bytecode = BytecodeForStoreIC(language_mode);
   size_t name_index = GetConstantPoolEntry(name);
   if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
-    Output(bytecode, object.ToOperand(), static_cast<uint8_t>(name_index),
+    Output(bytecode, object.ToRawOperand(), static_cast<uint8_t>(name_index),
            static_cast<uint8_t>(feedback_slot));
   } else if (FitsInIdx16Operand(name_index) &&
              FitsInIdx16Operand(feedback_slot)) {
-    Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
+    Output(BytecodeForWideOperands(bytecode), object.ToRawOperand(),
            static_cast<uint16_t>(name_index),
            static_cast<uint16_t>(feedback_slot));
   } else {
@@ -551,11 +523,11 @@
     LanguageMode language_mode) {
   Bytecode bytecode = BytecodeForKeyedStoreIC(language_mode);
   if (FitsInIdx8Operand(feedback_slot)) {
-    Output(bytecode, object.ToOperand(), key.ToOperand(),
+    Output(bytecode, object.ToRawOperand(), key.ToRawOperand(),
            static_cast<uint8_t>(feedback_slot));
   } else if (FitsInIdx16Operand(feedback_slot)) {
-    Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
-           key.ToOperand(), static_cast<uint16_t>(feedback_slot));
+    Output(BytecodeForWideOperands(bytecode), object.ToRawOperand(),
+           key.ToRawOperand(), static_cast<uint16_t>(feedback_slot));
   } else {
     UNIMPLEMENTED();
   }
@@ -653,13 +625,13 @@
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::PushContext(Register context) {
-  Output(Bytecode::kPushContext, context.ToOperand());
+  Output(Bytecode::kPushContext, context.ToRawOperand());
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::PopContext(Register context) {
-  Output(Bytecode::kPopContext, context.ToOperand());
+  Output(Bytecode::kPopContext, context.ToRawOperand());
   return *this;
 }
 
@@ -766,6 +738,8 @@
       return Bytecode::kJumpIfToBooleanTrueConstant;
     case Bytecode::kJumpIfToBooleanFalse:
       return Bytecode::kJumpIfToBooleanFalseConstant;
+    case Bytecode::kJumpIfNotHole:
+      return Bytecode::kJumpIfNotHoleConstant;
     case Bytecode::kJumpIfNull:
       return Bytecode::kJumpIfNullConstant;
     case Bytecode::kJumpIfUndefined:
@@ -791,6 +765,8 @@
       return Bytecode::kJumpIfToBooleanTrueConstantWide;
     case Bytecode::kJumpIfToBooleanFalse:
       return Bytecode::kJumpIfToBooleanFalseConstantWide;
+    case Bytecode::kJumpIfNotHole:
+      return Bytecode::kJumpIfNotHoleConstantWide;
     case Bytecode::kJumpIfNull:
       return Bytecode::kJumpIfNullConstantWide;
     case Bytecode::kJumpIfUndefined:
@@ -808,6 +784,7 @@
     case Bytecode::kJump:
     case Bytecode::kJumpIfNull:
     case Bytecode::kJumpIfUndefined:
+    case Bytecode::kJumpIfNotHole:
       return jump_bytecode;
     case Bytecode::kJumpIfTrue:
       return Bytecode::kJumpIfToBooleanTrue;
@@ -883,7 +860,10 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::OutputJump(Bytecode jump_bytecode,
                                                        BytecodeLabel* label) {
   // Don't emit dead code.
-  if (exit_seen_in_block_) return *this;
+  if (exit_seen_in_block_) {
+    source_position_table_builder_.RevertPosition(bytecodes()->size());
+    return *this;
+  }
 
   // Check if the value in accumulator is boolean, if not choose an
   // appropriate JumpIfToBoolean bytecode.
@@ -965,6 +945,15 @@
   return OutputJump(Bytecode::kJumpIfUndefined, label);
 }
 
+BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck() {
+  Output(Bytecode::kStackCheck);
+  return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNotHole(
+    BytecodeLabel* label) {
+  return OutputJump(Bytecode::kJumpIfNotHole, label);
+}
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::Throw() {
   Output(Bytecode::kThrow);
@@ -973,40 +962,86 @@
 }
 
 
+BytecodeArrayBuilder& BytecodeArrayBuilder::ReThrow() {
+  Output(Bytecode::kReThrow);
+  exit_seen_in_block_ = true;
+  return *this;
+}
+
+
 BytecodeArrayBuilder& BytecodeArrayBuilder::Return() {
   Output(Bytecode::kReturn);
   exit_seen_in_block_ = true;
   return *this;
 }
 
+BytecodeArrayBuilder& BytecodeArrayBuilder::Debugger() {
+  Output(Bytecode::kDebugger);
+  return *this;
+}
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(
-    Register cache_type, Register cache_array, Register cache_length) {
-  Output(Bytecode::kForInPrepare, cache_type.ToOperand(),
-         cache_array.ToOperand(), cache_length.ToOperand());
+    Register cache_info_triple) {
+  if (FitsInReg8Operand(cache_info_triple)) {
+    Output(Bytecode::kForInPrepare, cache_info_triple.ToRawOperand());
+  } else if (FitsInReg16Operand(cache_info_triple)) {
+    Output(Bytecode::kForInPrepareWide, cache_info_triple.ToRawOperand());
+  } else {
+    UNIMPLEMENTED();
+  }
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInDone(Register index,
                                                       Register cache_length) {
-  Output(Bytecode::kForInDone, index.ToOperand(), cache_length.ToOperand());
+  Output(Bytecode::kForInDone, index.ToRawOperand(),
+         cache_length.ToRawOperand());
   return *this;
 }
 
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(Register receiver,
-                                                      Register cache_type,
-                                                      Register cache_array,
-                                                      Register index) {
-  Output(Bytecode::kForInNext, receiver.ToOperand(), cache_type.ToOperand(),
-         cache_array.ToOperand(), index.ToOperand());
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(
+    Register receiver, Register index, Register cache_type_array_pair) {
+  if (FitsInReg8Operand(receiver) && FitsInReg8Operand(index) &&
+      FitsInReg8Operand(cache_type_array_pair)) {
+    Output(Bytecode::kForInNext, receiver.ToRawOperand(), index.ToRawOperand(),
+           cache_type_array_pair.ToRawOperand());
+  } else if (FitsInReg16Operand(receiver) && FitsInReg16Operand(index) &&
+             FitsInReg16Operand(cache_type_array_pair)) {
+    Output(Bytecode::kForInNextWide, receiver.ToRawOperand(),
+           index.ToRawOperand(), cache_type_array_pair.ToRawOperand());
+  } else {
+    UNIMPLEMENTED();
+  }
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInStep(Register index) {
-  Output(Bytecode::kForInStep, index.ToOperand());
+  Output(Bytecode::kForInStep, index.ToRawOperand());
+  return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::MarkHandler(int handler_id,
+                                                        bool will_catch) {
+  handler_table_builder()->SetHandlerTarget(handler_id, bytecodes()->size());
+  handler_table_builder()->SetPrediction(handler_id, will_catch);
+  return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryBegin(int handler_id,
+                                                         Register context) {
+  handler_table_builder()->SetTryRegionStart(handler_id, bytecodes()->size());
+  handler_table_builder()->SetContextRegister(handler_id, context);
+  return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryEnd(int handler_id) {
+  handler_table_builder()->SetTryRegionEnd(handler_id, bytecodes()->size());
   return *this;
 }
 
@@ -1016,27 +1051,33 @@
   exit_seen_in_block_ = false;
 }
 
-
-void BytecodeArrayBuilder::EnsureReturn() {
+void BytecodeArrayBuilder::EnsureReturn(FunctionLiteral* literal) {
   if (!exit_seen_in_block_) {
     LoadUndefined();
+    SetReturnPosition(literal);
     Return();
   }
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
-                                                 Register receiver,
-                                                 size_t arg_count,
-                                                 int feedback_slot) {
-  if (FitsInIdx8Operand(arg_count) && FitsInIdx8Operand(feedback_slot)) {
-    Output(Bytecode::kCall, callable.ToOperand(), receiver.ToOperand(),
-           static_cast<uint8_t>(arg_count),
+                                                 Register receiver_args,
+                                                 size_t receiver_args_count,
+                                                 int feedback_slot,
+                                                 TailCallMode tail_call_mode) {
+  Bytecode bytecode = BytecodeForCall(tail_call_mode);
+  if (FitsInReg8Operand(callable) && FitsInReg8Operand(receiver_args) &&
+      FitsInIdx8Operand(receiver_args_count) &&
+      FitsInIdx8Operand(feedback_slot)) {
+    Output(bytecode, callable.ToRawOperand(), receiver_args.ToRawOperand(),
+           static_cast<uint8_t>(receiver_args_count),
            static_cast<uint8_t>(feedback_slot));
-  } else if (FitsInIdx16Operand(arg_count) &&
+  } else if (FitsInReg16Operand(callable) &&
+             FitsInReg16Operand(receiver_args) &&
+             FitsInIdx16Operand(receiver_args_count) &&
              FitsInIdx16Operand(feedback_slot)) {
-    Output(Bytecode::kCallWide, callable.ToOperand(), receiver.ToOperand(),
-           static_cast<uint16_t>(arg_count),
+    bytecode = BytecodeForWideOperands(bytecode);
+    Output(bytecode, callable.ToRawOperand(), receiver_args.ToRawOperand(),
+           static_cast<uint16_t>(receiver_args_count),
            static_cast<uint16_t>(feedback_slot));
   } else {
     UNIMPLEMENTED();
@@ -1044,7 +1085,6 @@
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::New(Register constructor,
                                                 Register first_arg,
                                                 size_t arg_count) {
@@ -1052,9 +1092,17 @@
     DCHECK_EQ(0u, arg_count);
     first_arg = Register(0);
   }
-  DCHECK(FitsInIdx8Operand(arg_count));
-  Output(Bytecode::kNew, constructor.ToOperand(), first_arg.ToOperand(),
-         static_cast<uint8_t>(arg_count));
+  if (FitsInReg8Operand(constructor) && FitsInReg8Operand(first_arg) &&
+      FitsInIdx8Operand(arg_count)) {
+    Output(Bytecode::kNew, constructor.ToRawOperand(), first_arg.ToRawOperand(),
+           static_cast<uint8_t>(arg_count));
+  } else if (FitsInReg16Operand(constructor) && FitsInReg16Operand(first_arg) &&
+             FitsInIdx16Operand(arg_count)) {
+    Output(Bytecode::kNewWide, constructor.ToRawOperand(),
+           first_arg.ToRawOperand(), static_cast<uint16_t>(arg_count));
+  } else {
+    UNIMPLEMENTED();
+  }
   return *this;
 }
 
@@ -1063,13 +1111,19 @@
     Runtime::FunctionId function_id, Register first_arg, size_t arg_count) {
   DCHECK_EQ(1, Runtime::FunctionForId(function_id)->result_size);
   DCHECK(FitsInIdx16Operand(function_id));
-  DCHECK(FitsInIdx8Operand(arg_count));
   if (!first_arg.is_valid()) {
     DCHECK_EQ(0u, arg_count);
     first_arg = Register(0);
   }
-  Output(Bytecode::kCallRuntime, static_cast<uint16_t>(function_id),
-         first_arg.ToOperand(), static_cast<uint8_t>(arg_count));
+  if (FitsInReg8Operand(first_arg) && FitsInIdx8Operand(arg_count)) {
+    Output(Bytecode::kCallRuntime, static_cast<uint16_t>(function_id),
+           first_arg.ToRawOperand(), static_cast<uint8_t>(arg_count));
+  } else if (FitsInReg16Operand(first_arg) && FitsInIdx16Operand(arg_count)) {
+    Output(Bytecode::kCallRuntimeWide, static_cast<uint16_t>(function_id),
+           first_arg.ToRawOperand(), static_cast<uint16_t>(arg_count));
+  } else {
+    UNIMPLEMENTED();
+  }
   return *this;
 }
 
@@ -1079,38 +1133,49 @@
     Register first_return) {
   DCHECK_EQ(2, Runtime::FunctionForId(function_id)->result_size);
   DCHECK(FitsInIdx16Operand(function_id));
-  DCHECK(FitsInIdx8Operand(arg_count));
   if (!first_arg.is_valid()) {
     DCHECK_EQ(0u, arg_count);
     first_arg = Register(0);
   }
-  Output(Bytecode::kCallRuntimeForPair, static_cast<uint16_t>(function_id),
-         first_arg.ToOperand(), static_cast<uint8_t>(arg_count),
-         first_return.ToOperand());
+  if (FitsInReg8Operand(first_arg) && FitsInIdx8Operand(arg_count) &&
+      FitsInReg8Operand(first_return)) {
+    Output(Bytecode::kCallRuntimeForPair, static_cast<uint16_t>(function_id),
+           first_arg.ToRawOperand(), static_cast<uint8_t>(arg_count),
+           first_return.ToRawOperand());
+  } else if (FitsInReg16Operand(first_arg) && FitsInIdx16Operand(arg_count) &&
+             FitsInReg16Operand(first_return)) {
+    Output(Bytecode::kCallRuntimeForPairWide,
+           static_cast<uint16_t>(function_id), first_arg.ToRawOperand(),
+           static_cast<uint16_t>(arg_count), first_return.ToRawOperand());
+  } else {
+    UNIMPLEMENTED();
+  }
   return *this;
 }
 
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(int context_index,
-                                                          Register receiver,
-                                                          size_t arg_count) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(
+    int context_index, Register receiver_args, size_t receiver_args_count) {
   DCHECK(FitsInIdx16Operand(context_index));
-  DCHECK(FitsInIdx8Operand(arg_count));
-  Output(Bytecode::kCallJSRuntime, static_cast<uint16_t>(context_index),
-         receiver.ToOperand(), static_cast<uint8_t>(arg_count));
+  if (FitsInReg8Operand(receiver_args) &&
+      FitsInIdx8Operand(receiver_args_count)) {
+    Output(Bytecode::kCallJSRuntime, static_cast<uint16_t>(context_index),
+           receiver_args.ToRawOperand(),
+           static_cast<uint8_t>(receiver_args_count));
+  } else if (FitsInReg16Operand(receiver_args) &&
+             FitsInIdx16Operand(receiver_args_count)) {
+    Output(Bytecode::kCallJSRuntimeWide, static_cast<uint16_t>(context_index),
+           receiver_args.ToRawOperand(),
+           static_cast<uint16_t>(receiver_args_count));
+  } else {
+    UNIMPLEMENTED();
+  }
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
                                                    LanguageMode language_mode) {
-  Output(BytecodeForDelete(language_mode), object.ToOperand());
-  return *this;
-}
-
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::DeleteLookupSlot() {
-  Output(Bytecode::kDeleteLookupSlot);
+  Output(BytecodeForDelete(language_mode), object.ToRawOperand());
   return *this;
 }
 
@@ -1119,126 +1184,62 @@
   return constant_array_builder()->Insert(object);
 }
 
-
-int BytecodeArrayBuilder::BorrowTemporaryRegister() {
-  if (free_temporaries_.empty()) {
-    temporary_register_count_ += 1;
-    return last_temporary_register().index();
-  } else {
-    auto pos = free_temporaries_.begin();
-    int retval = *pos;
-    free_temporaries_.erase(pos);
-    return retval;
-  }
+void BytecodeArrayBuilder::SetReturnPosition(FunctionLiteral* fun) {
+  int pos = std::max(fun->start_position(), fun->end_position() - 1);
+  source_position_table_builder_.AddStatementPosition(bytecodes_.size(), pos);
 }
 
-
-int BytecodeArrayBuilder::BorrowTemporaryRegisterNotInRange(int start_index,
-                                                            int end_index) {
-  auto index = free_temporaries_.lower_bound(start_index);
-  if (index == free_temporaries_.begin()) {
-    // If start_index is the first free register, check for a register
-    // greater than end_index.
-    index = free_temporaries_.upper_bound(end_index);
-    if (index == free_temporaries_.end()) {
-      temporary_register_count_ += 1;
-      return last_temporary_register().index();
-    }
-  } else {
-    // If there is a free register < start_index
-    index--;
-  }
-
-  int retval = *index;
-  free_temporaries_.erase(index);
-  return retval;
+void BytecodeArrayBuilder::SetStatementPosition(Statement* stmt) {
+  if (stmt->position() == RelocInfo::kNoPosition) return;
+  source_position_table_builder_.AddStatementPosition(bytecodes_.size(),
+                                                      stmt->position());
 }
 
-
-void BytecodeArrayBuilder::BorrowConsecutiveTemporaryRegister(int reg_index) {
-  DCHECK(free_temporaries_.find(reg_index) != free_temporaries_.end());
-  free_temporaries_.erase(reg_index);
+void BytecodeArrayBuilder::SetExpressionPosition(Expression* expr) {
+  if (expr->position() == RelocInfo::kNoPosition) return;
+  source_position_table_builder_.AddExpressionPosition(bytecodes_.size(),
+                                                       expr->position());
 }
 
-
-void BytecodeArrayBuilder::ReturnTemporaryRegister(int reg_index) {
-  DCHECK(free_temporaries_.find(reg_index) == free_temporaries_.end());
-  free_temporaries_.insert(reg_index);
-}
-
-
-int BytecodeArrayBuilder::PrepareForConsecutiveTemporaryRegisters(
-    size_t count) {
-  if (count == 0) {
-    return -1;
-  }
-
-  // Search within existing temporaries for a run.
-  auto start = free_temporaries_.begin();
-  size_t run_length = 0;
-  for (auto run_end = start; run_end != free_temporaries_.end(); run_end++) {
-    if (*run_end != *start + static_cast<int>(run_length)) {
-      start = run_end;
-      run_length = 0;
-    }
-    if (++run_length == count) {
-      return *start;
-    }
-  }
-
-  // Continue run if possible across existing last temporary.
-  if (temporary_register_count_ > 0 &&
-      (start == free_temporaries_.end() ||
-       *start + static_cast<int>(run_length) !=
-           last_temporary_register().index() + 1)) {
-    run_length = 0;
-  }
-
-  // Ensure enough registers for run.
-  while (run_length++ < count) {
-    temporary_register_count_++;
-    free_temporaries_.insert(last_temporary_register().index());
-  }
-  return last_temporary_register().index() - static_cast<int>(count) + 1;
-}
-
-
 bool BytecodeArrayBuilder::TemporaryRegisterIsLive(Register reg) const {
-  if (temporary_register_count_ > 0) {
-    DCHECK(reg.index() >= first_temporary_register().index() &&
-           reg.index() <= last_temporary_register().index());
-    return free_temporaries_.find(reg.index()) == free_temporaries_.end();
-  } else {
-    return false;
-  }
+  return temporary_register_allocator()->RegisterIsLive(reg);
 }
 
-
-bool BytecodeArrayBuilder::RegisterIsValid(Register reg) const {
-  if (reg.is_function_context() || reg.is_function_closure() ||
-      reg.is_new_target()) {
-    return true;
-  } else if (reg.is_parameter()) {
-    int parameter_index = reg.ToParameterIndex(parameter_count_);
-    return parameter_index >= 0 && parameter_index < parameter_count_;
-  } else if (reg.index() < fixed_register_count()) {
-    return true;
-  } else {
-    return TemporaryRegisterIsLive(reg);
-  }
-}
-
-
 bool BytecodeArrayBuilder::OperandIsValid(Bytecode bytecode, int operand_index,
                                           uint32_t operand_value) const {
   OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
   switch (operand_type) {
     case OperandType::kNone:
       return false;
-    case OperandType::kCount16:
+    case OperandType::kRegCount16: {
+      // Expect kRegCount16 to be part of a range: the previous operand
+      // must be a valid operand to start a range.
+      if (operand_index > 0) {
+        OperandType previous_operand_type =
+            Bytecodes::GetOperandType(bytecode, operand_index - 1);
+        return ((previous_operand_type == OperandType::kMaybeReg16 ||
+                 previous_operand_type == OperandType::kReg16) &&
+                static_cast<uint16_t>(operand_value) == operand_value);
+      } else {
+        return false;
+      }
+    }
+    case OperandType::kRegCount8: {
+      // Expect kRegCount8 to be part of a range: the previous operand
+      // must be a valid operand to start a range.
+      if (operand_index > 0) {
+        OperandType previous_operand_type =
+            Bytecodes::GetOperandType(bytecode, operand_index - 1);
+        return ((previous_operand_type == OperandType::kMaybeReg8 ||
+                 previous_operand_type == OperandType::kReg8 ||
+                 previous_operand_type == OperandType::kMaybeReg16) &&
+                static_cast<uint8_t>(operand_value) == operand_value);
+      } else {
+        return false;
+      }
+    }
     case OperandType::kIdx16:
       return static_cast<uint16_t>(operand_value) == operand_value;
-    case OperandType::kCount8:
     case OperandType::kImm8:
     case OperandType::kIdx8:
       return static_cast<uint8_t>(operand_value) == operand_value;
@@ -1248,27 +1249,84 @@
       }
     // Fall-through to kReg8 case.
     case OperandType::kReg8:
-      return RegisterIsValid(
-          Register::FromOperand(static_cast<uint8_t>(operand_value)));
-    case OperandType::kRegPair8: {
-      Register reg0 =
-          Register::FromOperand(static_cast<uint8_t>(operand_value));
+    case OperandType::kRegOut8:
+      return RegisterIsValid(Register::FromRawOperand(operand_value),
+                             operand_type);
+    case OperandType::kRegOutPair8:
+    case OperandType::kRegOutPair16:
+    case OperandType::kRegPair8:
+    case OperandType::kRegPair16: {
+      Register reg0 = Register::FromRawOperand(operand_value);
       Register reg1 = Register(reg0.index() + 1);
-      return RegisterIsValid(reg0) && RegisterIsValid(reg1);
+      return RegisterIsValid(reg0, operand_type) &&
+             RegisterIsValid(reg1, operand_type);
     }
-    case OperandType::kReg16:
-      if (bytecode != Bytecode::kExchange &&
-          bytecode != Bytecode::kExchangeWide) {
-        return false;
+    case OperandType::kRegOutTriple8:
+    case OperandType::kRegOutTriple16: {
+      Register reg0 = Register::FromRawOperand(operand_value);
+      Register reg1 = Register(reg0.index() + 1);
+      Register reg2 = Register(reg0.index() + 2);
+      return RegisterIsValid(reg0, operand_type) &&
+             RegisterIsValid(reg1, operand_type) &&
+             RegisterIsValid(reg2, operand_type);
+    }
+    case OperandType::kMaybeReg16:
+      if (operand_value == 0) {
+        return true;
       }
-      return RegisterIsValid(
-          Register::FromWideOperand(static_cast<uint16_t>(operand_value)));
+    // Fall-through to kReg16 case.
+    case OperandType::kReg16:
+    case OperandType::kRegOut16: {
+      Register reg = Register::FromRawOperand(operand_value);
+      return RegisterIsValid(reg, operand_type);
+    }
   }
   UNREACHABLE();
   return false;
 }
 
 
+bool BytecodeArrayBuilder::RegisterIsValid(Register reg,
+                                           OperandType reg_type) const {
+  if (!reg.is_valid()) {
+    return false;
+  }
+
+  switch (Bytecodes::SizeOfOperand(reg_type)) {
+    case OperandSize::kByte:
+      if (!FitsInReg8OperandUntranslated(reg)) {
+        return false;
+      }
+      break;
+    case OperandSize::kShort:
+      if (!FitsInReg16OperandUntranslated(reg)) {
+        return false;
+      }
+      break;
+    case OperandSize::kNone:
+      UNREACHABLE();
+      return false;
+  }
+
+  if (reg.is_current_context() || reg.is_function_closure() ||
+      reg.is_new_target()) {
+    return true;
+  } else if (reg.is_parameter()) {
+    int parameter_index = reg.ToParameterIndex(parameter_count());
+    return parameter_index >= 0 && parameter_index < parameter_count();
+  } else if (RegisterTranslator::InTranslationWindow(reg)) {
+    return translation_register_count() > 0;
+  } else {
+    reg = RegisterTranslator::UntranslateRegister(reg);
+    if (reg.index() < fixed_register_count()) {
+      return true;
+    } else {
+      return TemporaryRegisterIsLive(reg);
+    }
+  }
+}
+
+
 bool BytecodeArrayBuilder::LastBytecodeInSameBlock() const {
   return last_bytecode_start_ < bytecodes()->size() &&
          last_bytecode_start_ >= last_block_end_;
@@ -1279,9 +1337,10 @@
   if (LastBytecodeInSameBlock()) {
     PreviousBytecodeHelper previous_bytecode(*this);
     Bytecode bytecode = previous_bytecode.GetBytecode();
-    if ((bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar) &&
-        (reg == Register::FromOperand(previous_bytecode.GetOperand(0)))) {
-      return true;
+    if (bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar) {
+      Register previous_reg =
+          Register::FromOperand(previous_bytecode.GetOperand(0));
+      return previous_reg == reg;
     }
   }
   return false;
@@ -1367,14 +1426,14 @@
 // static
 Bytecode BytecodeArrayBuilder::BytecodeForWideOperands(Bytecode bytecode) {
   switch (bytecode) {
-    case Bytecode::kLoadICSloppy:
-      return Bytecode::kLoadICSloppyWide;
-    case Bytecode::kLoadICStrict:
-      return Bytecode::kLoadICStrictWide;
-    case Bytecode::kKeyedLoadICSloppy:
-      return Bytecode::kKeyedLoadICSloppyWide;
-    case Bytecode::kKeyedLoadICStrict:
-      return Bytecode::kKeyedLoadICStrictWide;
+    case Bytecode::kCall:
+      return Bytecode::kCallWide;
+    case Bytecode::kTailCall:
+      return Bytecode::kTailCallWide;
+    case Bytecode::kLoadIC:
+      return Bytecode::kLoadICWide;
+    case Bytecode::kKeyedLoadIC:
+      return Bytecode::kKeyedLoadICWide;
     case Bytecode::kStoreICSloppy:
       return Bytecode::kStoreICSloppyWide;
     case Bytecode::kStoreICStrict:
@@ -1383,14 +1442,10 @@
       return Bytecode::kKeyedStoreICSloppyWide;
     case Bytecode::kKeyedStoreICStrict:
       return Bytecode::kKeyedStoreICStrictWide;
-    case Bytecode::kLdaGlobalSloppy:
-      return Bytecode::kLdaGlobalSloppyWide;
-    case Bytecode::kLdaGlobalStrict:
-      return Bytecode::kLdaGlobalStrictWide;
-    case Bytecode::kLdaGlobalInsideTypeofSloppy:
-      return Bytecode::kLdaGlobalInsideTypeofSloppyWide;
-    case Bytecode::kLdaGlobalInsideTypeofStrict:
-      return Bytecode::kLdaGlobalInsideTypeofStrictWide;
+    case Bytecode::kLdaGlobal:
+      return Bytecode::kLdaGlobalWide;
+    case Bytecode::kLdaGlobalInsideTypeof:
+      return Bytecode::kLdaGlobalInsideTypeofWide;
     case Bytecode::kStaGlobalSloppy:
       return Bytecode::kStaGlobalSloppyWide;
     case Bytecode::kStaGlobalStrict:
@@ -1411,39 +1466,6 @@
 
 
 // static
-Bytecode BytecodeArrayBuilder::BytecodeForLoadIC(LanguageMode language_mode) {
-  switch (language_mode) {
-    case SLOPPY:
-      return Bytecode::kLoadICSloppy;
-    case STRICT:
-      return Bytecode::kLoadICStrict;
-    case STRONG:
-      UNIMPLEMENTED();
-    default:
-      UNREACHABLE();
-  }
-  return static_cast<Bytecode>(-1);
-}
-
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForKeyedLoadIC(
-    LanguageMode language_mode) {
-  switch (language_mode) {
-    case SLOPPY:
-      return Bytecode::kKeyedLoadICSloppy;
-    case STRICT:
-      return Bytecode::kKeyedLoadICStrict;
-    case STRONG:
-      UNIMPLEMENTED();
-    default:
-      UNREACHABLE();
-  }
-  return static_cast<Bytecode>(-1);
-}
-
-
-// static
 Bytecode BytecodeArrayBuilder::BytecodeForStoreIC(LanguageMode language_mode) {
   switch (language_mode) {
     case SLOPPY:
@@ -1477,23 +1499,9 @@
 
 
 // static
-Bytecode BytecodeArrayBuilder::BytecodeForLoadGlobal(LanguageMode language_mode,
-                                                     TypeofMode typeof_mode) {
-  switch (language_mode) {
-    case SLOPPY:
-      return typeof_mode == INSIDE_TYPEOF
-                 ? Bytecode::kLdaGlobalInsideTypeofSloppy
-                 : Bytecode::kLdaGlobalSloppy;
-    case STRICT:
-      return typeof_mode == INSIDE_TYPEOF
-                 ? Bytecode::kLdaGlobalInsideTypeofStrict
-                 : Bytecode::kLdaGlobalStrict;
-    case STRONG:
-      UNIMPLEMENTED();
-    default:
-      UNREACHABLE();
-  }
-  return static_cast<Bytecode>(-1);
+Bytecode BytecodeArrayBuilder::BytecodeForLoadGlobal(TypeofMode typeof_mode) {
+  return typeof_mode == INSIDE_TYPEOF ? Bytecode::kLdaGlobalInsideTypeof
+                                      : Bytecode::kLdaGlobal;
 }
 
 
@@ -1530,7 +1538,6 @@
   return static_cast<Bytecode>(-1);
 }
 
-
 // static
 Bytecode BytecodeArrayBuilder::BytecodeForCreateArguments(
     CreateArgumentsType type) {
@@ -1539,9 +1546,10 @@
       return Bytecode::kCreateMappedArguments;
     case CreateArgumentsType::kUnmappedArguments:
       return Bytecode::kCreateUnmappedArguments;
-    default:
-      UNREACHABLE();
+    case CreateArgumentsType::kRestParameter:
+      return Bytecode::kCreateRestParameter;
   }
+  UNREACHABLE();
   return static_cast<Bytecode>(-1);
 }
 
@@ -1561,6 +1569,18 @@
   return static_cast<Bytecode>(-1);
 }
 
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForCall(TailCallMode tail_call_mode) {
+  switch (tail_call_mode) {
+    case TailCallMode::kDisallow:
+      return Bytecode::kCall;
+    case TailCallMode::kAllow:
+      return Bytecode::kTailCall;
+    default:
+      UNREACHABLE();
+  }
+  return static_cast<Bytecode>(-1);
+}
 
 // static
 bool BytecodeArrayBuilder::FitsInIdx8Operand(int value) {
@@ -1594,13 +1614,23 @@
 
 // static
 bool BytecodeArrayBuilder::FitsInReg8Operand(Register value) {
-  return kMinInt8 <= value.index() && value.index() <= kMaxInt8;
+  return RegisterTranslator::FitsInReg8Operand(value);
+}
+
+// static
+bool BytecodeArrayBuilder::FitsInReg8OperandUntranslated(Register value) {
+  return value.is_byte_operand();
 }
 
 
 // static
 bool BytecodeArrayBuilder::FitsInReg16Operand(Register value) {
-  return kMinInt16 <= value.index() && value.index() <= kMaxInt16;
+  return RegisterTranslator::FitsInReg16Operand(value);
+}
+
+// static
+bool BytecodeArrayBuilder::FitsInReg16OperandUntranslated(Register value) {
+  return value.is_short_operand();
 }
 
 }  // namespace interpreter
diff --git a/src/interpreter/bytecode-array-builder.h b/src/interpreter/bytecode-array-builder.h
index 7c23dc3..fe69337 100644
--- a/src/interpreter/bytecode-array-builder.h
+++ b/src/interpreter/bytecode-array-builder.h
@@ -6,8 +6,12 @@
 #define V8_INTERPRETER_BYTECODE_ARRAY_BUILDER_H_
 
 #include "src/ast/ast.h"
+#include "src/interpreter/bytecode-register-allocator.h"
 #include "src/interpreter/bytecodes.h"
 #include "src/interpreter/constant-array-builder.h"
+#include "src/interpreter/handler-table-builder.h"
+#include "src/interpreter/register-translator.h"
+#include "src/interpreter/source-position-table.h"
 #include "src/zone-containers.h"
 
 namespace v8 {
@@ -18,36 +22,29 @@
 namespace interpreter {
 
 class BytecodeLabel;
-class ConstantArrayBuilder;
 class Register;
 
-// TODO(rmcilroy): Unify this with CreateArgumentsParameters::Type in Turbofan
-// when rest parameters implementation has settled down.
-enum class CreateArgumentsType { kMappedArguments, kUnmappedArguments };
-
-class BytecodeArrayBuilder final {
+class BytecodeArrayBuilder final : public ZoneObject, private RegisterMover {
  public:
-  BytecodeArrayBuilder(Isolate* isolate, Zone* zone);
+  BytecodeArrayBuilder(Isolate* isolate, Zone* zone, int parameter_count,
+                       int context_count, int locals_count);
   ~BytecodeArrayBuilder();
 
   Handle<BytecodeArray> ToBytecodeArray();
 
-  // Set the number of parameters expected by function.
-  void set_parameter_count(int number_of_params);
+  // Get the number of parameters expected by function.
   int parameter_count() const {
     DCHECK_GE(parameter_count_, 0);
     return parameter_count_;
   }
 
-  // Set the number of locals required for bytecode array.
-  void set_locals_count(int number_of_locals);
+  // Get the number of locals required for bytecode array.
   int locals_count() const {
     DCHECK_GE(local_register_count_, 0);
     return local_register_count_;
   }
 
-  // Set number of contexts required for bytecode array.
-  void set_context_count(int number_of_contexts);
+  // Get number of contexts required for bytecode array.
   int context_count() const {
     DCHECK_GE(context_register_count_, 0);
     return context_register_count_;
@@ -59,14 +56,30 @@
   // Returns the number of fixed (non-temporary) registers.
   int fixed_register_count() const { return context_count() + locals_count(); }
 
+  // Returns the number of fixed and temporary registers.
+  int fixed_and_temporary_register_count() const {
+    return fixed_register_count() + temporary_register_count();
+  }
+
+  int temporary_register_count() const {
+    return temporary_register_allocator()->allocation_count();
+  }
+
+  // Returns the number of registers used for translating wide
+  // register operands into byte sized register operands.
+  int translation_register_count() const {
+    return RegisterTranslator::RegisterCountAdjustment(
+        fixed_and_temporary_register_count(), parameter_count());
+  }
+
   Register Parameter(int parameter_index) const;
 
   // Return true if the register |reg| represents a parameter or a
   // local.
   bool RegisterIsParameterOrLocal(Register reg) const;
 
-  // Return true if the register |reg| represents a temporary register.
-  bool RegisterIsTemporary(Register reg) const;
+  // Returns true if the register |reg| is a live temporary register.
+  bool TemporaryRegisterIsLive(Register reg) const;
 
   // Constant loads to accumulator.
   BytecodeArrayBuilder& LoadLiteral(v8::internal::Smi* value);
@@ -80,7 +93,6 @@
 
   // Global loads to the accumulator and stores from the accumulator.
   BytecodeArrayBuilder& LoadGlobal(const Handle<String> name, int feedback_slot,
-                                   LanguageMode language_mode,
                                    TypeofMode typeof_mode);
   BytecodeArrayBuilder& StoreGlobal(const Handle<String> name,
                                     int feedback_slot,
@@ -98,20 +110,17 @@
 
   // Register-register transfer.
   BytecodeArrayBuilder& MoveRegister(Register from, Register to);
-  BytecodeArrayBuilder& ExchangeRegisters(Register reg0, Register reg1);
 
   // Named load property.
   BytecodeArrayBuilder& LoadNamedProperty(Register object,
-                                          const Handle<String> name,
-                                          int feedback_slot,
-                                          LanguageMode language_mode);
+                                          const Handle<Name> name,
+                                          int feedback_slot);
   // Keyed load property. The key should be in the accumulator.
-  BytecodeArrayBuilder& LoadKeyedProperty(Register object, int feedback_slot,
-                                          LanguageMode language_mode);
+  BytecodeArrayBuilder& LoadKeyedProperty(Register object, int feedback_slot);
 
   // Store properties. The value to be stored should be in the accumulator.
   BytecodeArrayBuilder& StoreNamedProperty(Register object,
-                                           const Handle<String> name,
+                                           const Handle<Name> name,
                                            int feedback_slot,
                                            LanguageMode language_mode);
   BytecodeArrayBuilder& StoreKeyedProperty(Register object, Register key,
@@ -149,44 +158,51 @@
   BytecodeArrayBuilder& PopContext(Register context);
 
   // Call a JS function. The JSFunction or Callable to be called should be in
-  // |callable|, the receiver should be in |receiver| and all subsequent
-  // arguments should be in registers <receiver + 1> to
-  // <receiver + 1 + arg_count>.
-  BytecodeArrayBuilder& Call(Register callable, Register receiver,
-                             size_t arg_count, int feedback_slot);
+  // |callable|, the receiver should be in |receiver_args| and all subsequent
+  // arguments should be in registers <receiver_args + 1> to
+  // <receiver_args + receiver_arg_count - 1>.
+  BytecodeArrayBuilder& Call(
+      Register callable, Register receiver_args, size_t receiver_arg_count,
+      int feedback_slot, TailCallMode tail_call_mode = TailCallMode::kDisallow);
 
-  // Call the new operator. The |constructor| register is followed by
-  // |arg_count| consecutive registers containing arguments to be
-  // applied to the constructor.
+  BytecodeArrayBuilder& TailCall(Register callable, Register receiver_args,
+                                 size_t receiver_arg_count, int feedback_slot) {
+    return Call(callable, receiver_args, receiver_arg_count, feedback_slot,
+                TailCallMode::kAllow);
+  }
+
+  // Call the new operator. The accumulator holds the |new_target|.
+  // The |constructor| is in a register followed by |arg_count|
+  // consecutive arguments starting at |first_arg| for the constructor
+  // invocation.
   BytecodeArrayBuilder& New(Register constructor, Register first_arg,
                             size_t arg_count);
 
   // Call the runtime function with |function_id|. The first argument should be
   // in |first_arg| and all subsequent arguments should be in registers
-  // <first_arg + 1> to <first_arg + 1 + arg_count>.
+  // <first_arg + 1> to <first_arg + arg_count - 1>.
   BytecodeArrayBuilder& CallRuntime(Runtime::FunctionId function_id,
                                     Register first_arg, size_t arg_count);
 
   // Call the runtime function with |function_id| that returns a pair of values.
   // The first argument should be in |first_arg| and all subsequent arguments
-  // should be in registers <first_arg + 1> to <first_arg + 1 + arg_count>. The
+  // should be in registers <first_arg + 1> to <first_arg + arg_count - 1>. The
   // return values will be returned in <first_return> and <first_return + 1>.
   BytecodeArrayBuilder& CallRuntimeForPair(Runtime::FunctionId function_id,
                                            Register first_arg, size_t arg_count,
                                            Register first_return);
 
   // Call the JS runtime function with |context_index|. The the receiver should
-  // be in |receiver| and all subsequent arguments should be in registers
-  // <receiver + 1> to <receiver + 1 + arg_count>.
-  BytecodeArrayBuilder& CallJSRuntime(int context_index, Register receiver,
-                                      size_t arg_count);
+  // be in |receiver_args| and all subsequent arguments should be in registers
+  // <receiver_args + 1> to <receiver_args + receiver_args_count - 1>.
+  BytecodeArrayBuilder& CallJSRuntime(int context_index, Register receiver_args,
+                                      size_t receiver_args_count);
 
   // Operators (register holds the lhs value, accumulator holds the rhs value).
-  BytecodeArrayBuilder& BinaryOperation(Token::Value binop, Register reg,
-                                        Strength strength);
+  BytecodeArrayBuilder& BinaryOperation(Token::Value binop, Register reg);
 
   // Count Operators (value stored in accumulator).
-  BytecodeArrayBuilder& CountOperation(Token::Value op, Strength strength);
+  BytecodeArrayBuilder& CountOperation(Token::Value op);
 
   // Unary Operators.
   BytecodeArrayBuilder& LogicalNot();
@@ -195,11 +211,9 @@
   // Deletes property from an object. This expects that accumulator contains
   // the key to be deleted and the register contains a reference to the object.
   BytecodeArrayBuilder& Delete(Register object, LanguageMode language_mode);
-  BytecodeArrayBuilder& DeleteLookupSlot();
 
   // Tests.
-  BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg,
-                                         Strength strength);
+  BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg);
 
   // Casts.
   BytecodeArrayBuilder& CastAccumulatorToBoolean();
@@ -214,48 +228,65 @@
   BytecodeArrayBuilder& Jump(BytecodeLabel* label);
   BytecodeArrayBuilder& JumpIfTrue(BytecodeLabel* label);
   BytecodeArrayBuilder& JumpIfFalse(BytecodeLabel* label);
+  BytecodeArrayBuilder& JumpIfNotHole(BytecodeLabel* label);
   BytecodeArrayBuilder& JumpIfNull(BytecodeLabel* label);
   BytecodeArrayBuilder& JumpIfUndefined(BytecodeLabel* label);
 
+  BytecodeArrayBuilder& StackCheck();
+
   BytecodeArrayBuilder& Throw();
+  BytecodeArrayBuilder& ReThrow();
   BytecodeArrayBuilder& Return();
 
+  // Debugger.
+  BytecodeArrayBuilder& Debugger();
+
   // Complex flow control.
-  BytecodeArrayBuilder& ForInPrepare(Register cache_type, Register cache_array,
-                                     Register cache_length);
+  BytecodeArrayBuilder& ForInPrepare(Register cache_info_triple);
   BytecodeArrayBuilder& ForInDone(Register index, Register cache_length);
-  BytecodeArrayBuilder& ForInNext(Register receiver, Register cache_type,
-                                  Register cache_array, Register index);
+  BytecodeArrayBuilder& ForInNext(Register receiver, Register index,
+                                  Register cache_type_array_pair);
   BytecodeArrayBuilder& ForInStep(Register index);
 
+  // Exception handling.
+  BytecodeArrayBuilder& MarkHandler(int handler_id, bool will_catch);
+  BytecodeArrayBuilder& MarkTryBegin(int handler_id, Register context);
+  BytecodeArrayBuilder& MarkTryEnd(int handler_id);
+
+  // Creates a new handler table entry and returns a {handler_id} identifying the
+  // entry, so that it can be referenced by above exception handling support.
+  int NewHandlerEntry() { return handler_table_builder()->NewHandlerEntry(); }
+
+  void SetStatementPosition(Statement* stmt);
+  void SetExpressionPosition(Expression* expr);
+
   // Accessors
   Zone* zone() const { return zone_; }
+  TemporaryRegisterAllocator* temporary_register_allocator() {
+    return &temporary_allocator_;
+  }
+  const TemporaryRegisterAllocator* temporary_register_allocator() const {
+    return &temporary_allocator_;
+  }
+
+  void EnsureReturn(FunctionLiteral* literal);
 
  private:
-  ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
-  const ZoneVector<uint8_t>* bytecodes() const { return &bytecodes_; }
-  Isolate* isolate() const { return isolate_; }
-  ConstantArrayBuilder* constant_array_builder() {
-    return &constant_array_builder_;
-  }
-  const ConstantArrayBuilder* constant_array_builder() const {
-    return &constant_array_builder_;
-  }
+  class PreviousBytecodeHelper;
+  friend class BytecodeRegisterAllocator;
 
   static Bytecode BytecodeForBinaryOperation(Token::Value op);
   static Bytecode BytecodeForCountOperation(Token::Value op);
   static Bytecode BytecodeForCompareOperation(Token::Value op);
   static Bytecode BytecodeForWideOperands(Bytecode bytecode);
-  static Bytecode BytecodeForLoadIC(LanguageMode language_mode);
-  static Bytecode BytecodeForKeyedLoadIC(LanguageMode language_mode);
   static Bytecode BytecodeForStoreIC(LanguageMode language_mode);
   static Bytecode BytecodeForKeyedStoreIC(LanguageMode language_mode);
-  static Bytecode BytecodeForLoadGlobal(LanguageMode language_mode,
-                                        TypeofMode typeof_mode);
+  static Bytecode BytecodeForLoadGlobal(TypeofMode typeof_mode);
   static Bytecode BytecodeForStoreGlobal(LanguageMode language_mode);
   static Bytecode BytecodeForStoreLookupSlot(LanguageMode language_mode);
   static Bytecode BytecodeForCreateArguments(CreateArgumentsType type);
   static Bytecode BytecodeForDelete(LanguageMode language_mode);
+  static Bytecode BytecodeForCall(TailCallMode tail_call_mode);
 
   static bool FitsInIdx8Operand(int value);
   static bool FitsInIdx8Operand(size_t value);
@@ -263,15 +294,17 @@
   static bool FitsInIdx16Operand(int value);
   static bool FitsInIdx16Operand(size_t value);
   static bool FitsInReg8Operand(Register value);
+  static bool FitsInReg8OperandUntranslated(Register value);
   static bool FitsInReg16Operand(Register value);
+  static bool FitsInReg16OperandUntranslated(Register value);
+
+  // RegisterMover interface.
+  void MoveRegisterUntranslated(Register from, Register to) override;
 
   static Bytecode GetJumpWithConstantOperand(Bytecode jump_smi8_operand);
   static Bytecode GetJumpWithConstantWideOperand(Bytecode jump_smi8_operand);
   static Bytecode GetJumpWithToBoolean(Bytecode jump_smi8_operand);
 
-  Register MapRegister(Register reg);
-  Register MapRegisters(Register reg, Register args_base, int args_length = 1);
-
   template <size_t N>
   INLINE(void Output(Bytecode bytecode, uint32_t(&operands)[N]));
   void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
@@ -292,49 +325,54 @@
       const ZoneVector<uint8_t>::iterator& jump_location, int delta);
 
   void LeaveBasicBlock();
-  void EnsureReturn();
 
   bool OperandIsValid(Bytecode bytecode, int operand_index,
                       uint32_t operand_value) const;
-  bool LastBytecodeInSameBlock() const;
+  bool RegisterIsValid(Register reg, OperandType reg_type) const;
 
+  bool LastBytecodeInSameBlock() const;
   bool NeedToBooleanCast();
   bool IsRegisterInAccumulator(Register reg);
 
-  bool RegisterIsValid(Register reg) const;
-
-  // Temporary register management.
-  int BorrowTemporaryRegister();
-  int BorrowTemporaryRegisterNotInRange(int start_index, int end_index);
-  void ReturnTemporaryRegister(int reg_index);
-  int PrepareForConsecutiveTemporaryRegisters(size_t count);
-  void BorrowConsecutiveTemporaryRegister(int reg_index);
-  bool TemporaryRegisterIsLive(Register reg) const;
-
-  Register first_temporary_register() const;
-  Register last_temporary_register() const;
+  // Set position for implicit return.
+  void SetReturnPosition(FunctionLiteral* fun);
 
   // Gets a constant pool entry for the |object|.
   size_t GetConstantPoolEntry(Handle<Object> object);
 
+  ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
+  const ZoneVector<uint8_t>* bytecodes() const { return &bytecodes_; }
+  Isolate* isolate() const { return isolate_; }
+  ConstantArrayBuilder* constant_array_builder() {
+    return &constant_array_builder_;
+  }
+  const ConstantArrayBuilder* constant_array_builder() const {
+    return &constant_array_builder_;
+  }
+  HandlerTableBuilder* handler_table_builder() {
+    return &handler_table_builder_;
+  }
+  SourcePositionTableBuilder* source_position_table_builder() {
+    return &source_position_table_builder_;
+  }
+  RegisterTranslator* register_translator() { return &register_translator_; }
+
   Isolate* isolate_;
   Zone* zone_;
   ZoneVector<uint8_t> bytecodes_;
   bool bytecode_generated_;
   ConstantArrayBuilder constant_array_builder_;
+  HandlerTableBuilder handler_table_builder_;
+  SourcePositionTableBuilder source_position_table_builder_;
   size_t last_block_end_;
   size_t last_bytecode_start_;
   bool exit_seen_in_block_;
   int unbound_jumps_;
-
   int parameter_count_;
   int local_register_count_;
   int context_register_count_;
-  int temporary_register_count_;
-  ZoneSet<int> free_temporaries_;
-
-  class PreviousBytecodeHelper;
-  friend class BytecodeRegisterAllocator;
+  TemporaryRegisterAllocator temporary_allocator_;
+  RegisterTranslator register_translator_;
 
   DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBuilder);
 };
diff --git a/src/interpreter/bytecode-array-iterator.cc b/src/interpreter/bytecode-array-iterator.cc
index d09d72f..0fea985 100644
--- a/src/interpreter/bytecode-array-iterator.cc
+++ b/src/interpreter/bytecode-array-iterator.cc
@@ -47,14 +47,14 @@
       bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
       Bytecodes::GetOperandOffset(current_bytecode(), operand_index);
   switch (Bytecodes::SizeOfOperand(operand_type)) {
-    default:
-    case OperandSize::kNone:
-      UNREACHABLE();
     case OperandSize::kByte:
       return static_cast<uint32_t>(*operand_start);
     case OperandSize::kShort:
       return ReadUnalignedUInt16(operand_start);
+    case OperandSize::kNone:
+      UNREACHABLE();
   }
+  return 0;
 }
 
 
@@ -63,12 +63,11 @@
   return static_cast<int8_t>(operand);
 }
 
-
-int BytecodeArrayIterator::GetCountOperand(int operand_index) const {
+int BytecodeArrayIterator::GetRegisterCountOperand(int operand_index) const {
   OperandSize size =
       Bytecodes::GetOperandSize(current_bytecode(), operand_index);
-  OperandType type = (size == OperandSize::kByte) ? OperandType::kCount8
-                                                  : OperandType::kCount16;
+  OperandType type = (size == OperandSize::kByte) ? OperandType::kRegCount8
+                                                  : OperandType::kRegCount16;
   uint32_t operand = GetRawOperand(operand_index, type);
   return static_cast<int>(operand);
 }
@@ -87,19 +86,63 @@
 Register BytecodeArrayIterator::GetRegisterOperand(int operand_index) const {
   OperandType operand_type =
       Bytecodes::GetOperandType(current_bytecode(), operand_index);
-  DCHECK(operand_type == OperandType::kReg8 ||
-         operand_type == OperandType::kRegPair8 ||
-         operand_type == OperandType::kMaybeReg8 ||
-         operand_type == OperandType::kReg16);
+  DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
   uint32_t operand = GetRawOperand(operand_index, operand_type);
-  return Register::FromOperand(operand);
+  Register reg;
+  switch (Bytecodes::GetOperandSize(current_bytecode(), operand_index)) {
+    case OperandSize::kByte:
+      reg = Register::FromOperand(static_cast<uint8_t>(operand));
+      break;
+    case OperandSize::kShort:
+      reg = Register::FromWideOperand(static_cast<uint16_t>(operand));
+      break;
+    case OperandSize::kNone:
+      UNREACHABLE();
+      reg = Register::invalid_value();
+      break;
+  }
+  DCHECK_GE(reg.index(),
+            Register::FromParameterIndex(0, bytecode_array()->parameter_count())
+                .index());
+  DCHECK(reg.index() < bytecode_array()->register_count() ||
+         (reg.index() == 0 &&
+          Bytecodes::IsMaybeRegisterOperandType(
+              Bytecodes::GetOperandType(current_bytecode(), operand_index))));
+  return reg;
 }
 
+int BytecodeArrayIterator::GetRegisterOperandRange(int operand_index) const {
+  interpreter::OperandType operand_type =
+      Bytecodes::GetOperandType(current_bytecode(), operand_index);
+  DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
+  switch (operand_type) {
+    case OperandType::kRegPair8:
+    case OperandType::kRegPair16:
+    case OperandType::kRegOutPair8:
+    case OperandType::kRegOutPair16:
+      return 2;
+    case OperandType::kRegOutTriple8:
+    case OperandType::kRegOutTriple16:
+      return 3;
+    default: {
+      if (operand_index + 1 !=
+          Bytecodes::NumberOfOperands(current_bytecode())) {
+        OperandType next_operand_type =
+            Bytecodes::GetOperandType(current_bytecode(), operand_index + 1);
+        if (Bytecodes::IsRegisterCountOperandType(next_operand_type)) {
+          return GetRegisterCountOperand(operand_index + 1);
+        }
+      }
+      return 1;
+    }
+  }
+}
 
 Handle<Object> BytecodeArrayIterator::GetConstantForIndexOperand(
     int operand_index) const {
-  Handle<FixedArray> constants = handle(bytecode_array()->constant_pool());
-  return FixedArray::get(constants, GetIndexOperand(operand_index));
+  return FixedArray::get(bytecode_array()->constant_pool(),
+                         GetIndexOperand(operand_index),
+                         bytecode_array()->GetIsolate());
 }
 
 
diff --git a/src/interpreter/bytecode-array-iterator.h b/src/interpreter/bytecode-array-iterator.h
index e67fa97..5379bbf 100644
--- a/src/interpreter/bytecode-array-iterator.h
+++ b/src/interpreter/bytecode-array-iterator.h
@@ -21,6 +21,7 @@
   bool done() const;
   Bytecode current_bytecode() const;
   int current_bytecode_size() const;
+  void set_current_offset(int offset) { bytecode_offset_ = offset; }
   int current_offset() const { return bytecode_offset_; }
   const Handle<BytecodeArray>& bytecode_array() const {
     return bytecode_array_;
@@ -28,8 +29,9 @@
 
   int8_t GetImmediateOperand(int operand_index) const;
   int GetIndexOperand(int operand_index) const;
-  int GetCountOperand(int operand_index) const;
+  int GetRegisterCountOperand(int operand_index) const;
   Register GetRegisterOperand(int operand_index) const;
+  int GetRegisterOperandRange(int operand_index) const;
   Handle<Object> GetConstantForIndexOperand(int operand_index) const;
 
   // Get the raw byte for the given operand. Note: you should prefer using the
diff --git a/src/interpreter/bytecode-generator.cc b/src/interpreter/bytecode-generator.cc
index 959e155..6f4dc27 100644
--- a/src/interpreter/bytecode-generator.cc
+++ b/src/interpreter/bytecode-generator.cc
@@ -27,19 +27,26 @@
       : generator_(generator),
         scope_(scope),
         outer_(generator_->execution_context()),
-        register_(generator_->NextContextRegister()),
+        register_(Register::current_context()),
         depth_(0),
         should_pop_context_(should_pop_context) {
     if (outer_) {
       depth_ = outer_->depth_ + 1;
-      generator_->builder()->PushContext(register_);
+
+      // Push the outer context into a new context register.
+      Register outer_context_reg(builder()->first_context_register().index() +
+                                 outer_->depth_);
+      outer_->set_register(outer_context_reg);
+      generator_->builder()->PushContext(outer_context_reg);
     }
     generator_->set_execution_context(this);
   }
 
   ~ContextScope() {
     if (outer_ && should_pop_context_) {
+      DCHECK_EQ(register_.index(), Register::current_context().index());
       generator_->builder()->PopContext(outer_->reg());
+      outer_->set_register(register_);
     }
     generator_->set_execution_context(outer_);
   }
@@ -67,6 +74,10 @@
   Register reg() const { return register_; }
 
  private:
+  const BytecodeArrayBuilder* builder() const { return generator_->builder(); }
+
+  void set_register(Register reg) { register_ = reg; }
+
   BytecodeGenerator* generator_;
   Scope* scope_;
   ContextScope* outer_;
@@ -81,30 +92,141 @@
 class BytecodeGenerator::ControlScope BASE_EMBEDDED {
  public:
   explicit ControlScope(BytecodeGenerator* generator)
-      : generator_(generator), outer_(generator->execution_control()) {
+      : generator_(generator), outer_(generator->execution_control()),
+        context_(generator->execution_context()) {
     generator_->set_execution_control(this);
   }
   virtual ~ControlScope() { generator_->set_execution_control(outer()); }
 
   void Break(Statement* stmt) { PerformCommand(CMD_BREAK, stmt); }
   void Continue(Statement* stmt) { PerformCommand(CMD_CONTINUE, stmt); }
+  void ReturnAccumulator() { PerformCommand(CMD_RETURN, nullptr); }
+  void ReThrowAccumulator() { PerformCommand(CMD_RETHROW, nullptr); }
+
+  class DeferredCommands;
 
  protected:
-  enum Command { CMD_BREAK, CMD_CONTINUE };
+  enum Command { CMD_BREAK, CMD_CONTINUE, CMD_RETURN, CMD_RETHROW };
   void PerformCommand(Command command, Statement* statement);
   virtual bool Execute(Command command, Statement* statement) = 0;
 
   BytecodeGenerator* generator() const { return generator_; }
   ControlScope* outer() const { return outer_; }
+  ContextScope* context() const { return context_; }
 
  private:
   BytecodeGenerator* generator_;
   ControlScope* outer_;
+  ContextScope* context_;
 
   DISALLOW_COPY_AND_ASSIGN(ControlScope);
 };
 
 
+// Helper class for a try-finally control scope. It can record intercepted
+// control-flow commands that cause entry into a finally-block, and re-apply
+// them after again leaving that block. Special tokens are used to identify
+// paths going through the finally-block to dispatch after leaving the block.
+class BytecodeGenerator::ControlScope::DeferredCommands final {
+ public:
+  DeferredCommands(BytecodeGenerator* generator, Register token_register,
+                   Register result_register)
+      : generator_(generator),
+        deferred_(generator->zone()),
+        token_register_(token_register),
+        result_register_(result_register) {}
+
+  // One recorded control-flow command.
+  struct Entry {
+    Command command;       // The command type being applied on this path.
+    Statement* statement;  // The target statement for the command or {nullptr}.
+    int token;             // A token identifying this particular path.
+  };
+
+  // Records a control-flow command while entering the finally-block. This also
+  // generates a new dispatch token that identifies one particular path. This
+  // expects the result to be in the accumulator.
+  void RecordCommand(Command command, Statement* statement) {
+    int token = static_cast<int>(deferred_.size());
+    deferred_.push_back({command, statement, token});
+
+    builder()->StoreAccumulatorInRegister(result_register_);
+    builder()->LoadLiteral(Smi::FromInt(token));
+    builder()->StoreAccumulatorInRegister(token_register_);
+  }
+
+  // Records the dispatch token to be used to identify the re-throw path when
+  // the finally-block has been entered through the exception handler. This
+  // expects the exception to be in the accumulator.
+  void RecordHandlerReThrowPath() {
+    // The accumulator contains the exception object.
+    RecordCommand(CMD_RETHROW, nullptr);
+  }
+
+  // Records the dispatch token to be used to identify the implicit fall-through
+  // path at the end of a try-block into the corresponding finally-block.
+  void RecordFallThroughPath() {
+    builder()->LoadLiteral(Smi::FromInt(-1));
+    builder()->StoreAccumulatorInRegister(token_register_);
+  }
+
+  // Applies all recorded control-flow commands after the finally-block again.
+  // This generates a dynamic dispatch on the token from the entry point.
+  void ApplyDeferredCommands() {
+    // The fall-through path is covered by the default case, hence +1 here.
+    SwitchBuilder dispatch(builder(), static_cast<int>(deferred_.size() + 1));
+    for (size_t i = 0; i < deferred_.size(); ++i) {
+      Entry& entry = deferred_[i];
+      builder()->LoadLiteral(Smi::FromInt(entry.token));
+      builder()->CompareOperation(Token::EQ_STRICT, token_register_);
+      dispatch.Case(static_cast<int>(i));
+    }
+    dispatch.DefaultAt(static_cast<int>(deferred_.size()));
+    for (size_t i = 0; i < deferred_.size(); ++i) {
+      Entry& entry = deferred_[i];
+      dispatch.SetCaseTarget(static_cast<int>(i));
+      builder()->LoadAccumulatorWithRegister(result_register_);
+      execution_control()->PerformCommand(entry.command, entry.statement);
+    }
+    dispatch.SetCaseTarget(static_cast<int>(deferred_.size()));
+  }
+
+  BytecodeArrayBuilder* builder() { return generator_->builder(); }
+  ControlScope* execution_control() { return generator_->execution_control(); }
+
+ private:
+  BytecodeGenerator* generator_;
+  ZoneVector<Entry> deferred_;
+  Register token_register_;
+  Register result_register_;
+};
+
+
+// Scoped class for dealing with control flow reaching the function level.
+class BytecodeGenerator::ControlScopeForTopLevel final
+    : public BytecodeGenerator::ControlScope {
+ public:
+  explicit ControlScopeForTopLevel(BytecodeGenerator* generator)
+      : ControlScope(generator) {}
+
+ protected:
+  bool Execute(Command command, Statement* statement) override {
+    switch (command) {
+      case CMD_BREAK:
+      case CMD_CONTINUE:
+        break;
+      case CMD_RETURN:
+        generator()->builder()->Return();
+        return true;
+      case CMD_RETHROW:
+        generator()->builder()->ReThrow();
+        return true;
+    }
+    return false;
+  }
+};
+
+
 // Scoped class for enabling break inside blocks and switch blocks.
 class BytecodeGenerator::ControlScopeForBreakable final
     : public BytecodeGenerator::ControlScope {
@@ -117,13 +239,15 @@
         control_builder_(control_builder) {}
 
  protected:
-  virtual bool Execute(Command command, Statement* statement) {
+  bool Execute(Command command, Statement* statement) override {
     if (statement != statement_) return false;
     switch (command) {
       case CMD_BREAK:
         control_builder_->Break();
         return true;
       case CMD_CONTINUE:
+      case CMD_RETURN:
+      case CMD_RETHROW:
         break;
     }
     return false;
@@ -148,7 +272,7 @@
         loop_builder_(loop_builder) {}
 
  protected:
-  virtual bool Execute(Command command, Statement* statement) {
+  bool Execute(Command command, Statement* statement) override {
     if (statement != statement_) return false;
     switch (command) {
       case CMD_BREAK:
@@ -157,6 +281,9 @@
       case CMD_CONTINUE:
         loop_builder_->Continue();
         return true;
+      case CMD_RETURN:
+      case CMD_RETHROW:
+        break;
     }
     return false;
   }
@@ -167,12 +294,84 @@
 };
 
 
+// Scoped class for enabling 'throw' in try-catch constructs.
+class BytecodeGenerator::ControlScopeForTryCatch final
+    : public BytecodeGenerator::ControlScope {
+ public:
+  ControlScopeForTryCatch(BytecodeGenerator* generator,
+                          TryCatchBuilder* try_catch_builder)
+      : ControlScope(generator) {
+    generator->try_catch_nesting_level_++;
+  }
+  virtual ~ControlScopeForTryCatch() {
+    generator()->try_catch_nesting_level_--;
+  }
+
+ protected:
+  bool Execute(Command command, Statement* statement) override {
+    switch (command) {
+      case CMD_BREAK:
+      case CMD_CONTINUE:
+      case CMD_RETURN:
+        break;
+      case CMD_RETHROW:
+        generator()->builder()->ReThrow();
+        return true;
+    }
+    return false;
+  }
+};
+
+
+// Scoped class for enabling control flow through try-finally constructs.
+class BytecodeGenerator::ControlScopeForTryFinally final
+    : public BytecodeGenerator::ControlScope {
+ public:
+  ControlScopeForTryFinally(BytecodeGenerator* generator,
+                            TryFinallyBuilder* try_finally_builder,
+                            DeferredCommands* commands)
+      : ControlScope(generator),
+        try_finally_builder_(try_finally_builder),
+        commands_(commands) {
+    generator->try_finally_nesting_level_++;
+  }
+  virtual ~ControlScopeForTryFinally() {
+    generator()->try_finally_nesting_level_--;
+  }
+
+ protected:
+  bool Execute(Command command, Statement* statement) override {
+    switch (command) {
+      case CMD_BREAK:
+      case CMD_CONTINUE:
+      case CMD_RETURN:
+      case CMD_RETHROW:
+        commands_->RecordCommand(command, statement);
+        try_finally_builder_->LeaveTry();
+        return true;
+    }
+    return false;
+  }
+
+ private:
+  TryFinallyBuilder* try_finally_builder_;
+  DeferredCommands* commands_;
+};
+
+
 void BytecodeGenerator::ControlScope::PerformCommand(Command command,
                                                      Statement* statement) {
   ControlScope* current = this;
+  ContextScope* context = this->context();
   do {
-    if (current->Execute(command, statement)) return;
+    if (current->Execute(command, statement)) { return; }
     current = current->outer();
+    if (current->context() != context) {
+      // Pop context to the expected depth.
+      // TODO(rmcilroy): Only emit a single context pop.
+      generator()->builder()->PopContext(current->context()->reg());
+      context = current->context();
+    }
   } while (current != nullptr);
   UNREACHABLE();
 }
@@ -183,7 +382,8 @@
   explicit RegisterAllocationScope(BytecodeGenerator* generator)
       : generator_(generator),
         outer_(generator->register_allocator()),
-        allocator_(builder()) {
+        allocator_(builder()->zone(),
+                   builder()->temporary_register_allocator()) {
     generator_->set_register_allocator(this);
   }
 
@@ -205,11 +405,11 @@
       // walk the full context chain and compute the list of consecutive
       // reservations in the innerscopes.
       UNIMPLEMENTED();
-      return Register(-1);
+      return Register::invalid_value();
     }
   }
 
-  void PrepareForConsecutiveAllocations(size_t count) {
+  void PrepareForConsecutiveAllocations(int count) {
     allocator_.PrepareForConsecutiveAllocations(count);
   }
 
@@ -330,7 +530,7 @@
 
   virtual void SetResultInRegister(Register reg) {
     DCHECK(builder()->RegisterIsParameterOrLocal(reg) ||
-           (builder()->RegisterIsTemporary(reg) &&
+           (builder()->TemporaryRegisterIsLive(reg) &&
             !allocator()->RegisterIsAllocatedInThisScope(reg)));
     result_register_ = reg;
     set_result_identified();
@@ -342,32 +542,36 @@
   Register result_register_;
 };
 
-
 BytecodeGenerator::BytecodeGenerator(Isolate* isolate, Zone* zone)
     : isolate_(isolate),
       zone_(zone),
-      builder_(isolate, zone),
+      builder_(nullptr),
       info_(nullptr),
       scope_(nullptr),
       globals_(0, zone),
       execution_control_(nullptr),
       execution_context_(nullptr),
       execution_result_(nullptr),
-      register_allocator_(nullptr) {
+      register_allocator_(nullptr),
+      try_catch_nesting_level_(0),
+      try_finally_nesting_level_(0) {
   InitializeAstVisitor(isolate);
 }
 
-
 Handle<BytecodeArray> BytecodeGenerator::MakeBytecode(CompilationInfo* info) {
   set_info(info);
   set_scope(info->scope());
 
+  // Initialize bytecode array builder.
+  set_builder(new (zone()) BytecodeArrayBuilder(
+      isolate(), zone(), info->num_parameters_including_this(),
+      scope()->MaxNestedContextChainLength(), scope()->num_stack_slots()));
+
   // Initialize the incoming context.
   ContextScope incoming_context(this, scope(), false);
 
-  builder()->set_parameter_count(info->num_parameters_including_this());
-  builder()->set_locals_count(scope()->num_stack_slots());
-  builder()->set_context_count(scope()->MaxNestedContextChainLength());
+  // Initialize control scope.
+  ControlScopeForTopLevel control(this);
 
   // Build function context only if there are context allocated variables.
   if (scope()->NeedsContext()) {
@@ -380,9 +584,10 @@
     MakeBytecodeBody();
   }
 
+  builder()->EnsureReturn(info->literal());
   set_scope(nullptr);
   set_info(nullptr);
-  return builder_.ToBytecodeArray();
+  return builder()->ToBytecodeArray();
 }
 
 
@@ -390,11 +595,10 @@
   // Build the arguments object if it is used.
   VisitArgumentsObject(scope()->arguments());
 
-  // TODO(mythria): Build rest arguments array if it is used.
+  // Build rest arguments array if it is used.
   int rest_index;
-  if (scope()->rest_parameter(&rest_index)) {
-    UNIMPLEMENTED();
-  }
+  Variable* rest_parameter = scope()->rest_parameter(&rest_index);
+  VisitRestArgumentsArray(rest_parameter);
 
   // Build assignment to {.this_function} variable if it is used.
   VisitThisFunctionVariable(scope()->this_function_var());
@@ -409,37 +613,40 @@
 
   // Visit illegal re-declaration and bail out if it exists.
   if (scope()->HasIllegalRedeclaration()) {
-    Visit(scope()->GetIllegalRedeclaration());
+    VisitForEffect(scope()->GetIllegalRedeclaration());
     return;
   }
 
   // Visit declarations within the function scope.
   VisitDeclarations(scope()->declarations());
 
+  // Perform a stack-check before the body.
+  builder()->StackCheck();
+
   // Visit statements in the function body.
   VisitStatements(info()->literal()->body());
 }
 
 
 void BytecodeGenerator::VisitBlock(Block* stmt) {
-  BlockBuilder block_builder(this->builder());
-  ControlScopeForBreakable execution_control(this, stmt, &block_builder);
-
-  if (stmt->scope() == NULL) {
-    // Visit statements in the same scope, no declarations.
-    VisitStatements(stmt->statements());
+  // Visit declarations and statements.
+  if (stmt->scope() != nullptr && stmt->scope()->NeedsContext()) {
+    VisitNewLocalBlockContext(stmt->scope());
+    ContextScope scope(this, stmt->scope());
+    VisitBlockDeclarationsAndStatements(stmt);
   } else {
-    // Visit declarations and statements in a block scope.
-    if (stmt->scope()->NeedsContext()) {
-      VisitNewLocalBlockContext(stmt->scope());
-      ContextScope scope(this, stmt->scope());
-      VisitDeclarations(stmt->scope()->declarations());
-      VisitStatements(stmt->statements());
-    } else {
-      VisitDeclarations(stmt->scope()->declarations());
-      VisitStatements(stmt->statements());
-    }
+    VisitBlockDeclarationsAndStatements(stmt);
   }
+}
+
+
+void BytecodeGenerator::VisitBlockDeclarationsAndStatements(Block* stmt) {
+  BlockBuilder block_builder(builder());
+  ControlScopeForBreakable execution_control(this, stmt, &block_builder);
+  if (stmt->scope() != nullptr) {
+    VisitDeclarations(stmt->scope()->declarations());
+  }
+  VisitStatements(stmt->statements());
   if (stmt->labels() != nullptr) block_builder.EndBlock();
 }
 
@@ -480,9 +687,31 @@
                                                   variable->index());
       }
       break;
-    case VariableLocation::LOOKUP:
-      UNIMPLEMENTED();
+    case VariableLocation::LOOKUP: {
+      DCHECK(IsDeclaredVariableMode(mode));
+
+      register_allocator()->PrepareForConsecutiveAllocations(3);
+      Register name = register_allocator()->NextConsecutiveRegister();
+      Register init_value = register_allocator()->NextConsecutiveRegister();
+      Register attributes = register_allocator()->NextConsecutiveRegister();
+
+      builder()->LoadLiteral(variable->name()).StoreAccumulatorInRegister(name);
+      if (hole_init) {
+        builder()->LoadTheHole().StoreAccumulatorInRegister(init_value);
+      } else {
+        // For variables, we must not use an initial value (such as 'undefined')
+        // because we may have a (legal) redeclaration and we must not destroy
+        // the current value.
+        builder()
+            ->LoadLiteral(Smi::FromInt(0))
+            .StoreAccumulatorInRegister(init_value);
+      }
+      builder()
+          ->LoadLiteral(Smi::FromInt(variable->DeclarationPropertyAttributes()))
+          .StoreAccumulatorInRegister(attributes)
+          .CallRuntime(Runtime::kDeclareLookupSlot, name, 3);
       break;
+    }
   }
 }
 
@@ -503,7 +732,10 @@
     case VariableLocation::PARAMETER:
     case VariableLocation::LOCAL: {
       VisitForAccumulatorValue(decl->fun());
-      VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+      DCHECK(variable->mode() == LET || variable->mode() == VAR ||
+             variable->mode() == CONST);
+      VisitVariableAssignment(variable, Token::INIT,
+                              FeedbackVectorSlot::Invalid());
       break;
     }
     case VariableLocation::CONTEXT: {
@@ -513,8 +745,20 @@
                                   variable->index());
       break;
     }
-    case VariableLocation::LOOKUP:
-      UNIMPLEMENTED();
+    case VariableLocation::LOOKUP: {
+      register_allocator()->PrepareForConsecutiveAllocations(3);
+      Register name = register_allocator()->NextConsecutiveRegister();
+      Register literal = register_allocator()->NextConsecutiveRegister();
+      Register attributes = register_allocator()->NextConsecutiveRegister();
+      builder()->LoadLiteral(variable->name()).StoreAccumulatorInRegister(name);
+
+      VisitForAccumulatorValue(decl->fun());
+      builder()
+          ->StoreAccumulatorInRegister(literal)
+          .LoadLiteral(Smi::FromInt(variable->DeclarationPropertyAttributes()))
+          .StoreAccumulatorInRegister(attributes)
+          .CallRuntime(Runtime::kDeclareLookupSlot, name, 3);
+    }
   }
 }
 
@@ -533,7 +777,10 @@
     ZoneList<Declaration*>* declarations) {
   RegisterAllocationScope register_scope(this);
   DCHECK(globals()->empty());
-  AstVisitor::VisitDeclarations(declarations);
+  for (int i = 0; i < declarations->length(); i++) {
+    RegisterAllocationScope register_scope(this);
+    Visit(declarations->at(i));
+  }
   if (globals()->empty()) return;
   int array_index = 0;
   Handle<FixedArray> data = isolate()->factory()->NewFixedArray(
@@ -569,6 +816,7 @@
 
 
 void BytecodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
+  builder()->SetStatementPosition(stmt);
   VisitForEffect(stmt->expression());
 }
 
@@ -624,12 +872,16 @@
 
 void BytecodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
   VisitForAccumulatorValue(stmt->expression());
-  builder()->Return();
+  builder()->SetStatementPosition(stmt);
+  execution_control()->ReturnAccumulator();
 }
 
 
 void BytecodeGenerator::VisitWithStatement(WithStatement* stmt) {
-  UNIMPLEMENTED();
+  VisitForAccumulatorValue(stmt->expression());
+  builder()->CastAccumulatorToJSObject();
+  VisitNewLocalWithContext();
+  VisitInScope(stmt->statement(), stmt->scope());
 }
 
 
@@ -657,8 +909,7 @@
 
     // Perform label comparison as if via '===' with tag.
     VisitForAccumulatorValue(clause->label());
-    builder()->CompareOperation(Token::Value::EQ_STRICT, tag,
-                                language_mode_strength());
+    builder()->CompareOperation(Token::Value::EQ_STRICT, tag);
     switch_builder.Case(i);
   }
 
@@ -688,20 +939,25 @@
   UNREACHABLE();
 }
 
+void BytecodeGenerator::VisitIterationBody(IterationStatement* stmt,
+                                           LoopBuilder* loop_builder) {
+  ControlScopeForIteration execution_control(this, stmt, loop_builder);
+  builder()->StackCheck();
+  Visit(stmt->body());
+}
 
 void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
   LoopBuilder loop_builder(builder());
-  ControlScopeForIteration execution_control(this, stmt, &loop_builder);
   loop_builder.LoopHeader();
   if (stmt->cond()->ToBooleanIsFalse()) {
-    Visit(stmt->body());
+    VisitIterationBody(stmt, &loop_builder);
     loop_builder.Condition();
   } else if (stmt->cond()->ToBooleanIsTrue()) {
     loop_builder.Condition();
-    Visit(stmt->body());
+    VisitIterationBody(stmt, &loop_builder);
     loop_builder.JumpToHeader();
   } else {
-    Visit(stmt->body());
+    VisitIterationBody(stmt, &loop_builder);
     loop_builder.Condition();
     VisitForAccumulatorValue(stmt->cond());
     loop_builder.JumpToHeaderIfTrue();
@@ -709,7 +965,6 @@
   loop_builder.EndLoop();
 }
 
-
 void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
   if (stmt->cond()->ToBooleanIsFalse()) {
     // If the condition is false there is no need to generate the loop.
@@ -717,14 +972,13 @@
   }
 
   LoopBuilder loop_builder(builder());
-  ControlScopeForIteration execution_control(this, stmt, &loop_builder);
   loop_builder.LoopHeader();
   loop_builder.Condition();
   if (!stmt->cond()->ToBooleanIsTrue()) {
     VisitForAccumulatorValue(stmt->cond());
     loop_builder.BreakIfFalse();
   }
-  Visit(stmt->body());
+  VisitIterationBody(stmt, &loop_builder);
   loop_builder.JumpToHeader();
   loop_builder.EndLoop();
 }
@@ -741,15 +995,13 @@
   }
 
   LoopBuilder loop_builder(builder());
-  ControlScopeForIteration execution_control(this, stmt, &loop_builder);
-
   loop_builder.LoopHeader();
   loop_builder.Condition();
   if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) {
     VisitForAccumulatorValue(stmt->cond());
     loop_builder.BreakIfFalse();
   }
-  Visit(stmt->body());
+  VisitIterationBody(stmt, &loop_builder);
   if (stmt->next() != nullptr) {
     loop_builder.Next();
     Visit(stmt->next());
@@ -770,7 +1022,7 @@
   switch (assign_type) {
     case VARIABLE: {
       Variable* variable = expr->AsVariableProxy()->var();
-      VisitVariableAssignment(variable, slot);
+      VisitVariableAssignment(variable, Token::ASSIGN, slot);
       break;
     }
     case NAMED_PROPERTY: {
@@ -795,9 +1047,40 @@
                                     language_mode());
       break;
     }
-    case NAMED_SUPER_PROPERTY:
-    case KEYED_SUPER_PROPERTY:
-      UNIMPLEMENTED();
+    case NAMED_SUPER_PROPERTY: {
+      RegisterAllocationScope register_scope(this);
+      register_allocator()->PrepareForConsecutiveAllocations(4);
+      Register receiver = register_allocator()->NextConsecutiveRegister();
+      Register home_object = register_allocator()->NextConsecutiveRegister();
+      Register name = register_allocator()->NextConsecutiveRegister();
+      Register value = register_allocator()->NextConsecutiveRegister();
+      builder()->StoreAccumulatorInRegister(value);
+      SuperPropertyReference* super_property =
+          property->obj()->AsSuperPropertyReference();
+      VisitForRegisterValue(super_property->this_var(), receiver);
+      VisitForRegisterValue(super_property->home_object(), home_object);
+      builder()
+          ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
+          .StoreAccumulatorInRegister(name);
+      BuildNamedSuperPropertyStore(receiver, home_object, name, value);
+      break;
+    }
+    case KEYED_SUPER_PROPERTY: {
+      RegisterAllocationScope register_scope(this);
+      register_allocator()->PrepareForConsecutiveAllocations(4);
+      Register receiver = register_allocator()->NextConsecutiveRegister();
+      Register home_object = register_allocator()->NextConsecutiveRegister();
+      Register key = register_allocator()->NextConsecutiveRegister();
+      Register value = register_allocator()->NextConsecutiveRegister();
+      builder()->StoreAccumulatorInRegister(value);
+      SuperPropertyReference* super_property =
+          property->obj()->AsSuperPropertyReference();
+      VisitForRegisterValue(super_property->this_var(), receiver);
+      VisitForRegisterValue(super_property->home_object(), home_object);
+      VisitForRegisterValue(property->key(), key);
+      BuildKeyedSuperPropertyStore(receiver, home_object, key, value);
+      break;
+    }
   }
 }
 
@@ -810,7 +1093,6 @@
   }
 
   LoopBuilder loop_builder(builder());
-  ControlScopeForIteration control_scope(this, stmt, &loop_builder);
   BytecodeLabel subject_null_label, subject_undefined_label, not_object_label;
 
   // Prepare the state for executing ForIn.
@@ -821,10 +1103,14 @@
   builder()->CastAccumulatorToJSObject();
   builder()->JumpIfNull(&not_object_label);
   builder()->StoreAccumulatorInRegister(receiver);
-  Register cache_type = register_allocator()->NewRegister();
-  Register cache_array = register_allocator()->NewRegister();
-  Register cache_length = register_allocator()->NewRegister();
-  builder()->ForInPrepare(cache_type, cache_array, cache_length);
+
+  register_allocator()->PrepareForConsecutiveAllocations(3);
+  Register cache_type = register_allocator()->NextConsecutiveRegister();
+  Register cache_array = register_allocator()->NextConsecutiveRegister();
+  Register cache_length = register_allocator()->NextConsecutiveRegister();
+  // Used as kRegTriple8 and kRegPair8 in ForInPrepare and ForInNext.
+  USE(cache_array);
+  builder()->ForInPrepare(cache_type);
 
   // Set up loop counter
   Register index = register_allocator()->NewRegister();
@@ -836,10 +1122,11 @@
   loop_builder.Condition();
   builder()->ForInDone(index, cache_length);
   loop_builder.BreakIfTrue();
-  builder()->ForInNext(receiver, cache_type, cache_array, index);
+  DCHECK(Register::AreContiguous(cache_type, cache_array));
+  builder()->ForInNext(receiver, index, cache_type);
   loop_builder.ContinueIfUndefined();
   VisitForInAssignment(stmt->each(), stmt->EachFeedbackSlot());
-  Visit(stmt->body());
+  VisitIterationBody(stmt, &loop_builder);
   loop_builder.Next();
   builder()->ForInStep(index);
   builder()->StoreAccumulatorInRegister(index);
@@ -852,31 +1139,127 @@
 
 
 void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
-  UNIMPLEMENTED();
+  LoopBuilder loop_builder(builder());
+  ControlScopeForIteration control_scope(this, stmt, &loop_builder);
+
+  VisitForEffect(stmt->assign_iterator());
+
+  loop_builder.LoopHeader();
+  loop_builder.Next();
+  VisitForEffect(stmt->next_result());
+  VisitForAccumulatorValue(stmt->result_done());
+  loop_builder.BreakIfTrue();
+
+  VisitForEffect(stmt->assign_each());
+  VisitIterationBody(stmt, &loop_builder);
+  loop_builder.JumpToHeader();
+  loop_builder.EndLoop();
 }
 
 
 void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
-  if (FLAG_ignition_fake_try_catch) {
+  TryCatchBuilder try_control_builder(builder());
+  Register no_reg;
+
+  // Preserve the context in a dedicated register, so that it can be restored
+  // when the handler is entered by the stack-unwinding machinery.
+  // TODO(mstarzinger): Be smarter about register allocation.
+  Register context = register_allocator()->NewRegister();
+  builder()->MoveRegister(Register::current_context(), context);
+
+  // Evaluate the try-block inside a control scope. This simulates a handler
+  // that is intercepting 'throw' control commands.
+  try_control_builder.BeginTry(context);
+  {
+    ControlScopeForTryCatch scope(this, &try_control_builder);
     Visit(stmt->try_block());
-    return;
   }
-  UNIMPLEMENTED();
+  try_control_builder.EndTry();
+
+  // Create a catch scope that binds the exception.
+  VisitNewLocalCatchContext(stmt->variable());
+  builder()->StoreAccumulatorInRegister(context);
+
+  // Clear message object as we enter the catch block.
+  builder()->CallRuntime(Runtime::kInterpreterClearPendingMessage, no_reg, 0);
+
+  // Load the catch context into the accumulator.
+  builder()->LoadAccumulatorWithRegister(context);
+
+  // Evaluate the catch-block.
+  VisitInScope(stmt->catch_block(), stmt->scope());
+  try_control_builder.EndCatch();
 }
 
 
 void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
-  if (FLAG_ignition_fake_try_catch) {
+  TryFinallyBuilder try_control_builder(builder(), IsInsideTryCatch());
+  Register no_reg;
+
+  // We keep a record of all paths that enter the finally-block to be able to
+  // dispatch to the correct continuation point after the statements in the
+  // finally-block have been evaluated.
+  //
+  // The try-finally construct can enter the finally-block in three ways:
+  // 1. By exiting the try-block normally, falling through at the end.
+  // 2. By exiting the try-block with a function-local control flow transfer
+  //    (i.e. through break/continue/return statements).
+  // 3. By exiting the try-block with a thrown exception.
+  //
+  // The result register semantics depend on how the block was entered:
+  //  - ReturnStatement: It represents the return value being returned.
+  //  - ThrowStatement: It represents the exception being thrown.
+  //  - BreakStatement/ContinueStatement: Undefined and not used.
+  //  - Falling through into finally-block: Undefined and not used.
+  Register token = register_allocator()->NewRegister();
+  Register result = register_allocator()->NewRegister();
+  ControlScope::DeferredCommands commands(this, token, result);
+
+  // Preserve the context in a dedicated register, so that it can be restored
+  // when the handler is entered by the stack-unwinding machinery.
+  // TODO(mstarzinger): Be smarter about register allocation.
+  Register context = register_allocator()->NewRegister();
+  builder()->MoveRegister(Register::current_context(), context);
+
+  // Evaluate the try-block inside a control scope. This simulates a handler
+  // that is intercepting all control commands.
+  try_control_builder.BeginTry(context);
+  {
+    ControlScopeForTryFinally scope(this, &try_control_builder, &commands);
     Visit(stmt->try_block());
-    Visit(stmt->finally_block());
-    return;
   }
-  UNIMPLEMENTED();
+  try_control_builder.EndTry();
+
+  // Record fall-through and exception cases.
+  commands.RecordFallThroughPath();
+  try_control_builder.LeaveTry();
+  try_control_builder.BeginHandler();
+  commands.RecordHandlerReThrowPath();
+
+  // Pending message object is saved on entry.
+  try_control_builder.BeginFinally();
+  Register message = context;  // Reuse register.
+
+  // Clear message object as we enter the finally block.
+  builder()
+      ->CallRuntime(Runtime::kInterpreterClearPendingMessage, no_reg, 0)
+      .StoreAccumulatorInRegister(message);
+
+  // Evaluate the finally-block.
+  Visit(stmt->finally_block());
+  try_control_builder.EndFinally();
+
+  // Pending message object is restored on exit.
+  builder()->CallRuntime(Runtime::kInterpreterSetPendingMessage, message, 1);
+
+  // Dynamic dispatch after the finally-block.
+  commands.ApplyDeferredCommands();
 }
 
 
 void BytecodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
-  UNIMPLEMENTED();
+  builder()->SetStatementPosition(stmt);
+  builder()->Debugger();
 }
 
 
@@ -892,18 +1275,166 @@
 
 
 void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
-  UNIMPLEMENTED();
+  if (expr->scope()->ContextLocalCount() > 0) {
+    VisitNewLocalBlockContext(expr->scope());
+    ContextScope scope(this, expr->scope());
+    VisitDeclarations(expr->scope()->declarations());
+    VisitClassLiteralContents(expr);
+  } else {
+    VisitDeclarations(expr->scope()->declarations());
+    VisitClassLiteralContents(expr);
+  }
 }
 
+void BytecodeGenerator::VisitClassLiteralContents(ClassLiteral* expr) {
+  VisitClassLiteralForRuntimeDefinition(expr);
+
+  // Load the "prototype" from the constructor.
+  register_allocator()->PrepareForConsecutiveAllocations(2);
+  Register literal = register_allocator()->NextConsecutiveRegister();
+  Register prototype = register_allocator()->NextConsecutiveRegister();
+  Handle<String> name = isolate()->factory()->prototype_string();
+  FeedbackVectorSlot slot = expr->PrototypeSlot();
+  builder()
+      ->StoreAccumulatorInRegister(literal)
+      .LoadNamedProperty(literal, name, feedback_index(slot))
+      .StoreAccumulatorInRegister(prototype);
+
+  VisitClassLiteralProperties(expr, literal, prototype);
+  builder()->CallRuntime(Runtime::kFinalizeClassDefinition, literal, 2);
+  // Assign to class variable.
+  if (expr->class_variable_proxy() != nullptr) {
+    Variable* var = expr->class_variable_proxy()->var();
+    FeedbackVectorSlot slot = expr->NeedsProxySlot()
+                                  ? expr->ProxySlot()
+                                  : FeedbackVectorSlot::Invalid();
+    VisitVariableAssignment(var, Token::INIT, slot);
+  }
+  execution_result()->SetResultInAccumulator();
+}
+
+void BytecodeGenerator::VisitClassLiteralForRuntimeDefinition(
+    ClassLiteral* expr) {
+  AccumulatorResultScope result_scope(this);
+  register_allocator()->PrepareForConsecutiveAllocations(4);
+  Register extends = register_allocator()->NextConsecutiveRegister();
+  Register constructor = register_allocator()->NextConsecutiveRegister();
+  Register start_position = register_allocator()->NextConsecutiveRegister();
+  Register end_position = register_allocator()->NextConsecutiveRegister();
+
+  VisitForAccumulatorValueOrTheHole(expr->extends());
+  builder()->StoreAccumulatorInRegister(extends);
+
+  VisitForAccumulatorValue(expr->constructor());
+  builder()
+      ->StoreAccumulatorInRegister(constructor)
+      .LoadLiteral(Smi::FromInt(expr->start_position()))
+      .StoreAccumulatorInRegister(start_position)
+      .LoadLiteral(Smi::FromInt(expr->end_position()))
+      .StoreAccumulatorInRegister(end_position)
+      .CallRuntime(Runtime::kDefineClass, extends, 4);
+  result_scope.SetResultInAccumulator();
+}
+
+void BytecodeGenerator::VisitClassLiteralProperties(ClassLiteral* expr,
+                                                    Register literal,
+                                                    Register prototype) {
+  RegisterAllocationScope register_scope(this);
+  register_allocator()->PrepareForConsecutiveAllocations(5);
+  Register receiver = register_allocator()->NextConsecutiveRegister();
+  Register key = register_allocator()->NextConsecutiveRegister();
+  Register value = register_allocator()->NextConsecutiveRegister();
+  Register attr = register_allocator()->NextConsecutiveRegister();
+  Register set_function_name = register_allocator()->NextConsecutiveRegister();
+
+  bool attr_assigned = false;
+  Register old_receiver = Register::invalid_value();
+
+  // Create nodes to store method values into the literal.
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+
+    // Set up receiver.
+    Register new_receiver = property->is_static() ? literal : prototype;
+    if (new_receiver != old_receiver) {
+      builder()->MoveRegister(new_receiver, receiver);
+      old_receiver = new_receiver;
+    }
+
+    VisitForAccumulatorValue(property->key());
+    builder()->CastAccumulatorToName().StoreAccumulatorInRegister(key);
+    // The static prototype property is read only. We handle the non computed
+    // property name case in the parser. Since this is the only case where we
+    // need to check for an own read only property we special case this so we do
+    // not need to do this for every property.
+    if (property->is_static() && property->is_computed_name()) {
+      VisitClassLiteralStaticPrototypeWithComputedName(key);
+    }
+    VisitForAccumulatorValue(property->value());
+    builder()->StoreAccumulatorInRegister(value);
+
+    VisitSetHomeObject(value, receiver, property);
+
+    if (!attr_assigned) {
+      builder()
+          ->LoadLiteral(Smi::FromInt(DONT_ENUM))
+          .StoreAccumulatorInRegister(attr);
+      attr_assigned = true;
+    }
+
+    switch (property->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+      case ObjectLiteral::Property::PROTOTYPE:
+        // Invalid properties for ES6 classes.
+        UNREACHABLE();
+        break;
+      case ObjectLiteral::Property::COMPUTED: {
+        builder()
+            ->LoadLiteral(Smi::FromInt(property->NeedsSetFunctionName()))
+            .StoreAccumulatorInRegister(set_function_name);
+        builder()->CallRuntime(Runtime::kDefineDataPropertyInLiteral, receiver,
+                               5);
+        break;
+      }
+      case ObjectLiteral::Property::GETTER: {
+        builder()->CallRuntime(Runtime::kDefineGetterPropertyUnchecked,
+                               receiver, 4);
+        break;
+      }
+      case ObjectLiteral::Property::SETTER: {
+        builder()->CallRuntime(Runtime::kDefineSetterPropertyUnchecked,
+                               receiver, 4);
+        break;
+      }
+    }
+  }
+}
+
+void BytecodeGenerator::VisitClassLiteralStaticPrototypeWithComputedName(
+    Register key) {
+  BytecodeLabel done;
+  builder()
+      ->LoadLiteral(isolate()->factory()->prototype_string())
+      .CompareOperation(Token::Value::EQ_STRICT, key)
+      .JumpIfFalse(&done)
+      .CallRuntime(Runtime::kThrowStaticPrototypeError, Register(0), 0)
+      .Bind(&done);
+}
 
 void BytecodeGenerator::VisitNativeFunctionLiteral(
     NativeFunctionLiteral* expr) {
-  UNIMPLEMENTED();
+  // Find or build a shared function info for the native function template.
+  Handle<SharedFunctionInfo> shared_info =
+      Compiler::GetSharedFunctionInfoForNative(expr->extension(), expr->name());
+  builder()->CreateClosure(shared_info, NOT_TENURED);
+  execution_result()->SetResultInAccumulator();
 }
 
 
 void BytecodeGenerator::VisitDoExpression(DoExpression* expr) {
-  UNIMPLEMENTED();
+  VisitBlock(expr->block());
+  VisitVariableProxy(expr->result());
 }
 
 
@@ -964,10 +1495,13 @@
   builder()->CreateObjectLiteral(expr->constant_properties(),
                                  expr->literal_index(),
                                  expr->ComputeFlags(true));
-  Register literal;
+
+  // Allocate in the outer scope since this register is used to return the
+  // expression's results to the caller.
+  Register literal = register_allocator()->outer()->NewRegister();
+  builder()->StoreAccumulatorInRegister(literal);
 
   // Store computed values into the literal.
-  bool literal_in_accumulator = true;
   int property_index = 0;
   AccessorTable accessor_table(zone());
   for (; property_index < expr->properties()->length(); property_index++) {
@@ -975,12 +1509,6 @@
     if (property->is_computed_name()) break;
     if (property->IsCompileTimeValue()) continue;
 
-    if (literal_in_accumulator) {
-      literal = register_allocator()->NewRegister();
-      builder()->StoreAccumulatorInRegister(literal);
-      literal_in_accumulator = false;
-    }
-
     RegisterAllocationScope inner_register_scope(this);
     Literal* literal_key = property->key()->AsLiteral();
     switch (property->kind()) {
@@ -995,21 +1523,31 @@
         if (literal_key->value()->IsInternalizedString()) {
           if (property->emit_store()) {
             VisitForAccumulatorValue(property->value());
-            builder()->StoreNamedProperty(
-                literal, literal_key->AsPropertyName(),
-                feedback_index(property->GetSlot(0)), language_mode());
+            if (FunctionLiteral::NeedsHomeObject(property->value())) {
+              RegisterAllocationScope register_scope(this);
+              Register value = register_allocator()->NewRegister();
+              builder()->StoreAccumulatorInRegister(value);
+              builder()->StoreNamedProperty(
+                  literal, literal_key->AsPropertyName(),
+                  feedback_index(property->GetSlot(0)), language_mode());
+              VisitSetHomeObject(value, literal, property, 1);
+            } else {
+              builder()->StoreNamedProperty(
+                  literal, literal_key->AsPropertyName(),
+                  feedback_index(property->GetSlot(0)), language_mode());
+            }
           } else {
             VisitForEffect(property->value());
           }
         } else {
-          register_allocator()->PrepareForConsecutiveAllocations(3);
+          register_allocator()->PrepareForConsecutiveAllocations(4);
+          Register literal_argument =
+              register_allocator()->NextConsecutiveRegister();
           Register key = register_allocator()->NextConsecutiveRegister();
           Register value = register_allocator()->NextConsecutiveRegister();
           Register language = register_allocator()->NextConsecutiveRegister();
-          // TODO(oth): This is problematic - can't assume contiguous here.
-          // literal is allocated in outer register scope, whereas key, value,
-          // language are in another.
-          DCHECK(Register::AreContiguous(literal, key, value, language));
+
+          builder()->MoveRegister(literal, literal_argument);
           VisitForAccumulatorValue(property->key());
           builder()->StoreAccumulatorInRegister(key);
           VisitForAccumulatorValue(property->value());
@@ -1018,20 +1556,23 @@
             builder()
                 ->LoadLiteral(Smi::FromInt(SLOPPY))
                 .StoreAccumulatorInRegister(language)
-                .CallRuntime(Runtime::kSetProperty, literal, 4);
+                .CallRuntime(Runtime::kSetProperty, literal_argument, 4);
             VisitSetHomeObject(value, literal, property);
           }
         }
         break;
       }
       case ObjectLiteral::Property::PROTOTYPE: {
-        register_allocator()->PrepareForConsecutiveAllocations(1);
         DCHECK(property->emit_store());
+        register_allocator()->PrepareForConsecutiveAllocations(2);
+        Register literal_argument =
+            register_allocator()->NextConsecutiveRegister();
         Register value = register_allocator()->NextConsecutiveRegister();
-        DCHECK(Register::AreContiguous(literal, value));
+
+        builder()->MoveRegister(literal, literal_argument);
         VisitForAccumulatorValue(property->value());
         builder()->StoreAccumulatorInRegister(value).CallRuntime(
-            Runtime::kInternalSetPrototype, literal, 2);
+            Runtime::kInternalSetPrototype, literal_argument, 2);
         break;
       }
       case ObjectLiteral::Property::GETTER:
@@ -1052,12 +1593,14 @@
   for (AccessorTable::Iterator it = accessor_table.begin();
        it != accessor_table.end(); ++it) {
     RegisterAllocationScope inner_register_scope(this);
-    register_allocator()->PrepareForConsecutiveAllocations(4);
+    register_allocator()->PrepareForConsecutiveAllocations(5);
+    Register literal_argument = register_allocator()->NextConsecutiveRegister();
     Register name = register_allocator()->NextConsecutiveRegister();
     Register getter = register_allocator()->NextConsecutiveRegister();
     Register setter = register_allocator()->NextConsecutiveRegister();
     Register attr = register_allocator()->NextConsecutiveRegister();
-    DCHECK(Register::AreContiguous(literal, name, getter, setter, attr));
+
+    builder()->MoveRegister(literal, literal_argument);
     VisitForAccumulatorValue(it->first);
     builder()->StoreAccumulatorInRegister(name);
     VisitObjectLiteralAccessor(literal, it->second->getter, getter);
@@ -1065,7 +1608,8 @@
     builder()
         ->LoadLiteral(Smi::FromInt(NONE))
         .StoreAccumulatorInRegister(attr)
-        .CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, literal, 5);
+        .CallRuntime(Runtime::kDefineAccessorPropertyUnchecked,
+                     literal_argument, 5);
   }
 
   // Object literals have two parts. The "static" part on the left contains no
@@ -1078,67 +1622,69 @@
   // compile them into a series of "SetOwnProperty" runtime calls. This will
   // preserve insertion order.
   for (; property_index < expr->properties()->length(); property_index++) {
-    if (literal_in_accumulator) {
-      literal = register_allocator()->NewRegister();
-      builder()->StoreAccumulatorInRegister(literal);
-      literal_in_accumulator = false;
-    }
-
     ObjectLiteral::Property* property = expr->properties()->at(property_index);
     RegisterAllocationScope inner_register_scope(this);
+
     if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
       DCHECK(property->emit_store());
-      Register value = register_allocator()->NewRegister();
-      DCHECK(Register::AreContiguous(literal, value));
+      register_allocator()->PrepareForConsecutiveAllocations(2);
+      Register literal_argument =
+          register_allocator()->NextConsecutiveRegister();
+      Register value = register_allocator()->NextConsecutiveRegister();
+
+      builder()->MoveRegister(literal, literal_argument);
       VisitForAccumulatorValue(property->value());
       builder()->StoreAccumulatorInRegister(value).CallRuntime(
-          Runtime::kInternalSetPrototype, literal, 2);
+          Runtime::kInternalSetPrototype, literal_argument, 2);
       continue;
     }
 
-    register_allocator()->PrepareForConsecutiveAllocations(3);
+    register_allocator()->PrepareForConsecutiveAllocations(5);
+    Register literal_argument = register_allocator()->NextConsecutiveRegister();
     Register key = register_allocator()->NextConsecutiveRegister();
     Register value = register_allocator()->NextConsecutiveRegister();
     Register attr = register_allocator()->NextConsecutiveRegister();
-    DCHECK(Register::AreContiguous(literal, key, value, attr));
+    DCHECK(Register::AreContiguous(literal_argument, key, value, attr));
+    Register set_function_name =
+        register_allocator()->NextConsecutiveRegister();
 
+    builder()->MoveRegister(literal, literal_argument);
     VisitForAccumulatorValue(property->key());
     builder()->CastAccumulatorToName().StoreAccumulatorInRegister(key);
     VisitForAccumulatorValue(property->value());
     builder()->StoreAccumulatorInRegister(value);
     VisitSetHomeObject(value, literal, property);
     builder()->LoadLiteral(Smi::FromInt(NONE)).StoreAccumulatorInRegister(attr);
-    Runtime::FunctionId function_id = static_cast<Runtime::FunctionId>(-1);
     switch (property->kind()) {
       case ObjectLiteral::Property::CONSTANT:
       case ObjectLiteral::Property::COMPUTED:
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        function_id = Runtime::kDefineDataPropertyUnchecked;
+        builder()
+            ->LoadLiteral(Smi::FromInt(property->NeedsSetFunctionName()))
+            .StoreAccumulatorInRegister(set_function_name);
+        builder()->CallRuntime(Runtime::kDefineDataPropertyInLiteral,
+                               literal_argument, 5);
         break;
       case ObjectLiteral::Property::PROTOTYPE:
         UNREACHABLE();  // Handled specially above.
         break;
       case ObjectLiteral::Property::GETTER:
-        function_id = Runtime::kDefineGetterPropertyUnchecked;
+        builder()->CallRuntime(Runtime::kDefineGetterPropertyUnchecked,
+                               literal_argument, 4);
         break;
       case ObjectLiteral::Property::SETTER:
-        function_id = Runtime::kDefineSetterPropertyUnchecked;
+        builder()->CallRuntime(Runtime::kDefineSetterPropertyUnchecked,
+                               literal_argument, 4);
         break;
     }
-    builder()->CallRuntime(function_id, literal, 4);
   }
 
   // Transform literals that contain functions to fast properties.
   if (expr->has_function()) {
-    DCHECK(!literal_in_accumulator);
     builder()->CallRuntime(Runtime::kToFastProperties, literal, 1);
   }
 
-  if (!literal_in_accumulator) {
-    // Restore literal array into accumulator.
-    builder()->LoadAccumulatorWithRegister(literal);
-  }
-  execution_result()->SetResultInAccumulator();
+  execution_result()->SetResultInRegister(literal);
 }
 
 
@@ -1156,10 +1702,7 @@
        array_index++) {
     Expression* subexpr = expr->values()->at(array_index);
     if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
-    if (subexpr->IsSpread()) {
-      // TODO(rmcilroy): Deal with spread expressions.
-      UNIMPLEMENTED();
-    }
+    DCHECK(!subexpr->IsSpread());
 
     if (literal_in_accumulator) {
       index = register_allocator()->NewRegister();
@@ -1189,14 +1732,25 @@
   VisitVariableLoad(proxy->var(), proxy->VariableFeedbackSlot());
 }
 
+void BytecodeGenerator::BuildHoleCheckForVariableLoad(VariableMode mode,
+                                                      Handle<String> name) {
+  if (mode == CONST_LEGACY) {
+    BytecodeLabel end_label;
+    builder()->JumpIfNotHole(&end_label).LoadUndefined().Bind(&end_label);
+  } else if (mode == LET || mode == CONST) {
+    BuildThrowIfHole(name);
+  }
+}
 
 void BytecodeGenerator::VisitVariableLoad(Variable* variable,
                                           FeedbackVectorSlot slot,
                                           TypeofMode typeof_mode) {
+  VariableMode mode = variable->mode();
   switch (variable->location()) {
     case VariableLocation::LOCAL: {
       Register source(Register(variable->index()));
       builder()->LoadAccumulatorWithRegister(source);
+      BuildHoleCheckForVariableLoad(mode, variable->name());
       execution_result()->SetResultInAccumulator();
       break;
     }
@@ -1205,13 +1759,14 @@
       // index -1 but is parameter index 0 in BytecodeArrayBuilder).
       Register source = builder()->Parameter(variable->index() + 1);
       builder()->LoadAccumulatorWithRegister(source);
+      BuildHoleCheckForVariableLoad(mode, variable->name());
       execution_result()->SetResultInAccumulator();
       break;
     }
     case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       builder()->LoadGlobal(variable->name(), feedback_index(slot),
-                            language_mode(), typeof_mode);
+                            typeof_mode);
       execution_result()->SetResultInAccumulator();
       break;
     }
@@ -1237,10 +1792,10 @@
               .StoreAccumulatorInRegister(context_reg);
         }
       }
+
       builder()->LoadContextSlot(context_reg, variable->index());
+      BuildHoleCheckForVariableLoad(mode, variable->name());
       execution_result()->SetResultInAccumulator();
-      // TODO(rmcilroy): Perform check for uninitialized legacy const, const and
-      // let variables.
       break;
     }
     case VariableLocation::LOOKUP: {
@@ -1251,14 +1806,12 @@
   }
 }
 
-
 void BytecodeGenerator::VisitVariableLoadForAccumulatorValue(
     Variable* variable, FeedbackVectorSlot slot, TypeofMode typeof_mode) {
   AccumulatorResultScope accumulator_result(this);
   VisitVariableLoad(variable, slot, typeof_mode);
 }
 
-
 Register BytecodeGenerator::VisitVariableLoadForRegisterValue(
     Variable* variable, FeedbackVectorSlot slot, TypeofMode typeof_mode) {
   RegisterResultScope register_scope(this);
@@ -1266,20 +1819,150 @@
   return register_scope.ResultRegister();
 }
 
+void BytecodeGenerator::BuildNamedSuperPropertyLoad(Register receiver,
+                                                    Register home_object,
+                                                    Register name) {
+  DCHECK(Register::AreContiguous(receiver, home_object, name));
+  builder()->CallRuntime(Runtime::kLoadFromSuper, receiver, 3);
+}
+
+void BytecodeGenerator::BuildKeyedSuperPropertyLoad(Register receiver,
+                                                    Register home_object,
+                                                    Register key) {
+  DCHECK(Register::AreContiguous(receiver, home_object, key));
+  builder()->CallRuntime(Runtime::kLoadKeyedFromSuper, receiver, 3);
+}
+
+void BytecodeGenerator::BuildNamedSuperPropertyStore(Register receiver,
+                                                     Register home_object,
+                                                     Register name,
+                                                     Register value) {
+  DCHECK(Register::AreContiguous(receiver, home_object, name, value));
+  Runtime::FunctionId function_id = is_strict(language_mode())
+                                        ? Runtime::kStoreToSuper_Strict
+                                        : Runtime::kStoreToSuper_Sloppy;
+  builder()->CallRuntime(function_id, receiver, 4);
+}
+
+void BytecodeGenerator::BuildKeyedSuperPropertyStore(Register receiver,
+                                                     Register home_object,
+                                                     Register key,
+                                                     Register value) {
+  DCHECK(Register::AreContiguous(receiver, home_object, key, value));
+  Runtime::FunctionId function_id = is_strict(language_mode())
+                                        ? Runtime::kStoreKeyedToSuper_Strict
+                                        : Runtime::kStoreKeyedToSuper_Sloppy;
+  builder()->CallRuntime(function_id, receiver, 4);
+}
+
+void BytecodeGenerator::BuildThrowReferenceError(Handle<String> name) {
+  RegisterAllocationScope register_scope(this);
+  Register name_reg = register_allocator()->NewRegister();
+  builder()->LoadLiteral(name).StoreAccumulatorInRegister(name_reg).CallRuntime(
+      Runtime::kThrowReferenceError, name_reg, 1);
+}
+
+void BytecodeGenerator::BuildThrowIfHole(Handle<String> name) {
+  // TODO(interpreter): Can the parser reduce the number of checks
+  // performed? Or should there be a ThrowIfHole bytecode.
+  BytecodeLabel no_reference_error;
+  builder()->JumpIfNotHole(&no_reference_error);
+  BuildThrowReferenceError(name);
+  builder()->Bind(&no_reference_error);
+}
+
+void BytecodeGenerator::BuildThrowIfNotHole(Handle<String> name) {
+  // TODO(interpreter): Can the parser reduce the number of checks
+  // performed? Or should there be a ThrowIfNotHole bytecode.
+  BytecodeLabel no_reference_error, reference_error;
+  builder()
+      ->JumpIfNotHole(&reference_error)
+      .Jump(&no_reference_error)
+      .Bind(&reference_error);
+  BuildThrowReferenceError(name);
+  builder()->Bind(&no_reference_error);
+}
+
+void BytecodeGenerator::BuildThrowReassignConstant(Handle<String> name) {
+  // TODO(mythria): This will be replaced by a new bytecode that throws an
+  // appropriate error depending on the whether the value is a hole or not.
+  BytecodeLabel const_assign_error;
+  builder()->JumpIfNotHole(&const_assign_error);
+  BuildThrowReferenceError(name);
+  builder()
+      ->Bind(&const_assign_error)
+      .CallRuntime(Runtime::kThrowConstAssignError, Register(), 0);
+}
+
+void BytecodeGenerator::BuildHoleCheckForVariableAssignment(Variable* variable,
+                                                            Token::Value op) {
+  VariableMode mode = variable->mode();
+  DCHECK(mode != CONST_LEGACY);
+  if (mode == CONST && op != Token::INIT) {
+    // Non-initializing assignments to constants are not allowed.
+    BuildThrowReassignConstant(variable->name());
+  } else if (mode == LET && op != Token::INIT) {
+    // Perform an initialization check for let declared variables.
+    // E.g. let x = (x = 20); is not allowed.
+    BuildThrowIfHole(variable->name());
+  } else {
+    DCHECK(variable->is_this() && mode == CONST && op == Token::INIT);
+    // Perform an initialization check for 'this'. 'this' variable is the
+    // only variable able to trigger bind operations outside the TDZ
+    // via 'super' calls.
+    BuildThrowIfNotHole(variable->name());
+  }
+}
 
 void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
+                                                Token::Value op,
                                                 FeedbackVectorSlot slot) {
+  VariableMode mode = variable->mode();
+  RegisterAllocationScope assignment_register_scope(this);
+  BytecodeLabel end_label;
+  bool hole_check_required =
+      (mode == CONST_LEGACY) || (mode == LET && op != Token::INIT) ||
+      (mode == CONST && op != Token::INIT) ||
+      (mode == CONST && op == Token::INIT && variable->is_this());
   switch (variable->location()) {
+    case VariableLocation::PARAMETER:
     case VariableLocation::LOCAL: {
-      // TODO(rmcilroy): support const mode initialization.
-      Register destination(variable->index());
-      builder()->StoreAccumulatorInRegister(destination);
-      break;
-    }
-    case VariableLocation::PARAMETER: {
-      // The parameter indices are shifted by 1 (receiver is variable
-      // index -1 but is parameter index 0 in BytecodeArrayBuilder).
-      Register destination(builder()->Parameter(variable->index() + 1));
+      Register destination;
+      if (VariableLocation::PARAMETER == variable->location()) {
+        destination = Register(builder()->Parameter(variable->index() + 1));
+      } else {
+        destination = Register(variable->index());
+      }
+
+      if (hole_check_required) {
+        // Load destination to check for hole.
+        Register value_temp = register_allocator()->NewRegister();
+        builder()
+            ->StoreAccumulatorInRegister(value_temp)
+            .LoadAccumulatorWithRegister(destination);
+
+        if (mode == CONST_LEGACY && op == Token::INIT) {
+          // Perform an initialization check for legacy constants.
+          builder()
+              ->JumpIfNotHole(&end_label)
+              .MoveRegister(value_temp, destination)
+              .Bind(&end_label)
+              .LoadAccumulatorWithRegister(value_temp);
+          // Break here because the value should not be stored unconditionally.
+          break;
+        } else if (mode == CONST_LEGACY && op != Token::INIT) {
+          DCHECK(!is_strict(language_mode()));
+          // Ensure accumulator is in the correct state.
+          builder()->LoadAccumulatorWithRegister(value_temp);
+          // Break here, non-initializing assignments to legacy constants are
+          // ignored.
+          break;
+        } else {
+          BuildHoleCheckForVariableAssignment(variable, op);
+          builder()->LoadAccumulatorWithRegister(value_temp);
+        }
+      }
+
       builder()->StoreAccumulatorInRegister(destination);
       break;
     }
@@ -1290,10 +1973,10 @@
       break;
     }
     case VariableLocation::CONTEXT: {
-      // TODO(rmcilroy): support const mode initialization.
       int depth = execution_context()->ContextChainDepth(variable->scope());
       ContextScope* context = execution_context()->Previous(depth);
       Register context_reg;
+
       if (context) {
         context_reg = context->reg();
       } else {
@@ -1315,11 +1998,63 @@
         }
         builder()->LoadAccumulatorWithRegister(value_temp);
       }
+
+      if (hole_check_required) {
+        // Load destination to check for hole.
+        Register value_temp = register_allocator()->NewRegister();
+        builder()
+            ->StoreAccumulatorInRegister(value_temp)
+            .LoadContextSlot(context_reg, variable->index());
+
+        if (mode == CONST_LEGACY && op == Token::INIT) {
+          // Perform an initialization check for legacy constants.
+          builder()
+              ->JumpIfNotHole(&end_label)
+              .LoadAccumulatorWithRegister(value_temp)
+              .StoreContextSlot(context_reg, variable->index())
+              .Bind(&end_label);
+          builder()->LoadAccumulatorWithRegister(value_temp);
+          // Break here because the value should not be stored unconditionally.
+          // The above code performs the store conditionally.
+          break;
+        } else if (mode == CONST_LEGACY && op != Token::INIT) {
+          DCHECK(!is_strict(language_mode()));
+          // Ensure accumulator is in the correct state.
+          builder()->LoadAccumulatorWithRegister(value_temp);
+          // Break here, non-initializing assignments to legacy constants are
+          // ignored.
+          break;
+        } else {
+          BuildHoleCheckForVariableAssignment(variable, op);
+          builder()->LoadAccumulatorWithRegister(value_temp);
+        }
+      }
+
       builder()->StoreContextSlot(context_reg, variable->index());
       break;
     }
     case VariableLocation::LOOKUP: {
-      builder()->StoreLookupSlot(variable->name(), language_mode());
+      if (mode == CONST_LEGACY && op == Token::INIT) {
+        register_allocator()->PrepareForConsecutiveAllocations(3);
+        Register value = register_allocator()->NextConsecutiveRegister();
+        Register context = register_allocator()->NextConsecutiveRegister();
+        Register name = register_allocator()->NextConsecutiveRegister();
+
+        // InitializeLegacyConstLookupSlot runtime call returns the 'value'
+        // passed to it. So, accumulator will have its original contents when
+        // runtime call returns.
+        builder()
+            ->StoreAccumulatorInRegister(value)
+            .MoveRegister(execution_context()->reg(), context)
+            .LoadLiteral(variable->name())
+            .StoreAccumulatorInRegister(name)
+            .CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, value, 3);
+      } else if (mode == CONST_LEGACY && op != Token::INIT) {
+        // Non-initializing assignments to legacy constants are ignored.
+        DCHECK(!is_strict(language_mode()));
+      } else {
+        builder()->StoreLookupSlot(variable->name(), language_mode());
+      }
       break;
     }
   }
@@ -1327,8 +2062,8 @@
 
 
 void BytecodeGenerator::VisitAssignment(Assignment* expr) {
-  DCHECK(expr->target()->IsValidReferenceExpression());
-  Register object, key;
+  DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
+  Register object, key, home_object, value;
   Handle<String> name;
 
   // Left-hand side can only be a property, a global or a variable slot.
@@ -1358,9 +2093,35 @@
       }
       break;
     }
-    case NAMED_SUPER_PROPERTY:
-    case KEYED_SUPER_PROPERTY:
-      UNIMPLEMENTED();
+    case NAMED_SUPER_PROPERTY: {
+      register_allocator()->PrepareForConsecutiveAllocations(4);
+      object = register_allocator()->NextConsecutiveRegister();
+      home_object = register_allocator()->NextConsecutiveRegister();
+      key = register_allocator()->NextConsecutiveRegister();
+      value = register_allocator()->NextConsecutiveRegister();
+      SuperPropertyReference* super_property =
+          property->obj()->AsSuperPropertyReference();
+      VisitForRegisterValue(super_property->this_var(), object);
+      VisitForRegisterValue(super_property->home_object(), home_object);
+      builder()
+          ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
+          .StoreAccumulatorInRegister(key);
+      break;
+    }
+    case KEYED_SUPER_PROPERTY: {
+      register_allocator()->PrepareForConsecutiveAllocations(4);
+      object = register_allocator()->NextConsecutiveRegister();
+      home_object = register_allocator()->NextConsecutiveRegister();
+      key = register_allocator()->NextConsecutiveRegister();
+      value = register_allocator()->NextConsecutiveRegister();
+      builder()->StoreAccumulatorInRegister(value);
+      SuperPropertyReference* super_property =
+          property->obj()->AsSuperPropertyReference();
+      VisitForRegisterValue(super_property->this_var(), object);
+      VisitForRegisterValue(super_property->home_object(), home_object);
+      VisitForRegisterValue(property->key(), key);
+      break;
+    }
   }
 
   // Evaluate the value and potentially handle compound assignments by loading
@@ -1378,8 +2139,7 @@
         FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
         old_value = register_allocator()->NewRegister();
         builder()
-            ->LoadNamedProperty(object, name, feedback_index(slot),
-                                language_mode())
+            ->LoadNamedProperty(object, name, feedback_index(slot))
             .StoreAccumulatorInRegister(old_value);
         break;
       }
@@ -1389,18 +2149,25 @@
         FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
         old_value = register_allocator()->NewRegister();
         builder()
-            ->LoadKeyedProperty(object, feedback_index(slot), language_mode())
+            ->LoadKeyedProperty(object, feedback_index(slot))
             .StoreAccumulatorInRegister(old_value);
         break;
       }
-      case NAMED_SUPER_PROPERTY:
-      case KEYED_SUPER_PROPERTY:
-        UNIMPLEMENTED();
+      case NAMED_SUPER_PROPERTY: {
+        old_value = register_allocator()->NewRegister();
+        BuildNamedSuperPropertyLoad(object, home_object, key);
+        builder()->StoreAccumulatorInRegister(old_value);
         break;
+      }
+      case KEYED_SUPER_PROPERTY: {
+        old_value = register_allocator()->NewRegister();
+        BuildKeyedSuperPropertyLoad(object, home_object, key);
+        builder()->StoreAccumulatorInRegister(old_value);
+        break;
+      }
     }
     VisitForAccumulatorValue(expr->value());
-    builder()->BinaryOperation(expr->binary_op(), old_value,
-                               language_mode_strength());
+    builder()->BinaryOperation(expr->binary_op(), old_value);
   } else {
     VisitForAccumulatorValue(expr->value());
   }
@@ -1412,7 +2179,7 @@
       // TODO(oth): The VisitVariableAssignment() call is hard to reason about.
       // Is the value in the accumulator safe? Yes, but scary.
       Variable* variable = expr->target()->AsVariableProxy()->var();
-      VisitVariableAssignment(variable, slot);
+      VisitVariableAssignment(variable, expr->op(), slot);
       break;
     }
     case NAMED_PROPERTY:
@@ -1423,9 +2190,16 @@
       builder()->StoreKeyedProperty(object, key, feedback_index(slot),
                                     language_mode());
       break;
-    case NAMED_SUPER_PROPERTY:
-    case KEYED_SUPER_PROPERTY:
-      UNIMPLEMENTED();
+    case NAMED_SUPER_PROPERTY: {
+      builder()->StoreAccumulatorInRegister(value);
+      BuildNamedSuperPropertyStore(object, home_object, key, value);
+      break;
+    }
+    case KEYED_SUPER_PROPERTY: {
+      builder()->StoreAccumulatorInRegister(value);
+      BuildKeyedSuperPropertyStore(object, home_object, key, value);
+      break;
+    }
   }
   execution_result()->SetResultInAccumulator();
 }
@@ -1437,6 +2211,11 @@
 void BytecodeGenerator::VisitThrow(Throw* expr) {
   VisitForAccumulatorValue(expr->exception());
   builder()->Throw();
+  // Throw statements are modeled as expressions instead of statements. These
+  // are converted from assignment statements in the Rewriter::ReWrite pass. An
+  // assignment statement expects a value in the accumulator. This is a hack to
+  // avoid DCHECK failures asserting that the accumulator has been set.
+  execution_result()->SetResultInAccumulator();
 }
 
 
@@ -1449,34 +2228,84 @@
     case NAMED_PROPERTY: {
       builder()->LoadNamedProperty(obj,
                                    expr->key()->AsLiteral()->AsPropertyName(),
-                                   feedback_index(slot), language_mode());
+                                   feedback_index(slot));
       break;
     }
     case KEYED_PROPERTY: {
       VisitForAccumulatorValue(expr->key());
-      builder()->LoadKeyedProperty(obj, feedback_index(slot), language_mode());
+      builder()->LoadKeyedProperty(obj, feedback_index(slot));
       break;
     }
     case NAMED_SUPER_PROPERTY:
+      VisitNamedSuperPropertyLoad(expr, Register::invalid_value());
+      break;
     case KEYED_SUPER_PROPERTY:
-      UNIMPLEMENTED();
+      VisitKeyedSuperPropertyLoad(expr, Register::invalid_value());
+      break;
   }
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitPropertyLoadForAccumulator(Register obj,
                                                         Property* expr) {
   AccumulatorResultScope result_scope(this);
   VisitPropertyLoad(obj, expr);
 }
 
+void BytecodeGenerator::VisitNamedSuperPropertyLoad(Property* property,
+                                                    Register opt_receiver_out) {
+  RegisterAllocationScope register_scope(this);
+  register_allocator()->PrepareForConsecutiveAllocations(3);
 
-void BytecodeGenerator::VisitProperty(Property* expr) {
-  Register obj = VisitForRegisterValue(expr->obj());
-  VisitPropertyLoad(obj, expr);
+  Register receiver, home_object, name;
+  receiver = register_allocator()->NextConsecutiveRegister();
+  home_object = register_allocator()->NextConsecutiveRegister();
+  name = register_allocator()->NextConsecutiveRegister();
+  SuperPropertyReference* super_property =
+      property->obj()->AsSuperPropertyReference();
+  VisitForRegisterValue(super_property->this_var(), receiver);
+  VisitForRegisterValue(super_property->home_object(), home_object);
+  builder()
+      ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
+      .StoreAccumulatorInRegister(name);
+  BuildNamedSuperPropertyLoad(receiver, home_object, name);
+
+  if (opt_receiver_out.is_valid()) {
+    builder()->MoveRegister(receiver, opt_receiver_out);
+  }
 }
 
+void BytecodeGenerator::VisitKeyedSuperPropertyLoad(Property* property,
+                                                    Register opt_receiver_out) {
+  RegisterAllocationScope register_scope(this);
+  register_allocator()->PrepareForConsecutiveAllocations(3);
+
+  Register receiver, home_object, key;
+  receiver = register_allocator()->NextConsecutiveRegister();
+  home_object = register_allocator()->NextConsecutiveRegister();
+  key = register_allocator()->NextConsecutiveRegister();
+  SuperPropertyReference* super_property =
+      property->obj()->AsSuperPropertyReference();
+  VisitForRegisterValue(super_property->this_var(), receiver);
+  VisitForRegisterValue(super_property->home_object(), home_object);
+  VisitForRegisterValue(property->key(), key);
+  BuildKeyedSuperPropertyLoad(receiver, home_object, key);
+
+  if (opt_receiver_out.is_valid()) {
+    builder()->MoveRegister(receiver, opt_receiver_out);
+  }
+}
+
+void BytecodeGenerator::VisitProperty(Property* expr) {
+  LhsKind property_kind = Property::GetAssignType(expr);
+  if (property_kind != NAMED_SUPER_PROPERTY &&
+      property_kind != KEYED_SUPER_PROPERTY) {
+    Register obj = VisitForRegisterValue(expr->obj());
+    VisitPropertyLoad(obj, expr);
+  } else {
+    VisitPropertyLoad(Register::invalid_value(), expr);
+  }
+}
 
 Register BytecodeGenerator::VisitArguments(ZoneList<Expression*>* args) {
   if (args->length() == 0) {
@@ -1510,18 +2339,21 @@
   return first_arg;
 }
 
-
 void BytecodeGenerator::VisitCall(Call* expr) {
   Expression* callee_expr = expr->expression();
   Call::CallType call_type = expr->GetCallType(isolate());
 
+  if (call_type == Call::SUPER_CALL) {
+    return VisitCallSuper(expr);
+  }
+
   // Prepare the callee and the receiver to the function call. This depends on
   // the semantics of the underlying call type.
 
   // The receiver and arguments need to be allocated consecutively for
   // Call(). We allocate the callee and receiver consecutively for calls to
-  // kLoadLookupSlot. Future optimizations could avoid this there are no
-  // arguments or the receiver and arguments are already consecutive.
+  // %LoadLookupSlotForCall. Future optimizations could avoid this if there
+  // are no arguments or the receiver and arguments are already consecutive.
   ZoneList<Expression*>* args = expr->arguments();
   register_allocator()->PrepareForConsecutiveAllocations(args->length() + 2);
   Register callee = register_allocator()->NextConsecutiveRegister();
@@ -1551,18 +2383,16 @@
     case Call::POSSIBLY_EVAL_CALL: {
       if (callee_expr->AsVariableProxy()->var()->IsLookupSlot()) {
         RegisterAllocationScope inner_register_scope(this);
-        register_allocator()->PrepareForConsecutiveAllocations(2);
-        Register context = register_allocator()->NextConsecutiveRegister();
-        Register name = register_allocator()->NextConsecutiveRegister();
+        Register name = register_allocator()->NewRegister();
 
-        // Call LoadLookupSlot to get the callee and receiver.
+        // Call %LoadLookupSlotForCall to get the callee and receiver.
         DCHECK(Register::AreContiguous(callee, receiver));
         Variable* variable = callee_expr->AsVariableProxy()->var();
         builder()
-            ->MoveRegister(Register::function_context(), context)
-            .LoadLiteral(variable->name())
+            ->LoadLiteral(variable->name())
             .StoreAccumulatorInRegister(name)
-            .CallRuntimeForPair(Runtime::kLoadLookupSlot, context, 2, callee);
+            .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, name, 1,
+                                callee);
         break;
       }
       // Fall through.
@@ -1574,10 +2404,21 @@
       builder()->StoreAccumulatorInRegister(callee);
       break;
     }
-    case Call::NAMED_SUPER_PROPERTY_CALL:
-    case Call::KEYED_SUPER_PROPERTY_CALL:
+    case Call::NAMED_SUPER_PROPERTY_CALL: {
+      Property* property = callee_expr->AsProperty();
+      VisitNamedSuperPropertyLoad(property, receiver);
+      builder()->StoreAccumulatorInRegister(callee);
+      break;
+    }
+    case Call::KEYED_SUPER_PROPERTY_CALL: {
+      Property* property = callee_expr->AsProperty();
+      VisitKeyedSuperPropertyLoad(property, receiver);
+      builder()->StoreAccumulatorInRegister(callee);
+      break;
+    }
     case Call::SUPER_CALL:
-      UNIMPLEMENTED();
+      UNREACHABLE();
+      break;
   }
 
   // Evaluate all arguments to the function call and store in sequential
@@ -1615,12 +2456,39 @@
         .StoreAccumulatorInRegister(callee);
   }
 
-  // TODO(rmcilroy): Use CallIC to allow call type feedback.
-  builder()->Call(callee, receiver, args->length(),
-                  feedback_index(expr->CallFeedbackICSlot()));
+  builder()->SetExpressionPosition(expr);
+  builder()->Call(callee, receiver, 1 + args->length(),
+                  feedback_index(expr->CallFeedbackICSlot()),
+                  expr->tail_call_mode());
   execution_result()->SetResultInAccumulator();
 }
 
+void BytecodeGenerator::VisitCallSuper(Call* expr) {
+  RegisterAllocationScope register_scope(this);
+  SuperCallReference* super = expr->expression()->AsSuperCallReference();
+
+  // Prepare the constructor to the super call.
+  Register this_function = register_allocator()->NewRegister();
+  VisitForAccumulatorValue(super->this_function_var());
+  builder()
+      ->StoreAccumulatorInRegister(this_function)
+      .CallRuntime(Runtime::kInlineGetSuperConstructor, this_function, 1);
+
+  Register constructor = this_function;  // Re-use dead this_function register.
+  builder()->StoreAccumulatorInRegister(constructor);
+
+  ZoneList<Expression*>* args = expr->arguments();
+  Register first_arg = VisitArguments(args);
+
+  // The new target is loaded into the accumulator from the
+  // {new.target} variable.
+  VisitForAccumulatorValue(super->new_target_var());
+
+  // Call construct.
+  builder()->SetExpressionPosition(expr);
+  builder()->New(constructor, first_arg, args->length());
+  execution_result()->SetResultInAccumulator();
+}
 
 void BytecodeGenerator::VisitCallNew(CallNew* expr) {
   Register constructor = register_allocator()->NewRegister();
@@ -1629,27 +2497,31 @@
 
   ZoneList<Expression*>* args = expr->arguments();
   Register first_arg = VisitArguments(args);
-  builder()->New(constructor, first_arg, args->length());
+
+  builder()->SetExpressionPosition(expr);
+  // The accumulator holds new target which is the same as the
+  // constructor for CallNew.
+  builder()
+      ->LoadAccumulatorWithRegister(constructor)
+      .New(constructor, first_arg, args->length());
   execution_result()->SetResultInAccumulator();
 }
 
 
 void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  Register receiver;
   if (expr->is_jsruntime()) {
     // Allocate a register for the receiver and load it with undefined.
-    register_allocator()->PrepareForConsecutiveAllocations(args->length() + 1);
-    receiver = register_allocator()->NextConsecutiveRegister();
+    register_allocator()->PrepareForConsecutiveAllocations(1 + args->length());
+    Register receiver = register_allocator()->NextConsecutiveRegister();
     builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
-  }
-  // Evaluate all arguments to the runtime call.
-  Register first_arg = VisitArguments(args);
-
-  if (expr->is_jsruntime()) {
-    DCHECK(args->length() == 0 || first_arg.index() == receiver.index() + 1);
-    builder()->CallJSRuntime(expr->context_index(), receiver, args->length());
+    Register first_arg = VisitArguments(args);
+    CHECK(args->length() == 0 || first_arg.index() == receiver.index() + 1);
+    builder()->CallJSRuntime(expr->context_index(), receiver,
+                             1 + args->length());
   } else {
+    // Evaluate all arguments to the runtime call.
+    Register first_arg = VisitArguments(args);
     Runtime::FunctionId function_id = expr->function()->function_id;
     builder()->CallRuntime(function_id, first_arg, args->length());
   }
@@ -1755,7 +2627,11 @@
         break;
       }
       case VariableLocation::LOOKUP: {
-        builder()->LoadLiteral(variable->name()).DeleteLookupSlot();
+        Register name_reg = register_allocator()->NewRegister();
+        builder()
+            ->LoadLiteral(variable->name())
+            .StoreAccumulatorInRegister(name_reg)
+            .CallRuntime(Runtime::kDeleteLookupSlot, name_reg, 1);
         break;
       }
       default:
@@ -1781,7 +2657,7 @@
   bool is_postfix = expr->is_postfix();
 
   // Evaluate LHS expression and get old value.
-  Register obj, key, old_value;
+  Register object, home_object, key, old_value, value;
   Handle<String> name;
   switch (assign_type) {
     case VARIABLE: {
@@ -1792,26 +2668,53 @@
     }
     case NAMED_PROPERTY: {
       FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
-      obj = VisitForRegisterValue(property->obj());
+      object = VisitForRegisterValue(property->obj());
       name = property->key()->AsLiteral()->AsPropertyName();
-      builder()->LoadNamedProperty(obj, name, feedback_index(slot),
-                                   language_mode());
+      builder()->LoadNamedProperty(object, name, feedback_index(slot));
       break;
     }
     case KEYED_PROPERTY: {
       FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
-      obj = VisitForRegisterValue(property->obj());
+      object = VisitForRegisterValue(property->obj());
       // Use visit for accumulator here since we need the key in the accumulator
       // for the LoadKeyedProperty.
       key = register_allocator()->NewRegister();
       VisitForAccumulatorValue(property->key());
       builder()->StoreAccumulatorInRegister(key).LoadKeyedProperty(
-          obj, feedback_index(slot), language_mode());
+          object, feedback_index(slot));
       break;
     }
-    case NAMED_SUPER_PROPERTY:
-    case KEYED_SUPER_PROPERTY:
-      UNIMPLEMENTED();
+    case NAMED_SUPER_PROPERTY: {
+      register_allocator()->PrepareForConsecutiveAllocations(4);
+      object = register_allocator()->NextConsecutiveRegister();
+      home_object = register_allocator()->NextConsecutiveRegister();
+      key = register_allocator()->NextConsecutiveRegister();
+      value = register_allocator()->NextConsecutiveRegister();
+      SuperPropertyReference* super_property =
+          property->obj()->AsSuperPropertyReference();
+      VisitForRegisterValue(super_property->this_var(), object);
+      VisitForRegisterValue(super_property->home_object(), home_object);
+      builder()
+          ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
+          .StoreAccumulatorInRegister(key);
+      BuildNamedSuperPropertyLoad(object, home_object, key);
+      break;
+    }
+    case KEYED_SUPER_PROPERTY: {
+      register_allocator()->PrepareForConsecutiveAllocations(4);
+      object = register_allocator()->NextConsecutiveRegister();
+      home_object = register_allocator()->NextConsecutiveRegister();
+      key = register_allocator()->NextConsecutiveRegister();
+      value = register_allocator()->NextConsecutiveRegister();
+      builder()->StoreAccumulatorInRegister(value);
+      SuperPropertyReference* super_property =
+          property->obj()->AsSuperPropertyReference();
+      VisitForRegisterValue(super_property->this_var(), object);
+      VisitForRegisterValue(super_property->home_object(), home_object);
+      VisitForRegisterValue(property->key(), key);
+      BuildKeyedSuperPropertyLoad(object, home_object, key);
+      break;
+    }
   }
 
   // Convert old value into a number.
@@ -1826,29 +2729,36 @@
   }
 
   // Perform +1/-1 operation.
-  builder()->CountOperation(expr->binary_op(), language_mode_strength());
+  builder()->CountOperation(expr->binary_op());
 
   // Store the value.
   FeedbackVectorSlot feedback_slot = expr->CountSlot();
   switch (assign_type) {
     case VARIABLE: {
       Variable* variable = expr->expression()->AsVariableProxy()->var();
-      VisitVariableAssignment(variable, feedback_slot);
+      VisitVariableAssignment(variable, expr->op(), feedback_slot);
       break;
     }
     case NAMED_PROPERTY: {
-      builder()->StoreNamedProperty(obj, name, feedback_index(feedback_slot),
+      builder()->StoreNamedProperty(object, name, feedback_index(feedback_slot),
                                     language_mode());
       break;
     }
     case KEYED_PROPERTY: {
-      builder()->StoreKeyedProperty(obj, key, feedback_index(feedback_slot),
+      builder()->StoreKeyedProperty(object, key, feedback_index(feedback_slot),
                                     language_mode());
       break;
     }
-    case NAMED_SUPER_PROPERTY:
-    case KEYED_SUPER_PROPERTY:
-      UNIMPLEMENTED();
+    case NAMED_SUPER_PROPERTY: {
+      builder()->StoreAccumulatorInRegister(value);
+      BuildNamedSuperPropertyStore(object, home_object, key, value);
+      break;
+    }
+    case KEYED_SUPER_PROPERTY: {
+      builder()->StoreAccumulatorInRegister(value);
+      BuildKeyedSuperPropertyStore(object, home_object, key, value);
+      break;
+    }
   }
 
   // Restore old value for postfix expressions.
@@ -1881,7 +2791,7 @@
 void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Register lhs = VisitForRegisterValue(expr->left());
   VisitForAccumulatorValue(expr->right());
-  builder()->CompareOperation(expr->op(), lhs, language_mode_strength());
+  builder()->CompareOperation(expr->op(), lhs);
   execution_result()->SetResultInAccumulator();
 }
 
@@ -1889,7 +2799,7 @@
 void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
   Register lhs = VisitForRegisterValue(expr->left());
   VisitForAccumulatorValue(expr->right());
-  builder()->BinaryOperation(expr->op(), lhs, language_mode_strength());
+  builder()->BinaryOperation(expr->op(), lhs);
   execution_result()->SetResultInAccumulator();
 }
 
@@ -1908,13 +2818,15 @@
 
 
 void BytecodeGenerator::VisitSuperCallReference(SuperCallReference* expr) {
-  UNIMPLEMENTED();
+  // Handled by VisitCall().
+  UNREACHABLE();
 }
 
 
 void BytecodeGenerator::VisitSuperPropertyReference(
     SuperPropertyReference* expr) {
-  UNIMPLEMENTED();
+  builder()->CallRuntime(Runtime::kThrowUnsupportedSuperError, Register(0), 0);
+  execution_result()->SetResultInAccumulator();
 }
 
 
@@ -1962,8 +2874,7 @@
 }
 
 
-void BytecodeGenerator::VisitRewritableAssignmentExpression(
-    RewritableAssignmentExpression* expr) {
+void BytecodeGenerator::VisitRewritableExpression(RewritableExpression* expr) {
   Visit(expr->expression());
 }
 
@@ -2040,6 +2951,40 @@
   execution_result()->SetResultInAccumulator();
 }
 
+void BytecodeGenerator::VisitNewLocalWithContext() {
+  AccumulatorResultScope accumulator_execution_result(this);
+
+  register_allocator()->PrepareForConsecutiveAllocations(2);
+  Register extension_object = register_allocator()->NextConsecutiveRegister();
+  Register closure = register_allocator()->NextConsecutiveRegister();
+
+  builder()->StoreAccumulatorInRegister(extension_object);
+  VisitFunctionClosureForContext();
+  builder()->StoreAccumulatorInRegister(closure).CallRuntime(
+      Runtime::kPushWithContext, extension_object, 2);
+  execution_result()->SetResultInAccumulator();
+}
+
+void BytecodeGenerator::VisitNewLocalCatchContext(Variable* variable) {
+  AccumulatorResultScope accumulator_execution_result(this);
+  DCHECK(variable->IsContextSlot());
+
+  // Allocate a new local block context.
+  register_allocator()->PrepareForConsecutiveAllocations(3);
+  Register name = register_allocator()->NextConsecutiveRegister();
+  Register exception = register_allocator()->NextConsecutiveRegister();
+  Register closure = register_allocator()->NextConsecutiveRegister();
+
+  builder()
+      ->StoreAccumulatorInRegister(exception)
+      .LoadLiteral(variable->name())
+      .StoreAccumulatorInRegister(name);
+  VisitFunctionClosureForContext();
+  builder()->StoreAccumulatorInRegister(closure).CallRuntime(
+      Runtime::kPushCatchContext, name, 3);
+  execution_result()->SetResultInAccumulator();
+}
+
 
 void BytecodeGenerator::VisitObjectLiteralAccessor(
     Register home_object, ObjectLiteralProperty* property, Register value_out) {
@@ -2053,14 +2998,17 @@
   }
 }
 
-
 void BytecodeGenerator::VisitSetHomeObject(Register value, Register home_object,
                                            ObjectLiteralProperty* property,
                                            int slot_number) {
   Expression* expr = property->value();
-  if (!FunctionLiteral::NeedsHomeObject(expr)) return;
-
-  UNIMPLEMENTED();
+  if (FunctionLiteral::NeedsHomeObject(expr)) {
+    Handle<Name> name = isolate()->factory()->home_object_symbol();
+    FeedbackVectorSlot slot = property->GetSlot(slot_number);
+    builder()
+        ->LoadAccumulatorWithRegister(home_object)
+        .StoreNamedProperty(value, name, feedback_index(slot), language_mode());
+  }
 }
 
 
@@ -2076,19 +3024,26 @@
           ? CreateArgumentsType::kUnmappedArguments
           : CreateArgumentsType::kMappedArguments;
   builder()->CreateArguments(type);
-  VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+  VisitVariableAssignment(variable, Token::ASSIGN,
+                          FeedbackVectorSlot::Invalid());
 }
 
+void BytecodeGenerator::VisitRestArgumentsArray(Variable* rest) {
+  if (rest == nullptr) return;
+
+  // Allocate and initialize a new rest parameter and assign to the {rest}
+  // variable.
+  builder()->CreateArguments(CreateArgumentsType::kRestParameter);
+  DCHECK(rest->IsContextSlot() || rest->IsStackAllocated());
+  VisitVariableAssignment(rest, Token::ASSIGN, FeedbackVectorSlot::Invalid());
+}
 
 void BytecodeGenerator::VisitThisFunctionVariable(Variable* variable) {
   if (variable == nullptr) return;
 
-  // TODO(rmcilroy): Remove once we have tests which exercise this code path.
-  UNIMPLEMENTED();
-
   // Store the closure we were called with in the given variable.
   builder()->LoadAccumulatorWithRegister(Register::function_closure());
-  VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+  VisitVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid());
 }
 
 
@@ -2097,7 +3052,7 @@
 
   // Store the new target we were called with in the given variable.
   builder()->LoadAccumulatorWithRegister(Register::new_target());
-  VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+  VisitVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid());
 }
 
 
@@ -2114,6 +3069,12 @@
                           Context::NATIVE_CONTEXT_INDEX)
         .StoreAccumulatorInRegister(native_context)
         .LoadContextSlot(native_context, Context::CLOSURE_INDEX);
+  } else if (closure_scope->is_eval_scope()) {
+    // Contexts created by a call to eval have the same closure as the
+    // context calling eval, not the anonymous closure containing the eval
+    // code. Fetch it from the context.
+    builder()->LoadContextSlot(execution_context()->reg(),
+                               Context::CLOSURE_INDEX);
   } else {
     DCHECK(closure_scope->is_function_scope());
     builder()->LoadAccumulatorWithRegister(Register::function_closure());
@@ -2128,6 +3089,13 @@
   Visit(expr);
 }
 
+void BytecodeGenerator::VisitForAccumulatorValueOrTheHole(Expression* expr) {
+  if (expr == nullptr) {
+    builder()->LoadTheHole();
+  } else {
+    VisitForAccumulatorValue(expr);
+  }
+}
 
 // Visits the expression |expr| and discards the result.
 void BytecodeGenerator::VisitForEffect(Expression* expr) {
@@ -2144,22 +3112,19 @@
   return register_scope.ResultRegister();
 }
 
+// Visits the expression |expr| and stores the expression result in
+// |destination|.
+void BytecodeGenerator::VisitForRegisterValue(Expression* expr,
+                                              Register destination) {
+  AccumulatorResultScope register_scope(this);
+  Visit(expr);
+  builder()->StoreAccumulatorInRegister(destination);
+}
 
-Register BytecodeGenerator::NextContextRegister() const {
-  if (execution_context() == nullptr) {
-    // Return the incoming function context for the outermost execution context.
-    return Register::function_context();
-  }
-  Register previous = execution_context()->reg();
-  if (previous == Register::function_context()) {
-    // If the previous context was the incoming function context, then the next
-    // context register is the first local context register.
-    return builder_.first_context_register();
-  } else {
-    // Otherwise use the next local context register.
-    DCHECK_LT(previous.index(), builder_.last_context_register().index());
-    return Register(previous.index() + 1);
-  }
+void BytecodeGenerator::VisitInScope(Statement* stmt, Scope* scope) {
+  ContextScope context_scope(this, scope);
+  DCHECK(scope->declarations()->is_empty());
+  Visit(stmt);
 }
 
 
@@ -2168,11 +3133,6 @@
 }
 
 
-Strength BytecodeGenerator::language_mode_strength() const {
-  return strength(language_mode());
-}
-
-
 int BytecodeGenerator::feedback_index(FeedbackVectorSlot slot) const {
   return info()->feedback_vector()->GetIndex(slot);
 }
diff --git a/src/interpreter/bytecode-generator.h b/src/interpreter/bytecode-generator.h
index 8bda7be..4ef1738 100644
--- a/src/interpreter/bytecode-generator.h
+++ b/src/interpreter/bytecode-generator.h
@@ -13,6 +13,8 @@
 namespace internal {
 namespace interpreter {
 
+class LoopBuilder;
+
 class BytecodeGenerator final : public AstVisitor {
  public:
   BytecodeGenerator(Isolate* isolate, Zone* zone);
@@ -32,6 +34,9 @@
   class ControlScope;
   class ControlScopeForBreakable;
   class ControlScopeForIteration;
+  class ControlScopeForTopLevel;
+  class ControlScopeForTryCatch;
+  class ControlScopeForTryFinally;
   class ExpressionResultScope;
   class EffectResultScope;
   class AccumulatorResultScope;
@@ -39,7 +44,6 @@
   class RegisterAllocationScope;
 
   void MakeBytecodeBody();
-  Register NextContextRegister() const;
 
   DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
 
@@ -61,6 +65,20 @@
   // Helper visitors which perform common operations.
   Register VisitArguments(ZoneList<Expression*>* arguments);
 
+  // Visit a keyed super property load. The optional
+  // |opt_receiver_out| register will have the receiver stored to it
+  // if it's a valid register. The loaded value is placed in the
+  // accumulator.
+  void VisitKeyedSuperPropertyLoad(Property* property,
+                                   Register opt_receiver_out);
+
+  // Visit a named super property load. The optional
+  // |opt_receiver_out| register will have the receiver stored to it
+  // if it's a valid register. The loaded value is placed in the
+  // accumulator.
+  void VisitNamedSuperPropertyLoad(Property* property,
+                                   Register opt_receiver_out);
+
   void VisitPropertyLoad(Register obj, Property* expr);
   void VisitPropertyLoadForAccumulator(Register obj, Property* expr);
 
@@ -72,14 +90,41 @@
   MUST_USE_RESULT Register
   VisitVariableLoadForRegisterValue(Variable* variable, FeedbackVectorSlot slot,
                                     TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
-  void VisitVariableAssignment(Variable* variable, FeedbackVectorSlot slot);
+  void VisitVariableAssignment(Variable* variable, Token::Value op,
+                               FeedbackVectorSlot slot);
+
+  void BuildNamedSuperPropertyStore(Register receiver, Register home_object,
+                                    Register name, Register value);
+  void BuildKeyedSuperPropertyStore(Register receiver, Register home_object,
+                                    Register key, Register value);
+  void BuildNamedSuperPropertyLoad(Register receiver, Register home_object,
+                                   Register name);
+  void BuildKeyedSuperPropertyLoad(Register receiver, Register home_object,
+                                   Register key);
+
+  void BuildThrowIfHole(Handle<String> name);
+  void BuildThrowIfNotHole(Handle<String> name);
+  void BuildThrowReassignConstant(Handle<String> name);
+  void BuildThrowReferenceError(Handle<String> name);
+  void BuildHoleCheckForVariableLoad(VariableMode mode, Handle<String> name);
+  void BuildHoleCheckForVariableAssignment(Variable* variable, Token::Value op);
 
   void VisitArgumentsObject(Variable* variable);
+  void VisitRestArgumentsArray(Variable* rest);
+  void VisitCallSuper(Call* call);
+  void VisitClassLiteralContents(ClassLiteral* expr);
+  void VisitClassLiteralForRuntimeDefinition(ClassLiteral* expr);
+  void VisitClassLiteralProperties(ClassLiteral* expr, Register literal,
+                                   Register prototype);
+  void VisitClassLiteralStaticPrototypeWithComputedName(Register name);
   void VisitThisFunctionVariable(Variable* variable);
   void VisitNewTargetVariable(Variable* variable);
   void VisitNewLocalFunctionContext();
   void VisitBuildLocalActivationContext();
+  void VisitBlockDeclarationsAndStatements(Block* stmt);
   void VisitNewLocalBlockContext(Scope* scope);
+  void VisitNewLocalCatchContext(Variable* variable);
+  void VisitNewLocalWithContext();
   void VisitFunctionClosureForContext();
   void VisitSetHomeObject(Register value, Register home_object,
                           ObjectLiteralProperty* property, int slot_number = 0);
@@ -88,17 +133,34 @@
                                   Register value_out);
   void VisitForInAssignment(Expression* expr, FeedbackVectorSlot slot);
 
+  // Visit the body of a loop iteration.
+  void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop_builder);
+
+  // Visit a statement and switch scopes, the context is in the accumulator.
+  void VisitInScope(Statement* stmt, Scope* scope);
+
   // Visitors for obtaining expression result in the accumulator, in a
   // register, or just getting the effect.
-  void VisitForAccumulatorValue(Expression* expression);
-  MUST_USE_RESULT Register VisitForRegisterValue(Expression* expression);
-  void VisitForEffect(Expression* node);
+  void VisitForAccumulatorValue(Expression* expr);
+  void VisitForAccumulatorValueOrTheHole(Expression* expr);
+  MUST_USE_RESULT Register VisitForRegisterValue(Expression* expr);
+  void VisitForRegisterValue(Expression* expr, Register destination);
+  void VisitForEffect(Expression* expr);
 
   // Methods for tracking and remapping register.
   void RecordStoreToRegister(Register reg);
   Register LoadFromAliasedRegister(Register reg);
 
-  inline BytecodeArrayBuilder* builder() { return &builder_; }
+  // Methods for tracking try-block nesting.
+  bool IsInsideTryCatch() const { return try_catch_nesting_level_ > 0; }
+  bool IsInsideTryFinally() const { return try_finally_nesting_level_ > 0; }
+
+  // Initialize an array of temporary registers with consecutive registers.
+  template <size_t N>
+  void InitializeWithConsecutiveRegisters(Register (&registers)[N]);
+
+  inline void set_builder(BytecodeArrayBuilder* builder) { builder_ = builder; }
+  inline BytecodeArrayBuilder* builder() const { return builder_; }
 
   inline Isolate* isolate() const { return isolate_; }
   inline Zone* zone() const { return zone_; }
@@ -130,12 +192,11 @@
 
   ZoneVector<Handle<Object>>* globals() { return &globals_; }
   inline LanguageMode language_mode() const;
-  Strength language_mode_strength() const;
   int feedback_index(FeedbackVectorSlot slot) const;
 
   Isolate* isolate_;
   Zone* zone_;
-  BytecodeArrayBuilder builder_;
+  BytecodeArrayBuilder* builder_;
   CompilationInfo* info_;
   Scope* scope_;
   ZoneVector<Handle<Object>> globals_;
@@ -143,6 +204,8 @@
   ContextScope* execution_context_;
   ExpressionResultScope* execution_result_;
   RegisterAllocationScope* register_allocator_;
+  int try_catch_nesting_level_;
+  int try_finally_nesting_level_;
 };
 
 }  // namespace interpreter
diff --git a/src/interpreter/bytecode-register-allocator.cc b/src/interpreter/bytecode-register-allocator.cc
index 4efb612..0a617c0 100644
--- a/src/interpreter/bytecode-register-allocator.cc
+++ b/src/interpreter/bytecode-register-allocator.cc
@@ -10,17 +10,173 @@
 namespace internal {
 namespace interpreter {
 
+TemporaryRegisterAllocator::TemporaryRegisterAllocator(Zone* zone,
+                                                       int allocation_base)
+    : free_temporaries_(zone),
+      allocation_base_(allocation_base),
+      allocation_count_(0) {}
+
+Register TemporaryRegisterAllocator::first_temporary_register() const {
+  DCHECK(allocation_count() > 0);
+  return Register(allocation_base());
+}
+
+Register TemporaryRegisterAllocator::last_temporary_register() const {
+  DCHECK(allocation_count() > 0);
+  return Register(allocation_base() + allocation_count() - 1);
+}
+
+int TemporaryRegisterAllocator::AllocateTemporaryRegister() {
+  allocation_count_ += 1;
+  return allocation_base() + allocation_count() - 1;
+}
+
+int TemporaryRegisterAllocator::BorrowTemporaryRegister() {
+  if (free_temporaries_.empty()) {
+    return AllocateTemporaryRegister();
+  } else {
+    auto pos = free_temporaries_.begin();
+    int retval = *pos;
+    free_temporaries_.erase(pos);
+    return retval;
+  }
+}
+
+int TemporaryRegisterAllocator::BorrowTemporaryRegisterNotInRange(
+    int start_index, int end_index) {
+  if (free_temporaries_.empty()) {
+    int next_allocation = allocation_base() + allocation_count();
+    while (next_allocation >= start_index && next_allocation <= end_index) {
+      free_temporaries_.insert(AllocateTemporaryRegister());
+      next_allocation += 1;
+    }
+    return AllocateTemporaryRegister();
+  }
+
+  ZoneSet<int>::iterator index = free_temporaries_.lower_bound(start_index);
+  if (index == free_temporaries_.begin()) {
+    // If start_index is the first free register, check for a register
+    // greater than end_index.
+    index = free_temporaries_.upper_bound(end_index);
+    if (index == free_temporaries_.end()) {
+      return AllocateTemporaryRegister();
+    }
+  } else {
+    // If there is a free register < start_index
+    index--;
+  }
+
+  int retval = *index;
+  free_temporaries_.erase(index);
+  return retval;
+}
+
+int TemporaryRegisterAllocator::PrepareForConsecutiveTemporaryRegisters(
+    size_t count) {
+  if (count == 0) {
+    return -1;
+  }
+
+  // TODO(oth): replace use of set<> here for free_temporaries with a
+  // more efficient structure. And/or partition into two searches -
+  // one before the translation window and one after.
+
+  // A run will require at least |count| free temporaries.
+  while (free_temporaries_.size() < count) {
+    free_temporaries_.insert(AllocateTemporaryRegister());
+  }
+
+  // Search within existing temporaries for a run.
+  auto start = free_temporaries_.begin();
+  size_t run_length = 0;
+  for (auto run_end = start; run_end != free_temporaries_.end(); run_end++) {
+    int expected = *start + static_cast<int>(run_length);
+    if (*run_end != expected) {
+      start = run_end;
+      run_length = 0;
+    }
+    Register reg_start(*start);
+    Register reg_expected(expected);
+    if (RegisterTranslator::DistanceToTranslationWindow(reg_start) > 0 &&
+        RegisterTranslator::DistanceToTranslationWindow(reg_expected) <= 0) {
+      // Run straddles the lower edge of the translation window. Registers
+      // after the start of this boundary are displaced by the register
+      // translator to provide a hole for translation. Runs either side
+      // of the boundary are fine.
+      start = run_end;
+      run_length = 0;
+    }
+    if (++run_length == count) {
+      return *start;
+    }
+  }
+
+  // Continue run if possible across existing last temporary.
+  if (allocation_count_ > 0 && (start == free_temporaries_.end() ||
+                                *start + static_cast<int>(run_length) !=
+                                    last_temporary_register().index() + 1)) {
+    run_length = 0;
+  }
+
+  // Pad temporaries if extended run would cross translation boundary.
+  Register reg_first(*start);
+  Register reg_last(*start + static_cast<int>(count) - 1);
+  DCHECK_GT(RegisterTranslator::DistanceToTranslationWindow(reg_first),
+            RegisterTranslator::DistanceToTranslationWindow(reg_last));
+  while (RegisterTranslator::DistanceToTranslationWindow(reg_first) > 0 &&
+         RegisterTranslator::DistanceToTranslationWindow(reg_last) <= 0) {
+    auto pos_insert_pair =
+        free_temporaries_.insert(AllocateTemporaryRegister());
+    reg_first = Register(*pos_insert_pair.first);
+    reg_last = Register(reg_first.index() + static_cast<int>(count) - 1);
+    run_length = 0;
+  }
+
+  // Ensure enough registers for run.
+  while (run_length++ < count) {
+    free_temporaries_.insert(AllocateTemporaryRegister());
+  }
+
+  int run_start =
+      last_temporary_register().index() - static_cast<int>(count) + 1;
+  DCHECK(RegisterTranslator::DistanceToTranslationWindow(Register(run_start)) <=
+             0 ||
+         RegisterTranslator::DistanceToTranslationWindow(
+             Register(run_start + static_cast<int>(count) - 1)) > 0);
+  return run_start;
+}
+
+bool TemporaryRegisterAllocator::RegisterIsLive(Register reg) const {
+  if (allocation_count_ > 0) {
+    DCHECK(reg >= first_temporary_register() &&
+           reg <= last_temporary_register());
+    return free_temporaries_.find(reg.index()) == free_temporaries_.end();
+  } else {
+    return false;
+  }
+}
+
+void TemporaryRegisterAllocator::BorrowConsecutiveTemporaryRegister(
+    int reg_index) {
+  DCHECK(free_temporaries_.find(reg_index) != free_temporaries_.end());
+  free_temporaries_.erase(reg_index);
+}
+
+void TemporaryRegisterAllocator::ReturnTemporaryRegister(int reg_index) {
+  DCHECK(free_temporaries_.find(reg_index) == free_temporaries_.end());
+  free_temporaries_.insert(reg_index);
+}
+
 BytecodeRegisterAllocator::BytecodeRegisterAllocator(
-    BytecodeArrayBuilder* builder)
-    : builder_(builder),
-      allocated_(builder->zone()),
+    Zone* zone, TemporaryRegisterAllocator* allocator)
+    : base_allocator_(allocator),
+      allocated_(zone),
       next_consecutive_register_(-1),
       next_consecutive_count_(-1) {}
 
-
 BytecodeRegisterAllocator::~BytecodeRegisterAllocator() {
   for (auto i = allocated_.rbegin(); i != allocated_.rend(); i++) {
-    builder_->ReturnTemporaryRegister(*i);
+    base_allocator()->ReturnTemporaryRegister(*i);
   }
   allocated_.clear();
 }
@@ -29,9 +185,9 @@
 Register BytecodeRegisterAllocator::NewRegister() {
   int allocated = -1;
   if (next_consecutive_count_ <= 0) {
-    allocated = builder_->BorrowTemporaryRegister();
+    allocated = base_allocator()->BorrowTemporaryRegister();
   } else {
-    allocated = builder_->BorrowTemporaryRegisterNotInRange(
+    allocated = base_allocator()->BorrowTemporaryRegisterNotInRange(
         next_consecutive_register_,
         next_consecutive_register_ + next_consecutive_count_ - 1);
   }
@@ -52,7 +208,7 @@
 void BytecodeRegisterAllocator::PrepareForConsecutiveAllocations(size_t count) {
   if (static_cast<int>(count) > next_consecutive_count_) {
     next_consecutive_register_ =
-        builder_->PrepareForConsecutiveTemporaryRegisters(count);
+        base_allocator()->PrepareForConsecutiveTemporaryRegisters(count);
     next_consecutive_count_ = static_cast<int>(count);
   }
 }
@@ -61,7 +217,8 @@
 Register BytecodeRegisterAllocator::NextConsecutiveRegister() {
   DCHECK_GE(next_consecutive_register_, 0);
   DCHECK_GT(next_consecutive_count_, 0);
-  builder_->BorrowConsecutiveTemporaryRegister(next_consecutive_register_);
+  base_allocator()->BorrowConsecutiveTemporaryRegister(
+      next_consecutive_register_);
   allocated_.push_back(next_consecutive_register_);
   next_consecutive_count_--;
   return Register(next_consecutive_register_++);
diff --git a/src/interpreter/bytecode-register-allocator.h b/src/interpreter/bytecode-register-allocator.h
index 74ab3a4..696a3b1 100644
--- a/src/interpreter/bytecode-register-allocator.h
+++ b/src/interpreter/bytecode-register-allocator.h
@@ -5,6 +5,7 @@
 #ifndef V8_INTERPRETER_BYTECODE_REGISTER_ALLOCATOR_H_
 #define V8_INTERPRETER_BYTECODE_REGISTER_ALLOCATOR_H_
 
+#include "src/interpreter/bytecodes.h"
 #include "src/zone-containers.h"
 
 namespace v8 {
@@ -14,26 +15,82 @@
 class BytecodeArrayBuilder;
 class Register;
 
+class TemporaryRegisterAllocator final {
+ public:
+  TemporaryRegisterAllocator(Zone* zone, int start_index);
+
+  // Borrow a temporary register.
+  int BorrowTemporaryRegister();
+
+  // Borrow a temporary register from the register range outside of
+  // |start_index| to |end_index|.
+  int BorrowTemporaryRegisterNotInRange(int start_index, int end_index);
+
+  // Return a temporary register when no longer used.
+  void ReturnTemporaryRegister(int reg_index);
+
+  // Ensure a run of consecutive registers is available. Each register in
+  // the range should be borrowed with BorrowConsecutiveTemporaryRegister().
+  // Returns the start index of the run.
+  int PrepareForConsecutiveTemporaryRegisters(size_t count);
+
+  // Borrow a register from a range prepared with
+  // PrepareForConsecutiveTemporaryRegisters().
+  void BorrowConsecutiveTemporaryRegister(int reg_index);
+
+  // Returns true if |reg| is a temporary register and is currently
+  // borrowed.
+  bool RegisterIsLive(Register reg) const;
+
+  // Returns the first register in the range of temporary registers.
+  Register first_temporary_register() const;
+
+  // Returns the last register in the range of temporary registers.
+  Register last_temporary_register() const;
+
+  // Returns the start index of temporary register allocations.
+  int allocation_base() const { return allocation_base_; }
+
+  // Returns the number of temporary register allocations made.
+  int allocation_count() const { return allocation_count_; }
+
+ private:
+  // Allocate a temporary register.
+  int AllocateTemporaryRegister();
+
+  ZoneSet<int> free_temporaries_;
+  int allocation_base_;
+  int allocation_count_;
+
+  DISALLOW_COPY_AND_ASSIGN(TemporaryRegisterAllocator);
+};
+
 // A class than allows the instantiator to allocate temporary registers that are
 // cleaned up when scope is closed.
-class BytecodeRegisterAllocator {
+class BytecodeRegisterAllocator final {
  public:
-  explicit BytecodeRegisterAllocator(BytecodeArrayBuilder* builder);
+  explicit BytecodeRegisterAllocator(Zone* zone,
+                                     TemporaryRegisterAllocator* allocator);
   ~BytecodeRegisterAllocator();
   Register NewRegister();
 
+  // Ensure |count| consecutive allocations are available.
   void PrepareForConsecutiveAllocations(size_t count);
+
+  // Get the next consecutive allocation after calling
+  // PrepareForConsecutiveAllocations.
   Register NextConsecutiveRegister();
 
+  // Returns true if |reg| is allocated in this allocator.
   bool RegisterIsAllocatedInThisScope(Register reg) const;
 
+  // Returns true if unused consecutive allocations remain.
   bool HasConsecutiveAllocations() const { return next_consecutive_count_ > 0; }
 
  private:
-  void* operator new(size_t size);
-  void operator delete(void* p);
+  TemporaryRegisterAllocator* base_allocator() const { return base_allocator_; }
 
-  BytecodeArrayBuilder* builder_;
+  TemporaryRegisterAllocator* base_allocator_;
   ZoneVector<int> allocated_;
   int next_consecutive_register_;
   int next_consecutive_count_;
diff --git a/src/interpreter/bytecode-traits.h b/src/interpreter/bytecode-traits.h
index fd778d7..b813605 100644
--- a/src/interpreter/bytecode-traits.h
+++ b/src/interpreter/bytecode-traits.h
@@ -28,6 +28,18 @@
 OPERAND_TYPE_LIST(DECLARE_OPERAND_SIZE)
 #undef DECLARE_OPERAND_SIZE
 
+template <OperandType>
+struct RegisterOperandTraits {
+  static const int kIsRegisterOperand = 0;
+};
+
+#define DECLARE_REGISTER_OPERAND(Name, _)              \
+  template <>                                          \
+  struct RegisterOperandTraits<OperandType::k##Name> { \
+    static const int kIsRegisterOperand = 1;           \
+  };
+REGISTER_OPERAND_TYPE_LIST(DECLARE_REGISTER_OPERAND)
+#undef DECLARE_REGISTER_OPERAND
 
 template <OperandType... Args>
 struct BytecodeTraits {};
@@ -63,13 +75,28 @@
     return kOperandOffsets[i];
   }
 
+  template <OperandType ot>
+  static inline bool HasAnyOperandsOfType() {
+    return operand_0 == ot || operand_1 == ot || operand_2 == ot ||
+           operand_3 == ot;
+  }
+
   static const int kOperandCount = 4;
+  static const int kRegisterOperandCount =
+      RegisterOperandTraits<operand_0>::kIsRegisterOperand +
+      RegisterOperandTraits<operand_1>::kIsRegisterOperand +
+      RegisterOperandTraits<operand_2>::kIsRegisterOperand +
+      RegisterOperandTraits<operand_3>::kIsRegisterOperand;
+  static const int kRegisterOperandBitmap =
+      RegisterOperandTraits<operand_0>::kIsRegisterOperand +
+      (RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1) +
+      (RegisterOperandTraits<operand_2>::kIsRegisterOperand << 2) +
+      (RegisterOperandTraits<operand_3>::kIsRegisterOperand << 3);
   static const int kSize =
       1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize +
       OperandTraits<operand_2>::kSize + OperandTraits<operand_3>::kSize;
 };
 
-
 template <OperandType operand_0, OperandType operand_1, OperandType operand_2>
 struct BytecodeTraits<operand_0, operand_1, operand_2, OPERAND_TERM> {
   static inline OperandType GetOperandType(int i) {
@@ -96,7 +123,20 @@
     return kOperandOffsets[i];
   }
 
+  template <OperandType ot>
+  static inline bool HasAnyOperandsOfType() {
+    return operand_0 == ot || operand_1 == ot || operand_2 == ot;
+  }
+
   static const int kOperandCount = 3;
+  static const int kRegisterOperandCount =
+      RegisterOperandTraits<operand_0>::kIsRegisterOperand +
+      RegisterOperandTraits<operand_1>::kIsRegisterOperand +
+      RegisterOperandTraits<operand_2>::kIsRegisterOperand;
+  static const int kRegisterOperandBitmap =
+      RegisterOperandTraits<operand_0>::kIsRegisterOperand +
+      (RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1) +
+      (RegisterOperandTraits<operand_2>::kIsRegisterOperand << 2);
   static const int kSize =
       1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize +
       OperandTraits<operand_2>::kSize;
@@ -126,7 +166,18 @@
     return kOperandOffsets[i];
   }
 
+  template <OperandType ot>
+  static inline bool HasAnyOperandsOfType() {
+    return operand_0 == ot || operand_1 == ot;
+  }
+
   static const int kOperandCount = 2;
+  static const int kRegisterOperandCount =
+      RegisterOperandTraits<operand_0>::kIsRegisterOperand +
+      RegisterOperandTraits<operand_1>::kIsRegisterOperand;
+  static const int kRegisterOperandBitmap =
+      RegisterOperandTraits<operand_0>::kIsRegisterOperand +
+      (RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1);
   static const int kSize =
       1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize;
 };
@@ -148,7 +199,16 @@
     return 1;
   }
 
+  template <OperandType ot>
+  static inline bool HasAnyOperandsOfType() {
+    return operand_0 == ot;
+  }
+
   static const int kOperandCount = 1;
+  static const int kRegisterOperandCount =
+      RegisterOperandTraits<operand_0>::kIsRegisterOperand;
+  static const int kRegisterOperandBitmap =
+      RegisterOperandTraits<operand_0>::kIsRegisterOperand;
   static const int kSize = 1 + OperandTraits<operand_0>::kSize;
 };
 
@@ -169,7 +229,14 @@
     return 1;
   }
 
+  template <OperandType ot>
+  static inline bool HasAnyOperandsOfType() {
+    return false;
+  }
+
   static const int kOperandCount = 0;
+  static const int kRegisterOperandCount = 0;
+  static const int kRegisterOperandBitmap = 0;
   static const int kSize = 1 + OperandTraits<OperandType::kNone>::kSize;
 };
 
diff --git a/src/interpreter/bytecodes.cc b/src/interpreter/bytecodes.cc
index 2d4406c..c3b17c7 100644
--- a/src/interpreter/bytecodes.cc
+++ b/src/interpreter/bytecodes.cc
@@ -57,6 +57,7 @@
 
 // static
 uint8_t Bytecodes::ToByte(Bytecode bytecode) {
+  DCHECK(bytecode <= Bytecode::kLast);
   return static_cast<uint8_t>(bytecode);
 }
 
@@ -70,6 +71,21 @@
 
 
 // static
+Bytecode Bytecodes::GetDebugBreak(Bytecode bytecode) {
+  switch (Size(bytecode)) {
+#define CASE(Name, ...)                                  \
+  case BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::kSize: \
+    return Bytecode::k##Name;
+    DEBUG_BREAK_BYTECODE_LIST(CASE)
+#undef CASE
+    default:
+      break;
+  }
+  UNREACHABLE();
+  return static_cast<Bytecode>(-1);
+}
+
+// static
 int Bytecodes::Size(Bytecode bytecode) {
   DCHECK(bytecode <= Bytecode::kLast);
   switch (bytecode) {
@@ -100,6 +116,21 @@
 
 
 // static
+int Bytecodes::NumberOfRegisterOperands(Bytecode bytecode) {
+  DCHECK(bytecode <= Bytecode::kLast);
+  switch (bytecode) {
+#define CASE(Name, ...)                                            \
+  case Bytecode::k##Name:                                          \
+    typedef BytecodeTraits<__VA_ARGS__, OPERAND_TERM> Name##Trait; \
+    return Name##Trait::kRegisterOperandCount;
+    BYTECODE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return false;
+}
+
+// static
 OperandType Bytecodes::GetOperandType(Bytecode bytecode, int i) {
   DCHECK(bytecode <= Bytecode::kLast);
   switch (bytecode) {
@@ -130,6 +161,21 @@
 
 
 // static
+int Bytecodes::GetRegisterOperandBitmap(Bytecode bytecode) {
+  DCHECK(bytecode <= Bytecode::kLast);
+  switch (bytecode) {
+#define CASE(Name, ...)                                            \
+  case Bytecode::k##Name:                                          \
+    typedef BytecodeTraits<__VA_ARGS__, OPERAND_TERM> Name##Trait; \
+    return Name##Trait::kRegisterOperandBitmap;
+    BYTECODE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return false;
+}
+
+// static
 int Bytecodes::GetOperandOffset(Bytecode bytecode, int i) {
   DCHECK(bytecode <= Bytecode::kLast);
   switch (bytecode) {
@@ -164,6 +210,7 @@
          bytecode == Bytecode::kJumpIfFalse ||
          bytecode == Bytecode::kJumpIfToBooleanTrue ||
          bytecode == Bytecode::kJumpIfToBooleanFalse ||
+         bytecode == Bytecode::kJumpIfNotHole ||
          bytecode == Bytecode::kJumpIfNull ||
          bytecode == Bytecode::kJumpIfUndefined;
 }
@@ -175,6 +222,7 @@
          bytecode == Bytecode::kJumpIfFalseConstant ||
          bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
          bytecode == Bytecode::kJumpIfToBooleanFalseConstant ||
+         bytecode == Bytecode::kJumpIfNotHoleConstant ||
          bytecode == Bytecode::kJumpIfNullConstant ||
          bytecode == Bytecode::kJumpIfUndefinedConstant;
 }
@@ -186,6 +234,7 @@
          bytecode == Bytecode::kJumpIfFalseConstantWide ||
          bytecode == Bytecode::kJumpIfToBooleanTrueConstantWide ||
          bytecode == Bytecode::kJumpIfToBooleanFalseConstantWide ||
+         bytecode == Bytecode::kJumpIfNotHoleConstantWide ||
          bytecode == Bytecode::kJumpIfNullConstantWide ||
          bytecode == Bytecode::kJumpIfUndefinedConstantWide;
 }
@@ -227,10 +276,122 @@
 
 
 // static
+bool Bytecodes::IsCallOrNew(Bytecode bytecode) {
+  return bytecode == Bytecode::kCall || bytecode == Bytecode::kTailCall ||
+         bytecode == Bytecode::kNew || bytecode == Bytecode::kCallWide ||
+         bytecode == Bytecode::kTailCallWide || bytecode == Bytecode::kNewWide;
+}
+
+// static
+bool Bytecodes::IsDebugBreak(Bytecode bytecode) {
+  switch (bytecode) {
+#define CASE(Name, ...) case Bytecode::k##Name:
+    DEBUG_BREAK_BYTECODE_LIST(CASE);
+#undef CASE
+    return true;
+    default:
+      break;
+  }
+  return false;
+}
+
+// static
 bool Bytecodes::IsJumpOrReturn(Bytecode bytecode) {
   return bytecode == Bytecode::kReturn || IsJump(bytecode);
 }
 
+// static
+bool Bytecodes::IsIndexOperandType(OperandType operand_type) {
+  return operand_type == OperandType::kIdx8 ||
+         operand_type == OperandType::kIdx16;
+}
+
+// static
+bool Bytecodes::IsImmediateOperandType(OperandType operand_type) {
+  return operand_type == OperandType::kImm8;
+}
+
+// static
+bool Bytecodes::IsRegisterCountOperandType(OperandType operand_type) {
+  return (operand_type == OperandType::kRegCount8 ||
+          operand_type == OperandType::kRegCount16);
+}
+
+// static
+bool Bytecodes::IsMaybeRegisterOperandType(OperandType operand_type) {
+  return (operand_type == OperandType::kMaybeReg8 ||
+          operand_type == OperandType::kMaybeReg16);
+}
+
+// static
+bool Bytecodes::IsRegisterOperandType(OperandType operand_type) {
+  switch (operand_type) {
+#define CASE(Name, _)        \
+  case OperandType::k##Name: \
+    return true;
+    REGISTER_OPERAND_TYPE_LIST(CASE)
+#undef CASE
+#define CASE(Name, _)        \
+  case OperandType::k##Name: \
+    break;
+    NON_REGISTER_OPERAND_TYPE_LIST(CASE)
+#undef CASE
+  }
+  return false;
+}
+
+// static
+bool Bytecodes::IsRegisterInputOperandType(OperandType operand_type) {
+  switch (operand_type) {
+#define CASE(Name, _)        \
+  case OperandType::k##Name: \
+    return true;
+    REGISTER_INPUT_OPERAND_TYPE_LIST(CASE)
+#undef CASE
+#define CASE(Name, _)        \
+  case OperandType::k##Name: \
+    break;
+    NON_REGISTER_OPERAND_TYPE_LIST(CASE)
+    REGISTER_OUTPUT_OPERAND_TYPE_LIST(CASE)
+#undef CASE
+  }
+  return false;
+}
+
+// static
+bool Bytecodes::IsRegisterOutputOperandType(OperandType operand_type) {
+  switch (operand_type) {
+#define CASE(Name, _)        \
+  case OperandType::k##Name: \
+    return true;
+    REGISTER_OUTPUT_OPERAND_TYPE_LIST(CASE)
+#undef CASE
+#define CASE(Name, _)        \
+  case OperandType::k##Name: \
+    break;
+    NON_REGISTER_OPERAND_TYPE_LIST(CASE)
+    REGISTER_INPUT_OPERAND_TYPE_LIST(CASE)
+#undef CASE
+  }
+  return false;
+}
+
+namespace {
+static Register DecodeRegister(const uint8_t* operand_start,
+                               OperandType operand_type) {
+  switch (Bytecodes::SizeOfOperand(operand_type)) {
+    case OperandSize::kByte:
+      return Register::FromOperand(*operand_start);
+    case OperandSize::kShort:
+      return Register::FromWideOperand(ReadUnalignedUInt16(operand_start));
+    case OperandSize::kNone: {
+      UNREACHABLE();
+    }
+  }
+  return Register();
+}
+}  // namespace
+
 
 // static
 std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
@@ -251,16 +412,20 @@
 
   os << bytecode << " ";
 
+  // Operands for the debug break are from the original instruction.
+  if (IsDebugBreak(bytecode)) return os;
+
   int number_of_operands = NumberOfOperands(bytecode);
+  int range = 0;
   for (int i = 0; i < number_of_operands; i++) {
     OperandType op_type = GetOperandType(bytecode, i);
     const uint8_t* operand_start =
         &bytecode_start[GetOperandOffset(bytecode, i)];
     switch (op_type) {
-      case interpreter::OperandType::kCount8:
+      case interpreter::OperandType::kRegCount8:
         os << "#" << static_cast<unsigned int>(*operand_start);
         break;
-      case interpreter::OperandType::kCount16:
+      case interpreter::OperandType::kRegCount16:
         os << '#' << ReadUnalignedUInt16(operand_start);
         break;
       case interpreter::OperandType::kIdx8:
@@ -272,48 +437,28 @@
       case interpreter::OperandType::kImm8:
         os << "#" << static_cast<int>(static_cast<int8_t>(*operand_start));
         break;
+      case interpreter::OperandType::kMaybeReg8:
+      case interpreter::OperandType::kMaybeReg16:
       case interpreter::OperandType::kReg8:
-      case interpreter::OperandType::kMaybeReg8: {
-        Register reg = Register::FromOperand(*operand_start);
-        if (reg.is_function_context()) {
-          os << "<context>";
-        } else if (reg.is_function_closure()) {
-          os << "<closure>";
-        } else if (reg.is_new_target()) {
-          os << "<new.target>";
-        } else if (reg.is_parameter()) {
-          int parameter_index = reg.ToParameterIndex(parameter_count);
-          if (parameter_index == 0) {
-            os << "<this>";
-          } else {
-            os << "a" << parameter_index - 1;
-          }
-        } else {
-          os << "r" << reg.index();
-        }
+      case interpreter::OperandType::kReg16:
+      case interpreter::OperandType::kRegOut8:
+      case interpreter::OperandType::kRegOut16: {
+        Register reg = DecodeRegister(operand_start, op_type);
+        os << reg.ToString(parameter_count);
         break;
       }
-      case interpreter::OperandType::kRegPair8: {
-        Register reg = Register::FromOperand(*operand_start);
-        if (reg.is_parameter()) {
-          int parameter_index = reg.ToParameterIndex(parameter_count);
-          DCHECK_NE(parameter_index, 0);
-          os << "a" << parameter_index - 1 << "-" << parameter_index;
-        } else {
-          os << "r" << reg.index() << "-" << reg.index() + 1;
-        }
-        break;
-      }
-      case interpreter::OperandType::kReg16: {
-        Register reg =
-            Register::FromWideOperand(ReadUnalignedUInt16(operand_start));
-        if (reg.is_parameter()) {
-          int parameter_index = reg.ToParameterIndex(parameter_count);
-          DCHECK_NE(parameter_index, 0);
-          os << "a" << parameter_index - 1;
-        } else {
-          os << "r" << reg.index();
-        }
+      case interpreter::OperandType::kRegOutTriple8:
+      case interpreter::OperandType::kRegOutTriple16:
+        range += 1;
+      case interpreter::OperandType::kRegOutPair8:
+      case interpreter::OperandType::kRegOutPair16:
+      case interpreter::OperandType::kRegPair8:
+      case interpreter::OperandType::kRegPair16: {
+        range += 1;
+        Register first_reg = DecodeRegister(operand_start, op_type);
+        Register last_reg = Register(first_reg.index() + range);
+        os << first_reg.ToString(parameter_count) << "-"
+           << last_reg.ToString(parameter_count);
         break;
       }
       case interpreter::OperandType::kNone:
@@ -327,7 +472,6 @@
   return os;
 }
 
-
 std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode) {
   return os << Bytecodes::ToString(bytecode);
 }
@@ -342,22 +486,33 @@
   return os << Bytecodes::OperandSizeToString(operand_size);
 }
 
-
 static const int kLastParamRegisterIndex =
     -InterpreterFrameConstants::kLastParamFromRegisterPointer / kPointerSize;
 static const int kFunctionClosureRegisterIndex =
     -InterpreterFrameConstants::kFunctionFromRegisterPointer / kPointerSize;
-static const int kFunctionContextRegisterIndex =
+static const int kCurrentContextRegisterIndex =
     -InterpreterFrameConstants::kContextFromRegisterPointer / kPointerSize;
 static const int kNewTargetRegisterIndex =
     -InterpreterFrameConstants::kNewTargetFromRegisterPointer / kPointerSize;
 
+// The register space is a signed 16-bit space. Register operands
+// occupy range above 0. Parameter indices are biased with the
+// negative value kLastParamRegisterIndex for ease of access in the
+// interpreter.
+static const int kMaxParameterIndex = kMaxInt16 + kLastParamRegisterIndex;
+static const int kMaxRegisterIndex = -kMinInt16;
+static const int kMaxReg8Index = -kMinInt8;
+static const int kMinReg8Index = -kMaxInt8;
+static const int kMaxReg16Index = -kMinInt16;
+static const int kMinReg16Index = -kMaxInt16;
 
-// Registers occupy range 0-127 in 8-bit value leaving 128 unused values.
-// Parameter indices are biased with the negative value kLastParamRegisterIndex
-// for ease of access in the interpreter.
-static const int kMaxParameterIndex = 128 + kLastParamRegisterIndex;
+bool Register::is_byte_operand() const {
+  return index_ >= kMinReg8Index && index_ <= kMaxReg8Index;
+}
 
+bool Register::is_short_operand() const {
+  return index_ >= kMinReg16Index && index_ <= kMaxReg16Index;
+}
 
 Register Register::FromParameterIndex(int index, int parameter_count) {
   DCHECK_GE(index, 0);
@@ -365,7 +520,6 @@
   DCHECK_LE(parameter_count, kMaxParameterIndex + 1);
   int register_index = kLastParamRegisterIndex - parameter_count + index + 1;
   DCHECK_LT(register_index, 0);
-  DCHECK_GE(register_index, kMinInt8);
   return Register(register_index);
 }
 
@@ -386,13 +540,13 @@
 }
 
 
-Register Register::function_context() {
-  return Register(kFunctionContextRegisterIndex);
+Register Register::current_context() {
+  return Register(kCurrentContextRegisterIndex);
 }
 
 
-bool Register::is_function_context() const {
-  return index() == kFunctionContextRegisterIndex;
+bool Register::is_current_context() const {
+  return index() == kCurrentContextRegisterIndex;
 }
 
 
@@ -403,13 +557,14 @@
   return index() == kNewTargetRegisterIndex;
 }
 
-
 int Register::MaxParameterIndex() { return kMaxParameterIndex; }
 
+int Register::MaxRegisterIndex() { return kMaxRegisterIndex; }
+
+int Register::MaxRegisterIndexForByteOperand() { return kMaxReg8Index; }
 
 uint8_t Register::ToOperand() const {
-  DCHECK_GE(index_, kMinInt8);
-  DCHECK_LE(index_, kMaxInt8);
+  DCHECK(is_byte_operand());
   return static_cast<uint8_t>(-index_);
 }
 
@@ -420,8 +575,7 @@
 
 
 uint16_t Register::ToWideOperand() const {
-  DCHECK_GE(index_, kMinInt16);
-  DCHECK_LE(index_, kMaxInt16);
+  DCHECK(is_short_operand());
   return static_cast<uint16_t>(-index_);
 }
 
@@ -431,6 +585,16 @@
 }
 
 
+uint32_t Register::ToRawOperand() const {
+  return static_cast<uint32_t>(-index_);
+}
+
+
+Register Register::FromRawOperand(uint32_t operand) {
+  return Register(-static_cast<int32_t>(operand));
+}
+
+
 bool Register::AreContiguous(Register reg1, Register reg2, Register reg3,
                              Register reg4, Register reg5) {
   if (reg1.index() + 1 != reg2.index()) {
@@ -448,6 +612,29 @@
   return true;
 }
 
+std::string Register::ToString(int parameter_count) {
+  if (is_current_context()) {
+    return std::string("<context>");
+  } else if (is_function_closure()) {
+    return std::string("<closure>");
+  } else if (is_new_target()) {
+    return std::string("<new.target>");
+  } else if (is_parameter()) {
+    int parameter_index = ToParameterIndex(parameter_count);
+    if (parameter_index == 0) {
+      return std::string("<this>");
+    } else {
+      std::ostringstream s;
+      s << "a" << parameter_index - 1;
+      return s.str();
+    }
+  } else {
+    std::ostringstream s;
+    s << "r" << index();
+    return s.str();
+  }
+}
+
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
diff --git a/src/interpreter/bytecodes.h b/src/interpreter/bytecodes.h
index a9beb6c..d4863b1 100644
--- a/src/interpreter/bytecodes.h
+++ b/src/interpreter/bytecodes.h
@@ -15,24 +15,65 @@
 namespace internal {
 namespace interpreter {
 
+#define INVALID_OPERAND_TYPE_LIST(V) \
+  V(None, OperandSize::kNone)
+
+#define REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
+  /* Byte operands. */                      \
+  V(MaybeReg8, OperandSize::kByte)          \
+  V(Reg8, OperandSize::kByte)               \
+  V(RegPair8, OperandSize::kByte)           \
+  /* Short operands. */                     \
+  V(MaybeReg16, OperandSize::kShort)        \
+  V(Reg16, OperandSize::kShort)             \
+  V(RegPair16, OperandSize::kShort)
+
+#define REGISTER_OUTPUT_OPERAND_TYPE_LIST(V) \
+  /* Byte operands. */                       \
+  V(RegOut8, OperandSize::kByte)             \
+  V(RegOutPair8, OperandSize::kByte)         \
+  V(RegOutTriple8, OperandSize::kByte)       \
+  /* Short operands. */                      \
+  V(RegOut16, OperandSize::kShort)           \
+  V(RegOutPair16, OperandSize::kShort)       \
+  V(RegOutTriple16, OperandSize::kShort)
+
+#define SCALAR_OPERAND_TYPE_LIST(V) \
+  /* Byte operands. */              \
+  V(Idx8, OperandSize::kByte)       \
+  V(Imm8, OperandSize::kByte)       \
+  V(RegCount8, OperandSize::kByte)  \
+  /* Short operands. */             \
+  V(Idx16, OperandSize::kShort)     \
+  V(RegCount16, OperandSize::kShort)
+
+#define REGISTER_OPERAND_TYPE_LIST(V) \
+  REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
+  REGISTER_OUTPUT_OPERAND_TYPE_LIST(V)
+
+#define NON_REGISTER_OPERAND_TYPE_LIST(V) \
+  INVALID_OPERAND_TYPE_LIST(V)            \
+  SCALAR_OPERAND_TYPE_LIST(V)
+
 // The list of operand types used by bytecodes.
-#define OPERAND_TYPE_LIST(V)       \
-                                   \
-  /* None operand. */              \
-  V(None, OperandSize::kNone)      \
-                                   \
-  /* Byte operands. */             \
-  V(Count8, OperandSize::kByte)    \
-  V(Imm8, OperandSize::kByte)      \
-  V(Idx8, OperandSize::kByte)      \
-  V(MaybeReg8, OperandSize::kByte) \
-  V(Reg8, OperandSize::kByte)      \
-  V(RegPair8, OperandSize::kByte)  \
-                                   \
-  /* Short operands. */            \
-  V(Count16, OperandSize::kShort)  \
-  V(Idx16, OperandSize::kShort)    \
-  V(Reg16, OperandSize::kShort)
+#define OPERAND_TYPE_LIST(V)        \
+  NON_REGISTER_OPERAND_TYPE_LIST(V) \
+  REGISTER_OPERAND_TYPE_LIST(V)
+
+// Define one debug break bytecode for each operands size.
+#define DEBUG_BREAK_BYTECODE_LIST(V)                                           \
+  V(DebugBreak0, OperandType::kNone)                                           \
+  V(DebugBreak1, OperandType::kReg8)                                           \
+  V(DebugBreak2, OperandType::kReg16)                                          \
+  V(DebugBreak3, OperandType::kReg16, OperandType::kReg8)                      \
+  V(DebugBreak4, OperandType::kReg16, OperandType::kReg16)                     \
+  V(DebugBreak5, OperandType::kReg16, OperandType::kReg16, OperandType::kReg8) \
+  V(DebugBreak6, OperandType::kReg16, OperandType::kReg16,                     \
+    OperandType::kReg16)                                                       \
+  V(DebugBreak7, OperandType::kReg16, OperandType::kReg16,                     \
+    OperandType::kReg16, OperandType::kReg8)                                   \
+  V(DebugBreak8, OperandType::kReg16, OperandType::kReg16,                     \
+    OperandType::kReg16, OperandType::kReg16)
 
 // The list of bytecodes which are interpreted by the interpreter.
 #define BYTECODE_LIST(V)                                                       \
@@ -49,14 +90,10 @@
   V(LdaConstantWide, OperandType::kIdx16)                                      \
                                                                                \
   /* Globals */                                                                \
-  V(LdaGlobalSloppy, OperandType::kIdx8, OperandType::kIdx8)                   \
-  V(LdaGlobalStrict, OperandType::kIdx8, OperandType::kIdx8)                   \
-  V(LdaGlobalInsideTypeofSloppy, OperandType::kIdx8, OperandType::kIdx8)       \
-  V(LdaGlobalInsideTypeofStrict, OperandType::kIdx8, OperandType::kIdx8)       \
-  V(LdaGlobalSloppyWide, OperandType::kIdx16, OperandType::kIdx16)             \
-  V(LdaGlobalStrictWide, OperandType::kIdx16, OperandType::kIdx16)             \
-  V(LdaGlobalInsideTypeofSloppyWide, OperandType::kIdx16, OperandType::kIdx16) \
-  V(LdaGlobalInsideTypeofStrictWide, OperandType::kIdx16, OperandType::kIdx16) \
+  V(LdaGlobal, OperandType::kIdx8, OperandType::kIdx8)                         \
+  V(LdaGlobalInsideTypeof, OperandType::kIdx8, OperandType::kIdx8)             \
+  V(LdaGlobalWide, OperandType::kIdx16, OperandType::kIdx16)                   \
+  V(LdaGlobalInsideTypeofWide, OperandType::kIdx16, OperandType::kIdx16)       \
   V(StaGlobalSloppy, OperandType::kIdx8, OperandType::kIdx8)                   \
   V(StaGlobalStrict, OperandType::kIdx8, OperandType::kIdx8)                   \
   V(StaGlobalSloppyWide, OperandType::kIdx16, OperandType::kIdx16)             \
@@ -82,25 +119,17 @@
                                                                                \
   /* Register-accumulator transfers */                                         \
   V(Ldar, OperandType::kReg8)                                                  \
-  V(Star, OperandType::kReg8)                                                  \
+  V(Star, OperandType::kRegOut8)                                               \
                                                                                \
   /* Register-register transfers */                                            \
-  V(Mov, OperandType::kReg8, OperandType::kReg8)                               \
-  V(Exchange, OperandType::kReg8, OperandType::kReg16)                         \
-  V(ExchangeWide, OperandType::kReg16, OperandType::kReg16)                    \
+  V(Mov, OperandType::kReg8, OperandType::kRegOut8)                            \
+  V(MovWide, OperandType::kReg16, OperandType::kRegOut16)                      \
                                                                                \
   /* LoadIC operations */                                                      \
-  V(LoadICSloppy, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8)  \
-  V(LoadICStrict, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8)  \
-  V(KeyedLoadICSloppy, OperandType::kReg8, OperandType::kIdx8)                 \
-  V(KeyedLoadICStrict, OperandType::kReg8, OperandType::kIdx8)                 \
-  /* TODO(rmcilroy): Wide register operands too? */                            \
-  V(LoadICSloppyWide, OperandType::kReg8, OperandType::kIdx16,                 \
-    OperandType::kIdx16)                                                       \
-  V(LoadICStrictWide, OperandType::kReg8, OperandType::kIdx16,                 \
-    OperandType::kIdx16)                                                       \
-  V(KeyedLoadICSloppyWide, OperandType::kReg8, OperandType::kIdx16)            \
-  V(KeyedLoadICStrictWide, OperandType::kReg8, OperandType::kIdx16)            \
+  V(LoadIC, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8)        \
+  V(KeyedLoadIC, OperandType::kReg8, OperandType::kIdx8)                       \
+  V(LoadICWide, OperandType::kReg8, OperandType::kIdx16, OperandType::kIdx16)  \
+  V(KeyedLoadICWide, OperandType::kReg8, OperandType::kIdx16)                  \
                                                                                \
   /* StoreIC operations */                                                     \
   V(StoreICSloppy, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
@@ -109,7 +138,6 @@
     OperandType::kIdx8)                                                        \
   V(KeyedStoreICStrict, OperandType::kReg8, OperandType::kReg8,                \
     OperandType::kIdx8)                                                        \
-  /* TODO(rmcilroy): Wide register operands too? */                            \
   V(StoreICSloppyWide, OperandType::kReg8, OperandType::kIdx16,                \
     OperandType::kIdx16)                                                       \
   V(StoreICStrictWide, OperandType::kReg8, OperandType::kIdx16,                \
@@ -139,22 +167,33 @@
   V(TypeOf, OperandType::kNone)                                                \
   V(DeletePropertyStrict, OperandType::kReg8)                                  \
   V(DeletePropertySloppy, OperandType::kReg8)                                  \
-  V(DeleteLookupSlot, OperandType::kNone)                                      \
                                                                                \
   /* Call operations */                                                        \
-  V(Call, OperandType::kReg8, OperandType::kReg8, OperandType::kCount8,        \
+  V(Call, OperandType::kReg8, OperandType::kReg8, OperandType::kRegCount8,     \
     OperandType::kIdx8)                                                        \
-  V(CallWide, OperandType::kReg8, OperandType::kReg8, OperandType::kCount16,   \
-    OperandType::kIdx16)                                                       \
+  V(CallWide, OperandType::kReg16, OperandType::kReg16,                        \
+    OperandType::kRegCount16, OperandType::kIdx16)                             \
+  V(TailCall, OperandType::kReg8, OperandType::kReg8, OperandType::kRegCount8, \
+    OperandType::kIdx8)                                                        \
+  V(TailCallWide, OperandType::kReg16, OperandType::kReg16,                    \
+    OperandType::kRegCount16, OperandType::kIdx16)                             \
   V(CallRuntime, OperandType::kIdx16, OperandType::kMaybeReg8,                 \
-    OperandType::kCount8)                                                      \
+    OperandType::kRegCount8)                                                   \
+  V(CallRuntimeWide, OperandType::kIdx16, OperandType::kMaybeReg16,            \
+    OperandType::kRegCount8)                                                   \
   V(CallRuntimeForPair, OperandType::kIdx16, OperandType::kMaybeReg8,          \
-    OperandType::kCount8, OperandType::kRegPair8)                              \
+    OperandType::kRegCount8, OperandType::kRegOutPair8)                        \
+  V(CallRuntimeForPairWide, OperandType::kIdx16, OperandType::kMaybeReg16,     \
+    OperandType::kRegCount8, OperandType::kRegOutPair16)                       \
   V(CallJSRuntime, OperandType::kIdx16, OperandType::kReg8,                    \
-    OperandType::kCount8)                                                      \
+    OperandType::kRegCount8)                                                   \
+  V(CallJSRuntimeWide, OperandType::kIdx16, OperandType::kReg16,               \
+    OperandType::kRegCount16)                                                  \
                                                                                \
   /* New operator */                                                           \
-  V(New, OperandType::kReg8, OperandType::kMaybeReg8, OperandType::kCount8)    \
+  V(New, OperandType::kReg8, OperandType::kMaybeReg8, OperandType::kRegCount8) \
+  V(NewWide, OperandType::kReg16, OperandType::kMaybeReg16,                    \
+    OperandType::kRegCount16)                                                  \
                                                                                \
   /* Test Operators */                                                         \
   V(TestEqual, OperandType::kReg8)                                             \
@@ -194,6 +233,7 @@
   /* Arguments allocation */                                                   \
   V(CreateMappedArguments, OperandType::kNone)                                 \
   V(CreateUnmappedArguments, OperandType::kNone)                               \
+  V(CreateRestParameter, OperandType::kNone)                                   \
                                                                                \
   /* Control Flow */                                                           \
   V(Jump, OperandType::kImm8)                                                  \
@@ -217,18 +257,30 @@
   V(JumpIfUndefined, OperandType::kImm8)                                       \
   V(JumpIfUndefinedConstant, OperandType::kIdx8)                               \
   V(JumpIfUndefinedConstantWide, OperandType::kIdx16)                          \
+  V(JumpIfNotHole, OperandType::kImm8)                                         \
+  V(JumpIfNotHoleConstant, OperandType::kIdx8)                                 \
+  V(JumpIfNotHoleConstantWide, OperandType::kIdx16)                            \
                                                                                \
   /* Complex flow control For..in */                                           \
-  V(ForInPrepare, OperandType::kReg8, OperandType::kReg8, OperandType::kReg8)  \
+  V(ForInPrepare, OperandType::kRegOutTriple8)                                 \
+  V(ForInPrepareWide, OperandType::kRegOutTriple16)                            \
   V(ForInDone, OperandType::kReg8, OperandType::kReg8)                         \
-  V(ForInNext, OperandType::kReg8, OperandType::kReg8, OperandType::kReg8,     \
-    OperandType::kReg8)                                                        \
+  V(ForInNext, OperandType::kReg8, OperandType::kReg8, OperandType::kRegPair8) \
+  V(ForInNextWide, OperandType::kReg16, OperandType::kReg16,                   \
+    OperandType::kRegPair16)                                                   \
   V(ForInStep, OperandType::kReg8)                                             \
                                                                                \
+  /* Perform a stack guard check */                                            \
+  V(StackCheck, OperandType::kNone)                                            \
+                                                                               \
   /* Non-local flow control */                                                 \
   V(Throw, OperandType::kNone)                                                 \
-  V(Return, OperandType::kNone)
-
+  V(ReThrow, OperandType::kNone)                                               \
+  V(Return, OperandType::kNone)                                                \
+                                                                               \
+  /* Debugger */                                                               \
+  V(Debugger, OperandType::kNone)                                              \
+  DEBUG_BREAK_BYTECODE_LIST(V)
 
 // Enumeration of the size classes of operand types used by bytecodes.
 enum class OperandSize : uint8_t {
@@ -268,28 +320,30 @@
 // in its stack-frame. Register hold parameters, this, and expression values.
 class Register {
  public:
-  Register() : index_(kIllegalIndex) {}
+  explicit Register(int index = kInvalidIndex) : index_(index) {}
 
-  explicit Register(int index) : index_(index) {}
-
-  int index() const {
-    DCHECK(index_ != kIllegalIndex);
-    return index_;
-  }
+  int index() const { return index_; }
   bool is_parameter() const { return index() < 0; }
-  bool is_valid() const { return index_ != kIllegalIndex; }
+  bool is_valid() const { return index_ != kInvalidIndex; }
+  bool is_byte_operand() const;
+  bool is_short_operand() const;
 
   static Register FromParameterIndex(int index, int parameter_count);
   int ToParameterIndex(int parameter_count) const;
   static int MaxParameterIndex();
+  static int MaxRegisterIndex();
+  static int MaxRegisterIndexForByteOperand();
+
+  // Returns an invalid register.
+  static Register invalid_value() { return Register(); }
 
   // Returns the register for the function's closure object.
   static Register function_closure();
   bool is_function_closure() const;
 
-  // Returns the register for the function's outer context.
-  static Register function_context();
-  bool is_function_context() const;
+  // Returns the register which holds the current context object.
+  static Register current_context();
+  bool is_current_context() const;
 
   // Returns the register for the incoming new target value.
   static Register new_target();
@@ -301,11 +355,16 @@
   static Register FromWideOperand(uint16_t operand);
   uint16_t ToWideOperand() const;
 
+  static Register FromRawOperand(uint32_t raw_operand);
+  uint32_t ToRawOperand() const;
+
   static bool AreContiguous(Register reg1, Register reg2,
                             Register reg3 = Register(),
                             Register reg4 = Register(),
                             Register reg5 = Register());
 
+  std::string ToString(int parameter_count);
+
   bool operator==(const Register& other) const {
     return index() == other.index();
   }
@@ -318,9 +377,15 @@
   bool operator<=(const Register& other) const {
     return index() <= other.index();
   }
+  bool operator>(const Register& other) const {
+    return index() > other.index();
+  }
+  bool operator>=(const Register& other) const {
+    return index() >= other.index();
+  }
 
  private:
-  static const int kIllegalIndex = kMaxInt;
+  static const int kInvalidIndex = kMaxInt;
 
   void* operator new(size_t size);
   void operator delete(void* p);
@@ -349,57 +414,96 @@
   // Returns the number of operands expected by |bytecode|.
   static int NumberOfOperands(Bytecode bytecode);
 
-  // Return the i-th operand of |bytecode|.
+  // Returns the number of register operands expected by |bytecode|.
+  static int NumberOfRegisterOperands(Bytecode bytecode);
+
+  // Returns the i-th operand of |bytecode|.
   static OperandType GetOperandType(Bytecode bytecode, int i);
 
-  // Return the size of the i-th operand of |bytecode|.
+  // Returns the size of the i-th operand of |bytecode|.
   static OperandSize GetOperandSize(Bytecode bytecode, int i);
 
   // Returns the offset of the i-th operand of |bytecode| relative to the start
   // of the bytecode.
   static int GetOperandOffset(Bytecode bytecode, int i);
 
+  // Returns a zero-based bitmap of the register operand positions of
+  // |bytecode|.
+  static int GetRegisterOperandBitmap(Bytecode bytecode);
+
+  // Returns a debug break bytecode with a matching operand size.
+  static Bytecode GetDebugBreak(Bytecode bytecode);
+
   // Returns the size of the bytecode including its operands.
   static int Size(Bytecode bytecode);
 
   // Returns the size of |operand|.
   static OperandSize SizeOfOperand(OperandType operand);
 
-  // Return true if the bytecode is a conditional jump taking
+  // Returns true if the bytecode is a conditional jump taking
   // an immediate byte operand (OperandType::kImm8).
   static bool IsConditionalJumpImmediate(Bytecode bytecode);
 
-  // Return true if the bytecode is a conditional jump taking
+  // Returns true if the bytecode is a conditional jump taking
   // a constant pool entry (OperandType::kIdx8).
   static bool IsConditionalJumpConstant(Bytecode bytecode);
 
-  // Return true if the bytecode is a conditional jump taking
+  // Returns true if the bytecode is a conditional jump taking
   // a constant pool entry (OperandType::kIdx16).
   static bool IsConditionalJumpConstantWide(Bytecode bytecode);
 
-  // Return true if the bytecode is a conditional jump taking
+  // Returns true if the bytecode is a conditional jump taking
   // any kind of operand.
   static bool IsConditionalJump(Bytecode bytecode);
 
-  // Return true if the bytecode is a jump or a conditional jump taking
+  // Returns true if the bytecode is a jump or a conditional jump taking
   // an immediate byte operand (OperandType::kImm8).
   static bool IsJumpImmediate(Bytecode bytecode);
 
-  // Return true if the bytecode is a jump or conditional jump taking a
+  // Returns true if the bytecode is a jump or conditional jump taking a
   // constant pool entry (OperandType::kIdx8).
   static bool IsJumpConstant(Bytecode bytecode);
 
-  // Return true if the bytecode is a jump or conditional jump taking a
+  // Returns true if the bytecode is a jump or conditional jump taking a
   // constant pool entry (OperandType::kIdx16).
   static bool IsJumpConstantWide(Bytecode bytecode);
 
-  // Return true if the bytecode is a jump or conditional jump taking
+  // Returns true if the bytecode is a jump or conditional jump taking
   // any kind of operand.
   static bool IsJump(Bytecode bytecode);
 
-  // Return true if the bytecode is a conditional jump, a jump, or a return.
+  // Returns true if the bytecode is a conditional jump, a jump, or a return.
   static bool IsJumpOrReturn(Bytecode bytecode);
 
+  // Returns true if the bytecode is a call or a constructor call.
+  static bool IsCallOrNew(Bytecode bytecode);
+
+  // Returns true if the bytecode is a debug break.
+  static bool IsDebugBreak(Bytecode bytecode);
+
+  // Returns true if |operand_type| is a register index operand (kIdx8/kIdx16).
+  static bool IsIndexOperandType(OperandType operand_type);
+
+  // Returns true if |operand_type| represents an immediate.
+  static bool IsImmediateOperandType(OperandType operand_type);
+
+  // Returns true if |operand_type| is a register count operand
+  // (kRegCount8/kRegCount16).
+  static bool IsRegisterCountOperandType(OperandType operand_type);
+
+  // Returns true if |operand_type| is any type of register operand.
+  static bool IsRegisterOperandType(OperandType operand_type);
+
+  // Returns true if |operand_type| represents a register used as an input.
+  static bool IsRegisterInputOperandType(OperandType operand_type);
+
+  // Returns true if |operand_type| represents a register used as an output.
+  static bool IsRegisterOutputOperandType(OperandType operand_type);
+
+  // Returns true if |operand_type| is a maybe register operand
+  // (kMaybeReg8/kMaybeReg16).
+  static bool IsMaybeRegisterOperandType(OperandType operand_type);
+
   // Decode a single bytecode and operands to |os|.
   static std::ostream& Decode(std::ostream& os, const uint8_t* bytecode_start,
                               int number_of_parameters);
diff --git a/src/interpreter/constant-array-builder.cc b/src/interpreter/constant-array-builder.cc
index 2586e1f..e8b1281 100644
--- a/src/interpreter/constant-array-builder.cc
+++ b/src/interpreter/constant-array-builder.cc
@@ -85,19 +85,19 @@
   }
 }
 
-
-Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Factory* factory) const {
-  Handle<FixedArray> fixed_array =
-      factory->NewFixedArray(static_cast<int>(size()), PretenureFlag::TENURED);
+Handle<FixedArray> ConstantArrayBuilder::ToFixedArray() {
+  Handle<FixedArray> fixed_array = isolate_->factory()->NewFixedArray(
+      static_cast<int>(size()), PretenureFlag::TENURED);
   for (int i = 0; i < fixed_array->length(); i++) {
     fixed_array->set(i, *At(static_cast<size_t>(i)));
   }
+  constants_map()->Clear();
   return fixed_array;
 }
 
 
 size_t ConstantArrayBuilder::Insert(Handle<Object> object) {
-  index_t* entry = constants_map_.Find(object);
+  index_t* entry = constants_map()->Find(object);
   return (entry == nullptr) ? AllocateEntry(object) : *entry;
 }
 
@@ -106,7 +106,7 @@
     Handle<Object> object) {
   DCHECK(!object->IsOddball());
   size_t index;
-  index_t* entry = constants_map_.Get(object);
+  index_t* entry = constants_map()->Get(object);
   if (idx8_slice_.available() > 0) {
     index = idx8_slice_.Allocate(object);
   } else {
@@ -136,7 +136,7 @@
                                                  Handle<Object> object) {
   DiscardReservedEntry(operand_size);
   size_t index;
-  index_t* entry = constants_map_.Find(object);
+  index_t* entry = constants_map()->Find(object);
   if (nullptr == entry) {
     index = AllocateEntry(object);
   } else {
diff --git a/src/interpreter/constant-array-builder.h b/src/interpreter/constant-array-builder.h
index c882b1d..d7e41e3 100644
--- a/src/interpreter/constant-array-builder.h
+++ b/src/interpreter/constant-array-builder.h
@@ -12,13 +12,15 @@
 namespace v8 {
 namespace internal {
 
-class Factory;
 class Isolate;
 
 namespace interpreter {
 
-// A helper class for constructing constant arrays for the interpreter.
-class ConstantArrayBuilder final : public ZoneObject {
+// A helper class for constructing constant arrays for the
+// interpreter. Each instance of this class is intended to be used to
+// generate exactly one FixedArray of constants via the ToFixedArray
+// method.
+class ConstantArrayBuilder final BASE_EMBEDDED {
  public:
   // Capacity of the 8-bit operand slice.
   static const size_t kLowCapacity = 1u << kBitsPerByte;
@@ -32,7 +34,7 @@
   ConstantArrayBuilder(Isolate* isolate, Zone* zone);
 
   // Generate a fixed array of constants based on inserted objects.
-  Handle<FixedArray> ToFixedArray(Factory* factory) const;
+  Handle<FixedArray> ToFixedArray();
 
   // Returns the object in the constant pool array that at index
   // |index|.
@@ -84,6 +86,8 @@
     DISALLOW_COPY_AND_ASSIGN(ConstantArraySlice);
   };
 
+  IdentityMap<index_t>* constants_map() { return &constants_map_; }
+
   Isolate* isolate_;
   ConstantArraySlice idx8_slice_;
   ConstantArraySlice idx16_slice_;
diff --git a/src/interpreter/control-flow-builders.cc b/src/interpreter/control-flow-builders.cc
index 99066e8..6510aa4 100644
--- a/src/interpreter/control-flow-builders.cc
+++ b/src/interpreter/control-flow-builders.cc
@@ -137,6 +137,57 @@
   builder()->Bind(&site);
 }
 
+
+void TryCatchBuilder::BeginTry(Register context) {
+  builder()->MarkTryBegin(handler_id_, context);
+}
+
+
+void TryCatchBuilder::EndTry() {
+  builder()->MarkTryEnd(handler_id_);
+  builder()->Jump(&exit_);
+  builder()->Bind(&handler_);
+  builder()->MarkHandler(handler_id_, true);
+}
+
+
+void TryCatchBuilder::EndCatch() { builder()->Bind(&exit_); }
+
+
+void TryFinallyBuilder::BeginTry(Register context) {
+  builder()->MarkTryBegin(handler_id_, context);
+}
+
+
+void TryFinallyBuilder::LeaveTry() {
+  finalization_sites_.push_back(BytecodeLabel());
+  builder()->Jump(&finalization_sites_.back());
+}
+
+
+void TryFinallyBuilder::EndTry() {
+  builder()->MarkTryEnd(handler_id_);
+}
+
+
+void TryFinallyBuilder::BeginHandler() {
+  builder()->Bind(&handler_);
+  builder()->MarkHandler(handler_id_, will_catch_);
+}
+
+
+void TryFinallyBuilder::BeginFinally() {
+  for (size_t i = 0; i < finalization_sites_.size(); i++) {
+    BytecodeLabel& site = finalization_sites_.at(i);
+    builder()->Bind(&site);
+  }
+}
+
+
+void TryFinallyBuilder::EndFinally() {
+  // Nothing to be done here.
+}
+
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
diff --git a/src/interpreter/control-flow-builders.h b/src/interpreter/control-flow-builders.h
index 24a7dfe..e4d376b 100644
--- a/src/interpreter/control-flow-builders.h
+++ b/src/interpreter/control-flow-builders.h
@@ -144,6 +144,53 @@
   ZoneVector<BytecodeLabel> case_sites_;
 };
 
+
+// A class to help with co-ordinating control flow in try-catch statements.
+class TryCatchBuilder final : public ControlFlowBuilder {
+ public:
+  explicit TryCatchBuilder(BytecodeArrayBuilder* builder)
+      : ControlFlowBuilder(builder), handler_id_(builder->NewHandlerEntry()) {}
+
+  void BeginTry(Register context);
+  void EndTry();
+  void EndCatch();
+
+ private:
+  int handler_id_;
+  BytecodeLabel handler_;
+  BytecodeLabel exit_;
+};
+
+
+// A class to help with co-ordinating control flow in try-finally statements.
+class TryFinallyBuilder final : public ControlFlowBuilder {
+ public:
+  explicit TryFinallyBuilder(BytecodeArrayBuilder* builder, bool will_catch)
+      : ControlFlowBuilder(builder),
+        handler_id_(builder->NewHandlerEntry()),
+        finalization_sites_(builder->zone()),
+        will_catch_(will_catch) {}
+
+  void BeginTry(Register context);
+  void LeaveTry();
+  void EndTry();
+  void BeginHandler();
+  void BeginFinally();
+  void EndFinally();
+
+ private:
+  int handler_id_;
+  BytecodeLabel handler_;
+
+  // Unbound labels that identify jumps to the finally block in the code.
+  ZoneVector<BytecodeLabel> finalization_sites_;
+
+  // Conservative prediction of whether exceptions thrown into the handler for
+  // this finally block will be caught. Note that such a prediction depends on
+  // whether this try-finally is nested inside a surrounding try-catch.
+  bool will_catch_;
+};
+
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
diff --git a/src/interpreter/handler-table-builder.cc b/src/interpreter/handler-table-builder.cc
new file mode 100644
index 0000000..374089b
--- /dev/null
+++ b/src/interpreter/handler-table-builder.cc
@@ -0,0 +1,73 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/handler-table-builder.h"
+
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+HandlerTableBuilder::HandlerTableBuilder(Isolate* isolate, Zone* zone)
+    : isolate_(isolate), entries_(zone) {}
+
+Handle<HandlerTable> HandlerTableBuilder::ToHandlerTable() {
+  int handler_table_size = static_cast<int>(entries_.size());
+  Handle<HandlerTable> table =
+      Handle<HandlerTable>::cast(isolate_->factory()->NewFixedArray(
+          HandlerTable::LengthForRange(handler_table_size), TENURED));
+  for (int i = 0; i < handler_table_size; ++i) {
+    Entry& entry = entries_[i];
+    HandlerTable::CatchPrediction pred =
+        entry.will_catch ? HandlerTable::CAUGHT : HandlerTable::UNCAUGHT;
+    table->SetRangeStart(i, static_cast<int>(entry.offset_start));
+    table->SetRangeEnd(i, static_cast<int>(entry.offset_end));
+    table->SetRangeHandler(i, static_cast<int>(entry.offset_target), pred);
+    table->SetRangeData(i, entry.context.index());
+  }
+  return table;
+}
+
+
+int HandlerTableBuilder::NewHandlerEntry() {
+  int handler_id = static_cast<int>(entries_.size());
+  Entry entry = {0, 0, 0, Register(), false};
+  entries_.push_back(entry);
+  return handler_id;
+}
+
+
+void HandlerTableBuilder::SetTryRegionStart(int handler_id, size_t offset) {
+  DCHECK(Smi::IsValid(offset));  // Encoding of handler table requires this.
+  entries_[handler_id].offset_start = offset;
+}
+
+
+void HandlerTableBuilder::SetTryRegionEnd(int handler_id, size_t offset) {
+  DCHECK(Smi::IsValid(offset));  // Encoding of handler table requires this.
+  entries_[handler_id].offset_end = offset;
+}
+
+
+void HandlerTableBuilder::SetHandlerTarget(int handler_id, size_t offset) {
+  DCHECK(Smi::IsValid(offset));  // Encoding of handler table requires this.
+  entries_[handler_id].offset_target = offset;
+}
+
+
+void HandlerTableBuilder::SetPrediction(int handler_id, bool will_catch) {
+  entries_[handler_id].will_catch = will_catch;
+}
+
+
+void HandlerTableBuilder::SetContextRegister(int handler_id, Register reg) {
+  entries_[handler_id].context = reg;
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/handler-table-builder.h b/src/interpreter/handler-table-builder.h
new file mode 100644
index 0000000..7356e37
--- /dev/null
+++ b/src/interpreter/handler-table-builder.h
@@ -0,0 +1,61 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_HANDLER_TABLE_BUILDER_H_
+#define V8_INTERPRETER_HANDLER_TABLE_BUILDER_H_
+
+#include "src/handles.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class HandlerTable;
+class Isolate;
+
+namespace interpreter {
+
+// A helper class for constructing exception handler tables for the interpreter.
+class HandlerTableBuilder final BASE_EMBEDDED {
+ public:
+  HandlerTableBuilder(Isolate* isolate, Zone* zone);
+
+  // Builds the actual handler table by copying the current values into a heap
+  // object. Any further mutations to the builder won't be reflected.
+  Handle<HandlerTable> ToHandlerTable();
+
+  // Creates a new handler table entry and returns a {hander_id} identifying the
+  // entry, so that it can be referenced by below setter functions.
+  int NewHandlerEntry();
+
+  // Setter functions that modify certain values within the handler table entry
+  // being referenced by the given {handler_id}. All values will be encoded by
+  // the resulting {HandlerTable} class when copied into the heap.
+  void SetTryRegionStart(int handler_id, size_t offset);
+  void SetTryRegionEnd(int handler_id, size_t offset);
+  void SetHandlerTarget(int handler_id, size_t offset);
+  void SetPrediction(int handler_id, bool will_catch);
+  void SetContextRegister(int handler_id, Register reg);
+
+ private:
+  struct Entry {
+    size_t offset_start;   // Bytecode offset starting try-region.
+    size_t offset_end;     // Bytecode offset ending try-region.
+    size_t offset_target;  // Bytecode offset of handler target.
+    Register context;      // Register holding context for handler.
+    bool will_catch;       // Optimistic prediction for handler.
+  };
+
+  Isolate* isolate_;
+  ZoneVector<Entry> entries_;
+
+  DISALLOW_COPY_AND_ASSIGN(HandlerTableBuilder);
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_HANDLER_TABLE_BUILDER_H_
diff --git a/src/interpreter/interpreter-assembler.cc b/src/interpreter/interpreter-assembler.cc
new file mode 100644
index 0000000..440e879
--- /dev/null
+++ b/src/interpreter/interpreter-assembler.cc
@@ -0,0 +1,546 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/interpreter-assembler.h"
+
+#include <ostream>
+
+#include "src/code-factory.h"
+#include "src/frames.h"
+#include "src/interface-descriptors.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/interpreter.h"
+#include "src/machine-type.h"
+#include "src/macro-assembler.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+using compiler::Node;
+
+InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
+                                           Bytecode bytecode)
+    : compiler::CodeStubAssembler(
+          isolate, zone, InterpreterDispatchDescriptor(isolate),
+          Code::ComputeFlags(Code::STUB), Bytecodes::ToString(bytecode), 0),
+      bytecode_(bytecode),
+      accumulator_(this, MachineRepresentation::kTagged),
+      context_(this, MachineRepresentation::kTagged),
+      bytecode_array_(this, MachineRepresentation::kTagged),
+      disable_stack_check_across_call_(false),
+      stack_pointer_before_call_(nullptr) {
+  accumulator_.Bind(
+      Parameter(InterpreterDispatchDescriptor::kAccumulatorParameter));
+  context_.Bind(Parameter(InterpreterDispatchDescriptor::kContextParameter));
+  bytecode_array_.Bind(
+      Parameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter));
+  if (FLAG_trace_ignition) {
+    TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
+  }
+}
+
+InterpreterAssembler::~InterpreterAssembler() {}
+
+Node* InterpreterAssembler::GetAccumulator() { return accumulator_.value(); }
+
+void InterpreterAssembler::SetAccumulator(Node* value) {
+  accumulator_.Bind(value);
+}
+
+Node* InterpreterAssembler::GetContext() { return context_.value(); }
+
+void InterpreterAssembler::SetContext(Node* value) {
+  StoreRegister(value, Register::current_context());
+  context_.Bind(value);
+}
+
+Node* InterpreterAssembler::BytecodeOffset() {
+  return Parameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter);
+}
+
+Node* InterpreterAssembler::RegisterFileRawPointer() {
+  return Parameter(InterpreterDispatchDescriptor::kRegisterFileParameter);
+}
+
+Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
+  return bytecode_array_.value();
+}
+
+Node* InterpreterAssembler::DispatchTableRawPointer() {
+  return Parameter(InterpreterDispatchDescriptor::kDispatchTableParameter);
+}
+
+Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
+  return IntPtrAdd(RegisterFileRawPointer(), RegisterFrameOffset(reg_index));
+}
+
+Node* InterpreterAssembler::LoadRegister(int offset) {
+  return Load(MachineType::AnyTagged(), RegisterFileRawPointer(),
+              Int32Constant(offset));
+}
+
+Node* InterpreterAssembler::LoadRegister(Register reg) {
+  return LoadRegister(reg.ToOperand() << kPointerSizeLog2);
+}
+
+Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
+  return WordShl(index, kPointerSizeLog2);
+}
+
+Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
+  return Load(MachineType::AnyTagged(), RegisterFileRawPointer(),
+              RegisterFrameOffset(reg_index));
+}
+
+Node* InterpreterAssembler::StoreRegister(Node* value, int offset) {
+  return StoreNoWriteBarrier(MachineRepresentation::kTagged,
+                             RegisterFileRawPointer(), Int32Constant(offset),
+                             value);
+}
+
+Node* InterpreterAssembler::StoreRegister(Node* value, Register reg) {
+  return StoreRegister(value, reg.ToOperand() << kPointerSizeLog2);
+}
+
+Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
+  return StoreNoWriteBarrier(MachineRepresentation::kTagged,
+                             RegisterFileRawPointer(),
+                             RegisterFrameOffset(reg_index), value);
+}
+
+Node* InterpreterAssembler::NextRegister(Node* reg_index) {
+  // Register indexes are negative, so the next index is minus one.
+  return IntPtrAdd(reg_index, Int32Constant(-1));
+}
+
+Node* InterpreterAssembler::BytecodeOperand(int operand_index) {
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+  DCHECK_EQ(OperandSize::kByte,
+            Bytecodes::GetOperandSize(bytecode_, operand_index));
+  return Load(
+      MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+      IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
+                                      bytecode_, operand_index))));
+}
+
+Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+  DCHECK_EQ(OperandSize::kByte,
+            Bytecodes::GetOperandSize(bytecode_, operand_index));
+  Node* load = Load(
+      MachineType::Int8(), BytecodeArrayTaggedPointer(),
+      IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
+                                      bytecode_, operand_index))));
+  // Ensure that we sign extend to full pointer size
+  if (kPointerSize == 8) {
+    load = ChangeInt32ToInt64(load);
+  }
+  return load;
+}
+
+Node* InterpreterAssembler::BytecodeOperandShort(int operand_index) {
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+  DCHECK_EQ(OperandSize::kShort,
+            Bytecodes::GetOperandSize(bytecode_, operand_index));
+  if (TargetSupportsUnalignedAccess()) {
+    return Load(
+        MachineType::Uint16(), BytecodeArrayTaggedPointer(),
+        IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
+                                        bytecode_, operand_index))));
+  } else {
+    int offset = Bytecodes::GetOperandOffset(bytecode_, operand_index);
+    Node* first_byte = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+                            IntPtrAdd(BytecodeOffset(), Int32Constant(offset)));
+    Node* second_byte =
+        Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+             IntPtrAdd(BytecodeOffset(), Int32Constant(offset + 1)));
+#if V8_TARGET_LITTLE_ENDIAN
+    return WordOr(WordShl(second_byte, kBitsPerByte), first_byte);
+#elif V8_TARGET_BIG_ENDIAN
+    return WordOr(WordShl(first_byte, kBitsPerByte), second_byte);
+#else
+#error "Unknown Architecture"
+#endif
+  }
+}
+
+Node* InterpreterAssembler::BytecodeOperandShortSignExtended(
+    int operand_index) {
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+  DCHECK_EQ(OperandSize::kShort,
+            Bytecodes::GetOperandSize(bytecode_, operand_index));
+  int operand_offset = Bytecodes::GetOperandOffset(bytecode_, operand_index);
+  Node* load;
+  if (TargetSupportsUnalignedAccess()) {
+    load = Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
+                IntPtrAdd(BytecodeOffset(), Int32Constant(operand_offset)));
+  } else {
+#if V8_TARGET_LITTLE_ENDIAN
+    Node* hi_byte_offset = Int32Constant(operand_offset + 1);
+    Node* lo_byte_offset = Int32Constant(operand_offset);
+#elif V8_TARGET_BIG_ENDIAN
+    Node* hi_byte_offset = Int32Constant(operand_offset);
+    Node* lo_byte_offset = Int32Constant(operand_offset + 1);
+#else
+#error "Unknown Architecture"
+#endif
+    Node* hi_byte = Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
+                         IntPtrAdd(BytecodeOffset(), hi_byte_offset));
+    Node* lo_byte = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+                         IntPtrAdd(BytecodeOffset(), lo_byte_offset));
+    hi_byte = Word32Shl(hi_byte, Int32Constant(kBitsPerByte));
+    load = Word32Or(hi_byte, lo_byte);
+  }
+
+  // Ensure that we sign extend to full pointer size
+  if (kPointerSize == 8) {
+    load = ChangeInt32ToInt64(load);
+  }
+  return load;
+}
+
+Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
+  switch (Bytecodes::GetOperandSize(bytecode_, operand_index)) {
+    case OperandSize::kByte:
+      DCHECK_EQ(OperandType::kRegCount8,
+                Bytecodes::GetOperandType(bytecode_, operand_index));
+      return BytecodeOperand(operand_index);
+    case OperandSize::kShort:
+      DCHECK_EQ(OperandType::kRegCount16,
+                Bytecodes::GetOperandType(bytecode_, operand_index));
+      return BytecodeOperandShort(operand_index);
+    case OperandSize::kNone:
+      UNREACHABLE();
+  }
+  return nullptr;
+}
+
+Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
+  DCHECK_EQ(OperandType::kImm8,
+            Bytecodes::GetOperandType(bytecode_, operand_index));
+  return BytecodeOperandSignExtended(operand_index);
+}
+
+Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
+  switch (Bytecodes::GetOperandSize(bytecode_, operand_index)) {
+    case OperandSize::kByte:
+      DCHECK_EQ(OperandType::kIdx8,
+                Bytecodes::GetOperandType(bytecode_, operand_index));
+      return BytecodeOperand(operand_index);
+    case OperandSize::kShort:
+      DCHECK_EQ(OperandType::kIdx16,
+                Bytecodes::GetOperandType(bytecode_, operand_index));
+      return BytecodeOperandShort(operand_index);
+    case OperandSize::kNone:
+      UNREACHABLE();
+  }
+  return nullptr;
+}
+
+Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
+  OperandType operand_type =
+      Bytecodes::GetOperandType(bytecode_, operand_index);
+  if (Bytecodes::IsRegisterOperandType(operand_type)) {
+    OperandSize operand_size = Bytecodes::SizeOfOperand(operand_type);
+    if (operand_size == OperandSize::kByte) {
+      return BytecodeOperandSignExtended(operand_index);
+    } else if (operand_size == OperandSize::kShort) {
+      return BytecodeOperandShortSignExtended(operand_index);
+    }
+  }
+  UNREACHABLE();
+  return nullptr;
+}
+
+Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
+  Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
+                                        BytecodeArray::kConstantPoolOffset);
+  Node* entry_offset =
+      IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
+                WordShl(index, kPointerSizeLog2));
+  return Load(MachineType::AnyTagged(), constant_pool, entry_offset);
+}
+
+Node* InterpreterAssembler::LoadFixedArrayElement(Node* fixed_array,
+                                                  int index) {
+  Node* entry_offset =
+      IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
+                WordShl(Int32Constant(index), kPointerSizeLog2));
+  return Load(MachineType::AnyTagged(), fixed_array, entry_offset);
+}
+
+Node* InterpreterAssembler::LoadObjectField(Node* object, int offset) {
+  return Load(MachineType::AnyTagged(), object,
+              IntPtrConstant(offset - kHeapObjectTag));
+}
+
+Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
+  return Load(MachineType::AnyTagged(), context,
+              IntPtrConstant(Context::SlotOffset(slot_index)));
+}
+
+Node* InterpreterAssembler::LoadContextSlot(Node* context, Node* slot_index) {
+  Node* offset =
+      IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
+                Int32Constant(Context::kHeaderSize - kHeapObjectTag));
+  return Load(MachineType::AnyTagged(), context, offset);
+}
+
+Node* InterpreterAssembler::StoreContextSlot(Node* context, Node* slot_index,
+                                             Node* value) {
+  Node* offset =
+      IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
+                Int32Constant(Context::kHeaderSize - kHeapObjectTag));
+  return Store(MachineRepresentation::kTagged, context, offset, value);
+}
+
+Node* InterpreterAssembler::LoadTypeFeedbackVector() {
+  Node* function = Load(
+      MachineType::AnyTagged(), RegisterFileRawPointer(),
+      IntPtrConstant(InterpreterFrameConstants::kFunctionFromRegisterPointer));
+  Node* shared_info =
+      LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
+  Node* vector =
+      LoadObjectField(shared_info, SharedFunctionInfo::kFeedbackVectorOffset);
+  return vector;
+}
+
+void InterpreterAssembler::CallPrologue() {
+  StoreRegister(SmiTag(BytecodeOffset()),
+                InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer);
+  StoreRegister(BytecodeArrayTaggedPointer(),
+                InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer);
+
+  if (FLAG_debug_code && !disable_stack_check_across_call_) {
+    DCHECK(stack_pointer_before_call_ == nullptr);
+    stack_pointer_before_call_ = LoadStackPointer();
+  }
+}
+
+void InterpreterAssembler::CallEpilogue() {
+  if (FLAG_debug_code && !disable_stack_check_across_call_) {
+    Node* stack_pointer_after_call = LoadStackPointer();
+    Node* stack_pointer_before_call = stack_pointer_before_call_;
+    stack_pointer_before_call_ = nullptr;
+    AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
+                        kUnexpectedStackPointer);
+  }
+
+  // Restore bytecode array from stack frame in case the debugger has swapped us
+  // to the patched debugger bytecode array.
+  bytecode_array_.Bind(LoadRegister(
+      InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+}
+
+Node* InterpreterAssembler::CallJS(Node* function, Node* context,
+                                   Node* first_arg, Node* arg_count,
+                                   TailCallMode tail_call_mode) {
+  Callable callable =
+      CodeFactory::InterpreterPushArgsAndCall(isolate(), tail_call_mode);
+  Node* code_target = HeapConstant(callable.code());
+  return CallStub(callable.descriptor(), code_target, context, arg_count,
+                  first_arg, function);
+}
+
+Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
+                                          Node* new_target, Node* first_arg,
+                                          Node* arg_count) {
+  Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(isolate());
+  Node* code_target = HeapConstant(callable.code());
+  return CallStub(callable.descriptor(), code_target, context, arg_count,
+                  new_target, constructor, first_arg);
+}
+
+Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
+                                         Node* first_arg, Node* arg_count,
+                                         int result_size) {
+  Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
+  Node* code_target = HeapConstant(callable.code());
+
+  // Get the function entry from the function id.
+  Node* function_table = ExternalConstant(
+      ExternalReference::runtime_function_table_address(isolate()));
+  Node* function_offset =
+      Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
+  Node* function = IntPtrAdd(function_table, function_offset);
+  Node* function_entry =
+      Load(MachineType::Pointer(), function,
+           Int32Constant(offsetof(Runtime::Function, entry)));
+
+  return CallStub(callable.descriptor(), code_target, context, arg_count,
+                  first_arg, function_entry, result_size);
+}
+
+void InterpreterAssembler::UpdateInterruptBudget(Node* weight) {
+  CodeStubAssembler::Label ok(this);
+  CodeStubAssembler::Label interrupt_check(this);
+  CodeStubAssembler::Label end(this);
+  Node* budget_offset =
+      IntPtrConstant(BytecodeArray::kInterruptBudgetOffset - kHeapObjectTag);
+
+  // Update budget by |weight| and check if it reaches zero.
+  Node* old_budget =
+      Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), budget_offset);
+  Node* new_budget = Int32Add(old_budget, weight);
+  Node* condition = Int32GreaterThanOrEqual(new_budget, Int32Constant(0));
+  Branch(condition, &ok, &interrupt_check);
+
+  // Perform interrupt and reset budget.
+  Bind(&interrupt_check);
+  CallRuntime(Runtime::kInterrupt, GetContext());
+  StoreNoWriteBarrier(MachineRepresentation::kWord32,
+                      BytecodeArrayTaggedPointer(), budget_offset,
+                      Int32Constant(Interpreter::InterruptBudget()));
+  Goto(&end);
+
+  // Update budget.
+  Bind(&ok);
+  StoreNoWriteBarrier(MachineRepresentation::kWord32,
+                      BytecodeArrayTaggedPointer(), budget_offset, new_budget);
+  Goto(&end);
+  Bind(&end);
+}
+
+Node* InterpreterAssembler::Advance(int delta) {
+  return IntPtrAdd(BytecodeOffset(), Int32Constant(delta));
+}
+
+Node* InterpreterAssembler::Advance(Node* delta) {
+  return IntPtrAdd(BytecodeOffset(), delta);
+}
+
+void InterpreterAssembler::Jump(Node* delta) {
+  UpdateInterruptBudget(delta);
+  DispatchTo(Advance(delta));
+}
+
+void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
+  CodeStubAssembler::Label match(this);
+  CodeStubAssembler::Label no_match(this);
+
+  Branch(condition, &match, &no_match);
+  Bind(&match);
+  Jump(delta);
+  Bind(&no_match);
+  Dispatch();
+}
+
+void InterpreterAssembler::JumpIfWordEqual(Node* lhs, Node* rhs, Node* delta) {
+  JumpConditional(WordEqual(lhs, rhs), delta);
+}
+
+void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs,
+                                              Node* delta) {
+  JumpConditional(WordNotEqual(lhs, rhs), delta);
+}
+
+void InterpreterAssembler::Dispatch() {
+  DispatchTo(Advance(Bytecodes::Size(bytecode_)));
+}
+
+void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
+  Node* target_bytecode = Load(
+      MachineType::Uint8(), BytecodeArrayTaggedPointer(), new_bytecode_offset);
+
+  // TODO(rmcilroy): Create a code target dispatch table to avoid conversion
+  // from code object on every dispatch.
+  Node* target_code_object =
+      Load(MachineType::Pointer(), DispatchTableRawPointer(),
+           Word32Shl(target_bytecode, Int32Constant(kPointerSizeLog2)));
+
+  DispatchToBytecodeHandler(target_code_object, new_bytecode_offset);
+}
+
+void InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
+                                                     Node* bytecode_offset) {
+  if (FLAG_trace_ignition) {
+    TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
+  }
+
+  InterpreterDispatchDescriptor descriptor(isolate());
+  Node* args[] = {GetAccumulator(),          RegisterFileRawPointer(),
+                  bytecode_offset,           BytecodeArrayTaggedPointer(),
+                  DispatchTableRawPointer(), GetContext()};
+  TailCall(descriptor, handler, args, 0);
+}
+
+void InterpreterAssembler::InterpreterReturn() {
+  // TODO(rmcilroy): Investigate whether it is worth supporting self
+  // optimization of primitive functions like FullCodegen.
+
+  // Update profiling count by -BytecodeOffset to simulate backedge to start of
+  // function.
+  Node* profiling_weight =
+      Int32Sub(Int32Constant(kHeapObjectTag + BytecodeArray::kHeaderSize),
+               BytecodeOffset());
+  UpdateInterruptBudget(profiling_weight);
+
+  Node* exit_trampoline_code_object =
+      HeapConstant(isolate()->builtins()->InterpreterExitTrampoline());
+  DispatchToBytecodeHandler(exit_trampoline_code_object);
+}
+
+void InterpreterAssembler::StackCheck() {
+  CodeStubAssembler::Label end(this);
+  CodeStubAssembler::Label ok(this);
+  CodeStubAssembler::Label stack_guard(this);
+
+  Node* sp = LoadStackPointer();
+  Node* stack_limit = Load(
+      MachineType::Pointer(),
+      ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
+  Node* condition = UintPtrGreaterThanOrEqual(sp, stack_limit);
+  Branch(condition, &ok, &stack_guard);
+  Bind(&stack_guard);
+  CallRuntime(Runtime::kStackGuard, GetContext());
+  Goto(&end);
+  Bind(&ok);
+  Goto(&end);
+  Bind(&end);
+}
+
+void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
+  disable_stack_check_across_call_ = true;
+  Node* abort_id = SmiTag(Int32Constant(bailout_reason));
+  Node* ret_value = CallRuntime(Runtime::kAbort, GetContext(), abort_id);
+  disable_stack_check_across_call_ = false;
+  // Unreached, but keeps turbofan happy.
+  Return(ret_value);
+}
+
+void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
+                                               BailoutReason bailout_reason) {
+  CodeStubAssembler::Label match(this);
+  CodeStubAssembler::Label no_match(this);
+
+  Node* condition = WordEqual(lhs, rhs);
+  Branch(condition, &match, &no_match);
+  Bind(&no_match);
+  Abort(bailout_reason);
+  Bind(&match);
+}
+
+void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
+  CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
+              SmiTag(BytecodeOffset()), GetAccumulator());
+}
+
+// static
+bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
+#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+  return false;
+#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
+  return CpuFeatures::IsSupported(UNALIGNED_ACCESSES);
+#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87
+  return true;
+#else
+#error "Unknown Architecture"
+#endif
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/interpreter-assembler.h b/src/interpreter/interpreter-assembler.h
new file mode 100644
index 0000000..9600dfb
--- /dev/null
+++ b/src/interpreter/interpreter-assembler.h
@@ -0,0 +1,205 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
+#define V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
+
+#include "src/allocation.h"
+#include "src/base/smart-pointers.h"
+#include "src/builtins.h"
+#include "src/compiler/code-stub-assembler.h"
+#include "src/frames.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class InterpreterAssembler : public compiler::CodeStubAssembler {
+ public:
+  InterpreterAssembler(Isolate* isolate, Zone* zone, Bytecode bytecode);
+  virtual ~InterpreterAssembler();
+
+  // Returns the count immediate for bytecode operand |operand_index| in the
+  // current bytecode.
+  compiler::Node* BytecodeOperandCount(int operand_index);
+  // Returns the index immediate for bytecode operand |operand_index| in the
+  // current bytecode.
+  compiler::Node* BytecodeOperandIdx(int operand_index);
+  // Returns the Imm8 immediate for bytecode operand |operand_index| in the
+  // current bytecode.
+  compiler::Node* BytecodeOperandImm(int operand_index);
+  // Returns the register index for bytecode operand |operand_index| in the
+  // current bytecode.
+  compiler::Node* BytecodeOperandReg(int operand_index);
+
+  // Accumulator.
+  compiler::Node* GetAccumulator();
+  void SetAccumulator(compiler::Node* value);
+
+  // Context.
+  compiler::Node* GetContext();
+  void SetContext(compiler::Node* value);
+
+  // Loads from and stores to the interpreter register file.
+  compiler::Node* LoadRegister(int offset);
+  compiler::Node* LoadRegister(Register reg);
+  compiler::Node* LoadRegister(compiler::Node* reg_index);
+  compiler::Node* StoreRegister(compiler::Node* value, int offset);
+  compiler::Node* StoreRegister(compiler::Node* value, Register reg);
+  compiler::Node* StoreRegister(compiler::Node* value,
+                                compiler::Node* reg_index);
+
+  // Returns the next consecutive register.
+  compiler::Node* NextRegister(compiler::Node* reg_index);
+
+  // Returns the location in memory of the register |reg_index| in the
+  // interpreter register file.
+  compiler::Node* RegisterLocation(compiler::Node* reg_index);
+
+  // Load constant at |index| in the constant pool.
+  compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
+
+  // Load an element from a fixed array on the heap.
+  compiler::Node* LoadFixedArrayElement(compiler::Node* fixed_array, int index);
+
+  // Load a field from an object on the heap.
+  compiler::Node* LoadObjectField(compiler::Node* object, int offset);
+
+  // Load |slot_index| from |context|.
+  compiler::Node* LoadContextSlot(compiler::Node* context, int slot_index);
+  compiler::Node* LoadContextSlot(compiler::Node* context,
+                                  compiler::Node* slot_index);
+  // Stores |value| into |slot_index| of |context|.
+  compiler::Node* StoreContextSlot(compiler::Node* context,
+                                   compiler::Node* slot_index,
+                                   compiler::Node* value);
+
+  // Load the TypeFeedbackVector for the current function.
+  compiler::Node* LoadTypeFeedbackVector();
+
+  // Call JSFunction or Callable |function| with |arg_count|
+  // arguments (not including receiver) and the first argument
+  // located at |first_arg|.
+  compiler::Node* CallJS(compiler::Node* function, compiler::Node* context,
+                         compiler::Node* first_arg, compiler::Node* arg_count,
+                         TailCallMode tail_call_mode);
+
+  // Call constructor |constructor| with |arg_count| arguments (not
+  // including receiver) and the first argument located at
+  // |first_arg|. The |new_target| is the same as the
+  // |constructor| for the new keyword, but differs for the super
+  // keyword.
+  compiler::Node* CallConstruct(compiler::Node* constructor,
+                                compiler::Node* context,
+                                compiler::Node* new_target,
+                                compiler::Node* first_arg,
+                                compiler::Node* arg_count);
+
+  // Call runtime function with |arg_count| arguments and the first argument
+  // located at |first_arg|.
+  compiler::Node* CallRuntimeN(compiler::Node* function_id,
+                               compiler::Node* context,
+                               compiler::Node* first_arg,
+                               compiler::Node* arg_count, int return_size = 1);
+
+  // Jump relative to the current bytecode by |jump_offset|.
+  void Jump(compiler::Node* jump_offset);
+
+  // Jump relative to the current bytecode by |jump_offset| if the
+  // |condition| is true. Helper function for JumpIfWordEqual and
+  // JumpIfWordNotEqual.
+  void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);
+
+  // Jump relative to the current bytecode by |jump_offset| if the
+  // word values |lhs| and |rhs| are equal.
+  void JumpIfWordEqual(compiler::Node* lhs, compiler::Node* rhs,
+                       compiler::Node* jump_offset);
+
+  // Jump relative to the current bytecode by |jump_offset| if the
+  // word values |lhs| and |rhs| are not equal.
+  void JumpIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
+                          compiler::Node* jump_offset);
+
+  // Perform a stack guard check.
+  void StackCheck();
+
+  // Returns from the function.
+  void InterpreterReturn();
+
+  // Dispatch to the bytecode.
+  void Dispatch();
+
+  // Dispatch to bytecode handler.
+  void DispatchToBytecodeHandler(compiler::Node* handler,
+                                 compiler::Node* bytecode_offset);
+  void DispatchToBytecodeHandler(compiler::Node* handler) {
+    DispatchToBytecodeHandler(handler, BytecodeOffset());
+  }
+
+  // Abort with the given bailout reason.
+  void Abort(BailoutReason bailout_reason);
+
+ protected:
+  static bool TargetSupportsUnalignedAccess();
+
+ private:
+  // Returns a raw pointer to start of the register file on the stack.
+  compiler::Node* RegisterFileRawPointer();
+  // Returns a tagged pointer to the current function's BytecodeArray object.
+  compiler::Node* BytecodeArrayTaggedPointer();
+  // Returns the offset from the BytecodeArrayPointer of the current bytecode.
+  compiler::Node* BytecodeOffset();
+  // Returns a raw pointer to first entry in the interpreter dispatch table.
+  compiler::Node* DispatchTableRawPointer();
+
+  // Saves and restores interpreter bytecode offset to the interpreter stack
+  // frame when performing a call.
+  void CallPrologue() override;
+  void CallEpilogue() override;
+
+  // Traces the current bytecode by calling |function_id|.
+  void TraceBytecode(Runtime::FunctionId function_id);
+
+  // Updates the bytecode array's interrupt budget by |weight| and calls
+  // Runtime::kInterrupt if counter reaches zero.
+  void UpdateInterruptBudget(compiler::Node* weight);
+
+  // Returns the offset of register |index| relative to RegisterFilePointer().
+  compiler::Node* RegisterFrameOffset(compiler::Node* index);
+
+  compiler::Node* BytecodeOperand(int operand_index);
+  compiler::Node* BytecodeOperandSignExtended(int operand_index);
+  compiler::Node* BytecodeOperandShort(int operand_index);
+  compiler::Node* BytecodeOperandShortSignExtended(int operand_index);
+
+  // Returns BytecodeOffset() advanced by delta bytecodes. Note: this does not
+  // update BytecodeOffset() itself.
+  compiler::Node* Advance(int delta);
+  compiler::Node* Advance(compiler::Node* delta);
+
+  // Starts next instruction dispatch at |new_bytecode_offset|.
+  void DispatchTo(compiler::Node* new_bytecode_offset);
+
+  // Abort operations for debug code.
+  void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
+                           BailoutReason bailout_reason);
+
+  Bytecode bytecode_;
+  CodeStubAssembler::Variable accumulator_;
+  CodeStubAssembler::Variable context_;
+  CodeStubAssembler::Variable bytecode_array_;
+
+  bool disable_stack_check_across_call_;
+  compiler::Node* stack_pointer_before_call_;
+
+  DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc
index 574602b..eb88342 100644
--- a/src/interpreter/interpreter.cc
+++ b/src/interpreter/interpreter.cc
@@ -4,12 +4,13 @@
 
 #include "src/interpreter/interpreter.h"
 
+#include "src/ast/prettyprinter.h"
 #include "src/code-factory.h"
 #include "src/compiler.h"
-#include "src/compiler/interpreter-assembler.h"
 #include "src/factory.h"
 #include "src/interpreter/bytecode-generator.h"
 #include "src/interpreter/bytecodes.h"
+#include "src/interpreter/interpreter-assembler.h"
 #include "src/zone.h"
 
 namespace v8 {
@@ -20,52 +21,77 @@
 
 #define __ assembler->
 
-
-Interpreter::Interpreter(Isolate* isolate)
-    : isolate_(isolate) {}
-
-
-// static
-Handle<FixedArray> Interpreter::CreateUninitializedInterpreterTable(
-    Isolate* isolate) {
-  Handle<FixedArray> handler_table = isolate->factory()->NewFixedArray(
-      static_cast<int>(Bytecode::kLast) + 1, TENURED);
-  // We rely on the interpreter handler table being immovable, so check that
-  // it was allocated on the first page (which is always immovable).
-  DCHECK(isolate->heap()->old_space()->FirstPage()->Contains(
-      handler_table->address()));
-  return handler_table;
+Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
+  memset(&dispatch_table_, 0, sizeof(dispatch_table_));
 }
 
-
 void Interpreter::Initialize() {
   DCHECK(FLAG_ignition);
-  Handle<FixedArray> handler_table = isolate_->factory()->interpreter_table();
-  if (!IsInterpreterTableInitialized(handler_table)) {
-    Zone zone;
-    HandleScope scope(isolate_);
+  if (IsDispatchTableInitialized()) return;
+  Zone zone;
+  HandleScope scope(isolate_);
 
-#define GENERATE_CODE(Name, ...)                                      \
-    {                                                                 \
-      compiler::InterpreterAssembler assembler(isolate_, &zone,       \
-                                               Bytecode::k##Name);    \
-      Do##Name(&assembler);                                           \
-      Handle<Code> code = assembler.GenerateCode();                   \
-      handler_table->set(static_cast<int>(Bytecode::k##Name), *code); \
-    }
-    BYTECODE_LIST(GENERATE_CODE)
-#undef GENERATE_CODE
+#define GENERATE_CODE(Name, ...)                                        \
+  {                                                                     \
+    InterpreterAssembler assembler(isolate_, &zone, Bytecode::k##Name); \
+    Do##Name(&assembler);                                               \
+    Handle<Code> code = assembler.GenerateCode();                       \
+    TraceCodegen(code, #Name);                                          \
+    dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] = *code;      \
   }
+  BYTECODE_LIST(GENERATE_CODE)
+#undef GENERATE_CODE
 }
 
+Code* Interpreter::GetBytecodeHandler(Bytecode bytecode) {
+  DCHECK(IsDispatchTableInitialized());
+  return dispatch_table_[Bytecodes::ToByte(bytecode)];
+}
+
+void Interpreter::IterateDispatchTable(ObjectVisitor* v) {
+  v->VisitPointers(
+      reinterpret_cast<Object**>(&dispatch_table_[0]),
+      reinterpret_cast<Object**>(&dispatch_table_[0] + kDispatchTableSize));
+}
+
+// static
+int Interpreter::InterruptBudget() {
+  // TODO(ignition): Tune code size multiplier.
+  const int kCodeSizeMultiplier = 32;
+  return FLAG_interrupt_budget * kCodeSizeMultiplier;
+}
 
 bool Interpreter::MakeBytecode(CompilationInfo* info) {
+  if (FLAG_print_bytecode || FLAG_print_source || FLAG_print_ast) {
+    OFStream os(stdout);
+    base::SmartArrayPointer<char> name = info->GetDebugName();
+    os << "[generating bytecode for function: " << name.get()
+       << "]" << std::endl
+       << std::flush;
+  }
+
+#ifdef DEBUG
+  if (info->parse_info() && FLAG_print_source) {
+    OFStream os(stdout);
+    os << "--- Source from AST ---" << std::endl
+       << PrettyPrinter(info->isolate()).PrintProgram(info->literal())
+       << std::endl
+       << std::flush;
+  }
+
+  if (info->parse_info() && FLAG_print_ast) {
+    OFStream os(stdout);
+    os << "--- AST ---" << std::endl
+       << AstPrinter(info->isolate()).PrintProgram(info->literal()) << std::endl
+       << std::flush;
+  }
+#endif  // DEBUG
+
   BytecodeGenerator generator(info->isolate(), info->zone());
   info->EnsureFeedbackVector();
   Handle<BytecodeArray> bytecodes = generator.MakeBytecode(info);
   if (FLAG_print_bytecode) {
     OFStream os(stdout);
-    os << "Function: " << info->GetDebugName().get() << std::endl;
     bytecodes->Print(os);
     os << std::flush;
   }
@@ -75,18 +101,28 @@
   return true;
 }
 
-
-bool Interpreter::IsInterpreterTableInitialized(
-    Handle<FixedArray> handler_table) {
-  DCHECK(handler_table->length() == static_cast<int>(Bytecode::kLast) + 1);
-  return handler_table->get(0) != isolate_->heap()->undefined_value();
+bool Interpreter::IsDispatchTableInitialized() {
+  if (FLAG_trace_ignition) {
+    // Regenerate table to add bytecode tracing operations.
+    return false;
+  }
+  return dispatch_table_[0] != nullptr;
 }
 
+void Interpreter::TraceCodegen(Handle<Code> code, const char* name) {
+#ifdef ENABLE_DISASSEMBLER
+  if (FLAG_trace_ignition_codegen) {
+    OFStream os(stdout);
+    code->Disassemble(name, os);
+    os << std::flush;
+  }
+#endif  // ENABLE_DISASSEMBLER
+}
 
 // LdaZero
 //
 // Load literal '0' into the accumulator.
-void Interpreter::DoLdaZero(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaZero(InterpreterAssembler* assembler) {
   Node* zero_value = __ NumberConstant(0.0);
   __ SetAccumulator(zero_value);
   __ Dispatch();
@@ -96,15 +132,14 @@
 // LdaSmi8 <imm8>
 //
 // Load an 8-bit integer literal into the accumulator as a Smi.
-void Interpreter::DoLdaSmi8(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaSmi8(InterpreterAssembler* assembler) {
   Node* raw_int = __ BytecodeOperandImm(0);
   Node* smi_int = __ SmiTag(raw_int);
   __ SetAccumulator(smi_int);
   __ Dispatch();
 }
 
-
-void Interpreter::DoLoadConstant(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLoadConstant(InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
   __ SetAccumulator(constant);
@@ -115,7 +150,7 @@
 // LdaConstant <idx>
 //
 // Load constant literal at |idx| in the constant pool into the accumulator.
-void Interpreter::DoLdaConstant(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaConstant(InterpreterAssembler* assembler) {
   DoLoadConstant(assembler);
 }
 
@@ -123,7 +158,7 @@
 // LdaConstantWide <idx>
 //
 // Load constant literal at |idx| in the constant pool into the accumulator.
-void Interpreter::DoLdaConstantWide(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaConstantWide(InterpreterAssembler* assembler) {
   DoLoadConstant(assembler);
 }
 
@@ -131,7 +166,7 @@
 // LdaUndefined
 //
 // Load Undefined into the accumulator.
-void Interpreter::DoLdaUndefined(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaUndefined(InterpreterAssembler* assembler) {
   Node* undefined_value =
       __ HeapConstant(isolate_->factory()->undefined_value());
   __ SetAccumulator(undefined_value);
@@ -142,7 +177,7 @@
 // LdaNull
 //
 // Load Null into the accumulator.
-void Interpreter::DoLdaNull(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaNull(InterpreterAssembler* assembler) {
   Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
   __ SetAccumulator(null_value);
   __ Dispatch();
@@ -152,7 +187,7 @@
 // LdaTheHole
 //
 // Load TheHole into the accumulator.
-void Interpreter::DoLdaTheHole(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaTheHole(InterpreterAssembler* assembler) {
   Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
   __ SetAccumulator(the_hole_value);
   __ Dispatch();
@@ -162,7 +197,7 @@
 // LdaTrue
 //
 // Load True into the accumulator.
-void Interpreter::DoLdaTrue(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaTrue(InterpreterAssembler* assembler) {
   Node* true_value = __ HeapConstant(isolate_->factory()->true_value());
   __ SetAccumulator(true_value);
   __ Dispatch();
@@ -172,7 +207,7 @@
 // LdaFalse
 //
 // Load False into the accumulator.
-void Interpreter::DoLdaFalse(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaFalse(InterpreterAssembler* assembler) {
   Node* false_value = __ HeapConstant(isolate_->factory()->false_value());
   __ SetAccumulator(false_value);
   __ Dispatch();
@@ -182,7 +217,7 @@
 // Ldar <src>
 //
 // Load accumulator with value from register <src>.
-void Interpreter::DoLdar(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdar(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* value = __ LoadRegister(reg_index);
   __ SetAccumulator(value);
@@ -193,7 +228,7 @@
 // Star <dst>
 //
 // Store accumulator to register <dst>.
-void Interpreter::DoStar(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStar(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* accumulator = __ GetAccumulator();
   __ StoreRegister(accumulator, reg_index);
@@ -201,32 +236,10 @@
 }
 
 
-// Exchange <reg8> <reg16>
-//
-// Exchange two registers.
-void Interpreter::DoExchange(compiler::InterpreterAssembler* assembler) {
-  Node* reg0_index = __ BytecodeOperandReg(0);
-  Node* reg1_index = __ BytecodeOperandReg(1);
-  Node* reg0_value = __ LoadRegister(reg0_index);
-  Node* reg1_value = __ LoadRegister(reg1_index);
-  __ StoreRegister(reg1_value, reg0_index);
-  __ StoreRegister(reg0_value, reg1_index);
-  __ Dispatch();
-}
-
-
-// ExchangeWide <reg16> <reg16>
-//
-// Exchange two registers.
-void Interpreter::DoExchangeWide(compiler::InterpreterAssembler* assembler) {
-  return DoExchange(assembler);
-}
-
-
 // Mov <src> <dst>
 //
 // Stores the value of register <src> to register <dst>.
-void Interpreter::DoMov(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoMov(InterpreterAssembler* assembler) {
   Node* src_index = __ BytecodeOperandReg(0);
   Node* src_value = __ LoadRegister(src_index);
   Node* dst_index = __ BytecodeOperandReg(1);
@@ -235,8 +248,14 @@
 }
 
 
-void Interpreter::DoLoadGlobal(Callable ic,
-                               compiler::InterpreterAssembler* assembler) {
+// MovWide <src> <dst>
+//
+// Stores the value of register <src> to register <dst>.
+void Interpreter::DoMovWide(InterpreterAssembler* assembler) {
+  DoMov(assembler);
+}
+
+void Interpreter::DoLoadGlobal(Callable ic, InterpreterAssembler* assembler) {
   // Get the global object.
   Node* context = __ GetContext();
   Node* native_context =
@@ -250,109 +269,54 @@
   Node* raw_slot = __ BytecodeOperandIdx(1);
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  Node* result = __ CallIC(ic.descriptor(), code_target, global, name, smi_slot,
-                           type_feedback_vector);
+  Node* result = __ CallStub(ic.descriptor(), code_target, context, global,
+                             name, smi_slot, type_feedback_vector);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
-
-// LdaGlobalSloppy <name_index> <slot>
+// LdaGlobal <name_index> <slot>
 //
 // Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoLdaGlobalSloppy(compiler::InterpreterAssembler* assembler) {
+// accumulator using FeedbackVector slot <slot> outside of a typeof.
+void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
   Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   SLOPPY, UNINITIALIZED);
+                                                   UNINITIALIZED);
   DoLoadGlobal(ic, assembler);
 }
 
-
-// LdaGlobalSloppy <name_index> <slot>
+// LdaGlobalInsideTypeof <name_index> <slot>
 //
 // Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoLdaGlobalStrict(compiler::InterpreterAssembler* assembler) {
+// accumulator using FeedbackVector slot <slot> inside of a typeof.
+void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
+                                                   UNINITIALIZED);
+  DoLoadGlobal(ic, assembler);
+}
+
+// LdaGlobalWide <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedbackVector slot <slot> outside of a typeof.
+void Interpreter::DoLdaGlobalWide(InterpreterAssembler* assembler) {
   Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   STRICT, UNINITIALIZED);
+                                                   UNINITIALIZED);
   DoLoadGlobal(ic, assembler);
 }
 
-
-// LdaGlobalInsideTypeofSloppy <name_index> <slot>
+// LdaGlobalInsideTypeofWide <name_index> <slot>
 //
 // Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoLdaGlobalInsideTypeofSloppy(
-    compiler::InterpreterAssembler* assembler) {
+// accumulator using FeedbackVector slot <slot> inside of a typeof.
+void Interpreter::DoLdaGlobalInsideTypeofWide(InterpreterAssembler* assembler) {
   Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
-                                                   SLOPPY, UNINITIALIZED);
+                                                   UNINITIALIZED);
   DoLoadGlobal(ic, assembler);
 }
 
 
-// LdaGlobalInsideTypeofStrict <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoLdaGlobalInsideTypeofStrict(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
-                                                   STRICT, UNINITIALIZED);
-  DoLoadGlobal(ic, assembler);
-}
-
-
-// LdaGlobalSloppyWide <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoLdaGlobalSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   SLOPPY, UNINITIALIZED);
-  DoLoadGlobal(ic, assembler);
-}
-
-
-// LdaGlobalSloppyWide <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoLdaGlobalStrictWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   STRICT, UNINITIALIZED);
-  DoLoadGlobal(ic, assembler);
-}
-
-
-// LdaGlobalInsideTypeofSloppyWide <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoLdaGlobalInsideTypeofSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
-                                                   SLOPPY, UNINITIALIZED);
-  DoLoadGlobal(ic, assembler);
-}
-
-
-// LdaGlobalInsideTypeofSloppyWide <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoLdaGlobalInsideTypeofStrictWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
-                                                   STRICT, UNINITIALIZED);
-  DoLoadGlobal(ic, assembler);
-}
-
-
-void Interpreter::DoStoreGlobal(Callable ic,
-                                compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreGlobal(Callable ic, InterpreterAssembler* assembler) {
   // Get the global object.
   Node* context = __ GetContext();
   Node* native_context =
@@ -367,8 +331,8 @@
   Node* raw_slot = __ BytecodeOperandIdx(1);
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  __ CallIC(ic.descriptor(), code_target, global, name, value, smi_slot,
-            type_feedback_vector);
+  __ CallStub(ic.descriptor(), code_target, context, global, name, value,
+              smi_slot, type_feedback_vector);
 
   __ Dispatch();
 }
@@ -378,7 +342,7 @@
 //
 // Store the value in the accumulator into the global with name in constant pool
 // entry <name_index> using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoStaGlobalSloppy(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaGlobalSloppy(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
   DoStoreGlobal(ic, assembler);
@@ -389,7 +353,7 @@
 //
 // Store the value in the accumulator into the global with name in constant pool
 // entry <name_index> using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoStaGlobalStrict(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaGlobalStrict(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
   DoStoreGlobal(ic, assembler);
@@ -400,8 +364,7 @@
 //
 // Store the value in the accumulator into the global with name in constant pool
 // entry <name_index> using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoStaGlobalSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaGlobalSloppyWide(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
   DoStoreGlobal(ic, assembler);
@@ -412,8 +375,7 @@
 //
 // Store the value in the accumulator into the global with name in constant pool
 // entry <name_index> using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoStaGlobalStrictWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaGlobalStrictWide(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
   DoStoreGlobal(ic, assembler);
@@ -423,7 +385,7 @@
 // LdaContextSlot <context> <slot_index>
 //
 // Load the object in |slot_index| of |context| into the accumulator.
-void Interpreter::DoLdaContextSlot(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* context = __ LoadRegister(reg_index);
   Node* slot_index = __ BytecodeOperandIdx(1);
@@ -436,8 +398,7 @@
 // LdaContextSlotWide <context> <slot_index>
 //
 // Load the object in |slot_index| of |context| into the accumulator.
-void Interpreter::DoLdaContextSlotWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaContextSlotWide(InterpreterAssembler* assembler) {
   DoLdaContextSlot(assembler);
 }
 
@@ -445,7 +406,7 @@
 // StaContextSlot <context> <slot_index>
 //
 // Stores the object in the accumulator into |slot_index| of |context|.
-void Interpreter::DoStaContextSlot(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaContextSlot(InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* context = __ LoadRegister(reg_index);
@@ -458,19 +419,16 @@
 // StaContextSlot <context> <slot_index>
 //
 // Stores the object in the accumulator into |slot_index| of |context|.
-void Interpreter::DoStaContextSlotWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaContextSlotWide(InterpreterAssembler* assembler) {
   DoStaContextSlot(assembler);
 }
 
-
 void Interpreter::DoLoadLookupSlot(Runtime::FunctionId function_id,
-                                   compiler::InterpreterAssembler* assembler) {
+                                   InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* name = __ LoadConstantPoolEntry(index);
   Node* context = __ GetContext();
-  Node* result_pair = __ CallRuntime(function_id, context, name);
-  Node* result = __ Projection(0, result_pair);
+  Node* result = __ CallRuntime(function_id, context, name);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -480,7 +438,7 @@
 //
 // Lookup the object with the name in constant pool entry |name_index|
 // dynamically.
-void Interpreter::DoLdaLookupSlot(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaLookupSlot(InterpreterAssembler* assembler) {
   DoLoadLookupSlot(Runtime::kLoadLookupSlot, assembler);
 }
 
@@ -489,9 +447,8 @@
 //
 // Lookup the object with the name in constant pool entry |name_index|
 // dynamically without causing a NoReferenceError.
-void Interpreter::DoLdaLookupSlotInsideTypeof(
-    compiler::InterpreterAssembler* assembler) {
-  DoLoadLookupSlot(Runtime::kLoadLookupSlotNoReferenceError, assembler);
+void Interpreter::DoLdaLookupSlotInsideTypeof(InterpreterAssembler* assembler) {
+  DoLoadLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
 }
 
 
@@ -499,8 +456,7 @@
 //
 // Lookup the object with the name in constant pool entry |name_index|
 // dynamically.
-void Interpreter::DoLdaLookupSlotWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaLookupSlotWide(InterpreterAssembler* assembler) {
   DoLdaLookupSlot(assembler);
 }
 
@@ -510,20 +466,20 @@
 // Lookup the object with the name in constant pool entry |name_index|
 // dynamically without causing a NoReferenceError.
 void Interpreter::DoLdaLookupSlotInsideTypeofWide(
-    compiler::InterpreterAssembler* assembler) {
+    InterpreterAssembler* assembler) {
   DoLdaLookupSlotInsideTypeof(assembler);
 }
 
-
 void Interpreter::DoStoreLookupSlot(LanguageMode language_mode,
-                                    compiler::InterpreterAssembler* assembler) {
+                                    InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
   Node* index = __ BytecodeOperandIdx(0);
   Node* name = __ LoadConstantPoolEntry(index);
   Node* context = __ GetContext();
-  Node* language_mode_node = __ NumberConstant(language_mode);
-  Node* result = __ CallRuntime(Runtime::kStoreLookupSlot, value, context, name,
-                                language_mode_node);
+  Node* result = __ CallRuntime(is_strict(language_mode)
+                                    ? Runtime::kStoreLookupSlot_Strict
+                                    : Runtime::kStoreLookupSlot_Sloppy,
+                                context, name, value);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -533,8 +489,7 @@
 //
 // Store the object in accumulator to the object with the name in constant
 // pool entry |name_index| in sloppy mode.
-void Interpreter::DoStaLookupSlotSloppy(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaLookupSlotSloppy(InterpreterAssembler* assembler) {
   DoStoreLookupSlot(LanguageMode::SLOPPY, assembler);
 }
 
@@ -543,8 +498,7 @@
 //
 // Store the object in accumulator to the object with the name in constant
 // pool entry |name_index| in strict mode.
-void Interpreter::DoStaLookupSlotStrict(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) {
   DoStoreLookupSlot(LanguageMode::STRICT, assembler);
 }
 
@@ -553,8 +507,7 @@
 //
 // Store the object in accumulator to the object with the name in constant
 // pool entry |name_index| in sloppy mode.
-void Interpreter::DoStaLookupSlotSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaLookupSlotSloppyWide(InterpreterAssembler* assembler) {
   DoStaLookupSlotSloppy(assembler);
 }
 
@@ -563,14 +516,11 @@
 //
 // Store the object in accumulator to the object with the name in constant
 // pool entry |name_index| in strict mode.
-void Interpreter::DoStaLookupSlotStrictWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaLookupSlotStrictWide(InterpreterAssembler* assembler) {
   DoStaLookupSlotStrict(assembler);
 }
 
-
-void Interpreter::DoLoadIC(Callable ic,
-                           compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLoadIC(Callable ic, InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* register_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(register_index);
@@ -579,61 +529,35 @@
   Node* raw_slot = __ BytecodeOperandIdx(2);
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  Node* result = __ CallIC(ic.descriptor(), code_target, object, name, smi_slot,
-                           type_feedback_vector);
+  Node* context = __ GetContext();
+  Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
+                             name, smi_slot, type_feedback_vector);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
-
-// LoadICSloppy <object> <name_index> <slot>
+// LoadIC <object> <name_index> <slot>
 //
-// Calls the sloppy mode LoadIC at FeedBackVector slot <slot> for <object> and
-// the name at constant pool entry <name_index>.
-void Interpreter::DoLoadICSloppy(compiler::InterpreterAssembler* assembler) {
+// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
+// constant pool entry <name_index>.
+void Interpreter::DoLoadIC(InterpreterAssembler* assembler) {
   Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   SLOPPY, UNINITIALIZED);
+                                                   UNINITIALIZED);
+  DoLoadIC(ic, assembler);
+}
+
+// LoadICWide <object> <name_index> <slot>
+//
+// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
+// constant pool entry <name_index>.
+void Interpreter::DoLoadICWide(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+                                                   UNINITIALIZED);
   DoLoadIC(ic, assembler);
 }
 
 
-// LoadICStrict <object> <name_index> <slot>
-//
-// Calls the sloppy mode LoadIC at FeedBackVector slot <slot> for <object> and
-// the name at constant pool entry <name_index>.
-void Interpreter::DoLoadICStrict(compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   STRICT, UNINITIALIZED);
-  DoLoadIC(ic, assembler);
-}
-
-
-// LoadICSloppyWide <object> <name_index> <slot>
-//
-// Calls the sloppy mode LoadIC at FeedBackVector slot <slot> for <object> and
-// the name at constant pool entry <name_index>.
-void Interpreter::DoLoadICSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   SLOPPY, UNINITIALIZED);
-  DoLoadIC(ic, assembler);
-}
-
-
-// LoadICStrictWide <object> <name_index> <slot>
-//
-// Calls the sloppy mode LoadIC at FeedBackVector slot <slot> for <object> and
-// the name at constant pool entry <name_index>.
-void Interpreter::DoLoadICStrictWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   STRICT, UNINITIALIZED);
-  DoLoadIC(ic, assembler);
-}
-
-
-void Interpreter::DoKeyedLoadIC(Callable ic,
-                                compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedLoadIC(Callable ic, InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(reg_index);
@@ -641,63 +565,35 @@
   Node* raw_slot = __ BytecodeOperandIdx(1);
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  Node* result = __ CallIC(ic.descriptor(), code_target, object, name, smi_slot,
-                           type_feedback_vector);
+  Node* context = __ GetContext();
+  Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
+                             name, smi_slot, type_feedback_vector);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
-
-// KeyedLoadICSloppy <object> <slot>
+// KeyedLoadIC <object> <slot>
 //
-// Calls the sloppy mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
-// and the key in the accumulator.
-void Interpreter::DoKeyedLoadICSloppy(
-    compiler::InterpreterAssembler* assembler) {
+// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
+// in the accumulator.
+void Interpreter::DoKeyedLoadIC(InterpreterAssembler* assembler) {
   Callable ic =
-      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, UNINITIALIZED);
+  DoKeyedLoadIC(ic, assembler);
+}
+
+// KeyedLoadICWide <object> <slot>
+//
+// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
+// in the accumulator.
+void Interpreter::DoKeyedLoadICWide(InterpreterAssembler* assembler) {
+  Callable ic =
+      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, UNINITIALIZED);
   DoKeyedLoadIC(ic, assembler);
 }
 
 
-// KeyedLoadICStrict <object> <slot>
-//
-// Calls the strict mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
-// and the key in the accumulator.
-void Interpreter::DoKeyedLoadICStrict(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
-  DoKeyedLoadIC(ic, assembler);
-}
-
-
-// KeyedLoadICSloppyWide <object> <slot>
-//
-// Calls the sloppy mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
-// and the key in the accumulator.
-void Interpreter::DoKeyedLoadICSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
-  DoKeyedLoadIC(ic, assembler);
-}
-
-
-// KeyedLoadICStrictWide <object> <slot>
-//
-// Calls the strict mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
-// and the key in the accumulator.
-void Interpreter::DoKeyedLoadICStrictWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
-  DoKeyedLoadIC(ic, assembler);
-}
-
-
-void Interpreter::DoStoreIC(Callable ic,
-                            compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* object_reg_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(object_reg_index);
@@ -707,8 +603,9 @@
   Node* raw_slot = __ BytecodeOperandIdx(2);
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  __ CallIC(ic.descriptor(), code_target, object, name, value, smi_slot,
-            type_feedback_vector);
+  Node* context = __ GetContext();
+  __ CallStub(ic.descriptor(), code_target, context, object, name, value,
+              smi_slot, type_feedback_vector);
   __ Dispatch();
 }
 
@@ -718,7 +615,7 @@
 // Calls the sloppy mode StoreIC at FeedBackVector slot <slot> for <object> and
 // the name in constant pool entry <name_index> with the value in the
 // accumulator.
-void Interpreter::DoStoreICSloppy(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreICSloppy(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
   DoStoreIC(ic, assembler);
@@ -730,7 +627,7 @@
 // Calls the strict mode StoreIC at FeedBackVector slot <slot> for <object> and
 // the name in constant pool entry <name_index> with the value in the
 // accumulator.
-void Interpreter::DoStoreICStrict(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreICStrict(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
   DoStoreIC(ic, assembler);
@@ -742,8 +639,7 @@
 // Calls the sloppy mode StoreIC at FeedBackVector slot <slot> for <object> and
 // the name in constant pool entry <name_index> with the value in the
 // accumulator.
-void Interpreter::DoStoreICSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreICSloppyWide(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
   DoStoreIC(ic, assembler);
@@ -755,16 +651,13 @@
 // Calls the strict mode StoreIC at FeedBackVector slot <slot> for <object> and
 // the name in constant pool entry <name_index> with the value in the
 // accumulator.
-void Interpreter::DoStoreICStrictWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreICStrictWide(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
   DoStoreIC(ic, assembler);
 }
 
-
-void Interpreter::DoKeyedStoreIC(Callable ic,
-                                 compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* object_reg_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(object_reg_index);
@@ -774,8 +667,9 @@
   Node* raw_slot = __ BytecodeOperandIdx(2);
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  __ CallIC(ic.descriptor(), code_target, object, name, value, smi_slot,
-            type_feedback_vector);
+  Node* context = __ GetContext();
+  __ CallStub(ic.descriptor(), code_target, context, object, name, value,
+              smi_slot, type_feedback_vector);
   __ Dispatch();
 }
 
@@ -784,8 +678,7 @@
 //
 // Calls the sloppy mode KeyStoreIC at FeedBackVector slot <slot> for <object>
 // and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICSloppy(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedStoreICSloppy(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
   DoKeyedStoreIC(ic, assembler);
@@ -796,8 +689,7 @@
 //
 // Calls the strict mode KeyStoreIC at FeedBackVector slot <slot> for <object>
 // and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICStrict(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedStoreICStrict(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
   DoKeyedStoreIC(ic, assembler);
@@ -808,8 +700,7 @@
 //
 // Calls the sloppy mode KeyStoreIC at FeedBackVector slot <slot> for <object>
 // and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedStoreICSloppyWide(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
   DoKeyedStoreIC(ic, assembler);
@@ -820,22 +711,22 @@
 //
 // Calls the strict mode KeyStoreIC at FeedBackVector slot <slot> for <object>
 // and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICStrictWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedStoreICStrictWide(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
   DoKeyedStoreIC(ic, assembler);
 }
 
-
 // PushContext <context>
 //
-// Pushes the accumulator as the current context, and saves it in <context>
-void Interpreter::DoPushContext(compiler::InterpreterAssembler* assembler) {
+// Saves the current context in <context>, and pushes the accumulator as the
+// new current context.
+void Interpreter::DoPushContext(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(0);
-  Node* context = __ GetAccumulator();
-  __ SetContext(context);
-  __ StoreRegister(context, reg_index);
+  Node* new_context = __ GetAccumulator();
+  Node* old_context = __ GetContext();
+  __ StoreRegister(old_context, reg_index);
+  __ SetContext(new_context);
   __ Dispatch();
 }
 
@@ -843,22 +734,22 @@
 // PopContext <context>
 //
 // Pops the current context and sets <context> as the new context.
-void Interpreter::DoPopContext(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoPopContext(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* context = __ LoadRegister(reg_index);
   __ SetContext(context);
   __ Dispatch();
 }
 
-
 void Interpreter::DoBinaryOp(Runtime::FunctionId function_id,
-                             compiler::InterpreterAssembler* assembler) {
+                             InterpreterAssembler* assembler) {
   // TODO(rmcilroy): Call ICs which back-patch bytecode with type specialized
   // operations, instead of calling builtins directly.
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* lhs = __ LoadRegister(reg_index);
   Node* rhs = __ GetAccumulator();
-  Node* result = __ CallRuntime(function_id, lhs, rhs);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(function_id, context, lhs, rhs);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -867,7 +758,7 @@
 // Add <src>
 //
 // Add register <src> to accumulator.
-void Interpreter::DoAdd(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoAdd(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kAdd, assembler);
 }
 
@@ -875,7 +766,7 @@
 // Sub <src>
 //
 // Subtract register <src> from accumulator.
-void Interpreter::DoSub(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoSub(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kSubtract, assembler);
 }
 
@@ -883,7 +774,7 @@
 // Mul <src>
 //
 // Multiply accumulator by register <src>.
-void Interpreter::DoMul(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoMul(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kMultiply, assembler);
 }
 
@@ -891,7 +782,7 @@
 // Div <src>
 //
 // Divide register <src> by accumulator.
-void Interpreter::DoDiv(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoDiv(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kDivide, assembler);
 }
 
@@ -899,7 +790,7 @@
 // Mod <src>
 //
 // Modulo register <src> by accumulator.
-void Interpreter::DoMod(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoMod(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kModulus, assembler);
 }
 
@@ -907,7 +798,7 @@
 // BitwiseOr <src>
 //
 // BitwiseOr register <src> to accumulator.
-void Interpreter::DoBitwiseOr(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoBitwiseOr(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kBitwiseOr, assembler);
 }
 
@@ -915,7 +806,7 @@
 // BitwiseXor <src>
 //
 // BitwiseXor register <src> to accumulator.
-void Interpreter::DoBitwiseXor(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoBitwiseXor(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kBitwiseXor, assembler);
 }
 
@@ -923,7 +814,7 @@
 // BitwiseAnd <src>
 //
 // BitwiseAnd register <src> to accumulator.
-void Interpreter::DoBitwiseAnd(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoBitwiseAnd(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kBitwiseAnd, assembler);
 }
 
@@ -934,7 +825,7 @@
 // Register <src> is converted to an int32 and the accumulator to uint32
 // before the operation. 5 lsb bits from the accumulator are used as count
 // i.e. <src> << (accumulator & 0x1F).
-void Interpreter::DoShiftLeft(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoShiftLeft(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kShiftLeft, assembler);
 }
 
@@ -945,7 +836,7 @@
 // Result is sign extended. Register <src> is converted to an int32 and the
 // accumulator to uint32 before the operation. 5 lsb bits from the accumulator
 // are used as count i.e. <src> >> (accumulator & 0x1F).
-void Interpreter::DoShiftRight(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoShiftRight(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kShiftRight, assembler);
 }
 
@@ -956,17 +847,16 @@
 // Result is zero-filled. The accumulator and register <src> are converted to
 // uint32 before the operation 5 lsb bits from the accumulator are used as
 // count i.e. <src> << (accumulator & 0x1F).
-void Interpreter::DoShiftRightLogical(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoShiftRightLogical(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kShiftRightLogical, assembler);
 }
 
-
 void Interpreter::DoCountOp(Runtime::FunctionId function_id,
-                            compiler::InterpreterAssembler* assembler) {
+                            InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
   Node* one = __ NumberConstant(1);
-  Node* result = __ CallRuntime(function_id, value, one);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(function_id, context, value, one);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -975,7 +865,7 @@
 // Inc
 //
 // Increments value in the accumulator by one.
-void Interpreter::DoInc(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoInc(InterpreterAssembler* assembler) {
   DoCountOp(Runtime::kAdd, assembler);
 }
 
@@ -983,7 +873,7 @@
 // Dec
 //
 // Decrements value in the accumulator by one.
-void Interpreter::DoDec(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoDec(InterpreterAssembler* assembler) {
   DoCountOp(Runtime::kSubtract, assembler);
 }
 
@@ -992,9 +882,11 @@
 //
 // Perform logical-not on the accumulator, first casting the
 // accumulator to a boolean value if required.
-void Interpreter::DoLogicalNot(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
-  Node* result = __ CallRuntime(Runtime::kInterpreterLogicalNot, accumulator);
+  Node* context = __ GetContext();
+  Node* result =
+      __ CallRuntime(Runtime::kInterpreterLogicalNot, context, accumulator);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1004,20 +896,22 @@
 //
 // Load the accumulator with the string representating type of the
 // object in the accumulator.
-void Interpreter::DoTypeOf(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
-  Node* result = __ CallRuntime(Runtime::kInterpreterTypeOf, accumulator);
+  Node* context = __ GetContext();
+  Node* result =
+      __ CallRuntime(Runtime::kInterpreterTypeOf, context, accumulator);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
-
 void Interpreter::DoDelete(Runtime::FunctionId function_id,
-                           compiler::InterpreterAssembler* assembler) {
+                           InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(reg_index);
   Node* key = __ GetAccumulator();
-  Node* result = __ CallRuntime(function_id, object, key);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(function_id, context, object, key);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1027,8 +921,7 @@
 //
 // Delete the property specified in the accumulator from the object
 // referenced by the register operand following strict mode semantics.
-void Interpreter::DoDeletePropertyStrict(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoDeletePropertyStrict(InterpreterAssembler* assembler) {
   DoDelete(Runtime::kDeleteProperty_Strict, assembler);
 }
 
@@ -1037,34 +930,23 @@
 //
 // Delete the property specified in the accumulator from the object
 // referenced by the register operand following sloppy mode semantics.
-void Interpreter::DoDeletePropertySloppy(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoDeletePropertySloppy(InterpreterAssembler* assembler) {
   DoDelete(Runtime::kDeleteProperty_Sloppy, assembler);
 }
 
-
-// DeleteLookupSlot
-//
-// Delete the variable with the name specified in the accumulator by dynamically
-// looking it up.
-void Interpreter::DoDeleteLookupSlot(
-    compiler::InterpreterAssembler* assembler) {
-  Node* name = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* result = __ CallRuntime(Runtime::kDeleteLookupSlot, context, name);
-  __ SetAccumulator(result);
-  __ Dispatch();
-}
-
-
-void Interpreter::DoJSCall(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJSCall(InterpreterAssembler* assembler,
+                           TailCallMode tail_call_mode) {
   Node* function_reg = __ BytecodeOperandReg(0);
   Node* function = __ LoadRegister(function_reg);
   Node* receiver_reg = __ BytecodeOperandReg(1);
-  Node* first_arg = __ RegisterLocation(receiver_reg);
-  Node* args_count = __ BytecodeOperandCount(2);
-  // TODO(rmcilroy): Use the call type feedback slot to call via CallIC.
-  Node* result = __ CallJS(function, first_arg, args_count);
+  Node* receiver_arg = __ RegisterLocation(receiver_reg);
+  Node* receiver_args_count = __ BytecodeOperandCount(2);
+  Node* receiver_count = __ Int32Constant(1);
+  Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
+  Node* context = __ GetContext();
+  // TODO(rmcilroy): Use the call type feedback slot to call via CallStub.
+  Node* result =
+      __ CallJS(function, context, receiver_arg, args_count, tail_call_mode);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1074,8 +956,8 @@
 //
 // Call a JSfunction or Callable in |callable| with the |receiver| and
 // |arg_count| arguments in subsequent registers.
-void Interpreter::DoCall(compiler::InterpreterAssembler* assembler) {
-  DoJSCall(assembler);
+void Interpreter::DoCall(InterpreterAssembler* assembler) {
+  DoJSCall(assembler, TailCallMode::kDisallow);
 }
 
 
@@ -1083,8 +965,35 @@
 //
 // Call a JSfunction or Callable in |callable| with the |receiver| and
 // |arg_count| arguments in subsequent registers.
-void Interpreter::DoCallWide(compiler::InterpreterAssembler* assembler) {
-  DoJSCall(assembler);
+void Interpreter::DoCallWide(InterpreterAssembler* assembler) {
+  DoJSCall(assembler, TailCallMode::kDisallow);
+}
+
+// TailCall <callable> <receiver> <arg_count>
+//
+// Tail call a JSfunction or Callable in |callable| with the |receiver| and
+// |arg_count| arguments in subsequent registers.
+void Interpreter::DoTailCall(InterpreterAssembler* assembler) {
+  DoJSCall(assembler, TailCallMode::kAllow);
+}
+
+// TailCallWide <callable> <receiver> <arg_count>
+//
+// Tail call a JSfunction or Callable in |callable| with the |receiver| and
+// |arg_count| arguments in subsequent registers.
+void Interpreter::DoTailCallWide(InterpreterAssembler* assembler) {
+  DoJSCall(assembler, TailCallMode::kAllow);
+}
+
+void Interpreter::DoCallRuntimeCommon(InterpreterAssembler* assembler) {
+  Node* function_id = __ BytecodeOperandIdx(0);
+  Node* first_arg_reg = __ BytecodeOperandReg(1);
+  Node* first_arg = __ RegisterLocation(first_arg_reg);
+  Node* args_count = __ BytecodeOperandCount(2);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntimeN(function_id, context, first_arg, args_count);
+  __ SetAccumulator(result);
+  __ Dispatch();
 }
 
 
@@ -1093,13 +1002,37 @@
 // Call the runtime function |function_id| with the first argument in
 // register |first_arg| and |arg_count| arguments in subsequent
 // registers.
-void Interpreter::DoCallRuntime(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCallRuntime(InterpreterAssembler* assembler) {
+  DoCallRuntimeCommon(assembler);
+}
+
+
+// CallRuntime <function_id> <first_arg> <arg_count>
+//
+// Call the runtime function |function_id| with the first argument in
+// register |first_arg| and |arg_count| arguments in subsequent
+// registers.
+void Interpreter::DoCallRuntimeWide(InterpreterAssembler* assembler) {
+  DoCallRuntimeCommon(assembler);
+}
+
+void Interpreter::DoCallRuntimeForPairCommon(InterpreterAssembler* assembler) {
+  // Call the runtime function.
   Node* function_id = __ BytecodeOperandIdx(0);
   Node* first_arg_reg = __ BytecodeOperandReg(1);
   Node* first_arg = __ RegisterLocation(first_arg_reg);
   Node* args_count = __ BytecodeOperandCount(2);
-  Node* result = __ CallRuntime(function_id, first_arg, args_count);
-  __ SetAccumulator(result);
+  Node* context = __ GetContext();
+  Node* result_pair =
+      __ CallRuntimeN(function_id, context, first_arg, args_count, 2);
+
+  // Store the results in <first_return> and <first_return + 1>
+  Node* first_return_reg = __ BytecodeOperandReg(3);
+  Node* second_return_reg = __ NextRegister(first_return_reg);
+  Node* result0 = __ Projection(0, result_pair);
+  Node* result1 = __ Projection(1, result_pair);
+  __ StoreRegister(result0, first_return_reg);
+  __ StoreRegister(result1, second_return_reg);
   __ Dispatch();
 }
 
@@ -1110,36 +1043,28 @@
 // first argument in register |first_arg| and |arg_count| arguments in
 // subsequent registers. Returns the result in <first_return> and
 // <first_return + 1>
-void Interpreter::DoCallRuntimeForPair(
-    compiler::InterpreterAssembler* assembler) {
-  // Call the runtime function.
-  Node* function_id = __ BytecodeOperandIdx(0);
-  Node* first_arg_reg = __ BytecodeOperandReg(1);
-  Node* first_arg = __ RegisterLocation(first_arg_reg);
-  Node* args_count = __ BytecodeOperandCount(2);
-  Node* result_pair = __ CallRuntime(function_id, first_arg, args_count, 2);
-
-  // Store the results in <first_return> and <first_return + 1>
-  Node* first_return_reg = __ BytecodeOperandReg(3);
-  Node* second_return_reg = __ NextRegister(first_return_reg);
-  Node* result0 = __ Projection(0, result_pair);
-  Node* result1 = __ Projection(1, result_pair);
-  __ StoreRegister(result0, first_return_reg);
-  __ StoreRegister(result1, second_return_reg);
-
-  __ Dispatch();
+void Interpreter::DoCallRuntimeForPair(InterpreterAssembler* assembler) {
+  DoCallRuntimeForPairCommon(assembler);
 }
 
 
-// CallJSRuntime <context_index> <receiver> <arg_count>
+// CallRuntimeForPairWide <function_id> <first_arg> <arg_count> <first_return>
 //
-// Call the JS runtime function that has the |context_index| with the receiver
-// in register |receiver| and |arg_count| arguments in subsequent registers.
-void Interpreter::DoCallJSRuntime(compiler::InterpreterAssembler* assembler) {
+// Call the runtime function |function_id| which returns a pair, with the
+// first argument in register |first_arg| and |arg_count| arguments in
+// subsequent registers. Returns the result in <first_return> and
+// <first_return + 1>
+void Interpreter::DoCallRuntimeForPairWide(InterpreterAssembler* assembler) {
+  DoCallRuntimeForPairCommon(assembler);
+}
+
+void Interpreter::DoCallJSRuntimeCommon(InterpreterAssembler* assembler) {
   Node* context_index = __ BytecodeOperandIdx(0);
   Node* receiver_reg = __ BytecodeOperandReg(1);
   Node* first_arg = __ RegisterLocation(receiver_reg);
-  Node* args_count = __ BytecodeOperandCount(2);
+  Node* receiver_args_count = __ BytecodeOperandCount(2);
+  Node* receiver_count = __ Int32Constant(1);
+  Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
 
   // Get the function to call from the native context.
   Node* context = __ GetContext();
@@ -1148,7 +1073,41 @@
   Node* function = __ LoadContextSlot(native_context, context_index);
 
   // Call the function.
-  Node* result = __ CallJS(function, first_arg, args_count);
+  Node* result = __ CallJS(function, context, first_arg, args_count,
+                           TailCallMode::kDisallow);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// CallJSRuntime <context_index> <receiver> <arg_count>
+//
+// Call the JS runtime function that has the |context_index| with the receiver
+// in register |receiver| and |arg_count| arguments in subsequent registers.
+void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
+  DoCallJSRuntimeCommon(assembler);
+}
+
+
+// CallJSRuntimeWide <context_index> <receiver> <arg_count>
+//
+// Call the JS runtime function that has the |context_index| with the receiver
+// in register |receiver| and |arg_count| arguments in subsequent registers.
+void Interpreter::DoCallJSRuntimeWide(InterpreterAssembler* assembler) {
+  DoCallJSRuntimeCommon(assembler);
+}
+
+void Interpreter::DoCallConstruct(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
+  Node* new_target = __ GetAccumulator();
+  Node* constructor_reg = __ BytecodeOperandReg(0);
+  Node* constructor = __ LoadRegister(constructor_reg);
+  Node* first_arg_reg = __ BytecodeOperandReg(1);
+  Node* first_arg = __ RegisterLocation(first_arg_reg);
+  Node* args_count = __ BytecodeOperandCount(2);
+  Node* context = __ GetContext();
+  Node* result =
+      __ CallConstruct(constructor, context, new_target, first_arg, args_count);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1158,42 +1117,45 @@
 //
 // Call operator new with |constructor| and the first argument in
 // register |first_arg| and |arg_count| arguments in subsequent
+// registers. The new.target is in the accumulator.
 //
-void Interpreter::DoNew(compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
-  Node* constructor_reg = __ BytecodeOperandReg(0);
-  Node* constructor = __ LoadRegister(constructor_reg);
-  Node* first_arg_reg = __ BytecodeOperandReg(1);
-  Node* first_arg = __ RegisterLocation(first_arg_reg);
-  Node* args_count = __ BytecodeOperandCount(2);
-  Node* result =
-      __ CallConstruct(constructor, constructor, first_arg, args_count);
-  __ SetAccumulator(result);
-  __ Dispatch();
+void Interpreter::DoNew(InterpreterAssembler* assembler) {
+  DoCallConstruct(assembler);
+}
+
+
+// NewWide <constructor> <first_arg> <arg_count>
+//
+// Call operator new with |constructor| and the first argument in
+// register |first_arg| and |arg_count| arguments in subsequent
+// registers. The new.target is in the accumulator.
+//
+void Interpreter::DoNewWide(InterpreterAssembler* assembler) {
+  DoCallConstruct(assembler);
 }
 
 
 // TestEqual <src>
 //
 // Test if the value in the <src> register equals the accumulator.
-void Interpreter::DoTestEqual(compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterEquals, assembler);
+void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kEqual, assembler);
 }
 
 
 // TestNotEqual <src>
 //
 // Test if the value in the <src> register is not equal to the accumulator.
-void Interpreter::DoTestNotEqual(compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterNotEquals, assembler);
+void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kNotEqual, assembler);
 }
 
 
 // TestEqualStrict <src>
 //
 // Test if the value in the <src> register is strictly equal to the accumulator.
-void Interpreter::DoTestEqualStrict(compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterStrictEquals, assembler);
+void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kStrictEqual, assembler);
 }
 
 
@@ -1201,25 +1163,24 @@
 //
 // Test if the value in the <src> register is not strictly equal to the
 // accumulator.
-void Interpreter::DoTestNotEqualStrict(
-    compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterStrictNotEquals, assembler);
+void Interpreter::DoTestNotEqualStrict(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kStrictNotEqual, assembler);
 }
 
 
 // TestLessThan <src>
 //
 // Test if the value in the <src> register is less than the accumulator.
-void Interpreter::DoTestLessThan(compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterLessThan, assembler);
+void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kLessThan, assembler);
 }
 
 
 // TestGreaterThan <src>
 //
 // Test if the value in the <src> register is greater than the accumulator.
-void Interpreter::DoTestGreaterThan(compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterGreaterThan, assembler);
+void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kGreaterThan, assembler);
 }
 
 
@@ -1227,9 +1188,8 @@
 //
 // Test if the value in the <src> register is less than or equal to the
 // accumulator.
-void Interpreter::DoTestLessThanOrEqual(
-    compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterLessThanOrEqual, assembler);
+void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kLessThanOrEqual, assembler);
 }
 
 
@@ -1237,9 +1197,8 @@
 //
 // Test if the value in the <src> register is greater than or equal to the
 // accumulator.
-void Interpreter::DoTestGreaterThanOrEqual(
-    compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterGreaterThanOrEqual, assembler);
+void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kGreaterThanOrEqual, assembler);
 }
 
 
@@ -1247,7 +1206,7 @@
 //
 // Test if the object referenced by the register operand is a property of the
 // object referenced by the accumulator.
-void Interpreter::DoTestIn(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kHasProperty, assembler);
 }
 
@@ -1256,7 +1215,7 @@
 //
 // Test if the object referenced by the <src> register is an an instance of type
 // referenced by the accumulator.
-void Interpreter::DoTestInstanceOf(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kInstanceOf, assembler);
 }
 
@@ -1264,9 +1223,10 @@
 // ToName
 //
 // Cast the object referenced by the accumulator to a name.
-void Interpreter::DoToName(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoToName(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
-  Node* result = __ CallRuntime(Runtime::kToName, accumulator);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(Runtime::kToName, context, accumulator);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1275,9 +1235,10 @@
 // ToNumber
 //
 // Cast the object referenced by the accumulator to a number.
-void Interpreter::DoToNumber(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
-  Node* result = __ CallRuntime(Runtime::kToNumber, accumulator);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(Runtime::kToNumber, context, accumulator);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1286,9 +1247,10 @@
 // ToObject
 //
 // Cast the object referenced by the accumulator to a JSObject.
-void Interpreter::DoToObject(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoToObject(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
-  Node* result = __ CallRuntime(Runtime::kToObject, accumulator);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(Runtime::kToObject, context, accumulator);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1297,7 +1259,7 @@
 // Jump <imm8>
 //
 // Jump by number of bytes represented by the immediate operand |imm8|.
-void Interpreter::DoJump(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJump(InterpreterAssembler* assembler) {
   Node* relative_jump = __ BytecodeOperandImm(0);
   __ Jump(relative_jump);
 }
@@ -1306,7 +1268,7 @@
 // JumpConstant <idx8>
 //
 // Jump by number of bytes in the Smi in the |idx8| entry in the constant pool.
-void Interpreter::DoJumpConstant(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpConstant(InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
   Node* relative_jump = __ SmiUntag(constant);
@@ -1318,8 +1280,7 @@
 //
 // Jump by number of bytes in the Smi in the |idx16| entry in the
 // constant pool.
-void Interpreter::DoJumpConstantWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpConstantWide(InterpreterAssembler* assembler) {
   DoJumpConstant(assembler);
 }
 
@@ -1328,7 +1289,7 @@
 //
 // Jump by number of bytes represented by an immediate operand if the
 // accumulator contains true.
-void Interpreter::DoJumpIfTrue(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfTrue(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* relative_jump = __ BytecodeOperandImm(0);
   Node* true_value = __ BooleanConstant(true);
@@ -1340,8 +1301,7 @@
 //
 // Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
 // if the accumulator contains true.
-void Interpreter::DoJumpIfTrueConstant(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
@@ -1355,8 +1315,7 @@
 //
 // Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
 // if the accumulator contains true.
-void Interpreter::DoJumpIfTrueConstantWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfTrueConstantWide(InterpreterAssembler* assembler) {
   DoJumpIfTrueConstant(assembler);
 }
 
@@ -1365,7 +1324,7 @@
 //
 // Jump by number of bytes represented by an immediate operand if the
 // accumulator contains false.
-void Interpreter::DoJumpIfFalse(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfFalse(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* relative_jump = __ BytecodeOperandImm(0);
   Node* false_value = __ BooleanConstant(false);
@@ -1377,8 +1336,7 @@
 //
 // Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
 // if the accumulator contains false.
-void Interpreter::DoJumpIfFalseConstant(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
@@ -1392,8 +1350,7 @@
 //
 // Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
 // if the accumulator contains false.
-void Interpreter::DoJumpIfFalseConstantWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfFalseConstantWide(InterpreterAssembler* assembler) {
   DoJumpIfFalseConstant(assembler);
 }
 
@@ -1402,11 +1359,11 @@
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is true when the object is cast to boolean.
-void Interpreter::DoJumpIfToBooleanTrue(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
+  Node* context = __ GetContext();
   Node* to_boolean_value =
-      __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+      __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
   Node* relative_jump = __ BytecodeOperandImm(0);
   Node* true_value = __ BooleanConstant(true);
   __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
@@ -1419,10 +1376,11 @@
 // if the object referenced by the accumulator is true when the object is cast
 // to boolean.
 void Interpreter::DoJumpIfToBooleanTrueConstant(
-    compiler::InterpreterAssembler* assembler) {
+    InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
+  Node* context = __ GetContext();
   Node* to_boolean_value =
-      __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+      __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
   Node* relative_jump = __ SmiUntag(constant);
@@ -1437,7 +1395,7 @@
 // if the object referenced by the accumulator is true when the object is cast
 // to boolean.
 void Interpreter::DoJumpIfToBooleanTrueConstantWide(
-    compiler::InterpreterAssembler* assembler) {
+    InterpreterAssembler* assembler) {
   DoJumpIfToBooleanTrueConstant(assembler);
 }
 
@@ -1446,11 +1404,11 @@
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is false when the object is cast to boolean.
-void Interpreter::DoJumpIfToBooleanFalse(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
+  Node* context = __ GetContext();
   Node* to_boolean_value =
-      __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+      __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
   Node* relative_jump = __ BytecodeOperandImm(0);
   Node* false_value = __ BooleanConstant(false);
   __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
@@ -1463,10 +1421,11 @@
 // if the object referenced by the accumulator is false when the object is cast
 // to boolean.
 void Interpreter::DoJumpIfToBooleanFalseConstant(
-    compiler::InterpreterAssembler* assembler) {
+    InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
+  Node* context = __ GetContext();
   Node* to_boolean_value =
-      __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+      __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
   Node* relative_jump = __ SmiUntag(constant);
@@ -1481,7 +1440,7 @@
 // if the object referenced by the accumulator is false when the object is cast
 // to boolean.
 void Interpreter::DoJumpIfToBooleanFalseConstantWide(
-    compiler::InterpreterAssembler* assembler) {
+    InterpreterAssembler* assembler) {
   DoJumpIfToBooleanFalseConstant(assembler);
 }
 
@@ -1490,7 +1449,7 @@
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is the null constant.
-void Interpreter::DoJumpIfNull(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfNull(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
   Node* relative_jump = __ BytecodeOperandImm(0);
@@ -1502,8 +1461,7 @@
 //
 // Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
 // if the object referenced by the accumulator is the null constant.
-void Interpreter::DoJumpIfNullConstant(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfNullConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
   Node* index = __ BytecodeOperandIdx(0);
@@ -1517,17 +1475,15 @@
 //
 // Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
 // if the object referenced by the accumulator is the null constant.
-void Interpreter::DoJumpIfNullConstantWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfNullConstantWide(InterpreterAssembler* assembler) {
   DoJumpIfNullConstant(assembler);
 }
 
-
-// jumpifundefined <imm8>
+// JumpIfUndefined <imm8>
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is the undefined constant.
-void Interpreter::DoJumpIfUndefined(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfUndefined(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* undefined_value =
       __ HeapConstant(isolate_->factory()->undefined_value());
@@ -1540,8 +1496,7 @@
 //
 // Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
 // if the object referenced by the accumulator is the undefined constant.
-void Interpreter::DoJumpIfUndefinedConstant(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* undefined_value =
       __ HeapConstant(isolate_->factory()->undefined_value());
@@ -1557,13 +1512,44 @@
 // Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
 // if the object referenced by the accumulator is the undefined constant.
 void Interpreter::DoJumpIfUndefinedConstantWide(
-    compiler::InterpreterAssembler* assembler) {
+    InterpreterAssembler* assembler) {
   DoJumpIfUndefinedConstant(assembler);
 }
 
+// JumpIfNotHole <imm8>
+//
+// Jump by number of bytes represented by an immediate operand if the object
+// referenced by the accumulator is not the hole.
+void Interpreter::DoJumpIfNotHole(InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
+  Node* relative_jump = __ BytecodeOperandImm(0);
+  __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
+}
+
+// JumpIfNotHoleConstant <idx8>
+//
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// if the object referenced by the accumulator is not the hole constant.
+void Interpreter::DoJumpIfNotHoleConstant(InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
+  Node* index = __ BytecodeOperandIdx(0);
+  Node* constant = __ LoadConstantPoolEntry(index);
+  Node* relative_jump = __ SmiUntag(constant);
+  __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
+}
+
+// JumpIfNotHoleConstantWide <idx16>
+//
+// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
+// if the object referenced by the accumulator is not the hole constant.
+void Interpreter::DoJumpIfNotHoleConstantWide(InterpreterAssembler* assembler) {
+  DoJumpIfNotHoleConstant(assembler);
+}
 
 void Interpreter::DoCreateLiteral(Runtime::FunctionId function_id,
-                                  compiler::InterpreterAssembler* assembler) {
+                                  InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant_elements = __ LoadConstantPoolEntry(index);
   Node* literal_index_raw = __ BytecodeOperandIdx(1);
@@ -1571,7 +1557,8 @@
   Node* flags_raw = __ BytecodeOperandImm(2);
   Node* flags = __ SmiTag(flags_raw);
   Node* closure = __ LoadRegister(Register::function_closure());
-  Node* result = __ CallRuntime(function_id, closure, literal_index,
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(function_id, context, closure, literal_index,
                                 constant_elements, flags);
   __ SetAccumulator(result);
   __ Dispatch();
@@ -1582,8 +1569,7 @@
 //
 // Creates a regular expression literal for literal index <literal_idx> with
 // <flags> and the pattern in <pattern_idx>.
-void Interpreter::DoCreateRegExpLiteral(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
   DoCreateLiteral(Runtime::kCreateRegExpLiteral, assembler);
 }
 
@@ -1592,8 +1578,7 @@
 //
 // Creates a regular expression literal for literal index <literal_idx> with
 // <flags> and the pattern in <pattern_idx>.
-void Interpreter::DoCreateRegExpLiteralWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateRegExpLiteralWide(InterpreterAssembler* assembler) {
   DoCreateLiteral(Runtime::kCreateRegExpLiteral, assembler);
 }
 
@@ -1602,8 +1587,7 @@
 //
 // Creates an array literal for literal index <literal_idx> with flags <flags>
 // and constant elements in <element_idx>.
-void Interpreter::DoCreateArrayLiteral(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
   DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
 }
 
@@ -1612,8 +1596,7 @@
 //
 // Creates an array literal for literal index <literal_idx> with flags <flags>
 // and constant elements in <element_idx>.
-void Interpreter::DoCreateArrayLiteralWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateArrayLiteralWide(InterpreterAssembler* assembler) {
   DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
 }
 
@@ -1622,8 +1605,7 @@
 //
 // Creates an object literal for literal index <literal_idx> with flags <flags>
 // and constant elements in <element_idx>.
-void Interpreter::DoCreateObjectLiteral(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
   DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
 }
 
@@ -1632,8 +1614,7 @@
 //
 // Creates an object literal for literal index <literal_idx> with flags <flags>
 // and constant elements in <element_idx>.
-void Interpreter::DoCreateObjectLiteralWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateObjectLiteralWide(InterpreterAssembler* assembler) {
   DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
 }
 
@@ -1642,15 +1623,16 @@
 //
 // Creates a new closure for SharedFunctionInfo at position |index| in the
 // constant pool and with the PretenureFlag <tenured>.
-void Interpreter::DoCreateClosure(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) {
   // TODO(rmcilroy): Possibly call FastNewClosureStub when possible instead of
   // calling into the runtime.
   Node* index = __ BytecodeOperandIdx(0);
   Node* shared = __ LoadConstantPoolEntry(index);
   Node* tenured_raw = __ BytecodeOperandImm(1);
   Node* tenured = __ SmiTag(tenured_raw);
+  Node* context = __ GetContext();
   Node* result =
-      __ CallRuntime(Runtime::kInterpreterNewClosure, shared, tenured);
+      __ CallRuntime(Runtime::kInterpreterNewClosure, context, shared, tenured);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1660,8 +1642,7 @@
 //
 // Creates a new closure for SharedFunctionInfo at position |index| in the
 // constant pool and with the PretenureFlag <tenured>.
-void Interpreter::DoCreateClosureWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateClosureWide(InterpreterAssembler* assembler) {
   return DoCreateClosure(assembler);
 }
 
@@ -1669,10 +1650,11 @@
 // CreateMappedArguments
 //
 // Creates a new mapped arguments object.
-void Interpreter::DoCreateMappedArguments(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
   Node* closure = __ LoadRegister(Register::function_closure());
-  Node* result = __ CallRuntime(Runtime::kNewSloppyArguments_Generic, closure);
+  Node* context = __ GetContext();
+  Node* result =
+      __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1681,21 +1663,56 @@
 // CreateUnmappedArguments
 //
 // Creates a new unmapped arguments object.
-void Interpreter::DoCreateUnmappedArguments(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) {
+  Callable callable = CodeFactory::FastNewStrictArguments(isolate_);
+  Node* target = __ HeapConstant(callable.code());
+  Node* context = __ GetContext();
   Node* closure = __ LoadRegister(Register::function_closure());
-  Node* result = __ CallRuntime(Runtime::kNewStrictArguments_Generic, closure);
+  Node* result = __ CallStub(callable.descriptor(), target, context, closure);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
+// CreateRestParameter
+//
+// Creates a new rest parameter array.
+void Interpreter::DoCreateRestParameter(InterpreterAssembler* assembler) {
+  Callable callable = CodeFactory::FastNewRestParameter(isolate_);
+  Node* target = __ HeapConstant(callable.code());
+  Node* closure = __ LoadRegister(Register::function_closure());
+  Node* context = __ GetContext();
+  Node* result = __ CallStub(callable.descriptor(), target, context, closure);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+// StackCheck
+//
+// Performs a stack guard check.
+void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
+  __ StackCheck();
+  __ Dispatch();
+}
 
 // Throw
 //
 // Throws the exception in the accumulator.
-void Interpreter::DoThrow(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoThrow(InterpreterAssembler* assembler) {
   Node* exception = __ GetAccumulator();
-  __ CallRuntime(Runtime::kThrow, exception);
+  Node* context = __ GetContext();
+  __ CallRuntime(Runtime::kThrow, context, exception);
+  // We shouldn't ever return from a throw.
+  __ Abort(kUnexpectedReturnFromThrow);
+}
+
+
+// ReThrow
+//
+// Re-throws the exception in the accumulator.
+void Interpreter::DoReThrow(InterpreterAssembler* assembler) {
+  Node* exception = __ GetAccumulator();
+  Node* context = __ GetContext();
+  __ CallRuntime(Runtime::kReThrow, context, exception);
   // We shouldn't ever return from a throw.
   __ Abort(kUnexpectedReturnFromThrow);
 }
@@ -1704,59 +1721,105 @@
 // Return
 //
 // Return the value in the accumulator.
-void Interpreter::DoReturn(compiler::InterpreterAssembler* assembler) {
-  __ Return();
+void Interpreter::DoReturn(InterpreterAssembler* assembler) {
+  __ InterpreterReturn();
 }
 
+// Debugger
+//
+// Call runtime to handle debugger statement.
+void Interpreter::DoDebugger(InterpreterAssembler* assembler) {
+  Node* context = __ GetContext();
+  __ CallRuntime(Runtime::kHandleDebuggerStatement, context);
+  __ Dispatch();
+}
 
-// ForInPrepare <cache_type> <cache_array> <cache_length>
+// DebugBreak
+//
+// Call runtime to handle a debug break.
+#define DEBUG_BREAK(Name, ...)                                              \
+  void Interpreter::Do##Name(InterpreterAssembler* assembler) {             \
+    Node* context = __ GetContext();                                        \
+    Node* original_handler = __ CallRuntime(Runtime::kDebugBreak, context); \
+    __ DispatchToBytecodeHandler(original_handler);                         \
+  }
+DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
+#undef DEBUG_BREAK
+
+// ForInPrepare <cache_info_triple>
 //
 // Returns state for for..in loop execution based on the object in the
-// accumulator. The registers |cache_type|, |cache_array|, and
-// |cache_length| represent output parameters.
-void Interpreter::DoForInPrepare(compiler::InterpreterAssembler* assembler) {
+// accumulator. The result is output in registers |cache_info_triple| to
+// |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
+// and cache_length respectively.
+void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) {
   Node* object = __ GetAccumulator();
-  Node* result = __ CallRuntime(Runtime::kInterpreterForInPrepare, object);
+  Node* context = __ GetContext();
+  Node* result_triple = __ CallRuntime(Runtime::kForInPrepare, context, object);
+
+  // Set output registers:
+  //   0 == cache_type, 1 == cache_array, 2 == cache_length
+  Node* output_register = __ BytecodeOperandReg(0);
   for (int i = 0; i < 3; i++) {
-    // 0 == cache_type, 1 == cache_array, 2 == cache_length
-    Node* cache_info = __ LoadFixedArrayElement(result, i);
-    Node* cache_info_reg = __ BytecodeOperandReg(i);
-    __ StoreRegister(cache_info, cache_info_reg);
+    Node* cache_info = __ Projection(i, result_triple);
+    __ StoreRegister(cache_info, output_register);
+    output_register = __ NextRegister(output_register);
   }
+  __ Dispatch();
+}
+
+
+// ForInPrepareWide <cache_info_triple>
+//
+// Returns state for for..in loop execution based on the object in the
+// accumulator. The result is output in registers |cache_info_triple| to
+// |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
+// and cache_length respectively.
+void Interpreter::DoForInPrepareWide(InterpreterAssembler* assembler) {
+  DoForInPrepare(assembler);
+}
+
+
+// ForInNext <receiver> <index> <cache_info_pair>
+//
+// Returns the next enumerable property in the accumulator.
+void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
+  Node* receiver_reg = __ BytecodeOperandReg(0);
+  Node* receiver = __ LoadRegister(receiver_reg);
+  Node* index_reg = __ BytecodeOperandReg(1);
+  Node* index = __ LoadRegister(index_reg);
+  Node* cache_type_reg = __ BytecodeOperandReg(2);
+  Node* cache_type = __ LoadRegister(cache_type_reg);
+  Node* cache_array_reg = __ NextRegister(cache_type_reg);
+  Node* cache_array = __ LoadRegister(cache_array_reg);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(Runtime::kForInNext, context, receiver,
+                                cache_array, cache_type, index);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
 
-// ForInNext <receiver> <cache_type> <cache_array> <index>
+// ForInNextWide <receiver> <index> <cache_info_pair>
 //
 // Returns the next enumerable property in the the accumulator.
-void Interpreter::DoForInNext(compiler::InterpreterAssembler* assembler) {
-  Node* receiver_reg = __ BytecodeOperandReg(0);
-  Node* receiver = __ LoadRegister(receiver_reg);
-  Node* cache_type_reg = __ BytecodeOperandReg(1);
-  Node* cache_type = __ LoadRegister(cache_type_reg);
-  Node* cache_array_reg = __ BytecodeOperandReg(2);
-  Node* cache_array = __ LoadRegister(cache_array_reg);
-  Node* index_reg = __ BytecodeOperandReg(3);
-  Node* index = __ LoadRegister(index_reg);
-  Node* result = __ CallRuntime(Runtime::kForInNext, receiver, cache_array,
-                                cache_type, index);
-  __ SetAccumulator(result);
-  __ Dispatch();
+void Interpreter::DoForInNextWide(InterpreterAssembler* assembler) {
+  return DoForInNext(assembler);
 }
 
 
 // ForInDone <index> <cache_length>
 //
 // Returns true if the end of the enumerable properties has been reached.
-void Interpreter::DoForInDone(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoForInDone(InterpreterAssembler* assembler) {
   // TODO(oth): Implement directly rather than making a runtime call.
   Node* index_reg = __ BytecodeOperandReg(0);
   Node* index = __ LoadRegister(index_reg);
   Node* cache_length_reg = __ BytecodeOperandReg(1);
   Node* cache_length = __ LoadRegister(cache_length_reg);
-  Node* result = __ CallRuntime(Runtime::kForInDone, index, cache_length);
+  Node* context = __ GetContext();
+  Node* result =
+      __ CallRuntime(Runtime::kForInDone, context, index, cache_length);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1766,11 +1829,12 @@
 //
 // Increments the loop counter in register |index| and stores the result
 // in the accumulator.
-void Interpreter::DoForInStep(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoForInStep(InterpreterAssembler* assembler) {
   // TODO(oth): Implement directly rather than making a runtime call.
   Node* index_reg = __ BytecodeOperandReg(0);
   Node* index = __ LoadRegister(index_reg);
-  Node* result = __ CallRuntime(Runtime::kForInStep, index);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(Runtime::kForInStep, context, index);
   __ SetAccumulator(result);
   __ Dispatch();
 }
diff --git a/src/interpreter/interpreter.h b/src/interpreter/interpreter.h
index ef9b5d1..e02e914 100644
--- a/src/interpreter/interpreter.h
+++ b/src/interpreter/interpreter.h
@@ -21,91 +21,113 @@
 class Callable;
 class CompilationInfo;
 
-namespace compiler {
-class InterpreterAssembler;
-}
-
 namespace interpreter {
 
+class InterpreterAssembler;
+
 class Interpreter {
  public:
   explicit Interpreter(Isolate* isolate);
   virtual ~Interpreter() {}
 
-  // Creates an uninitialized interpreter handler table, where each handler
-  // points to the Illegal builtin.
-  static Handle<FixedArray> CreateUninitializedInterpreterTable(
-      Isolate* isolate);
-
-  // Initializes the interpreter.
+  // Initializes the interpreter dispatch table.
   void Initialize();
 
+  // Returns the interrupt budget which should be used for the profiler counter.
+  static int InterruptBudget();
+
   // Generate bytecode for |info|.
   static bool MakeBytecode(CompilationInfo* info);
 
+  // Return bytecode handler for |bytecode|.
+  Code* GetBytecodeHandler(Bytecode bytecode);
+
+  // GC support.
+  void IterateDispatchTable(ObjectVisitor* v);
+
+  void TraceCodegen(Handle<Code> code, const char* name);
+
+  Address dispatch_table_address() {
+    return reinterpret_cast<Address>(&dispatch_table_[0]);
+  }
+
  private:
 // Bytecode handler generator functions.
 #define DECLARE_BYTECODE_HANDLER_GENERATOR(Name, ...) \
-  void Do##Name(compiler::InterpreterAssembler* assembler);
+  void Do##Name(InterpreterAssembler* assembler);
   BYTECODE_LIST(DECLARE_BYTECODE_HANDLER_GENERATOR)
 #undef DECLARE_BYTECODE_HANDLER_GENERATOR
 
   // Generates code to perform the binary operations via |function_id|.
   void DoBinaryOp(Runtime::FunctionId function_id,
-                  compiler::InterpreterAssembler* assembler);
+                  InterpreterAssembler* assembler);
 
   // Generates code to perform the count operations via |function_id|.
   void DoCountOp(Runtime::FunctionId function_id,
-                 compiler::InterpreterAssembler* assembler);
+                 InterpreterAssembler* assembler);
 
   // Generates code to perform the comparison operation associated with
   // |compare_op|.
-  void DoCompareOp(Token::Value compare_op,
-                   compiler::InterpreterAssembler* assembler);
+  void DoCompareOp(Token::Value compare_op, InterpreterAssembler* assembler);
 
   // Generates code to load a constant from the constant pool.
-  void DoLoadConstant(compiler::InterpreterAssembler* assembler);
+  void DoLoadConstant(InterpreterAssembler* assembler);
 
   // Generates code to perform a global load via |ic|.
-  void DoLoadGlobal(Callable ic, compiler::InterpreterAssembler* assembler);
+  void DoLoadGlobal(Callable ic, InterpreterAssembler* assembler);
 
   // Generates code to perform a global store via |ic|.
-  void DoStoreGlobal(Callable ic, compiler::InterpreterAssembler* assembler);
+  void DoStoreGlobal(Callable ic, InterpreterAssembler* assembler);
 
   // Generates code to perform a named property load via |ic|.
-  void DoLoadIC(Callable ic, compiler::InterpreterAssembler* assembler);
+  void DoLoadIC(Callable ic, InterpreterAssembler* assembler);
 
   // Generates code to perform a keyed property load via |ic|.
-  void DoKeyedLoadIC(Callable ic, compiler::InterpreterAssembler* assembler);
+  void DoKeyedLoadIC(Callable ic, InterpreterAssembler* assembler);
 
   // Generates code to perform a namedproperty store via |ic|.
-  void DoStoreIC(Callable ic, compiler::InterpreterAssembler* assembler);
+  void DoStoreIC(Callable ic, InterpreterAssembler* assembler);
 
   // Generates code to perform a keyed property store via |ic|.
-  void DoKeyedStoreIC(Callable ic, compiler::InterpreterAssembler* assembler);
+  void DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler);
 
   // Generates code to perform a JS call.
-  void DoJSCall(compiler::InterpreterAssembler* assembler);
+  void DoJSCall(InterpreterAssembler* assembler, TailCallMode tail_call_mode);
+
+  // Generates code to perform a runtime call.
+  void DoCallRuntimeCommon(InterpreterAssembler* assembler);
+
+  // Generates code to perform a runtime call returning a pair.
+  void DoCallRuntimeForPairCommon(InterpreterAssembler* assembler);
+
+  // Generates code to perform a JS runtime call.
+  void DoCallJSRuntimeCommon(InterpreterAssembler* assembler);
+
+  // Generates code to perform a constructor call.
+  void DoCallConstruct(InterpreterAssembler* assembler);
 
   // Generates code ro create a literal via |function_id|.
   void DoCreateLiteral(Runtime::FunctionId function_id,
-                       compiler::InterpreterAssembler* assembler);
+                       InterpreterAssembler* assembler);
 
   // Generates code to perform delete via function_id.
   void DoDelete(Runtime::FunctionId function_id,
-                compiler::InterpreterAssembler* assembler);
+                InterpreterAssembler* assembler);
 
   // Generates code to perform a lookup slot load via |function_id|.
   void DoLoadLookupSlot(Runtime::FunctionId function_id,
-                        compiler::InterpreterAssembler* assembler);
+                        InterpreterAssembler* assembler);
 
   // Generates code to perform a lookup slot store depending on |language_mode|.
   void DoStoreLookupSlot(LanguageMode language_mode,
-                         compiler::InterpreterAssembler* assembler);
+                         InterpreterAssembler* assembler);
 
-  bool IsInterpreterTableInitialized(Handle<FixedArray> handler_table);
+  bool IsDispatchTableInitialized();
+
+  static const int kDispatchTableSize = static_cast<int>(Bytecode::kLast) + 1;
 
   Isolate* isolate_;
+  Code* dispatch_table_[kDispatchTableSize];
 
   DISALLOW_COPY_AND_ASSIGN(Interpreter);
 };
diff --git a/src/interpreter/register-translator.cc b/src/interpreter/register-translator.cc
new file mode 100644
index 0000000..3eba42f
--- /dev/null
+++ b/src/interpreter/register-translator.cc
@@ -0,0 +1,173 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/register-translator.h"
+
+#include "src/interpreter/bytecode-array-builder.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+RegisterTranslator::RegisterTranslator(RegisterMover* mover)
+    : mover_(mover),
+      emitting_moves_(false),
+      window_registers_count_(0),
+      output_moves_count_(0) {}
+
+void RegisterTranslator::TranslateInputRegisters(Bytecode bytecode,
+                                                 uint32_t* raw_operands,
+                                                 int raw_operand_count) {
+  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), raw_operand_count);
+  if (!emitting_moves_) {
+    emitting_moves_ = true;
+    DCHECK_EQ(window_registers_count_, 0);
+    int register_bitmap = Bytecodes::GetRegisterOperandBitmap(bytecode);
+    for (int i = 0; i < raw_operand_count; i++) {
+      if ((register_bitmap & (1 << i)) == 0) {
+        continue;
+      }
+      Register in_reg = Register::FromRawOperand(raw_operands[i]);
+      Register out_reg = TranslateAndMove(bytecode, i, in_reg);
+      raw_operands[i] = out_reg.ToRawOperand();
+    }
+    window_registers_count_ = 0;
+    emitting_moves_ = false;
+  } else {
+    // When the register translator is translating registers, it will
+    // cause the bytecode generator to emit moves on its behalf. This
+    // path is reached by these moves.
+    DCHECK(bytecode == Bytecode::kMovWide && raw_operand_count == 2 &&
+           Register::FromRawOperand(raw_operands[0]).is_valid() &&
+           Register::FromRawOperand(raw_operands[1]).is_valid());
+  }
+}
+
+Register RegisterTranslator::TranslateAndMove(Bytecode bytecode,
+                                              int operand_index, Register reg) {
+  if (FitsInReg8Operand(reg)) {
+    return reg;
+  }
+
+  OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
+  OperandSize operand_size = Bytecodes::SizeOfOperand(operand_type);
+  if (operand_size == OperandSize::kShort) {
+    CHECK(FitsInReg16Operand(reg));
+    return Translate(reg);
+  }
+
+  CHECK((operand_type == OperandType::kReg8 ||
+         operand_type == OperandType::kRegOut8) &&
+        RegisterIsMovableToWindow(bytecode, operand_index));
+  Register translated_reg = Translate(reg);
+  Register window_reg(kTranslationWindowStart + window_registers_count_);
+  window_registers_count_ += 1;
+  if (Bytecodes::IsRegisterInputOperandType(operand_type)) {
+    DCHECK(!Bytecodes::IsRegisterOutputOperandType(operand_type));
+    mover()->MoveRegisterUntranslated(translated_reg, window_reg);
+  } else if (Bytecodes::IsRegisterOutputOperandType(operand_type)) {
+    DCHECK_LT(output_moves_count_, kTranslationWindowLength);
+    output_moves_[output_moves_count_] =
+        std::make_pair(window_reg, translated_reg);
+    output_moves_count_ += 1;
+  } else {
+    UNREACHABLE();
+  }
+  return window_reg;
+}
+
+// static
+bool RegisterTranslator::RegisterIsMovableToWindow(Bytecode bytecode,
+                                                   int operand_index) {
+  // By design, we only support moving individual registers. There
+  // should be wide variants of such bytecodes instead to avoid the
+  // need for a large translation window.
+  OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
+  if (operand_type != OperandType::kReg8 &&
+      operand_type != OperandType::kRegOut8) {
+    return false;
+  } else if (operand_index + 1 == Bytecodes::NumberOfOperands(bytecode)) {
+    return true;
+  } else {
+    OperandType next_operand_type =
+        Bytecodes::GetOperandType(bytecode, operand_index + 1);
+    return (next_operand_type != OperandType::kRegCount8 &&
+            next_operand_type != OperandType::kRegCount16);
+  }
+}
+
+void RegisterTranslator::TranslateOutputRegisters() {
+  if (!emitting_moves_) {
+    emitting_moves_ = true;
+    while (output_moves_count_ > 0) {
+      output_moves_count_ -= 1;
+      mover()->MoveRegisterUntranslated(
+          output_moves_[output_moves_count_].first,
+          output_moves_[output_moves_count_].second);
+    }
+    emitting_moves_ = false;
+  }
+}
+
+// static
+Register RegisterTranslator::Translate(Register reg) {
+  if (reg.index() >= kTranslationWindowStart) {
+    return Register(reg.index() + kTranslationWindowLength);
+  } else {
+    return reg;
+  }
+}
+
+// static
+bool RegisterTranslator::InTranslationWindow(Register reg) {
+  return (reg.index() >= kTranslationWindowStart &&
+          reg.index() <= kTranslationWindowLimit);
+}
+
+// static
+Register RegisterTranslator::UntranslateRegister(Register reg) {
+  if (reg.index() >= kTranslationWindowStart) {
+    return Register(reg.index() - kTranslationWindowLength);
+  } else {
+    return reg;
+  }
+}
+
+// static
+int RegisterTranslator::DistanceToTranslationWindow(Register reg) {
+  return kTranslationWindowStart - reg.index();
+}
+
+// static
+bool RegisterTranslator::FitsInReg8Operand(Register reg) {
+  return reg.is_byte_operand() && reg.index() < kTranslationWindowStart;
+}
+
+// static
+bool RegisterTranslator::FitsInReg16Operand(Register reg) {
+  int max_index = Register::MaxRegisterIndex() - kTranslationWindowLength + 1;
+  return reg.is_short_operand() && reg.index() < max_index;
+}
+
+// static
+int RegisterTranslator::RegisterCountAdjustment(int register_count,
+                                                int parameter_count) {
+  if (register_count > kTranslationWindowStart) {
+    return kTranslationWindowLength;
+  } else if (parameter_count > 0) {
+    Register param0 = Register::FromParameterIndex(0, parameter_count);
+    if (!param0.is_byte_operand()) {
+      // TODO(oth): Number of parameters means translation is
+      // required, but the translation window location is such that
+      // some space is wasted. Hopefully a rare corner case, but could
+      // relocate window to limit waste.
+      return kTranslationWindowLimit + 1 - register_count;
+    }
+  }
+  return 0;
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/register-translator.h b/src/interpreter/register-translator.h
new file mode 100644
index 0000000..b683a89
--- /dev/null
+++ b/src/interpreter/register-translator.h
@@ -0,0 +1,119 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_REGISTER_TRANSLATOR_H_
+#define V8_INTERPRETER_REGISTER_TRANSLATOR_H_
+
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class RegisterMover;
+
+// A class that enables bytecodes having only byte sized register operands
+// to access all registers in the two byte space. Most bytecode uses few
+// registers so space can be saved if most bytecodes with register operands
+// just take byte operands.
+//
+// To reach the wider register space, a translation window is reserved in
+// the byte addressable space specifically for copying registers into and
+// out of before a bytecode is emitted. The translation window occupies
+// the last register slots at the top of the byte addressable range.
+//
+// Because of the translation window any registers which naturally lie
+// at or above the translation window have to have their register index
+// incremented by the window width before they are emitted.
+//
+// This class does not support moving ranges of registers to and from
+// the translation window. It would be straightforward to add support
+// for constrained ranges, e.g. kRegPair8, kRegTriple8 operands, but
+// these would have two negative effects. The translation window would
+// need to be wider, further limiting the space for byte operands. And
+// every register in a range would need to be moved consuming more
+// space in the bytecode array.
+class RegisterTranslator final {
+ public:
+  explicit RegisterTranslator(RegisterMover* mover);
+
+  // Translate and re-write the register operands that are inputs
+  // to |bytecode| when it is about to be emitted.
+  void TranslateInputRegisters(Bytecode bytecode, uint32_t* raw_operands,
+                               int raw_operand_count);
+
+  // Translate and re-write the register operands that are outputs
+  // from |bytecode| when it has just been output.
+  void TranslateOutputRegisters();
+
+  // Returns true if |reg| is in the translation window.
+  static bool InTranslationWindow(Register reg);
+
+  // Return register value as if it had been translated.
+  static Register UntranslateRegister(Register reg);
+
+  // Returns the distance in registers between the translation window
+  // start and |reg|. The result is negative when |reg| is above the
+  // start of the translation window.
+  static int DistanceToTranslationWindow(Register reg);
+
+  // Returns true if |reg| can be represented as an 8-bit operand
+  // after translation.
+  static bool FitsInReg8Operand(Register reg);
+
+  // Returns true if |reg| can be represented as an 16-bit operand
+  // after translation.
+  static bool FitsInReg16Operand(Register reg);
+
+  // Returns the increment to the register count necessary if the
+  // value indicates the translation window is required.
+  static int RegisterCountAdjustment(int register_count, int parameter_count);
+
+ private:
+  static const int kTranslationWindowLength = 4;
+  static const int kTranslationWindowLimit = -kMinInt8;
+  static const int kTranslationWindowStart =
+      kTranslationWindowLimit - kTranslationWindowLength + 1;
+
+  Register TranslateAndMove(Bytecode bytecode, int operand_index, Register reg);
+  static bool RegisterIsMovableToWindow(Bytecode bytecode, int operand_index);
+
+  static Register Translate(Register reg);
+
+  RegisterMover* mover() const { return mover_; }
+
+  // Entity to perform register moves necessary to translate registers
+  // and ensure reachability.
+  RegisterMover* mover_;
+
+  // Flag to avoid re-entrancy when emitting move bytecodes for
+  // translation.
+  bool emitting_moves_;
+
+  // Number of window registers in use.
+  int window_registers_count_;
+
+  // State for restoring register moves emitted by TranslateOutputRegisters.
+  std::pair<Register, Register> output_moves_[kTranslationWindowLength];
+  int output_moves_count_;
+};
+
+// Interface for RegisterTranslator helper class that will emit
+// register move bytecodes at the translator's behest.
+class RegisterMover {
+ public:
+  virtual ~RegisterMover() {}
+
+  // Move register |from| to register |to| with no translation.
+  // Both register operands must be valid. Implementations
+  // of this method must be aware that register moves with bad
+  // register values are a security hole.
+  virtual void MoveRegisterUntranslated(Register from, Register to) = 0;
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_REGISTER_TRANSLATOR_H_
diff --git a/src/interpreter/source-position-table.cc b/src/interpreter/source-position-table.cc
new file mode 100644
index 0000000..0b7c44e
--- /dev/null
+++ b/src/interpreter/source-position-table.cc
@@ -0,0 +1,84 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/source-position-table.h"
+
+#include "src/assembler.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class IsStatementField : public BitField<bool, 0, 1> {};
+class SourcePositionField : public BitField<int, 1, 30> {};
+
+void SourcePositionTableBuilder::AddStatementPosition(size_t bytecode_offset,
+                                                      int source_position) {
+  int offset = static_cast<int>(bytecode_offset);
+  // If a position has already been assigned to this bytecode offset,
+  // do not reassign a new statement position.
+  if (CodeOffsetHasPosition(offset)) return;
+  uint32_t encoded = IsStatementField::encode(true) |
+                     SourcePositionField::encode(source_position);
+  entries_.push_back({offset, encoded});
+}
+
+void SourcePositionTableBuilder::AddExpressionPosition(size_t bytecode_offset,
+                                                       int source_position) {
+  int offset = static_cast<int>(bytecode_offset);
+  // If a position has already been assigned to this bytecode offset,
+  // do not reassign a new expression position.
+  if (CodeOffsetHasPosition(offset)) return;
+  uint32_t encoded = IsStatementField::encode(false) |
+                     SourcePositionField::encode(source_position);
+  entries_.push_back({offset, encoded});
+}
+
+void SourcePositionTableBuilder::RevertPosition(size_t bytecode_offset) {
+  int offset = static_cast<int>(bytecode_offset);
+  // If we already added a source position table entry, but the bytecode array
+  // builder ended up not outputting a bytecode for the corresponding bytecode
+  // offset, we have to remove that entry.
+  if (CodeOffsetHasPosition(offset)) entries_.pop_back();
+}
+
+Handle<FixedArray> SourcePositionTableBuilder::ToFixedArray() {
+  int length = static_cast<int>(entries_.size());
+  Handle<FixedArray> table =
+      isolate_->factory()->NewFixedArray(length * 2, TENURED);
+  for (int i = 0; i < length; i++) {
+    table->set(i * 2, Smi::FromInt(entries_[i].bytecode_offset));
+    table->set(i * 2 + 1, Smi::FromInt(entries_[i].source_position_and_type));
+  }
+  return table;
+}
+
+SourcePositionTableIterator::SourcePositionTableIterator(
+    BytecodeArray* bytecode_array)
+    : table_(bytecode_array->source_position_table()),
+      index_(0),
+      length_(table_->length()) {
+  DCHECK(table_->length() % 2 == 0);
+  Advance();
+}
+
+void SourcePositionTableIterator::Advance() {
+  if (index_ < length_) {
+    int new_bytecode_offset = Smi::cast(table_->get(index_))->value();
+    // Bytecode offsets are in ascending order.
+    DCHECK(bytecode_offset_ < new_bytecode_offset || index_ == 0);
+    bytecode_offset_ = new_bytecode_offset;
+    uint32_t source_position_and_type =
+        static_cast<uint32_t>(Smi::cast(table_->get(index_ + 1))->value());
+    is_statement_ = IsStatementField::decode(source_position_and_type);
+    source_position_ = SourcePositionField::decode(source_position_and_type);
+  }
+  index_ += 2;
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/source-position-table.h b/src/interpreter/source-position-table.h
new file mode 100644
index 0000000..336cf42
--- /dev/null
+++ b/src/interpreter/source-position-table.h
@@ -0,0 +1,82 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_SOURCE_POSITION_TABLE_H_
+#define V8_INTERPRETER_SOURCE_POSITION_TABLE_H_
+
+#include "src/assert-scope.h"
+#include "src/handles.h"
+#include "src/zone.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class BytecodeArray;
+class FixedArray;
+class Isolate;
+
+namespace interpreter {
+
+class SourcePositionTableBuilder {
+ public:
+  explicit SourcePositionTableBuilder(Isolate* isolate, Zone* zone)
+      : isolate_(isolate), entries_(zone) {}
+
+  void AddStatementPosition(size_t bytecode_offset, int source_position);
+  void AddExpressionPosition(size_t bytecode_offset, int source_position);
+  void RevertPosition(size_t bytecode_offset);
+  Handle<FixedArray> ToFixedArray();
+
+ private:
+  struct Entry {
+    int bytecode_offset;
+    uint32_t source_position_and_type;
+  };
+
+  bool CodeOffsetHasPosition(int bytecode_offset) {
+    // Return whether bytecode offset already has a position assigned.
+    return entries_.size() > 0 &&
+           entries_.back().bytecode_offset == bytecode_offset;
+  }
+
+  Isolate* isolate_;
+  ZoneVector<Entry> entries_;
+};
+
+class SourcePositionTableIterator {
+ public:
+  explicit SourcePositionTableIterator(BytecodeArray* bytecode_array);
+
+  void Advance();
+
+  int bytecode_offset() const {
+    DCHECK(!done());
+    return bytecode_offset_;
+  }
+  int source_position() const {
+    DCHECK(!done());
+    return source_position_;
+  }
+  bool is_statement() const {
+    DCHECK(!done());
+    return is_statement_;
+  }
+  bool done() const { return index_ > length_; }
+
+ private:
+  FixedArray* table_;
+  int index_;
+  int length_;
+  bool is_statement_;
+  int bytecode_offset_;
+  int source_position_;
+  DisallowHeapAllocation no_gc;
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_SOURCE_POSITION_TABLE_H_