Upgrade V8 to version 4.9.385.28

https://chromium.googlesource.com/v8/v8/+/4.9.385.28

FPIIM-449

Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4
diff --git a/src/interpreter/DEPS b/src/interpreter/DEPS
new file mode 100644
index 0000000..f8d6b98
--- /dev/null
+++ b/src/interpreter/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+  "+src/compiler/interpreter-assembler.h",
+]
diff --git a/src/interpreter/OWNERS b/src/interpreter/OWNERS
new file mode 100644
index 0000000..5ad730c
--- /dev/null
+++ b/src/interpreter/OWNERS
@@ -0,0 +1,6 @@
+set noparent
+
+bmeurer@chromium.org
+mstarzinger@chromium.org
+oth@chromium.org
+rmcilroy@chromium.org
diff --git a/src/interpreter/bytecode-array-builder.cc b/src/interpreter/bytecode-array-builder.cc
new file mode 100644
index 0000000..1b15fc6
--- /dev/null
+++ b/src/interpreter/bytecode-array-builder.cc
@@ -0,0 +1,1608 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-array-builder.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// Read-only view of the most recently emitted bytecode. Used by peephole
+// checks (e.g. NeedToBooleanCast, CastAccumulatorToName) to inspect the
+// previous instruction in the current basic block.
+class BytecodeArrayBuilder::PreviousBytecodeHelper {
+ public:
+  explicit PreviousBytecodeHelper(const BytecodeArrayBuilder& array_builder)
+      : array_builder_(array_builder),
+        previous_bytecode_start_(array_builder_.last_bytecode_start_) {
+    // This helper is expected to be instantiated only when the last bytecode is
+    // in the same basic block.
+    DCHECK(array_builder_.LastBytecodeInSameBlock());
+  }
+
+  // Returns the previous bytecode in the same basic block.
+  MUST_USE_RESULT Bytecode GetBytecode() const {
+    // Check that no new bytecode has been emitted since construction.
+    DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
+    return Bytecodes::FromByte(
+        array_builder_.bytecodes()->at(previous_bytecode_start_));
+  }
+
+  // Returns the operand at operand_index for the previous bytecode in the
+  // same basic block.
+  MUST_USE_RESULT uint32_t GetOperand(int operand_index) const {
+    DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
+    Bytecode bytecode = GetBytecode();
+    DCHECK_GE(operand_index, 0);
+    DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode));
+    size_t operand_offset =
+        previous_bytecode_start_ +
+        Bytecodes::GetOperandOffset(bytecode, operand_index);
+    OperandSize size = Bytecodes::GetOperandSize(bytecode, operand_index);
+    switch (size) {
+      default:
+      case OperandSize::kNone:
+        UNREACHABLE();
+      case OperandSize::kByte:
+        return static_cast<uint32_t>(
+            array_builder_.bytecodes()->at(operand_offset));
+      case OperandSize::kShort:
+        // Short operands are read high byte first (see the << 8 below).
+        uint16_t operand =
+            (array_builder_.bytecodes()->at(operand_offset) << 8) +
+            array_builder_.bytecodes()->at(operand_offset + 1);
+        return static_cast<uint32_t>(operand);
+    }
+  }
+
+  // Returns the constant pool entry indexed by the operand at operand_index
+  // of the previous bytecode.
+  Handle<Object> GetConstantForIndexOperand(int operand_index) const {
+    return array_builder_.constant_array_builder()->At(
+        GetOperand(operand_index));
+  }
+
+ private:
+  const BytecodeArrayBuilder& array_builder_;
+  // Offset of the previous bytecode, captured at construction time.
+  size_t previous_bytecode_start_;
+
+  DISALLOW_COPY_AND_ASSIGN(PreviousBytecodeHelper);
+};
+
+
+// Register counts start unset (-1) until the set_*_count methods are called;
+// last_bytecode_start_ of ~0 marks "no bytecode emitted yet".
+BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate, Zone* zone)
+    : isolate_(isolate),
+      zone_(zone),
+      bytecodes_(zone),
+      bytecode_generated_(false),
+      constant_array_builder_(isolate, zone),
+      last_block_end_(0),
+      last_bytecode_start_(~0),
+      exit_seen_in_block_(false),
+      unbound_jumps_(0),
+      parameter_count_(-1),
+      local_register_count_(-1),
+      context_register_count_(-1),
+      temporary_register_count_(0),
+      free_temporaries_(zone) {}
+
+
+// All forward jumps must have been bound before the builder is destroyed.
+BytecodeArrayBuilder::~BytecodeArrayBuilder() { DCHECK_EQ(0, unbound_jumps_); }
+
+
+// Sets the number of local registers. Must precede set_context_count, since
+// context registers are laid out after the locals.
+void BytecodeArrayBuilder::set_locals_count(int number_of_locals) {
+  local_register_count_ = number_of_locals;
+  DCHECK_LE(context_register_count_, 0);
+}
+
+
+void BytecodeArrayBuilder::set_parameter_count(int number_of_parameters) {
+  parameter_count_ = number_of_parameters;
+}
+
+
+// Sets the number of context registers; locals must already have been set.
+void BytecodeArrayBuilder::set_context_count(int number_of_contexts) {
+  context_register_count_ = number_of_contexts;
+  DCHECK_GE(local_register_count_, 0);
+}
+
+
+// Context registers immediately follow the locals.
+Register BytecodeArrayBuilder::first_context_register() const {
+  DCHECK_GT(context_register_count_, 0);
+  return Register(local_register_count_);
+}
+
+
+Register BytecodeArrayBuilder::last_context_register() const {
+  DCHECK_GT(context_register_count_, 0);
+  return Register(local_register_count_ + context_register_count_ - 1);
+}
+
+
+// Temporary registers follow the fixed (local + context) registers.
+Register BytecodeArrayBuilder::first_temporary_register() const {
+  DCHECK_GT(temporary_register_count_, 0);
+  return Register(fixed_register_count());
+}
+
+
+Register BytecodeArrayBuilder::last_temporary_register() const {
+  DCHECK_GT(temporary_register_count_, 0);
+  return Register(fixed_register_count() + temporary_register_count_ - 1);
+}
+
+
+// Maps a parameter index to its register representation.
+Register BytecodeArrayBuilder::Parameter(int parameter_index) const {
+  DCHECK_GE(parameter_index, 0);
+  return Register::FromParameterIndex(parameter_index, parameter_count());
+}
+
+
+bool BytecodeArrayBuilder::RegisterIsParameterOrLocal(Register reg) const {
+  return reg.is_parameter() || reg.index() < locals_count();
+}
+
+
+// True iff reg falls inside the currently allocated temporary range.
+bool BytecodeArrayBuilder::RegisterIsTemporary(Register reg) const {
+  return temporary_register_count_ > 0 && first_temporary_register() <= reg &&
+         reg <= last_temporary_register();
+}
+
+
+// Finalizes the bytecode stream into a heap-allocated BytecodeArray,
+// flattening the constant pool. May only be called once; EnsureReturn()
+// guarantees the stream ends with a return.
+Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray() {
+  DCHECK_EQ(bytecode_generated_, false);
+  EnsureReturn();
+
+  int bytecode_size = static_cast<int>(bytecodes_.size());
+  int register_count = fixed_register_count() + temporary_register_count_;
+  int frame_size = register_count * kPointerSize;
+  Factory* factory = isolate_->factory();
+  Handle<FixedArray> constant_pool =
+      constant_array_builder()->ToFixedArray(factory);
+  Handle<BytecodeArray> output =
+      factory->NewBytecodeArray(bytecode_size, &bytecodes_.front(), frame_size,
+                                parameter_count(), constant_pool);
+  bytecode_generated_ = true;
+  return output;
+}
+
+
+// Appends |bytecode| and its N operands to the stream. Operand sizes come
+// from the bytecode's metadata; short operands are written via
+// WriteUnalignedUInt16. Emission is suppressed after a block exit.
+template <size_t N>
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t(&operands)[N]) {
+  // Don't output dead code.
+  if (exit_seen_in_block_) return;
+
+  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), static_cast<int>(N));
+  last_bytecode_start_ = bytecodes()->size();
+  bytecodes()->push_back(Bytecodes::ToByte(bytecode));
+  for (int i = 0; i < static_cast<int>(N); i++) {
+    DCHECK(OperandIsValid(bytecode, i, operands[i]));
+    switch (Bytecodes::GetOperandSize(bytecode, i)) {
+      case OperandSize::kNone:
+        UNREACHABLE();
+      case OperandSize::kByte:
+        bytecodes()->push_back(static_cast<uint8_t>(operands[i]));
+        break;
+      case OperandSize::kShort: {
+        uint8_t operand_bytes[2];
+        WriteUnalignedUInt16(operand_bytes, operands[i]);
+        bytecodes()->insert(bytecodes()->end(), operand_bytes,
+                            operand_bytes + 2);
+        break;
+      }
+    }
+  }
+}
+
+
+// Fixed-arity convenience overloads forwarding to the array form above.
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
+                                  uint32_t operand1, uint32_t operand2,
+                                  uint32_t operand3) {
+  uint32_t operands[] = {operand0, operand1, operand2, operand3};
+  Output(bytecode, operands);
+}
+
+
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
+                                  uint32_t operand1, uint32_t operand2) {
+  uint32_t operands[] = {operand0, operand1, operand2};
+  Output(bytecode, operands);
+}
+
+
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
+                                  uint32_t operand1) {
+  uint32_t operands[] = {operand0, operand1};
+  Output(bytecode, operands);
+}
+
+
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0) {
+  uint32_t operands[] = {operand0};
+  Output(bytecode, operands);
+}
+
+
+// Zero-operand form; mirrors the dead-code suppression of the array form.
+void BytecodeArrayBuilder::Output(Bytecode bytecode) {
+  // Don't output dead code.
+  if (exit_seen_in_block_) return;
+
+  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
+  last_bytecode_start_ = bytecodes()->size();
+  bytecodes()->push_back(Bytecodes::ToByte(bytecode));
+}
+
+
+// Emits the bytecode for binary operation |op| with register operand |reg|.
+// Strong-mode (strength) semantics are not implemented yet.
+BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
+                                                            Register reg,
+                                                            Strength strength) {
+  if (is_strong(strength)) {
+    UNIMPLEMENTED();
+  }
+
+  Output(BytecodeForBinaryOperation(op), reg.ToOperand());
+  return *this;
+}
+
+
+// Emits the count (increment/decrement) bytecode for |op|.
+BytecodeArrayBuilder& BytecodeArrayBuilder::CountOperation(Token::Value op,
+                                                           Strength strength) {
+  if (is_strong(strength)) {
+    UNIMPLEMENTED();
+  }
+
+  Output(BytecodeForCountOperation(op));
+  return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LogicalNot() {
+  Output(Bytecode::kLogicalNot);
+  return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::TypeOf() {
+  Output(Bytecode::kTypeOf);
+  return *this;
+}
+
+
+// Emits the comparison bytecode for |op| with register operand |reg|.
+BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(
+    Token::Value op, Register reg, Strength strength) {
+  if (is_strong(strength)) {
+    UNIMPLEMENTED();
+  }
+
+  Output(BytecodeForCompareOperation(op), reg.ToOperand());
+  return *this;
+}
+
+
+// Loads a Smi literal: LdaZero for 0, LdaSmi8 for values fitting in a
+// signed byte, otherwise falls back to a constant pool load.
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
+    v8::internal::Smi* smi) {
+  int32_t raw_smi = smi->value();
+  if (raw_smi == 0) {
+    Output(Bytecode::kLdaZero);
+  } else if (raw_smi >= -128 && raw_smi <= 127) {
+    Output(Bytecode::kLdaSmi8, static_cast<uint8_t>(raw_smi));
+  } else {
+    LoadLiteral(Handle<Object>(smi, isolate_));
+  }
+  return *this;
+}
+
+
+// Loads |object| from the constant pool, choosing the 8- or 16-bit-index
+// bytecode based on the pool entry.
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(Handle<Object> object) {
+  size_t entry = GetConstantPoolEntry(object);
+  if (FitsInIdx8Operand(entry)) {
+    Output(Bytecode::kLdaConstant, static_cast<uint8_t>(entry));
+  } else if (FitsInIdx16Operand(entry)) {
+    Output(Bytecode::kLdaConstantWide, static_cast<uint16_t>(entry));
+  } else {
+    UNIMPLEMENTED();
+  }
+  return *this;
+}
+
+
+// Singleton-value loaders: each emits a single operand-less bytecode that
+// places the value in the accumulator.
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadUndefined() {
+  Output(Bytecode::kLdaUndefined);
+  return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNull() {
+  Output(Bytecode::kLdaNull);
+  return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadTheHole() {
+  Output(Bytecode::kLdaTheHole);
+  return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadTrue() {
+  Output(Bytecode::kLdaTrue);
+  return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadFalse() {
+  Output(Bytecode::kLdaFalse);
+  return *this;
+}
+
+
+// Dispatches to LoadTrue/LoadFalse based on |value|.
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadBooleanConstant(bool value) {
+  if (value) {
+    LoadTrue();
+  } else {
+    LoadFalse();
+  }
+  return *this;
+}
+
+
+// Loads |reg| into the accumulator, skipping the Ldar when
+// IsRegisterInAccumulator(reg) reports the value is already there.
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
+    Register reg) {
+  if (!IsRegisterInAccumulator(reg)) {
+    Output(Bytecode::kLdar, reg.ToOperand());
+  }
+  return *this;
+}
+
+
+// Stores the accumulator into |reg|. The Star is skipped when
+// IsRegisterInAccumulator(reg) reports the accumulator already holds the
+// value of |reg|, mirroring LoadAccumulatorWithRegister above.
+//
+// TODO(oth): If the previous bytecode is a MOV into this register,
+// the previous instruction can be removed. The logic for determining
+// these redundant MOVs appears complex.
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
+    Register reg) {
+  // NOTE: emitting Star unconditionally here and again under the guard
+  // (as the previous revision did) duplicated the store; only the guarded
+  // emission is correct.
+  if (!IsRegisterInAccumulator(reg)) {
+    Output(Bytecode::kStar, reg.ToOperand());
+  }
+  return *this;
+}
+
+
+// Copies |from| into |to| (registers must differ).
+BytecodeArrayBuilder& BytecodeArrayBuilder::MoveRegister(Register from,
+                                                         Register to) {
+  DCHECK(from != to);
+  Output(Bytecode::kMov, from.ToOperand(), to.ToOperand());
+  return *this;
+}
+
+
+// Swaps two registers. kExchange requires its first operand to fit in a
+// byte, so the narrow register (if any) goes first; otherwise the wide
+// variant takes both registers as 16-bit operands.
+BytecodeArrayBuilder& BytecodeArrayBuilder::ExchangeRegisters(Register reg0,
+                                                              Register reg1) {
+  DCHECK(reg0 != reg1);
+  if (FitsInReg8Operand(reg0)) {
+    Output(Bytecode::kExchange, reg0.ToOperand(), reg1.ToWideOperand());
+  } else if (FitsInReg8Operand(reg1)) {
+    Output(Bytecode::kExchange, reg1.ToOperand(), reg0.ToWideOperand());
+  } else {
+    Output(Bytecode::kExchangeWide, reg0.ToWideOperand(), reg1.ToWideOperand());
+  }
+  return *this;
+}
+
+
+// Loads the global |name| using feedback slot |feedback_slot|. Narrow or
+// wide bytecodes are chosen by whether both operands fit in 8 bits.
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(
+    const Handle<String> name, int feedback_slot, LanguageMode language_mode,
+    TypeofMode typeof_mode) {
+  // TODO(rmcilroy): Potentially store language and typeof information in an
+  // operand rather than having extra bytecodes.
+  Bytecode bytecode = BytecodeForLoadGlobal(language_mode, typeof_mode);
+  size_t name_index = GetConstantPoolEntry(name);
+  if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
+    Output(bytecode, static_cast<uint8_t>(name_index),
+           static_cast<uint8_t>(feedback_slot));
+  } else if (FitsInIdx16Operand(name_index) &&
+             FitsInIdx16Operand(feedback_slot)) {
+    Output(BytecodeForWideOperands(bytecode), static_cast<uint16_t>(name_index),
+           static_cast<uint16_t>(feedback_slot));
+  } else {
+    UNIMPLEMENTED();
+  }
+  return *this;
+}
+
+
+// Stores the accumulator to global |name|; operand-width selection matches
+// LoadGlobal above.
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreGlobal(
+    const Handle<String> name, int feedback_slot, LanguageMode language_mode) {
+  Bytecode bytecode = BytecodeForStoreGlobal(language_mode);
+  size_t name_index = GetConstantPoolEntry(name);
+  if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
+    Output(bytecode, static_cast<uint8_t>(name_index),
+           static_cast<uint8_t>(feedback_slot));
+  } else if (FitsInIdx16Operand(name_index) &&
+             FitsInIdx16Operand(feedback_slot)) {
+    Output(BytecodeForWideOperands(bytecode), static_cast<uint16_t>(name_index),
+           static_cast<uint16_t>(feedback_slot));
+  } else {
+    UNIMPLEMENTED();
+  }
+  return *this;
+}
+
+
+// Loads slot |slot_index| of the context in |context|; the wide variant is
+// used when the slot index does not fit in 8 bits.
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadContextSlot(Register context,
+                                                            int slot_index) {
+  DCHECK(slot_index >= 0);
+  if (FitsInIdx8Operand(slot_index)) {
+    Output(Bytecode::kLdaContextSlot, context.ToOperand(),
+           static_cast<uint8_t>(slot_index));
+  } else if (FitsInIdx16Operand(slot_index)) {
+    Output(Bytecode::kLdaContextSlotWide, context.ToOperand(),
+           static_cast<uint16_t>(slot_index));
+  } else {
+    UNIMPLEMENTED();
+  }
+  return *this;
+}
+
+
+// Stores the accumulator into slot |slot_index| of the context in |context|.
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreContextSlot(Register context,
+                                                             int slot_index) {
+  DCHECK(slot_index >= 0);
+  if (FitsInIdx8Operand(slot_index)) {
+    Output(Bytecode::kStaContextSlot, context.ToOperand(),
+           static_cast<uint8_t>(slot_index));
+  } else if (FitsInIdx16Operand(slot_index)) {
+    Output(Bytecode::kStaContextSlotWide, context.ToOperand(),
+           static_cast<uint16_t>(slot_index));
+  } else {
+    UNIMPLEMENTED();
+  }
+  return *this;
+}
+
+
+// Dynamic (lookup) slot load; a distinct bytecode is used inside typeof so
+// unresolved references do not throw.
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupSlot(
+    const Handle<String> name, TypeofMode typeof_mode) {
+  Bytecode bytecode = (typeof_mode == INSIDE_TYPEOF)
+                          ? Bytecode::kLdaLookupSlotInsideTypeof
+                          : Bytecode::kLdaLookupSlot;
+  size_t name_index = GetConstantPoolEntry(name);
+  if (FitsInIdx8Operand(name_index)) {
+    Output(bytecode, static_cast<uint8_t>(name_index));
+  } else if (FitsInIdx16Operand(name_index)) {
+    Output(BytecodeForWideOperands(bytecode),
+           static_cast<uint16_t>(name_index));
+  } else {
+    UNIMPLEMENTED();
+  }
+  return *this;
+}
+
+
+// Dynamic (lookup) slot store; language mode selects the bytecode.
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreLookupSlot(
+    const Handle<String> name, LanguageMode language_mode) {
+  Bytecode bytecode = BytecodeForStoreLookupSlot(language_mode);
+  size_t name_index = GetConstantPoolEntry(name);
+  if (FitsInIdx8Operand(name_index)) {
+    Output(bytecode, static_cast<uint8_t>(name_index));
+  } else if (FitsInIdx16Operand(name_index)) {
+    Output(BytecodeForWideOperands(bytecode),
+           static_cast<uint16_t>(name_index));
+  } else {
+    UNIMPLEMENTED();
+  }
+  return *this;
+}
+
+
+// Named property load (LoadIC) from |object| with IC feedback slot; wide
+// bytecode when the name index or slot exceeds 8 bits.
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
+    Register object, const Handle<String> name, int feedback_slot,
+    LanguageMode language_mode) {
+  Bytecode bytecode = BytecodeForLoadIC(language_mode);
+  size_t name_index = GetConstantPoolEntry(name);
+  if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
+    Output(bytecode, object.ToOperand(), static_cast<uint8_t>(name_index),
+           static_cast<uint8_t>(feedback_slot));
+  } else if (FitsInIdx16Operand(name_index) &&
+             FitsInIdx16Operand(feedback_slot)) {
+    Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
+           static_cast<uint16_t>(name_index),
+           static_cast<uint16_t>(feedback_slot));
+  } else {
+    UNIMPLEMENTED();
+  }
+  return *this;
+}
+
+
+// Keyed property load (KeyedLoadIC); the key is in the accumulator.
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
+    Register object, int feedback_slot, LanguageMode language_mode) {
+  Bytecode bytecode = BytecodeForKeyedLoadIC(language_mode);
+  if (FitsInIdx8Operand(feedback_slot)) {
+    Output(bytecode, object.ToOperand(), static_cast<uint8_t>(feedback_slot));
+  } else if (FitsInIdx16Operand(feedback_slot)) {
+    Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
+           static_cast<uint16_t>(feedback_slot));
+  } else {
+    UNIMPLEMENTED();
+  }
+  return *this;
+}
+
+
+// Named property store (StoreIC); value to store is in the accumulator.
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
+    Register object, const Handle<String> name, int feedback_slot,
+    LanguageMode language_mode) {
+  Bytecode bytecode = BytecodeForStoreIC(language_mode);
+  size_t name_index = GetConstantPoolEntry(name);
+  if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
+    Output(bytecode, object.ToOperand(), static_cast<uint8_t>(name_index),
+           static_cast<uint8_t>(feedback_slot));
+  } else if (FitsInIdx16Operand(name_index) &&
+             FitsInIdx16Operand(feedback_slot)) {
+    Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
+           static_cast<uint16_t>(name_index),
+           static_cast<uint16_t>(feedback_slot));
+  } else {
+    UNIMPLEMENTED();
+  }
+  return *this;
+}
+
+
+// Keyed property store (KeyedStoreIC); key register is an explicit operand.
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
+    Register object, Register key, int feedback_slot,
+    LanguageMode language_mode) {
+  Bytecode bytecode = BytecodeForKeyedStoreIC(language_mode);
+  if (FitsInIdx8Operand(feedback_slot)) {
+    Output(bytecode, object.ToOperand(), key.ToOperand(),
+           static_cast<uint8_t>(feedback_slot));
+  } else if (FitsInIdx16Operand(feedback_slot)) {
+    Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
+           key.ToOperand(), static_cast<uint16_t>(feedback_slot));
+  } else {
+    UNIMPLEMENTED();
+  }
+  return *this;
+}
+
+
+// Creates a closure for |shared_info|; |tenured| is passed as an 8-bit
+// immediate, while the constant pool index may need the wide bytecode.
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(
+    Handle<SharedFunctionInfo> shared_info, PretenureFlag tenured) {
+  size_t entry = GetConstantPoolEntry(shared_info);
+  DCHECK(FitsInImm8Operand(tenured));
+  if (FitsInIdx8Operand(entry)) {
+    Output(Bytecode::kCreateClosure, static_cast<uint8_t>(entry),
+           static_cast<uint8_t>(tenured));
+  } else if (FitsInIdx16Operand(entry)) {
+    Output(Bytecode::kCreateClosureWide, static_cast<uint16_t>(entry),
+           static_cast<uint8_t>(tenured));
+  } else {
+    UNIMPLEMENTED();
+  }
+  return *this;
+}
+
+
+// Creates an arguments object; the arguments type selects the bytecode.
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArguments(
+    CreateArgumentsType type) {
+  // TODO(rmcilroy): Consider passing the type as a bytecode operand rather
+  // than having two different bytecodes once we have better support for
+  // branches in the InterpreterAssembler.
+  Bytecode bytecode = BytecodeForCreateArguments(type);
+  Output(bytecode);
+  return *this;
+}
+
+
+// Creates a RegExp literal from |pattern|; wide bytecode when either the
+// literal index or the pattern's pool entry exceeds 8 bits.
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateRegExpLiteral(
+    Handle<String> pattern, int literal_index, int flags) {
+  DCHECK(FitsInImm8Operand(flags));  // Flags should fit in 8 bits.
+  size_t pattern_entry = GetConstantPoolEntry(pattern);
+  if (FitsInIdx8Operand(literal_index) && FitsInIdx8Operand(pattern_entry)) {
+    Output(Bytecode::kCreateRegExpLiteral, static_cast<uint8_t>(pattern_entry),
+           static_cast<uint8_t>(literal_index), static_cast<uint8_t>(flags));
+  } else if (FitsInIdx16Operand(literal_index) &&
+             FitsInIdx16Operand(pattern_entry)) {
+    Output(Bytecode::kCreateRegExpLiteralWide,
+           static_cast<uint16_t>(pattern_entry),
+           static_cast<uint16_t>(literal_index), static_cast<uint8_t>(flags));
+  } else {
+    UNIMPLEMENTED();
+  }
+  return *this;
+}
+
+
+// Creates an array literal from its constant elements; operand widths are
+// selected the same way as for RegExp literals above.
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayLiteral(
+    Handle<FixedArray> constant_elements, int literal_index, int flags) {
+  DCHECK(FitsInImm8Operand(flags));  // Flags should fit in 8 bits.
+  size_t constant_elements_entry = GetConstantPoolEntry(constant_elements);
+  if (FitsInIdx8Operand(literal_index) &&
+      FitsInIdx8Operand(constant_elements_entry)) {
+    Output(Bytecode::kCreateArrayLiteral,
+           static_cast<uint8_t>(constant_elements_entry),
+           static_cast<uint8_t>(literal_index), static_cast<uint8_t>(flags));
+  } else if (FitsInIdx16Operand(literal_index) &&
+             FitsInIdx16Operand(constant_elements_entry)) {
+    Output(Bytecode::kCreateArrayLiteralWide,
+           static_cast<uint16_t>(constant_elements_entry),
+           static_cast<uint16_t>(literal_index), static_cast<uint8_t>(flags));
+  } else {
+    UNIMPLEMENTED();
+  }
+  return *this;
+}
+
+
+// Creates an object literal from its constant properties.
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateObjectLiteral(
+    Handle<FixedArray> constant_properties, int literal_index, int flags) {
+  DCHECK(FitsInImm8Operand(flags));  // Flags should fit in 8 bits.
+  size_t constant_properties_entry = GetConstantPoolEntry(constant_properties);
+  if (FitsInIdx8Operand(literal_index) &&
+      FitsInIdx8Operand(constant_properties_entry)) {
+    Output(Bytecode::kCreateObjectLiteral,
+           static_cast<uint8_t>(constant_properties_entry),
+           static_cast<uint8_t>(literal_index), static_cast<uint8_t>(flags));
+  } else if (FitsInIdx16Operand(literal_index) &&
+             FitsInIdx16Operand(constant_properties_entry)) {
+    Output(Bytecode::kCreateObjectLiteralWide,
+           static_cast<uint16_t>(constant_properties_entry),
+           static_cast<uint16_t>(literal_index), static_cast<uint8_t>(flags));
+  } else {
+    UNIMPLEMENTED();
+  }
+  return *this;
+}
+
+
+// Pushes the context in |context| as the current context.
+BytecodeArrayBuilder& BytecodeArrayBuilder::PushContext(Register context) {
+  Output(Bytecode::kPushContext, context.ToOperand());
+  return *this;
+}
+
+
+// Restores |context| as the current context.
+BytecodeArrayBuilder& BytecodeArrayBuilder::PopContext(Register context) {
+  Output(Bytecode::kPopContext, context.ToOperand());
+  return *this;
+}
+
+
+// Returns false only when the previous bytecode in this basic block is
+// known to leave a boolean in the accumulator; conservatively returns true
+// across block boundaries or for any other bytecode.
+bool BytecodeArrayBuilder::NeedToBooleanCast() {
+  if (!LastBytecodeInSameBlock()) {
+    return true;
+  }
+  PreviousBytecodeHelper previous_bytecode(*this);
+  switch (previous_bytecode.GetBytecode()) {
+    // If the previous bytecode puts a boolean in the accumulator return true.
+    case Bytecode::kLdaTrue:
+    case Bytecode::kLdaFalse:
+    case Bytecode::kLogicalNot:
+    case Bytecode::kTestEqual:
+    case Bytecode::kTestNotEqual:
+    case Bytecode::kTestEqualStrict:
+    case Bytecode::kTestNotEqualStrict:
+    case Bytecode::kTestLessThan:
+    case Bytecode::kTestLessThanOrEqual:
+    case Bytecode::kTestGreaterThan:
+    case Bytecode::kTestGreaterThanOrEqual:
+    case Bytecode::kTestInstanceOf:
+    case Bytecode::kTestIn:
+    case Bytecode::kForInDone:
+      return false;
+    default:
+      return true;
+  }
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToJSObject() {
+  Output(Bytecode::kToObject);
+  return *this;
+}
+
+
+// Emits ToName unless the previous bytecode in this block already
+// guarantees a name in the accumulator (ToName/TypeOf, or a constant load
+// whose constant is a Name).
+BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToName() {
+  if (LastBytecodeInSameBlock()) {
+    PreviousBytecodeHelper previous_bytecode(*this);
+    switch (previous_bytecode.GetBytecode()) {
+      case Bytecode::kToName:
+      case Bytecode::kTypeOf:
+        return *this;
+      case Bytecode::kLdaConstantWide:
+      case Bytecode::kLdaConstant: {
+        Handle<Object> object = previous_bytecode.GetConstantForIndexOperand(0);
+        if (object->IsName()) return *this;
+        break;
+      }
+      default:
+        break;
+    }
+  }
+  Output(Bytecode::kToName);
+  return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToNumber() {
+  // TODO(rmcilroy): consider omitting if the preceding bytecode always returns
+  // a number.
+  Output(Bytecode::kToNumber);
+  return *this;
+}
+
+
+// Binds |label| to the current end of the bytecode stream, patching any
+// forward jump that referenced it. Binding starts a new basic block.
+BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeLabel* label) {
+  if (label->is_forward_target()) {
+    // An earlier jump instruction refers to this label. Update its location.
+    PatchJump(bytecodes()->end(), bytecodes()->begin() + label->offset());
+    // Now treat as if the label will only be back referred to.
+  }
+  label->bind_to(bytecodes()->size());
+  LeaveBasicBlock();
+  return *this;
+}
+
+
+// Binds |label| to the same offset as the already-bound |target| label,
+// patching the jump that referenced |label|.
+BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(const BytecodeLabel& target,
+                                                 BytecodeLabel* label) {
+  DCHECK(!label->is_bound());
+  DCHECK(target.is_bound());
+  PatchJump(bytecodes()->begin() + target.offset(),
+            bytecodes()->begin() + label->offset());
+  label->bind_to(target.offset());
+  LeaveBasicBlock();
+  return *this;
+}
+
+
+// static
+// Maps an immediate-operand jump bytecode to the variant that takes its
+// offset from an 8-bit constant pool index.
+Bytecode BytecodeArrayBuilder::GetJumpWithConstantOperand(
+    Bytecode jump_bytecode) {
+  switch (jump_bytecode) {
+    case Bytecode::kJump:
+      return Bytecode::kJumpConstant;
+    case Bytecode::kJumpIfTrue:
+      return Bytecode::kJumpIfTrueConstant;
+    case Bytecode::kJumpIfFalse:
+      return Bytecode::kJumpIfFalseConstant;
+    case Bytecode::kJumpIfToBooleanTrue:
+      return Bytecode::kJumpIfToBooleanTrueConstant;
+    case Bytecode::kJumpIfToBooleanFalse:
+      return Bytecode::kJumpIfToBooleanFalseConstant;
+    case Bytecode::kJumpIfNull:
+      return Bytecode::kJumpIfNullConstant;
+    case Bytecode::kJumpIfUndefined:
+      return Bytecode::kJumpIfUndefinedConstant;
+    default:
+      UNREACHABLE();
+      return static_cast<Bytecode>(-1);
+  }
+}
+
+
+// static
+// Maps an immediate-operand jump bytecode to the variant that takes its
+// offset from a 16-bit constant pool index.
+Bytecode BytecodeArrayBuilder::GetJumpWithConstantWideOperand(
+    Bytecode jump_bytecode) {
+  switch (jump_bytecode) {
+    case Bytecode::kJump:
+      return Bytecode::kJumpConstantWide;
+    case Bytecode::kJumpIfTrue:
+      return Bytecode::kJumpIfTrueConstantWide;
+    case Bytecode::kJumpIfFalse:
+      return Bytecode::kJumpIfFalseConstantWide;
+    case Bytecode::kJumpIfToBooleanTrue:
+      return Bytecode::kJumpIfToBooleanTrueConstantWide;
+    case Bytecode::kJumpIfToBooleanFalse:
+      return Bytecode::kJumpIfToBooleanFalseConstantWide;
+    case Bytecode::kJumpIfNull:
+      return Bytecode::kJumpIfNullConstantWide;
+    case Bytecode::kJumpIfUndefined:
+      return Bytecode::kJumpIfUndefinedConstantWide;
+    default:
+      UNREACHABLE();
+      return static_cast<Bytecode>(-1);
+  }
+}
+
+
+// static
+// Maps a boolean-conditional jump to its ToBoolean-coercing variant; jumps
+// that do not test a boolean pass through unchanged.
+Bytecode BytecodeArrayBuilder::GetJumpWithToBoolean(Bytecode jump_bytecode) {
+  switch (jump_bytecode) {
+    case Bytecode::kJump:
+    case Bytecode::kJumpIfNull:
+    case Bytecode::kJumpIfUndefined:
+      return jump_bytecode;
+    case Bytecode::kJumpIfTrue:
+      return Bytecode::kJumpIfToBooleanTrue;
+    case Bytecode::kJumpIfFalse:
+      return Bytecode::kJumpIfToBooleanFalse;
+    default:
+      UNREACHABLE();
+  }
+  return static_cast<Bytecode>(-1);
+}
+
+
+// Patches a forward jump whose operand was reserved as a single byte. If
+// the computed delta fits in an Imm8 operand the reservation is discarded
+// and the offset written directly; otherwise the delta is committed to the
+// constant pool and the jump is rewritten to its constant-operand form.
+void BytecodeArrayBuilder::PatchIndirectJumpWith8BitOperand(
+    const ZoneVector<uint8_t>::iterator& jump_location, int delta) {
+  Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
+  DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
+  ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
+  DCHECK_EQ(*operand_location, 0);
+  if (FitsInImm8Operand(delta)) {
+    // The jump fits within the range of an Imm8 operand, so cancel
+    // the reservation and jump directly.
+    constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
+    *operand_location = static_cast<uint8_t>(delta);
+  } else {
+    // The jump does not fit within the range of an Imm8 operand, so
+    // commit reservation putting the offset into the constant pool,
+    // and update the jump instruction and operand.
+    size_t entry = constant_array_builder()->CommitReservedEntry(
+        OperandSize::kByte, handle(Smi::FromInt(delta), isolate()));
+    DCHECK(FitsInIdx8Operand(entry));
+    jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
+    *jump_location = Bytecodes::ToByte(jump_bytecode);
+    *operand_location = static_cast<uint8_t>(entry);
+  }
+}
+
+
+// Patches a forward jump emitted with a wide (16-bit) constant-pool
+// operand: the delta goes into the pool and its index into the two
+// reserved operand bytes.
+void BytecodeArrayBuilder::PatchIndirectJumpWith16BitOperand(
+    const ZoneVector<uint8_t>::iterator& jump_location, int delta) {
+  DCHECK(Bytecodes::IsJumpConstantWide(Bytecodes::FromByte(*jump_location)));
+  ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
+  size_t entry = constant_array_builder()->CommitReservedEntry(
+      OperandSize::kShort, handle(Smi::FromInt(delta), isolate()));
+  DCHECK(FitsInIdx16Operand(entry));
+  uint8_t operand_bytes[2];
+  WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
+  DCHECK(*operand_location == 0 && *(operand_location + 1) == 0);
+  *operand_location++ = operand_bytes[0];
+  *operand_location = operand_bytes[1];
+}
+
+
+// Patches the jump at |jump_location| to target |jump_target|, dispatching
+// on the jump's operand size. Decrements the unbound-jump count.
+void BytecodeArrayBuilder::PatchJump(
+    const ZoneVector<uint8_t>::iterator& jump_target,
+    const ZoneVector<uint8_t>::iterator& jump_location) {
+  Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
+  int delta = static_cast<int>(jump_target - jump_location);
+  DCHECK(Bytecodes::IsJump(jump_bytecode));
+  switch (Bytecodes::GetOperandSize(jump_bytecode, 0)) {
+    case OperandSize::kByte:
+      PatchIndirectJumpWith8BitOperand(jump_location, delta);
+      break;
+    case OperandSize::kShort:
+      PatchIndirectJumpWith16BitOperand(jump_location, delta);
+      break;
+    case OperandSize::kNone:
+      UNREACHABLE();
+  }
+  unbound_jumps_--;
+}
+
+
+// Emits a jump to |label|. Backward jumps (label already bound) are encoded
+// immediately, via an Imm8 offset or a constant-pool entry; forward jumps
+// reserve a constant-pool slot and are patched when the label is bound.
+// Emitting a jump ends the current basic block.
+BytecodeArrayBuilder& BytecodeArrayBuilder::OutputJump(Bytecode jump_bytecode,
+                                                       BytecodeLabel* label) {
+  // Don't emit dead code.
+  if (exit_seen_in_block_) return *this;
+
+  // Check if the value in accumulator is boolean, if not choose an
+  // appropriate JumpIfToBoolean bytecode.
+  if (NeedToBooleanCast()) {
+    jump_bytecode = GetJumpWithToBoolean(jump_bytecode);
+  }
+
+  if (label->is_bound()) {
+    // Label has been bound already so this is a backwards jump.
+    CHECK_GE(bytecodes()->size(), label->offset());
+    CHECK_LE(bytecodes()->size(), static_cast<size_t>(kMaxInt));
+    size_t abs_delta = bytecodes()->size() - label->offset();
+    int delta = -static_cast<int>(abs_delta);
+
+    if (FitsInImm8Operand(delta)) {
+      Output(jump_bytecode, static_cast<uint8_t>(delta));
+    } else {
+      // Delta too large for an immediate: store it in the constant pool.
+      size_t entry =
+          GetConstantPoolEntry(handle(Smi::FromInt(delta), isolate()));
+      if (FitsInIdx8Operand(entry)) {
+        Output(GetJumpWithConstantOperand(jump_bytecode),
+               static_cast<uint8_t>(entry));
+      } else if (FitsInIdx16Operand(entry)) {
+        Output(GetJumpWithConstantWideOperand(jump_bytecode),
+               static_cast<uint16_t>(entry));
+      } else {
+        UNREACHABLE();
+      }
+    }
+  } else {
+    // The label has not yet been bound so this is a forward reference
+    // that will be patched when the label is bound. We create a
+    // reservation in the constant pool so the jump can be patched
+    // when the label is bound. The reservation means the maximum size
+    // of the operand for the constant is known and the jump can
+    // be emitted into the bytecode stream with space for the operand.
+    label->set_referrer(bytecodes()->size());
+    unbound_jumps_++;
+    OperandSize reserved_operand_size =
+        constant_array_builder()->CreateReservedEntry();
+    switch (reserved_operand_size) {
+      case OperandSize::kByte:
+        Output(jump_bytecode, 0);
+        break;
+      case OperandSize::kShort:
+        Output(GetJumpWithConstantWideOperand(jump_bytecode), 0);
+        break;
+      case OperandSize::kNone:
+        UNREACHABLE();
+    }
+  }
+  LeaveBasicBlock();
+  return *this;
+}
+
+
+// Unconditional jump to |label|.
+BytecodeArrayBuilder& BytecodeArrayBuilder::Jump(BytecodeLabel* label) {
+  return OutputJump(Bytecode::kJump, label);
+}
+
+
+// Jump to |label| if the accumulator holds true.
+BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfTrue(BytecodeLabel* label) {
+  return OutputJump(Bytecode::kJumpIfTrue, label);
+}
+
+
+// Jump to |label| if the accumulator holds false.
+BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfFalse(BytecodeLabel* label) {
+  return OutputJump(Bytecode::kJumpIfFalse, label);
+}
+
+
+// Jump to |label| if the accumulator holds null.
+BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNull(BytecodeLabel* label) {
+  return OutputJump(Bytecode::kJumpIfNull, label);
+}
+
+
+// Jump to |label| if the accumulator holds undefined.
+BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfUndefined(
+    BytecodeLabel* label) {
+  return OutputJump(Bytecode::kJumpIfUndefined, label);
+}
+
+
+// Emits a Throw of the value in the accumulator. Marks the block as exited so
+// subsequent bytecode in this basic block is treated as dead code.
+BytecodeArrayBuilder& BytecodeArrayBuilder::Throw() {
+  Output(Bytecode::kThrow);
+  exit_seen_in_block_ = true;
+  return *this;
+}
+
+
+// Emits a Return of the value in the accumulator. Marks the block as exited so
+// subsequent bytecode in this basic block is treated as dead code.
+BytecodeArrayBuilder& BytecodeArrayBuilder::Return() {
+  Output(Bytecode::kReturn);
+  exit_seen_in_block_ = true;
+  return *this;
+}
+
+
+// Emits ForInPrepare, writing the for-in state into the three given registers.
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(
+    Register cache_type, Register cache_array, Register cache_length) {
+  Output(Bytecode::kForInPrepare, cache_type.ToOperand(),
+         cache_array.ToOperand(), cache_length.ToOperand());
+  return *this;
+}
+
+
+// Emits ForInDone, testing |index| against |cache_length|.
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInDone(Register index,
+                                                      Register cache_length) {
+  Output(Bytecode::kForInDone, index.ToOperand(), cache_length.ToOperand());
+  return *this;
+}
+
+
+// Emits ForInNext, loading the next enumerated property for |receiver|.
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(Register receiver,
+                                                      Register cache_type,
+                                                      Register cache_array,
+                                                      Register index) {
+  Output(Bytecode::kForInNext, receiver.ToOperand(), cache_type.ToOperand(),
+         cache_array.ToOperand(), index.ToOperand());
+  return *this;
+}
+
+
+// Emits ForInStep, advancing the loop |index|.
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInStep(Register index) {
+  Output(Bytecode::kForInStep, index.ToOperand());
+  return *this;
+}
+
+
+// Records the end of the current basic block and resets the dead-code flag
+// so emission resumes in the following block.
+void BytecodeArrayBuilder::LeaveBasicBlock() {
+  last_block_end_ = bytecodes()->size();
+  exit_seen_in_block_ = false;
+}
+
+
+// Guarantees the function ends with a return: if the current block has no
+// exit (Return/Throw/jump), appends an implicit "return undefined".
+void BytecodeArrayBuilder::EnsureReturn() {
+  if (!exit_seen_in_block_) {
+    LoadUndefined();
+    Return();
+  }
+}
+
+
+// Emits a JS call of |callable| with |receiver| and |arg_count| arguments,
+// using |feedback_slot| for type feedback. Picks kCall when both counts fit
+// in 8-bit index operands, kCallWide when they fit in 16 bits.
+BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
+                                                 Register receiver,
+                                                 size_t arg_count,
+                                                 int feedback_slot) {
+  if (FitsInIdx8Operand(arg_count) && FitsInIdx8Operand(feedback_slot)) {
+    Output(Bytecode::kCall, callable.ToOperand(), receiver.ToOperand(),
+           static_cast<uint8_t>(arg_count),
+           static_cast<uint8_t>(feedback_slot));
+  } else if (FitsInIdx16Operand(arg_count) &&
+             FitsInIdx16Operand(feedback_slot)) {
+    Output(Bytecode::kCallWide, callable.ToOperand(), receiver.ToOperand(),
+           static_cast<uint16_t>(arg_count),
+           static_cast<uint16_t>(feedback_slot));
+  } else {
+    UNIMPLEMENTED();
+  }
+  return *this;
+}
+
+
+// Emits a kNew invoking |constructor| with |arg_count| consecutive argument
+// registers starting at |first_arg|.
+BytecodeArrayBuilder& BytecodeArrayBuilder::New(Register constructor,
+                                                Register first_arg,
+                                                size_t arg_count) {
+  if (!first_arg.is_valid()) {
+    // No arguments: substitute register 0 as a placeholder base register.
+    DCHECK_EQ(0u, arg_count);
+    first_arg = Register(0);
+  }
+  DCHECK(FitsInIdx8Operand(arg_count));
+  Output(Bytecode::kNew, constructor.ToOperand(), first_arg.ToOperand(),
+         static_cast<uint8_t>(arg_count));
+  return *this;
+}
+
+
+// Emits kCallRuntime for a single-result runtime function, with |arg_count|
+// consecutive argument registers starting at |first_arg|.
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
+    Runtime::FunctionId function_id, Register first_arg, size_t arg_count) {
+  DCHECK_EQ(1, Runtime::FunctionForId(function_id)->result_size);
+  DCHECK(FitsInIdx16Operand(function_id));
+  DCHECK(FitsInIdx8Operand(arg_count));
+  if (!first_arg.is_valid()) {
+    // No arguments: substitute register 0 as a placeholder base register.
+    DCHECK_EQ(0u, arg_count);
+    first_arg = Register(0);
+  }
+  Output(Bytecode::kCallRuntime, static_cast<uint16_t>(function_id),
+         first_arg.ToOperand(), static_cast<uint8_t>(arg_count));
+  return *this;
+}
+
+
+// Emits kCallRuntimeForPair for a two-result runtime function; the results
+// are stored in |first_return| and the register after it.
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntimeForPair(
+    Runtime::FunctionId function_id, Register first_arg, size_t arg_count,
+    Register first_return) {
+  DCHECK_EQ(2, Runtime::FunctionForId(function_id)->result_size);
+  DCHECK(FitsInIdx16Operand(function_id));
+  DCHECK(FitsInIdx8Operand(arg_count));
+  if (!first_arg.is_valid()) {
+    // No arguments: substitute register 0 as a placeholder base register.
+    DCHECK_EQ(0u, arg_count);
+    first_arg = Register(0);
+  }
+  Output(Bytecode::kCallRuntimeForPair, static_cast<uint16_t>(function_id),
+         first_arg.ToOperand(), static_cast<uint8_t>(arg_count),
+         first_return.ToOperand());
+  return *this;
+}
+
+
+// Emits kCallJSRuntime calling the JS runtime function at |context_index|
+// with |receiver| and |arg_count| following argument registers.
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(int context_index,
+                                                          Register receiver,
+                                                          size_t arg_count) {
+  DCHECK(FitsInIdx16Operand(context_index));
+  DCHECK(FitsInIdx8Operand(arg_count));
+  Output(Bytecode::kCallJSRuntime, static_cast<uint16_t>(context_index),
+         receiver.ToOperand(), static_cast<uint8_t>(arg_count));
+  return *this;
+}
+
+
+// Emits a delete-property bytecode chosen by |language_mode|; the key is
+// expected in the accumulator and the object in |object|.
+BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
+                                                   LanguageMode language_mode) {
+  Output(BytecodeForDelete(language_mode), object.ToOperand());
+  return *this;
+}
+
+
+// Emits kDeleteLookupSlot to delete a dynamically looked-up slot.
+BytecodeArrayBuilder& BytecodeArrayBuilder::DeleteLookupSlot() {
+  Output(Bytecode::kDeleteLookupSlot);
+  return *this;
+}
+
+
+// Inserts |object| into the constant pool and returns its entry index.
+size_t BytecodeArrayBuilder::GetConstantPoolEntry(Handle<Object> object) {
+  return constant_array_builder()->Insert(object);
+}
+
+
+// Allocates a temporary register index: reuses the smallest free temporary,
+// or grows the temporary register count when none is free.
+int BytecodeArrayBuilder::BorrowTemporaryRegister() {
+  if (free_temporaries_.empty()) {
+    temporary_register_count_ += 1;
+    return last_temporary_register().index();
+  } else {
+    auto pos = free_temporaries_.begin();
+    int retval = *pos;
+    free_temporaries_.erase(pos);
+    return retval;
+  }
+}
+
+
+// Like BorrowTemporaryRegister, but never returns an index in the inclusive
+// range [start_index, end_index]; grows the allocation if necessary.
+int BytecodeArrayBuilder::BorrowTemporaryRegisterNotInRange(int start_index,
+                                                            int end_index) {
+  auto index = free_temporaries_.lower_bound(start_index);
+  if (index == free_temporaries_.begin()) {
+    // If start_index is the first free register, check for a register
+    // greater than end_index.
+    index = free_temporaries_.upper_bound(end_index);
+    if (index == free_temporaries_.end()) {
+      temporary_register_count_ += 1;
+      return last_temporary_register().index();
+    }
+  } else {
+    // If there is a free register < start_index
+    index--;
+  }
+
+  int retval = *index;
+  free_temporaries_.erase(index);
+  return retval;
+}
+
+
+// Claims a specific free temporary (used when reserving a consecutive run).
+void BytecodeArrayBuilder::BorrowConsecutiveTemporaryRegister(int reg_index) {
+  DCHECK(free_temporaries_.find(reg_index) != free_temporaries_.end());
+  free_temporaries_.erase(reg_index);
+}
+
+
+// Returns a borrowed temporary to the free set.
+void BytecodeArrayBuilder::ReturnTemporaryRegister(int reg_index) {
+  DCHECK(free_temporaries_.find(reg_index) == free_temporaries_.end());
+  free_temporaries_.insert(reg_index);
+}
+
+
+// Finds (or creates) a run of |count| consecutive free temporary registers
+// and returns the index of the first one; returns -1 when count is zero.
+// The registers remain in the free set; callers claim them individually via
+// BorrowConsecutiveTemporaryRegister.
+int BytecodeArrayBuilder::PrepareForConsecutiveTemporaryRegisters(
+    size_t count) {
+  if (count == 0) {
+    return -1;
+  }
+
+  // Search within existing temporaries for a run.
+  auto start = free_temporaries_.begin();
+  size_t run_length = 0;
+  for (auto run_end = start; run_end != free_temporaries_.end(); run_end++) {
+    if (*run_end != *start + static_cast<int>(run_length)) {
+      start = run_end;
+      run_length = 0;
+    }
+    if (++run_length == count) {
+      return *start;
+    }
+  }
+
+  // Continue run if possible across existing last temporary.
+  if (temporary_register_count_ > 0 &&
+      (start == free_temporaries_.end() ||
+       *start + static_cast<int>(run_length) !=
+           last_temporary_register().index() + 1)) {
+    run_length = 0;
+  }
+
+  // Ensure enough registers for run.
+  while (run_length++ < count) {
+    temporary_register_count_++;
+    free_temporaries_.insert(last_temporary_register().index());
+  }
+  return last_temporary_register().index() - static_cast<int>(count) + 1;
+}
+
+
+// A temporary register is live when it has been allocated and is not
+// currently in the free set.
+bool BytecodeArrayBuilder::TemporaryRegisterIsLive(Register reg) const {
+  if (temporary_register_count_ > 0) {
+    DCHECK(reg.index() >= first_temporary_register().index() &&
+           reg.index() <= last_temporary_register().index());
+    return free_temporaries_.find(reg.index()) == free_temporaries_.end();
+  } else {
+    return false;
+  }
+}
+
+
+// Validates |reg| against the known register classes: the implicit function
+// registers, declared parameters, fixed (context + local) registers, and
+// live temporaries.
+bool BytecodeArrayBuilder::RegisterIsValid(Register reg) const {
+  if (reg.is_function_context() || reg.is_function_closure() ||
+      reg.is_new_target()) {
+    return true;
+  } else if (reg.is_parameter()) {
+    int parameter_index = reg.ToParameterIndex(parameter_count_);
+    return parameter_index >= 0 && parameter_index < parameter_count_;
+  } else if (reg.index() < fixed_register_count()) {
+    return true;
+  } else {
+    return TemporaryRegisterIsLive(reg);
+  }
+}
+
+
+// Debug-mode check that |operand_value| is representable as the operand at
+// |operand_index| of |bytecode|: width checks for immediates/indices, and
+// register validity checks for register operands.
+bool BytecodeArrayBuilder::OperandIsValid(Bytecode bytecode, int operand_index,
+                                          uint32_t operand_value) const {
+  OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
+  switch (operand_type) {
+    case OperandType::kNone:
+      return false;
+    case OperandType::kCount16:
+    case OperandType::kIdx16:
+      return static_cast<uint16_t>(operand_value) == operand_value;
+    case OperandType::kCount8:
+    case OperandType::kImm8:
+    case OperandType::kIdx8:
+      return static_cast<uint8_t>(operand_value) == operand_value;
+    case OperandType::kMaybeReg8:
+      // Zero encodes "no register" for an optional register operand.
+      if (operand_value == 0) {
+        return true;
+      }
+    // Fall-through to kReg8 case.
+    case OperandType::kReg8:
+      return RegisterIsValid(
+          Register::FromOperand(static_cast<uint8_t>(operand_value)));
+    case OperandType::kRegPair8: {
+      // A register pair names two consecutive registers; both must be valid.
+      Register reg0 =
+          Register::FromOperand(static_cast<uint8_t>(operand_value));
+      Register reg1 = Register(reg0.index() + 1);
+      return RegisterIsValid(reg0) && RegisterIsValid(reg1);
+    }
+    case OperandType::kReg16:
+      // Wide register operands are only used by the exchange bytecodes here.
+      if (bytecode != Bytecode::kExchange &&
+          bytecode != Bytecode::kExchangeWide) {
+        return false;
+      }
+      return RegisterIsValid(
+          Register::FromWideOperand(static_cast<uint16_t>(operand_value)));
+  }
+  UNREACHABLE();
+  return false;
+}
+
+
+// True when the most recently emitted bytecode lies inside the current basic
+// block (i.e. after the last recorded block boundary).
+bool BytecodeArrayBuilder::LastBytecodeInSameBlock() const {
+  return last_bytecode_start_ < bytecodes()->size() &&
+         last_bytecode_start_ >= last_block_end_;
+}
+
+
+// Returns true when the previous bytecode in this block was an Ldar or Star
+// on |reg|, meaning the accumulator already holds |reg|'s value.
+bool BytecodeArrayBuilder::IsRegisterInAccumulator(Register reg) {
+  if (LastBytecodeInSameBlock()) {
+    PreviousBytecodeHelper previous_bytecode(*this);
+    Bytecode bytecode = previous_bytecode.GetBytecode();
+    if ((bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar) &&
+        (reg == Register::FromOperand(previous_bytecode.GetOperand(0)))) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+// static
+// Maps a binary-operator token to its interpreter bytecode.
+Bytecode BytecodeArrayBuilder::BytecodeForBinaryOperation(Token::Value op) {
+  switch (op) {
+    case Token::Value::ADD:
+      return Bytecode::kAdd;
+    case Token::Value::SUB:
+      return Bytecode::kSub;
+    case Token::Value::MUL:
+      return Bytecode::kMul;
+    case Token::Value::DIV:
+      return Bytecode::kDiv;
+    case Token::Value::MOD:
+      return Bytecode::kMod;
+    case Token::Value::BIT_OR:
+      return Bytecode::kBitwiseOr;
+    case Token::Value::BIT_XOR:
+      return Bytecode::kBitwiseXor;
+    case Token::Value::BIT_AND:
+      return Bytecode::kBitwiseAnd;
+    case Token::Value::SHL:
+      return Bytecode::kShiftLeft;
+    case Token::Value::SAR:
+      return Bytecode::kShiftRight;
+    case Token::Value::SHR:
+      return Bytecode::kShiftRightLogical;
+    default:
+      UNREACHABLE();
+      return static_cast<Bytecode>(-1);
+  }
+}
+
+
+// static
+// Maps a count-operator token (++/--) to its interpreter bytecode.
+Bytecode BytecodeArrayBuilder::BytecodeForCountOperation(Token::Value op) {
+  switch (op) {
+    case Token::Value::ADD:
+      return Bytecode::kInc;
+    case Token::Value::SUB:
+      return Bytecode::kDec;
+    default:
+      UNREACHABLE();
+      return static_cast<Bytecode>(-1);
+  }
+}
+
+
+// static
+// Maps a comparison-operator token to its test bytecode.
+Bytecode BytecodeArrayBuilder::BytecodeForCompareOperation(Token::Value op) {
+  switch (op) {
+    case Token::Value::EQ:
+      return Bytecode::kTestEqual;
+    case Token::Value::NE:
+      return Bytecode::kTestNotEqual;
+    case Token::Value::EQ_STRICT:
+      return Bytecode::kTestEqualStrict;
+    case Token::Value::NE_STRICT:
+      return Bytecode::kTestNotEqualStrict;
+    case Token::Value::LT:
+      return Bytecode::kTestLessThan;
+    case Token::Value::GT:
+      return Bytecode::kTestGreaterThan;
+    case Token::Value::LTE:
+      return Bytecode::kTestLessThanOrEqual;
+    case Token::Value::GTE:
+      return Bytecode::kTestGreaterThanOrEqual;
+    case Token::Value::INSTANCEOF:
+      return Bytecode::kTestInstanceOf;
+    case Token::Value::IN:
+      return Bytecode::kTestIn;
+    default:
+      UNREACHABLE();
+      return static_cast<Bytecode>(-1);
+  }
+}
+
+
+// static
+// Maps a bytecode to its wide-operand (16-bit index) variant; used when an
+// 8-bit feedback-slot or constant-pool index operand does not fit.
+Bytecode BytecodeArrayBuilder::BytecodeForWideOperands(Bytecode bytecode) {
+  switch (bytecode) {
+    case Bytecode::kLoadICSloppy:
+      return Bytecode::kLoadICSloppyWide;
+    case Bytecode::kLoadICStrict:
+      return Bytecode::kLoadICStrictWide;
+    case Bytecode::kKeyedLoadICSloppy:
+      return Bytecode::kKeyedLoadICSloppyWide;
+    case Bytecode::kKeyedLoadICStrict:
+      return Bytecode::kKeyedLoadICStrictWide;
+    case Bytecode::kStoreICSloppy:
+      return Bytecode::kStoreICSloppyWide;
+    case Bytecode::kStoreICStrict:
+      return Bytecode::kStoreICStrictWide;
+    case Bytecode::kKeyedStoreICSloppy:
+      return Bytecode::kKeyedStoreICSloppyWide;
+    case Bytecode::kKeyedStoreICStrict:
+      return Bytecode::kKeyedStoreICStrictWide;
+    case Bytecode::kLdaGlobalSloppy:
+      return Bytecode::kLdaGlobalSloppyWide;
+    case Bytecode::kLdaGlobalStrict:
+      return Bytecode::kLdaGlobalStrictWide;
+    case Bytecode::kLdaGlobalInsideTypeofSloppy:
+      return Bytecode::kLdaGlobalInsideTypeofSloppyWide;
+    case Bytecode::kLdaGlobalInsideTypeofStrict:
+      return Bytecode::kLdaGlobalInsideTypeofStrictWide;
+    case Bytecode::kStaGlobalSloppy:
+      return Bytecode::kStaGlobalSloppyWide;
+    case Bytecode::kStaGlobalStrict:
+      return Bytecode::kStaGlobalStrictWide;
+    case Bytecode::kLdaLookupSlot:
+      return Bytecode::kLdaLookupSlotWide;
+    case Bytecode::kLdaLookupSlotInsideTypeof:
+      return Bytecode::kLdaLookupSlotInsideTypeofWide;
+    case Bytecode::kStaLookupSlotStrict:
+      return Bytecode::kStaLookupSlotStrictWide;
+    case Bytecode::kStaLookupSlotSloppy:
+      return Bytecode::kStaLookupSlotSloppyWide;
+    default:
+      UNREACHABLE();
+      return static_cast<Bytecode>(-1);
+  }
+}
+
+
+// static
+// Selects the named-load IC bytecode for |language_mode|. STRONG mode is not
+// implemented in the interpreter at this version.
+Bytecode BytecodeArrayBuilder::BytecodeForLoadIC(LanguageMode language_mode) {
+  switch (language_mode) {
+    case SLOPPY:
+      return Bytecode::kLoadICSloppy;
+    case STRICT:
+      return Bytecode::kLoadICStrict;
+    case STRONG:
+      UNIMPLEMENTED();
+    default:
+      UNREACHABLE();
+  }
+  return static_cast<Bytecode>(-1);
+}
+
+
+// static
+// Selects the keyed-load IC bytecode for |language_mode|.
+Bytecode BytecodeArrayBuilder::BytecodeForKeyedLoadIC(
+    LanguageMode language_mode) {
+  switch (language_mode) {
+    case SLOPPY:
+      return Bytecode::kKeyedLoadICSloppy;
+    case STRICT:
+      return Bytecode::kKeyedLoadICStrict;
+    case STRONG:
+      UNIMPLEMENTED();
+    default:
+      UNREACHABLE();
+  }
+  return static_cast<Bytecode>(-1);
+}
+
+
+// static
+// Selects the named-store IC bytecode for |language_mode|.
+Bytecode BytecodeArrayBuilder::BytecodeForStoreIC(LanguageMode language_mode) {
+  switch (language_mode) {
+    case SLOPPY:
+      return Bytecode::kStoreICSloppy;
+    case STRICT:
+      return Bytecode::kStoreICStrict;
+    case STRONG:
+      UNIMPLEMENTED();
+    default:
+      UNREACHABLE();
+  }
+  return static_cast<Bytecode>(-1);
+}
+
+
+// static
+// Selects the keyed-store IC bytecode for |language_mode|.
+Bytecode BytecodeArrayBuilder::BytecodeForKeyedStoreIC(
+    LanguageMode language_mode) {
+  switch (language_mode) {
+    case SLOPPY:
+      return Bytecode::kKeyedStoreICSloppy;
+    case STRICT:
+      return Bytecode::kKeyedStoreICStrict;
+    case STRONG:
+      UNIMPLEMENTED();
+    default:
+      UNREACHABLE();
+  }
+  return static_cast<Bytecode>(-1);
+}
+
+
+// static
+// Selects the global-load bytecode for |language_mode|, using the
+// inside-typeof variant when the load occurs within a typeof expression.
+Bytecode BytecodeArrayBuilder::BytecodeForLoadGlobal(LanguageMode language_mode,
+                                                     TypeofMode typeof_mode) {
+  switch (language_mode) {
+    case SLOPPY:
+      return typeof_mode == INSIDE_TYPEOF
+                 ? Bytecode::kLdaGlobalInsideTypeofSloppy
+                 : Bytecode::kLdaGlobalSloppy;
+    case STRICT:
+      return typeof_mode == INSIDE_TYPEOF
+                 ? Bytecode::kLdaGlobalInsideTypeofStrict
+                 : Bytecode::kLdaGlobalStrict;
+    case STRONG:
+      UNIMPLEMENTED();
+    default:
+      UNREACHABLE();
+  }
+  return static_cast<Bytecode>(-1);
+}
+
+
+// static
+// Selects the global-store bytecode for |language_mode|.
+Bytecode BytecodeArrayBuilder::BytecodeForStoreGlobal(
+    LanguageMode language_mode) {
+  switch (language_mode) {
+    case SLOPPY:
+      return Bytecode::kStaGlobalSloppy;
+    case STRICT:
+      return Bytecode::kStaGlobalStrict;
+    case STRONG:
+      UNIMPLEMENTED();
+    default:
+      UNREACHABLE();
+  }
+  return static_cast<Bytecode>(-1);
+}
+
+
+// static
+// Selects the lookup-slot store bytecode for |language_mode|.
+Bytecode BytecodeArrayBuilder::BytecodeForStoreLookupSlot(
+    LanguageMode language_mode) {
+  switch (language_mode) {
+    case SLOPPY:
+      return Bytecode::kStaLookupSlotSloppy;
+    case STRICT:
+      return Bytecode::kStaLookupSlotStrict;
+    case STRONG:
+      UNIMPLEMENTED();
+    default:
+      UNREACHABLE();
+  }
+  return static_cast<Bytecode>(-1);
+}
+
+
+// static
+// Selects the arguments-object creation bytecode for |type|.
+Bytecode BytecodeArrayBuilder::BytecodeForCreateArguments(
+    CreateArgumentsType type) {
+  switch (type) {
+    case CreateArgumentsType::kMappedArguments:
+      return Bytecode::kCreateMappedArguments;
+    case CreateArgumentsType::kUnmappedArguments:
+      return Bytecode::kCreateUnmappedArguments;
+    default:
+      UNREACHABLE();
+  }
+  return static_cast<Bytecode>(-1);
+}
+
+
+// static
+// Selects the delete-property bytecode for |language_mode|.
+Bytecode BytecodeArrayBuilder::BytecodeForDelete(LanguageMode language_mode) {
+  switch (language_mode) {
+    case SLOPPY:
+      return Bytecode::kDeletePropertySloppy;
+    case STRICT:
+      return Bytecode::kDeletePropertyStrict;
+    case STRONG:
+      UNIMPLEMENTED();
+    default:
+      UNREACHABLE();
+  }
+  return static_cast<Bytecode>(-1);
+}
+
+
+// static
+// True when |value| fits an unsigned 8-bit index operand.
+bool BytecodeArrayBuilder::FitsInIdx8Operand(int value) {
+  return kMinUInt8 <= value && value <= kMaxUInt8;
+}
+
+
+// static
+// Unsigned overload of the 8-bit index check.
+bool BytecodeArrayBuilder::FitsInIdx8Operand(size_t value) {
+  return value <= static_cast<size_t>(kMaxUInt8);
+}
+
+
+// static
+// True when |value| fits a signed 8-bit immediate operand.
+bool BytecodeArrayBuilder::FitsInImm8Operand(int value) {
+  return kMinInt8 <= value && value <= kMaxInt8;
+}
+
+
+// static
+// True when |value| fits an unsigned 16-bit index operand.
+bool BytecodeArrayBuilder::FitsInIdx16Operand(int value) {
+  return kMinUInt16 <= value && value <= kMaxUInt16;
+}
+
+
+// static
+// Unsigned overload of the 16-bit index check.
+bool BytecodeArrayBuilder::FitsInIdx16Operand(size_t value) {
+  return value <= static_cast<size_t>(kMaxUInt16);
+}
+
+
+// static
+// True when |value|'s index fits a signed 8-bit register operand.
+bool BytecodeArrayBuilder::FitsInReg8Operand(Register value) {
+  return kMinInt8 <= value.index() && value.index() <= kMaxInt8;
+}
+
+
+// static
+// True when |value|'s index fits a signed 16-bit register operand.
+bool BytecodeArrayBuilder::FitsInReg16Operand(Register value) {
+  return kMinInt16 <= value.index() && value.index() <= kMaxInt16;
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/bytecode-array-builder.h b/src/interpreter/bytecode-array-builder.h
new file mode 100644
index 0000000..7c23dc3
--- /dev/null
+++ b/src/interpreter/bytecode-array-builder.h
@@ -0,0 +1,387 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_ARRAY_BUILDER_H_
+#define V8_INTERPRETER_BYTECODE_ARRAY_BUILDER_H_
+
+#include "src/ast/ast.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/constant-array-builder.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+namespace interpreter {
+
+class BytecodeLabel;
+class ConstantArrayBuilder;
+class Register;
+
+// TODO(rmcilroy): Unify this with CreateArgumentsParameters::Type in Turbofan
+// when rest parameters implementation has settled down.
+enum class CreateArgumentsType { kMappedArguments, kUnmappedArguments };
+
+class BytecodeArrayBuilder final {
+ public:
+  BytecodeArrayBuilder(Isolate* isolate, Zone* zone);
+  ~BytecodeArrayBuilder();
+
+  Handle<BytecodeArray> ToBytecodeArray();
+
+  // Set the number of parameters expected by function.
+  void set_parameter_count(int number_of_params);
+  int parameter_count() const {
+    DCHECK_GE(parameter_count_, 0);
+    return parameter_count_;
+  }
+
+  // Set the number of locals required for bytecode array.
+  void set_locals_count(int number_of_locals);
+  int locals_count() const {
+    DCHECK_GE(local_register_count_, 0);
+    return local_register_count_;
+  }
+
+  // Set number of contexts required for bytecode array.
+  void set_context_count(int number_of_contexts);
+  int context_count() const {
+    DCHECK_GE(context_register_count_, 0);
+    return context_register_count_;
+  }
+
+  Register first_context_register() const;
+  Register last_context_register() const;
+
+  // Returns the number of fixed (non-temporary) registers.
+  int fixed_register_count() const { return context_count() + locals_count(); }
+
+  Register Parameter(int parameter_index) const;
+
+  // Return true if the register |reg| represents a parameter or a
+  // local.
+  bool RegisterIsParameterOrLocal(Register reg) const;
+
+  // Return true if the register |reg| represents a temporary register.
+  bool RegisterIsTemporary(Register reg) const;
+
+  // Constant loads to accumulator.
+  BytecodeArrayBuilder& LoadLiteral(v8::internal::Smi* value);
+  BytecodeArrayBuilder& LoadLiteral(Handle<Object> object);
+  BytecodeArrayBuilder& LoadUndefined();
+  BytecodeArrayBuilder& LoadNull();
+  BytecodeArrayBuilder& LoadTheHole();
+  BytecodeArrayBuilder& LoadTrue();
+  BytecodeArrayBuilder& LoadFalse();
+  BytecodeArrayBuilder& LoadBooleanConstant(bool value);
+
+  // Global loads to the accumulator and stores from the accumulator.
+  BytecodeArrayBuilder& LoadGlobal(const Handle<String> name, int feedback_slot,
+                                   LanguageMode language_mode,
+                                   TypeofMode typeof_mode);
+  BytecodeArrayBuilder& StoreGlobal(const Handle<String> name,
+                                    int feedback_slot,
+                                    LanguageMode language_mode);
+
+  // Load the object at |slot_index| in |context| into the accumulator.
+  BytecodeArrayBuilder& LoadContextSlot(Register context, int slot_index);
+
+  // Stores the object in the accumulator into |slot_index| of |context|.
+  BytecodeArrayBuilder& StoreContextSlot(Register context, int slot_index);
+
+  // Register-accumulator transfers.
+  BytecodeArrayBuilder& LoadAccumulatorWithRegister(Register reg);
+  BytecodeArrayBuilder& StoreAccumulatorInRegister(Register reg);
+
+  // Register-register transfer.
+  BytecodeArrayBuilder& MoveRegister(Register from, Register to);
+  BytecodeArrayBuilder& ExchangeRegisters(Register reg0, Register reg1);
+
+  // Named load property.
+  BytecodeArrayBuilder& LoadNamedProperty(Register object,
+                                          const Handle<String> name,
+                                          int feedback_slot,
+                                          LanguageMode language_mode);
+  // Keyed load property. The key should be in the accumulator.
+  BytecodeArrayBuilder& LoadKeyedProperty(Register object, int feedback_slot,
+                                          LanguageMode language_mode);
+
+  // Store properties. The value to be stored should be in the accumulator.
+  BytecodeArrayBuilder& StoreNamedProperty(Register object,
+                                           const Handle<String> name,
+                                           int feedback_slot,
+                                           LanguageMode language_mode);
+  BytecodeArrayBuilder& StoreKeyedProperty(Register object, Register key,
+                                           int feedback_slot,
+                                           LanguageMode language_mode);
+
+  // Lookup the variable with |name|.
+  BytecodeArrayBuilder& LoadLookupSlot(const Handle<String> name,
+                                       TypeofMode typeof_mode);
+
+  // Store value in the accumulator into the variable with |name|.
+  BytecodeArrayBuilder& StoreLookupSlot(const Handle<String> name,
+                                        LanguageMode language_mode);
+
+  // Create a new closure for the SharedFunctionInfo.
+  BytecodeArrayBuilder& CreateClosure(Handle<SharedFunctionInfo> shared_info,
+                                      PretenureFlag tenured);
+
+  // Create a new arguments object in the accumulator.
+  BytecodeArrayBuilder& CreateArguments(CreateArgumentsType type);
+
+  // Literals creation.  Constant elements should be in the accumulator.
+  BytecodeArrayBuilder& CreateRegExpLiteral(Handle<String> pattern,
+                                            int literal_index, int flags);
+  BytecodeArrayBuilder& CreateArrayLiteral(Handle<FixedArray> constant_elements,
+                                           int literal_index, int flags);
+  BytecodeArrayBuilder& CreateObjectLiteral(
+      Handle<FixedArray> constant_properties, int literal_index, int flags);
+
+  // Push the context in accumulator as the new context, and store in register
+  // |context|.
+  BytecodeArrayBuilder& PushContext(Register context);
+
+  // Pop the current context and replace with |context|.
+  BytecodeArrayBuilder& PopContext(Register context);
+
+  // Call a JS function. The JSFunction or Callable to be called should be in
+  // |callable|, the receiver should be in |receiver| and all subsequent
+  // arguments should be in registers <receiver + 1> to
+  // <receiver + 1 + arg_count>.
+  BytecodeArrayBuilder& Call(Register callable, Register receiver,
+                             size_t arg_count, int feedback_slot);
+
+  // Call the new operator. The |constructor| register is followed by
+  // |arg_count| consecutive registers containing arguments to be
+  // applied to the constructor.
+  BytecodeArrayBuilder& New(Register constructor, Register first_arg,
+                            size_t arg_count);
+
+  // Call the runtime function with |function_id|. The first argument should be
+  // in |first_arg| and all subsequent arguments should be in registers
+  // <first_arg + 1> to <first_arg + 1 + arg_count>.
+  BytecodeArrayBuilder& CallRuntime(Runtime::FunctionId function_id,
+                                    Register first_arg, size_t arg_count);
+
+  // Call the runtime function with |function_id| that returns a pair of values.
+  // The first argument should be in |first_arg| and all subsequent arguments
+  // should be in registers <first_arg + 1> to <first_arg + 1 + arg_count>. The
+  // return values will be returned in <first_return> and <first_return + 1>.
+  BytecodeArrayBuilder& CallRuntimeForPair(Runtime::FunctionId function_id,
+                                           Register first_arg, size_t arg_count,
+                                           Register first_return);
+
+  // Call the JS runtime function with |context_index|. The receiver should
+  // be in |receiver| and all subsequent arguments should be in registers
+  // <receiver + 1> to <receiver + 1 + arg_count>.
+  BytecodeArrayBuilder& CallJSRuntime(int context_index, Register receiver,
+                                      size_t arg_count);
+
+  // Operators (register holds the lhs value, accumulator holds the rhs value).
+  BytecodeArrayBuilder& BinaryOperation(Token::Value binop, Register reg,
+                                        Strength strength);
+
+  // Count Operators (value stored in accumulator).
+  BytecodeArrayBuilder& CountOperation(Token::Value op, Strength strength);
+
+  // Unary Operators.
+  BytecodeArrayBuilder& LogicalNot();
+  BytecodeArrayBuilder& TypeOf();
+
+  // Deletes property from an object. This expects that accumulator contains
+  // the key to be deleted and the register contains a reference to the object.
+  BytecodeArrayBuilder& Delete(Register object, LanguageMode language_mode);
+  BytecodeArrayBuilder& DeleteLookupSlot();
+
+  // Tests.
+  BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg,
+                                         Strength strength);
+
+  // Casts.
+  BytecodeArrayBuilder& CastAccumulatorToBoolean();
+  BytecodeArrayBuilder& CastAccumulatorToJSObject();
+  BytecodeArrayBuilder& CastAccumulatorToName();
+  BytecodeArrayBuilder& CastAccumulatorToNumber();
+
+  // Flow Control.
+  BytecodeArrayBuilder& Bind(BytecodeLabel* label);
+  BytecodeArrayBuilder& Bind(const BytecodeLabel& target, BytecodeLabel* label);
+
+  BytecodeArrayBuilder& Jump(BytecodeLabel* label);
+  BytecodeArrayBuilder& JumpIfTrue(BytecodeLabel* label);
+  BytecodeArrayBuilder& JumpIfFalse(BytecodeLabel* label);
+  BytecodeArrayBuilder& JumpIfNull(BytecodeLabel* label);
+  BytecodeArrayBuilder& JumpIfUndefined(BytecodeLabel* label);
+
+  BytecodeArrayBuilder& Throw();
+  BytecodeArrayBuilder& Return();
+
+  // Complex flow control.
+  BytecodeArrayBuilder& ForInPrepare(Register cache_type, Register cache_array,
+                                     Register cache_length);
+  BytecodeArrayBuilder& ForInDone(Register index, Register cache_length);
+  BytecodeArrayBuilder& ForInNext(Register receiver, Register cache_type,
+                                  Register cache_array, Register index);
+  BytecodeArrayBuilder& ForInStep(Register index);
+
+  // Accessors
+  Zone* zone() const { return zone_; }
+
+ private:
+  ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
+  const ZoneVector<uint8_t>* bytecodes() const { return &bytecodes_; }
+  Isolate* isolate() const { return isolate_; }
+  ConstantArrayBuilder* constant_array_builder() {
+    return &constant_array_builder_;
+  }
+  const ConstantArrayBuilder* constant_array_builder() const {
+    return &constant_array_builder_;
+  }
+
+  static Bytecode BytecodeForBinaryOperation(Token::Value op);
+  static Bytecode BytecodeForCountOperation(Token::Value op);
+  static Bytecode BytecodeForCompareOperation(Token::Value op);
+  static Bytecode BytecodeForWideOperands(Bytecode bytecode);
+  static Bytecode BytecodeForLoadIC(LanguageMode language_mode);
+  static Bytecode BytecodeForKeyedLoadIC(LanguageMode language_mode);
+  static Bytecode BytecodeForStoreIC(LanguageMode language_mode);
+  static Bytecode BytecodeForKeyedStoreIC(LanguageMode language_mode);
+  static Bytecode BytecodeForLoadGlobal(LanguageMode language_mode,
+                                        TypeofMode typeof_mode);
+  static Bytecode BytecodeForStoreGlobal(LanguageMode language_mode);
+  static Bytecode BytecodeForStoreLookupSlot(LanguageMode language_mode);
+  static Bytecode BytecodeForCreateArguments(CreateArgumentsType type);
+  static Bytecode BytecodeForDelete(LanguageMode language_mode);
+
+  static bool FitsInIdx8Operand(int value);
+  static bool FitsInIdx8Operand(size_t value);
+  static bool FitsInImm8Operand(int value);
+  static bool FitsInIdx16Operand(int value);
+  static bool FitsInIdx16Operand(size_t value);
+  static bool FitsInReg8Operand(Register value);
+  static bool FitsInReg16Operand(Register value);
+
+  static Bytecode GetJumpWithConstantOperand(Bytecode jump_smi8_operand);
+  static Bytecode GetJumpWithConstantWideOperand(Bytecode jump_smi8_operand);
+  static Bytecode GetJumpWithToBoolean(Bytecode jump_smi8_operand);
+
+  Register MapRegister(Register reg);
+  Register MapRegisters(Register reg, Register args_base, int args_length = 1);
+
+  template <size_t N>
+  INLINE(void Output(Bytecode bytecode, uint32_t(&operands)[N]));
+  void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+              uint32_t operand2, uint32_t operand3);
+  void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+              uint32_t operand2);
+  void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1);
+  void Output(Bytecode bytecode, uint32_t operand0);
+  void Output(Bytecode bytecode);
+
+  BytecodeArrayBuilder& OutputJump(Bytecode jump_bytecode,
+                                   BytecodeLabel* label);
+  void PatchJump(const ZoneVector<uint8_t>::iterator& jump_target,
+                 const ZoneVector<uint8_t>::iterator& jump_location);
+  void PatchIndirectJumpWith8BitOperand(
+      const ZoneVector<uint8_t>::iterator& jump_location, int delta);
+  void PatchIndirectJumpWith16BitOperand(
+      const ZoneVector<uint8_t>::iterator& jump_location, int delta);
+
+  void LeaveBasicBlock();
+  void EnsureReturn();
+
+  bool OperandIsValid(Bytecode bytecode, int operand_index,
+                      uint32_t operand_value) const;
+  bool LastBytecodeInSameBlock() const;
+
+  bool NeedToBooleanCast();
+  bool IsRegisterInAccumulator(Register reg);
+
+  bool RegisterIsValid(Register reg) const;
+
+  // Temporary register management.
+  int BorrowTemporaryRegister();
+  int BorrowTemporaryRegisterNotInRange(int start_index, int end_index);
+  void ReturnTemporaryRegister(int reg_index);
+  int PrepareForConsecutiveTemporaryRegisters(size_t count);
+  void BorrowConsecutiveTemporaryRegister(int reg_index);
+  bool TemporaryRegisterIsLive(Register reg) const;
+
+  Register first_temporary_register() const;
+  Register last_temporary_register() const;
+
+  // Gets a constant pool entry for the |object|.
+  size_t GetConstantPoolEntry(Handle<Object> object);
+
+  Isolate* isolate_;
+  Zone* zone_;
+  ZoneVector<uint8_t> bytecodes_;
+  bool bytecode_generated_;
+  ConstantArrayBuilder constant_array_builder_;
+  size_t last_block_end_;
+  size_t last_bytecode_start_;
+  bool exit_seen_in_block_;
+  int unbound_jumps_;
+
+  int parameter_count_;
+  int local_register_count_;
+  int context_register_count_;
+  int temporary_register_count_;
+  ZoneSet<int> free_temporaries_;
+
+  class PreviousBytecodeHelper;
+  friend class BytecodeRegisterAllocator;
+
+  DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBuilder);
+};
+
+
+// A label representing a branch target in a bytecode array. When a
+// label is bound, it represents a known position in the bytecode
+// array. For labels that are forward references there can be at most
+// one reference whilst it is unbound.
+class BytecodeLabel final {
+ public:
+  BytecodeLabel() : bound_(false), offset_(kInvalidOffset) {}
+
+  bool is_bound() const { return bound_; }
+  size_t offset() const { return offset_; }
+
+ private:
+  static const size_t kInvalidOffset = static_cast<size_t>(-1);
+
+  void bind_to(size_t offset) {
+    DCHECK(!bound_ && offset != kInvalidOffset);
+    offset_ = offset;
+    bound_ = true;
+  }
+
+  void set_referrer(size_t offset) {
+    DCHECK(!bound_ && offset != kInvalidOffset && offset_ == kInvalidOffset);
+    offset_ = offset;
+  }
+
+  bool is_forward_target() const {
+    return offset() != kInvalidOffset && !is_bound();
+  }
+
+  // There are three states for a label:
+  //                    bound_   offset_
+  //  UNSET             false    kInvalidOffset
+  //  FORWARD_TARGET    false    Offset of referring jump
+  //  BACKWARD_TARGET    true    Offset of label in bytecode array when bound
+  bool bound_;
+  size_t offset_;
+
+  friend class BytecodeArrayBuilder;
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_BYTECODE_ARRAY_BUILDER_H_
diff --git a/src/interpreter/bytecode-array-iterator.cc b/src/interpreter/bytecode-array-iterator.cc
new file mode 100644
index 0000000..d09d72f
--- /dev/null
+++ b/src/interpreter/bytecode-array-iterator.cc
@@ -0,0 +1,123 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-array-iterator.h"
+
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodeArrayIterator::BytecodeArrayIterator(
+    Handle<BytecodeArray> bytecode_array)
+    : bytecode_array_(bytecode_array), bytecode_offset_(0) {}
+
+
+void BytecodeArrayIterator::Advance() {
+  bytecode_offset_ += Bytecodes::Size(current_bytecode());
+}
+
+
+bool BytecodeArrayIterator::done() const {
+  return bytecode_offset_ >= bytecode_array()->length();
+}
+
+
+Bytecode BytecodeArrayIterator::current_bytecode() const {
+  DCHECK(!done());
+  uint8_t current_byte = bytecode_array()->get(bytecode_offset_);
+  return interpreter::Bytecodes::FromByte(current_byte);
+}
+
+
+int BytecodeArrayIterator::current_bytecode_size() const {
+  return Bytecodes::Size(current_bytecode());
+}
+
+
+uint32_t BytecodeArrayIterator::GetRawOperand(int operand_index,
+                                              OperandType operand_type) const {
+  DCHECK_GE(operand_index, 0);
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
+  DCHECK_EQ(operand_type,
+            Bytecodes::GetOperandType(current_bytecode(), operand_index));
+  uint8_t* operand_start =
+      bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
+      Bytecodes::GetOperandOffset(current_bytecode(), operand_index);
+  switch (Bytecodes::SizeOfOperand(operand_type)) {
+    default:
+    case OperandSize::kNone:
+      UNREACHABLE();
+    case OperandSize::kByte:
+      return static_cast<uint32_t>(*operand_start);
+    case OperandSize::kShort:
+      return ReadUnalignedUInt16(operand_start);
+  }
+}
+
+
+int8_t BytecodeArrayIterator::GetImmediateOperand(int operand_index) const {
+  uint32_t operand = GetRawOperand(operand_index, OperandType::kImm8);
+  return static_cast<int8_t>(operand);
+}
+
+
+int BytecodeArrayIterator::GetCountOperand(int operand_index) const {
+  OperandSize size =
+      Bytecodes::GetOperandSize(current_bytecode(), operand_index);
+  OperandType type = (size == OperandSize::kByte) ? OperandType::kCount8
+                                                  : OperandType::kCount16;
+  uint32_t operand = GetRawOperand(operand_index, type);
+  return static_cast<int>(operand);
+}
+
+
+int BytecodeArrayIterator::GetIndexOperand(int operand_index) const {
+  OperandType operand_type =
+      Bytecodes::GetOperandType(current_bytecode(), operand_index);
+  DCHECK(operand_type == OperandType::kIdx8 ||
+         operand_type == OperandType::kIdx16);
+  uint32_t operand = GetRawOperand(operand_index, operand_type);
+  return static_cast<int>(operand);
+}
+
+
+Register BytecodeArrayIterator::GetRegisterOperand(int operand_index) const {
+  OperandType operand_type =
+      Bytecodes::GetOperandType(current_bytecode(), operand_index);
+  DCHECK(operand_type == OperandType::kReg8 ||
+         operand_type == OperandType::kRegPair8 ||
+         operand_type == OperandType::kMaybeReg8 ||
+         operand_type == OperandType::kReg16);
+  uint32_t operand = GetRawOperand(operand_index, operand_type);
+  return Register::FromOperand(operand);
+}
+
+
+Handle<Object> BytecodeArrayIterator::GetConstantForIndexOperand(
+    int operand_index) const {
+  Handle<FixedArray> constants = handle(bytecode_array()->constant_pool());
+  return FixedArray::get(constants, GetIndexOperand(operand_index));
+}
+
+
+int BytecodeArrayIterator::GetJumpTargetOffset() const {
+  Bytecode bytecode = current_bytecode();
+  if (interpreter::Bytecodes::IsJumpImmediate(bytecode)) {
+    int relative_offset = GetImmediateOperand(0);
+    return current_offset() + relative_offset;
+  } else if (interpreter::Bytecodes::IsJumpConstant(bytecode) ||
+             interpreter::Bytecodes::IsJumpConstantWide(bytecode)) {
+    Smi* smi = Smi::cast(*GetConstantForIndexOperand(0));
+    return current_offset() + smi->value();
+  } else {
+    UNREACHABLE();
+    return kMinInt;
+  }
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/bytecode-array-iterator.h b/src/interpreter/bytecode-array-iterator.h
new file mode 100644
index 0000000..e67fa97
--- /dev/null
+++ b/src/interpreter/bytecode-array-iterator.h
@@ -0,0 +1,55 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
+#define V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
+
+#include "src/handles.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeArrayIterator {
+ public:
+  explicit BytecodeArrayIterator(Handle<BytecodeArray> bytecode_array);
+
+  void Advance();
+  bool done() const;
+  Bytecode current_bytecode() const;
+  int current_bytecode_size() const;
+  int current_offset() const { return bytecode_offset_; }
+  const Handle<BytecodeArray>& bytecode_array() const {
+    return bytecode_array_;
+  }
+
+  int8_t GetImmediateOperand(int operand_index) const;
+  int GetIndexOperand(int operand_index) const;
+  int GetCountOperand(int operand_index) const;
+  Register GetRegisterOperand(int operand_index) const;
+  Handle<Object> GetConstantForIndexOperand(int operand_index) const;
+
+  // Get the raw byte for the given operand. Note: you should prefer using the
+  // typed versions above which cast the return to an appropriate type.
+  uint32_t GetRawOperand(int operand_index, OperandType operand_type) const;
+
+  // Returns the absolute offset of the branch target at the current
+  // bytecode. It is an error to call this method if the bytecode is
+  // not for a jump or conditional jump.
+  int GetJumpTargetOffset() const;
+
+ private:
+  Handle<BytecodeArray> bytecode_array_;
+  int bytecode_offset_;
+
+  DISALLOW_COPY_AND_ASSIGN(BytecodeArrayIterator);
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
diff --git a/src/interpreter/bytecode-generator.cc b/src/interpreter/bytecode-generator.cc
new file mode 100644
index 0000000..959e155
--- /dev/null
+++ b/src/interpreter/bytecode-generator.cc
@@ -0,0 +1,2182 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-generator.h"
+
+#include "src/ast/scopes.h"
+#include "src/compiler.h"
+#include "src/interpreter/bytecode-register-allocator.h"
+#include "src/interpreter/control-flow-builders.h"
+#include "src/objects.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/token.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+
+// Scoped class tracking context objects created by the visitor. Represents
+// mutations of the context chain within the function body, allowing pushing and
+// popping of the current {context_register} during visitation.
+class BytecodeGenerator::ContextScope BASE_EMBEDDED {
+ public:
+  ContextScope(BytecodeGenerator* generator, Scope* scope,
+               bool should_pop_context = true)
+      : generator_(generator),
+        scope_(scope),
+        outer_(generator_->execution_context()),
+        register_(generator_->NextContextRegister()),
+        depth_(0),
+        should_pop_context_(should_pop_context) {
+    if (outer_) {
+      depth_ = outer_->depth_ + 1;
+      generator_->builder()->PushContext(register_);
+    }
+    generator_->set_execution_context(this);
+  }
+
+  ~ContextScope() {
+    if (outer_ && should_pop_context_) {
+      generator_->builder()->PopContext(outer_->reg());
+    }
+    generator_->set_execution_context(outer_);
+  }
+
+  // Returns the depth of the given |scope| for the current execution context.
+  int ContextChainDepth(Scope* scope) {
+    return scope_->ContextChainLength(scope);
+  }
+
+  // Returns the execution context at |depth| in the current context chain if it
+  // is a function local execution context, otherwise returns nullptr.
+  ContextScope* Previous(int depth) {
+    if (depth > depth_) {
+      return nullptr;
+    }
+
+    ContextScope* previous = this;
+    for (int i = depth; i > 0; --i) {
+      previous = previous->outer_;
+    }
+    return previous;
+  }
+
+  Scope* scope() const { return scope_; }
+  Register reg() const { return register_; }
+
+ private:
+  BytecodeGenerator* generator_;
+  Scope* scope_;
+  ContextScope* outer_;
+  Register register_;
+  int depth_;
+  bool should_pop_context_;
+};
+
+
+// Scoped class for tracking control statements entered by the
+// visitor. The pattern derives from AstGraphBuilder::ControlScope.
+class BytecodeGenerator::ControlScope BASE_EMBEDDED {
+ public:
+  explicit ControlScope(BytecodeGenerator* generator)
+      : generator_(generator), outer_(generator->execution_control()) {
+    generator_->set_execution_control(this);
+  }
+  virtual ~ControlScope() { generator_->set_execution_control(outer()); }
+
+  void Break(Statement* stmt) { PerformCommand(CMD_BREAK, stmt); }
+  void Continue(Statement* stmt) { PerformCommand(CMD_CONTINUE, stmt); }
+
+ protected:
+  enum Command { CMD_BREAK, CMD_CONTINUE };
+  void PerformCommand(Command command, Statement* statement);
+  virtual bool Execute(Command command, Statement* statement) = 0;
+
+  BytecodeGenerator* generator() const { return generator_; }
+  ControlScope* outer() const { return outer_; }
+
+ private:
+  BytecodeGenerator* generator_;
+  ControlScope* outer_;
+
+  DISALLOW_COPY_AND_ASSIGN(ControlScope);
+};
+
+
+// Scoped class for enabling break inside blocks and switch blocks.
+class BytecodeGenerator::ControlScopeForBreakable final
+    : public BytecodeGenerator::ControlScope {
+ public:
+  ControlScopeForBreakable(BytecodeGenerator* generator,
+                           BreakableStatement* statement,
+                           BreakableControlFlowBuilder* control_builder)
+      : ControlScope(generator),
+        statement_(statement),
+        control_builder_(control_builder) {}
+
+ protected:
+  virtual bool Execute(Command command, Statement* statement) {
+    if (statement != statement_) return false;
+    switch (command) {
+      case CMD_BREAK:
+        control_builder_->Break();
+        return true;
+      case CMD_CONTINUE:
+        break;
+    }
+    return false;
+  }
+
+ private:
+  Statement* statement_;
+  BreakableControlFlowBuilder* control_builder_;
+};
+
+
+// Scoped class for enabling 'break' and 'continue' in iteration
+// constructs, e.g. do...while, while..., for...
+class BytecodeGenerator::ControlScopeForIteration final
+    : public BytecodeGenerator::ControlScope {
+ public:
+  ControlScopeForIteration(BytecodeGenerator* generator,
+                           IterationStatement* statement,
+                           LoopBuilder* loop_builder)
+      : ControlScope(generator),
+        statement_(statement),
+        loop_builder_(loop_builder) {}
+
+ protected:
+  virtual bool Execute(Command command, Statement* statement) {
+    if (statement != statement_) return false;
+    switch (command) {
+      case CMD_BREAK:
+        loop_builder_->Break();
+        return true;
+      case CMD_CONTINUE:
+        loop_builder_->Continue();
+        return true;
+    }
+    return false;
+  }
+
+ private:
+  Statement* statement_;
+  LoopBuilder* loop_builder_;
+};
+
+
+void BytecodeGenerator::ControlScope::PerformCommand(Command command,
+                                                     Statement* statement) {
+  ControlScope* current = this;
+  do {
+    if (current->Execute(command, statement)) return;
+    current = current->outer();
+  } while (current != nullptr);
+  UNREACHABLE();
+}
+
+
+class BytecodeGenerator::RegisterAllocationScope {
+ public:
+  explicit RegisterAllocationScope(BytecodeGenerator* generator)
+      : generator_(generator),
+        outer_(generator->register_allocator()),
+        allocator_(builder()) {
+    generator_->set_register_allocator(this);
+  }
+
+  virtual ~RegisterAllocationScope() {
+    generator_->set_register_allocator(outer_);
+  }
+
+  Register NewRegister() {
+    RegisterAllocationScope* current_scope = generator()->register_allocator();
+    if ((current_scope == this) ||
+        (current_scope->outer() == this &&
+         !current_scope->allocator_.HasConsecutiveAllocations())) {
+      // Regular case - Allocating registers in current or outer context.
+      // VisitForRegisterValue allocates register in outer context.
+      return allocator_.NewRegister();
+    } else {
+      // If it is required to allocate a register other than current or outer
+      // scopes, allocate a new temporary register. It might be expensive to
+      // walk the full context chain and compute the list of consecutive
+      // reservations in the inner scopes.
+      UNIMPLEMENTED();
+      return Register(-1);
+    }
+  }
+
+  void PrepareForConsecutiveAllocations(size_t count) {
+    allocator_.PrepareForConsecutiveAllocations(count);
+  }
+
+  Register NextConsecutiveRegister() {
+    return allocator_.NextConsecutiveRegister();
+  }
+
+  bool RegisterIsAllocatedInThisScope(Register reg) const {
+    return allocator_.RegisterIsAllocatedInThisScope(reg);
+  }
+
+  RegisterAllocationScope* outer() const { return outer_; }
+
+ private:
+  BytecodeGenerator* generator() const { return generator_; }
+  BytecodeArrayBuilder* builder() const { return generator_->builder(); }
+
+  BytecodeGenerator* generator_;
+  RegisterAllocationScope* outer_;
+  BytecodeRegisterAllocator allocator_;
+
+  DISALLOW_COPY_AND_ASSIGN(RegisterAllocationScope);
+};
+
+
+// Scoped base class for determining where the result of an expression
+// is stored.
+class BytecodeGenerator::ExpressionResultScope {
+ public:
+  ExpressionResultScope(BytecodeGenerator* generator, Expression::Context kind)
+      : generator_(generator),
+        kind_(kind),
+        outer_(generator->execution_result()),
+        allocator_(generator),
+        result_identified_(false) {
+    generator_->set_execution_result(this);
+  }
+
+  virtual ~ExpressionResultScope() {
+    generator_->set_execution_result(outer_);
+    DCHECK(result_identified());
+  }
+
+  bool IsEffect() const { return kind_ == Expression::kEffect; }
+  bool IsValue() const { return kind_ == Expression::kValue; }
+
+  virtual void SetResultInAccumulator() = 0;
+  virtual void SetResultInRegister(Register reg) = 0;
+
+ protected:
+  ExpressionResultScope* outer() const { return outer_; }
+  BytecodeArrayBuilder* builder() const { return generator_->builder(); }
+  const RegisterAllocationScope* allocator() const { return &allocator_; }
+
+  void set_result_identified() {
+    DCHECK(!result_identified());
+    result_identified_ = true;
+  }
+
+  bool result_identified() const { return result_identified_; }
+
+ private:
+  BytecodeGenerator* generator_;
+  Expression::Context kind_;
+  ExpressionResultScope* outer_;
+  RegisterAllocationScope allocator_;
+  bool result_identified_;
+
+  DISALLOW_COPY_AND_ASSIGN(ExpressionResultScope);
+};
+
+
+// Scoped class used when the result of the current expression is not
+// expected to produce a result.
+class BytecodeGenerator::EffectResultScope final
+    : public ExpressionResultScope {
+ public:
+  explicit EffectResultScope(BytecodeGenerator* generator)
+      : ExpressionResultScope(generator, Expression::kEffect) {
+    set_result_identified();
+  }
+
+  virtual void SetResultInAccumulator() {}
+  virtual void SetResultInRegister(Register reg) {}
+};
+
+
+// Scoped class used when the result of the current expression to be
+// evaluated should go into the interpreter's accumulator register.
+class BytecodeGenerator::AccumulatorResultScope final
+    : public ExpressionResultScope {
+ public:
+  explicit AccumulatorResultScope(BytecodeGenerator* generator)
+      : ExpressionResultScope(generator, Expression::kValue) {}
+
+  virtual void SetResultInAccumulator() { set_result_identified(); }
+
+  virtual void SetResultInRegister(Register reg) {
+    builder()->LoadAccumulatorWithRegister(reg);
+    set_result_identified();
+  }
+};
+
+
+// Scoped class used when the result of the current expression to be
+// evaluated should go into an interpreter register.
+class BytecodeGenerator::RegisterResultScope final
+    : public ExpressionResultScope {
+ public:
+  explicit RegisterResultScope(BytecodeGenerator* generator)
+      : ExpressionResultScope(generator, Expression::kValue) {}
+
+  virtual void SetResultInAccumulator() {
+    result_register_ = allocator()->outer()->NewRegister();
+    builder()->StoreAccumulatorInRegister(result_register_);
+    set_result_identified();
+  }
+
+  virtual void SetResultInRegister(Register reg) {
+    DCHECK(builder()->RegisterIsParameterOrLocal(reg) ||
+           (builder()->RegisterIsTemporary(reg) &&
+            !allocator()->RegisterIsAllocatedInThisScope(reg)));
+    result_register_ = reg;
+    set_result_identified();
+  }
+
+  Register ResultRegister() const { return result_register_; }
+
+ private:
+  Register result_register_;
+};
+
+
+BytecodeGenerator::BytecodeGenerator(Isolate* isolate, Zone* zone)
+    : isolate_(isolate),
+      zone_(zone),
+      builder_(isolate, zone),
+      info_(nullptr),
+      scope_(nullptr),
+      globals_(0, zone),
+      execution_control_(nullptr),
+      execution_context_(nullptr),
+      execution_result_(nullptr),
+      register_allocator_(nullptr) {
+  InitializeAstVisitor(isolate);
+}
+
+
+Handle<BytecodeArray> BytecodeGenerator::MakeBytecode(CompilationInfo* info) {
+  set_info(info);
+  set_scope(info->scope());
+
+  // Initialize the incoming context.
+  ContextScope incoming_context(this, scope(), false);
+
+  builder()->set_parameter_count(info->num_parameters_including_this());
+  builder()->set_locals_count(scope()->num_stack_slots());
+  builder()->set_context_count(scope()->MaxNestedContextChainLength());
+
+  // Build function context only if there are context allocated variables.
+  if (scope()->NeedsContext()) {
+    // Push a new inner context scope for the function.
+    VisitNewLocalFunctionContext();
+    ContextScope local_function_context(this, scope(), false);
+    VisitBuildLocalActivationContext();
+    MakeBytecodeBody();
+  } else {
+    MakeBytecodeBody();
+  }
+
+  set_scope(nullptr);
+  set_info(nullptr);
+  return builder_.ToBytecodeArray();
+}
+
+
+void BytecodeGenerator::MakeBytecodeBody() {
+  // Build the arguments object if it is used.
+  VisitArgumentsObject(scope()->arguments());
+
+  // TODO(mythria): Build rest arguments array if it is used.
+  int rest_index;
+  if (scope()->rest_parameter(&rest_index)) {
+    UNIMPLEMENTED();
+  }
+
+  // Build assignment to {.this_function} variable if it is used.
+  VisitThisFunctionVariable(scope()->this_function_var());
+
+  // Build assignment to {new.target} variable if it is used.
+  VisitNewTargetVariable(scope()->new_target_var());
+
+  // TODO(rmcilroy): Emit tracing call if requested to do so.
+  if (FLAG_trace) {
+    UNIMPLEMENTED();
+  }
+
+  // Visit illegal re-declaration and bail out if it exists.
+  if (scope()->HasIllegalRedeclaration()) {
+    Visit(scope()->GetIllegalRedeclaration());
+    return;
+  }
+
+  // Visit declarations within the function scope.
+  VisitDeclarations(scope()->declarations());
+
+  // Visit statements in the function body.
+  VisitStatements(info()->literal()->body());
+}
+
+
+void BytecodeGenerator::VisitBlock(Block* stmt) {
+  BlockBuilder block_builder(this->builder());
+  ControlScopeForBreakable execution_control(this, stmt, &block_builder);
+
+  if (stmt->scope() == NULL) {
+    // Visit statements in the same scope, no declarations.
+    VisitStatements(stmt->statements());
+  } else {
+    // Visit declarations and statements in a block scope.
+    if (stmt->scope()->NeedsContext()) {
+      VisitNewLocalBlockContext(stmt->scope());
+      ContextScope scope(this, stmt->scope());
+      VisitDeclarations(stmt->scope()->declarations());
+      VisitStatements(stmt->statements());
+    } else {
+      VisitDeclarations(stmt->scope()->declarations());
+      VisitStatements(stmt->statements());
+    }
+  }
+  if (stmt->labels() != nullptr) block_builder.EndBlock();
+}
+
+
+void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
+  Variable* variable = decl->proxy()->var();
+  VariableMode mode = decl->mode();
+  // Const and let variables are initialized with the hole so that we can
+  // check that they are only assigned once.
+  bool hole_init = mode == CONST || mode == CONST_LEGACY || mode == LET;
+  switch (variable->location()) {
+    case VariableLocation::GLOBAL:
+    case VariableLocation::UNALLOCATED: {
+      Handle<Oddball> value = variable->binding_needs_init()
+                                  ? isolate()->factory()->the_hole_value()
+                                  : isolate()->factory()->undefined_value();
+      globals()->push_back(variable->name());
+      globals()->push_back(value);
+      break;
+    }
+    case VariableLocation::LOCAL:
+      if (hole_init) {
+        Register destination(variable->index());
+        builder()->LoadTheHole().StoreAccumulatorInRegister(destination);
+      }
+      break;
+    case VariableLocation::PARAMETER:
+      if (hole_init) {
+        // The parameter indices are shifted by 1 (receiver is variable
+        // index -1 but is parameter index 0 in BytecodeArrayBuilder).
+        Register destination(builder()->Parameter(variable->index() + 1));
+        builder()->LoadTheHole().StoreAccumulatorInRegister(destination);
+      }
+      break;
+    case VariableLocation::CONTEXT:
+      if (hole_init) {
+        builder()->LoadTheHole().StoreContextSlot(execution_context()->reg(),
+                                                  variable->index());
+      }
+      break;
+    case VariableLocation::LOOKUP:
+      UNIMPLEMENTED();
+      break;
+  }
+}
+
+
+void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
+  Variable* variable = decl->proxy()->var();
+  switch (variable->location()) {
+    case VariableLocation::GLOBAL:
+    case VariableLocation::UNALLOCATED: {
+      Handle<SharedFunctionInfo> function = Compiler::GetSharedFunctionInfo(
+          decl->fun(), info()->script(), info());
+      // Check for stack-overflow exception.
+      if (function.is_null()) return SetStackOverflow();
+      globals()->push_back(variable->name());
+      globals()->push_back(function);
+      break;
+    }
+    case VariableLocation::PARAMETER:
+    case VariableLocation::LOCAL: {
+      VisitForAccumulatorValue(decl->fun());
+      VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+      break;
+    }
+    case VariableLocation::CONTEXT: {
+      DCHECK_EQ(0, execution_context()->ContextChainDepth(variable->scope()));
+      VisitForAccumulatorValue(decl->fun());
+      builder()->StoreContextSlot(execution_context()->reg(),
+                                  variable->index());
+      break;
+    }
+    case VariableLocation::LOOKUP:
+      UNIMPLEMENTED();
+  }
+}
+
+
+void BytecodeGenerator::VisitImportDeclaration(ImportDeclaration* decl) {
+  UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitExportDeclaration(ExportDeclaration* decl) {
+  UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitDeclarations(
+    ZoneList<Declaration*>* declarations) {
+  RegisterAllocationScope register_scope(this);
+  DCHECK(globals()->empty());
+  AstVisitor::VisitDeclarations(declarations);
+  if (globals()->empty()) return;
+  int array_index = 0;
+  Handle<FixedArray> data = isolate()->factory()->NewFixedArray(
+      static_cast<int>(globals()->size()), TENURED);
+  for (Handle<Object> obj : *globals()) data->set(array_index++, *obj);
+  int encoded_flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
+                      DeclareGlobalsNativeFlag::encode(info()->is_native()) |
+                      DeclareGlobalsLanguageMode::encode(language_mode());
+
+  Register pairs = register_allocator()->NewRegister();
+  builder()->LoadLiteral(data);
+  builder()->StoreAccumulatorInRegister(pairs);
+
+  Register flags = register_allocator()->NewRegister();
+  builder()->LoadLiteral(Smi::FromInt(encoded_flags));
+  builder()->StoreAccumulatorInRegister(flags);
+  DCHECK(flags.index() == pairs.index() + 1);
+
+  builder()->CallRuntime(Runtime::kDeclareGlobals, pairs, 2);
+  globals()->clear();
+}
+
+
+void BytecodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+  for (int i = 0; i < statements->length(); i++) {
+    // Allocate an outer register allocations scope for the statement.
+    RegisterAllocationScope allocation_scope(this);
+    Statement* stmt = statements->at(i);
+    Visit(stmt);
+    if (stmt->IsJump()) break;
+  }
+}
+
+
+void BytecodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
+  VisitForEffect(stmt->expression());
+}
+
+
+void BytecodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
+}
+
+
+void BytecodeGenerator::VisitIfStatement(IfStatement* stmt) {
+  BytecodeLabel else_label, end_label;
+  if (stmt->condition()->ToBooleanIsTrue()) {
+    // Generate then block unconditionally as always true.
+    Visit(stmt->then_statement());
+  } else if (stmt->condition()->ToBooleanIsFalse()) {
+    // Generate else block unconditionally if it exists.
+    if (stmt->HasElseStatement()) {
+      Visit(stmt->else_statement());
+    }
+  } else {
+    // TODO(oth): If then statement is BreakStatement or
+    // ContinueStatement we can reduce number of generated
+    // jump/jump_ifs here. See BasicLoops test.
+    VisitForAccumulatorValue(stmt->condition());
+    builder()->JumpIfFalse(&else_label);
+    Visit(stmt->then_statement());
+    if (stmt->HasElseStatement()) {
+      builder()->Jump(&end_label);
+      builder()->Bind(&else_label);
+      Visit(stmt->else_statement());
+    } else {
+      builder()->Bind(&else_label);
+    }
+    builder()->Bind(&end_label);
+  }
+}
+
+
+// A sloppy-mode block-scoped function statement is a thin wrapper; delegate
+// to the wrapped statement.
+void BytecodeGenerator::VisitSloppyBlockFunctionStatement(
+    SloppyBlockFunctionStatement* stmt) {
+  Visit(stmt->statement());
+}
+
+
+// 'continue' is routed through the current execution-control scope, which
+// knows how to reach the target loop's continue point.
+void BytecodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
+  execution_control()->Continue(stmt->target());
+}
+
+
+// 'break' is routed through the current execution-control scope, which
+// knows how to reach the target construct's break point.
+void BytecodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
+  execution_control()->Break(stmt->target());
+}
+
+
+// Evaluates the return value into the accumulator and emits Return.
+void BytecodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+  VisitForAccumulatorValue(stmt->expression());
+  builder()->Return();
+}
+
+
+// 'with' statements are not yet supported by the bytecode generator.
+void BytecodeGenerator::VisitWithStatement(WithStatement* stmt) {
+  UNIMPLEMENTED();
+}
+
+
+// Generates bytecode for a switch statement in two passes over the clauses:
+// first the label comparisons (each emitting a conditional jump to its case
+// body), then the case bodies themselves. Fall-through between cases comes
+// naturally from emitting bodies back-to-back.
+void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+  // We need this scope because we visit for register values. We have to
+  // maintain a execution result scope where registers can be allocated.
+  ZoneList<CaseClause*>* clauses = stmt->cases();
+  SwitchBuilder switch_builder(builder(), clauses->length());
+  ControlScopeForBreakable scope(this, stmt, &switch_builder);
+  int default_index = -1;
+
+  // Keep the switch value in a register until a case matches.
+  Register tag = VisitForRegisterValue(stmt->tag());
+
+  // Iterate over all cases and create nodes for label comparison.
+  BytecodeLabel done_label;
+  for (int i = 0; i < clauses->length(); i++) {
+    CaseClause* clause = clauses->at(i);
+
+    // The default is not a test, remember index.
+    if (clause->is_default()) {
+      default_index = i;
+      continue;
+    }
+
+    // Perform label comparison as if via '===' with tag.
+    VisitForAccumulatorValue(clause->label());
+    builder()->CompareOperation(Token::Value::EQ_STRICT, tag,
+                                language_mode_strength());
+    // Emits a conditional jump to case i's (not-yet-bound) body target.
+    switch_builder.Case(i);
+  }
+
+  if (default_index >= 0) {
+    // Emit default jump if there is a default case.
+    switch_builder.DefaultAt(default_index);
+  } else {
+    // Otherwise if we have reached here none of the cases matched, so jump to
+    // done.
+    builder()->Jump(&done_label);
+  }
+
+  // Iterate over all cases and create the case bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    CaseClause* clause = clauses->at(i);
+    // Binds case i's jump target to the current position.
+    switch_builder.SetCaseTarget(i);
+    VisitStatements(clause->statements());
+  }
+  builder()->Bind(&done_label);
+
+  // 'break' statements inside the switch also land on done_label.
+  switch_builder.SetBreakTarget(done_label);
+}
+
+
+void BytecodeGenerator::VisitCaseClause(CaseClause* clause) {
+  // Handled entirely in VisitSwitchStatement.
+  UNREACHABLE();
+}
+
+
+// Generates bytecode for a do-while loop. The body always runs at least
+// once, so three condition shapes are handled:
+//  - statically false: body once, no back-edge;
+//  - statically true: unconditional back-edge (infinite loop);
+//  - otherwise: evaluate the condition after the body and jump back to the
+//    loop header while it holds.
+void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  LoopBuilder loop_builder(builder());
+  ControlScopeForIteration execution_control(this, stmt, &loop_builder);
+  loop_builder.LoopHeader();
+  if (stmt->cond()->ToBooleanIsFalse()) {
+    Visit(stmt->body());
+    // Condition position is still marked so 'continue' has a target.
+    loop_builder.Condition();
+  } else if (stmt->cond()->ToBooleanIsTrue()) {
+    loop_builder.Condition();
+    Visit(stmt->body());
+    loop_builder.JumpToHeader();
+  } else {
+    Visit(stmt->body());
+    loop_builder.Condition();
+    VisitForAccumulatorValue(stmt->cond());
+    loop_builder.JumpToHeaderIfTrue();
+  }
+  loop_builder.EndLoop();
+}
+
+
+// Generates bytecode for a while loop: header, condition test (elided when
+// statically true), body, and an unconditional back-edge to the header.
+void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
+  Expression* condition = stmt->cond();
+  // A statically-false condition means the loop body can never execute.
+  if (condition->ToBooleanIsFalse()) return;
+
+  LoopBuilder loop_builder(builder());
+  ControlScopeForIteration execution_control(this, stmt, &loop_builder);
+  loop_builder.LoopHeader();
+  loop_builder.Condition();
+  // A statically-true condition needs no runtime test; otherwise evaluate
+  // it and break out of the loop when it is false.
+  if (!condition->ToBooleanIsTrue()) {
+    VisitForAccumulatorValue(condition);
+    loop_builder.BreakIfFalse();
+  }
+  Visit(stmt->body());
+  loop_builder.JumpToHeader();
+  loop_builder.EndLoop();
+}
+
+
+// Generates bytecode for a C-style for loop: init block, then header,
+// optional condition test, body, optional 'next' block, and back-edge.
+void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
+  Expression* condition = stmt->cond();
+  if (stmt->init() != nullptr) {
+    Visit(stmt->init());
+  }
+  // With a statically-false condition only the init block can have an
+  // effect; the body, next and condition blocks are dead code.
+  if (condition != nullptr && condition->ToBooleanIsFalse()) return;
+
+  LoopBuilder loop_builder(builder());
+  ControlScopeForIteration execution_control(this, stmt, &loop_builder);
+
+  loop_builder.LoopHeader();
+  loop_builder.Condition();
+  // Emit the runtime test unless the condition is absent or statically true.
+  if (condition != nullptr && !condition->ToBooleanIsTrue()) {
+    VisitForAccumulatorValue(condition);
+    loop_builder.BreakIfFalse();
+  }
+  Visit(stmt->body());
+  if (stmt->next() != nullptr) {
+    // 'continue' jumps here so the increment expression still runs.
+    loop_builder.Next();
+    Visit(stmt->next());
+  }
+  loop_builder.JumpToHeader();
+  loop_builder.EndLoop();
+}
+
+
+// Stores the accumulator (the current for-in enumeration value) into the
+// loop's assignment target: a variable, named property or keyed property.
+// For properties the incoming value is first spilled to a register because
+// evaluating the object/key expressions clobbers the accumulator.
+void BytecodeGenerator::VisitForInAssignment(Expression* expr,
+                                             FeedbackVectorSlot slot) {
+  DCHECK(expr->IsValidReferenceExpression());
+
+  // Evaluate assignment starting with the value to be stored in the
+  // accumulator.
+  Property* property = expr->AsProperty();
+  LhsKind assign_type = Property::GetAssignType(property);
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* variable = expr->AsVariableProxy()->var();
+      VisitVariableAssignment(variable, slot);
+      break;
+    }
+    case NAMED_PROPERTY: {
+      RegisterAllocationScope register_scope(this);
+      // Save the value before visiting the object expression.
+      Register value = register_allocator()->NewRegister();
+      builder()->StoreAccumulatorInRegister(value);
+      Register object = VisitForRegisterValue(property->obj());
+      Handle<String> name = property->key()->AsLiteral()->AsPropertyName();
+      // Restore the value into the accumulator for the store.
+      builder()->LoadAccumulatorWithRegister(value);
+      builder()->StoreNamedProperty(object, name, feedback_index(slot),
+                                    language_mode());
+      break;
+    }
+    case KEYED_PROPERTY: {
+      RegisterAllocationScope register_scope(this);
+      // Save the value before visiting the object and key expressions.
+      Register value = register_allocator()->NewRegister();
+      builder()->StoreAccumulatorInRegister(value);
+      Register object = VisitForRegisterValue(property->obj());
+      Register key = VisitForRegisterValue(property->key());
+      // Restore the value into the accumulator for the store.
+      builder()->LoadAccumulatorWithRegister(value);
+      builder()->StoreKeyedProperty(object, key, feedback_index(slot),
+                                    language_mode());
+      break;
+    }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNIMPLEMENTED();
+  }
+}
+
+
+// Generates bytecode for a for-in loop: prepares the enumeration cache for
+// the subject object, then iterates an index register from 0 until
+// ForInDone reports exhaustion, assigning each enumerated key to the loop
+// variable before running the body.
+void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+  if (stmt->subject()->IsNullLiteral() ||
+      stmt->subject()->IsUndefinedLiteral(isolate())) {
+    // ForIn generates lots of code, skip if it wouldn't produce any effects.
+    return;
+  }
+
+  LoopBuilder loop_builder(builder());
+  ControlScopeForIteration control_scope(this, stmt, &loop_builder);
+  BytecodeLabel subject_null_label, subject_undefined_label, not_object_label;
+
+  // Prepare the state for executing ForIn.
+  VisitForAccumulatorValue(stmt->subject());
+  builder()->JumpIfUndefined(&subject_undefined_label);
+  builder()->JumpIfNull(&subject_null_label);
+  Register receiver = register_allocator()->NewRegister();
+  // NOTE(review): the null test after the cast suggests the cast yields
+  // null for values not convertible to a JSObject — confirm against the
+  // CastAccumulatorToJSObject bytecode handler.
+  builder()->CastAccumulatorToJSObject();
+  builder()->JumpIfNull(&not_object_label);
+  builder()->StoreAccumulatorInRegister(receiver);
+  Register cache_type = register_allocator()->NewRegister();
+  Register cache_array = register_allocator()->NewRegister();
+  Register cache_length = register_allocator()->NewRegister();
+  builder()->ForInPrepare(cache_type, cache_array, cache_length);
+
+  // Set up loop counter
+  Register index = register_allocator()->NewRegister();
+  builder()->LoadLiteral(Smi::FromInt(0));
+  builder()->StoreAccumulatorInRegister(index);
+
+  // The loop
+  loop_builder.LoopHeader();
+  loop_builder.Condition();
+  builder()->ForInDone(index, cache_length);
+  loop_builder.BreakIfTrue();
+  // Loads the next enumerated property name into the accumulator.
+  builder()->ForInNext(receiver, cache_type, cache_array, index);
+  // An undefined result means the property was deleted mid-enumeration;
+  // skip straight to the next iteration.
+  loop_builder.ContinueIfUndefined();
+  VisitForInAssignment(stmt->each(), stmt->EachFeedbackSlot());
+  Visit(stmt->body());
+  loop_builder.Next();
+  builder()->ForInStep(index);
+  builder()->StoreAccumulatorInRegister(index);
+  loop_builder.JumpToHeader();
+  loop_builder.EndLoop();
+  // Null/undefined/non-object subjects jump here, skipping the loop.
+  builder()->Bind(&not_object_label);
+  builder()->Bind(&subject_null_label);
+  builder()->Bind(&subject_undefined_label);
+}
+
+
+// for-of loops are not yet supported by the bytecode generator.
+void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+  UNIMPLEMENTED();
+}
+
+
+// try-catch is not yet supported. Under --ignition-fake-try-catch the try
+// block is emitted without any exception handling (the catch is dropped).
+void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  if (FLAG_ignition_fake_try_catch) {
+    Visit(stmt->try_block());
+    return;
+  }
+  UNIMPLEMENTED();
+}
+
+
+// try-finally is not yet supported. Under --ignition-fake-try-catch both
+// blocks are emitted sequentially, so the finally block only runs on the
+// normal (non-throwing, non-jumping) path.
+void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+  if (FLAG_ignition_fake_try_catch) {
+    Visit(stmt->try_block());
+    Visit(stmt->finally_block());
+    return;
+  }
+  UNIMPLEMENTED();
+}
+
+
+// 'debugger' statements are not yet supported by the bytecode generator.
+void BytecodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
+  UNIMPLEMENTED();
+}
+
+
+// Generates bytecode for a function literal: obtains (or compiles) the
+// SharedFunctionInfo and emits CreateClosure, leaving the closure in the
+// accumulator.
+void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+  // Find or build a shared function info.
+  Handle<SharedFunctionInfo> shared_info =
+      Compiler::GetSharedFunctionInfo(expr, info()->script(), info());
+  CHECK(!shared_info.is_null());  // TODO(rmcilroy): Set stack overflow?
+  builder()->CreateClosure(shared_info,
+                           expr->pretenure() ? TENURED : NOT_TENURED);
+  execution_result()->SetResultInAccumulator();
+}
+
+
+// Class literals are not yet supported by the bytecode generator.
+void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
+  UNIMPLEMENTED();
+}
+
+
+// Native function literals are not yet supported.
+void BytecodeGenerator::VisitNativeFunctionLiteral(
+    NativeFunctionLiteral* expr) {
+  UNIMPLEMENTED();
+}
+
+
+// do-expressions are not yet supported.
+void BytecodeGenerator::VisitDoExpression(DoExpression* expr) {
+  UNIMPLEMENTED();
+}
+
+
+// Generates bytecode for a ternary (?:) expression. Both branch values are
+// produced in the accumulator, so whichever branch executes leaves the
+// expression's result there.
+void BytecodeGenerator::VisitConditional(Conditional* expr) {
+  // TODO(rmcilroy): Spot easy cases where there code would not need to
+  // emit the then block or the else block, e.g. condition is
+  // obviously true/1/false/0.
+  BytecodeLabel else_start, cond_end;
+
+  // Test the condition; fall through into the then-expression when true.
+  VisitForAccumulatorValue(expr->condition());
+  builder()->JumpIfFalse(&else_start);
+  VisitForAccumulatorValue(expr->then_expression());
+  builder()->Jump(&cond_end);
+
+  builder()->Bind(&else_start);
+  VisitForAccumulatorValue(expr->else_expression());
+  builder()->Bind(&cond_end);
+
+  execution_result()->SetResultInAccumulator();
+}
+
+
+// Generates bytecode loading a literal value into the accumulator, using a
+// dedicated bytecode for the common singletons (undefined, true, false,
+// null, the hole) and Smis, and a constant-pool load otherwise.
+void BytecodeGenerator::VisitLiteral(Literal* expr) {
+  // A literal visited purely for effect emits nothing.
+  if (execution_result()->IsEffect()) return;
+
+  Handle<Object> raw_value = expr->value();
+  if (raw_value->IsSmi()) {
+    builder()->LoadLiteral(Smi::cast(*raw_value));
+  } else if (raw_value->IsUndefined()) {
+    builder()->LoadUndefined();
+  } else if (raw_value->IsTrue()) {
+    builder()->LoadTrue();
+  } else if (raw_value->IsFalse()) {
+    builder()->LoadFalse();
+  } else if (raw_value->IsNull()) {
+    builder()->LoadNull();
+  } else if (raw_value->IsTheHole()) {
+    builder()->LoadTheHole();
+  } else {
+    builder()->LoadLiteral(raw_value);
+  }
+  execution_result()->SetResultInAccumulator();
+}
+
+
+// Generates bytecode materializing a regular expression literal into the
+// accumulator from its pattern, literal index and flags.
+void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+  // Materialize a regular expression literal.
+  builder()->CreateRegExpLiteral(expr->pattern(), expr->literal_index(),
+                                 expr->flags());
+  execution_result()->SetResultInAccumulator();
+}
+
+
+// Generates bytecode for an object literal. The boilerplate is cloned
+// first; non-compile-time property values are then stored into it in two
+// phases (static part, then computed-name part — see the comment mid-way
+// down). The literal object stays in the accumulator until the first store
+// needs it in a register (tracked by literal_in_accumulator), and is moved
+// back into the accumulator at the end.
+void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+  // Deep-copy the literal boilerplate.
+  builder()->CreateObjectLiteral(expr->constant_properties(),
+                                 expr->literal_index(),
+                                 expr->ComputeFlags(true));
+  Register literal;
+
+  // Store computed values into the literal.
+  bool literal_in_accumulator = true;
+  int property_index = 0;
+  AccessorTable accessor_table(zone());
+  for (; property_index < expr->properties()->length(); property_index++) {
+    ObjectLiteral::Property* property = expr->properties()->at(property_index);
+    // Computed names end the "static" part; handled by the second loop.
+    if (property->is_computed_name()) break;
+    // Compile-time values are already part of the boilerplate.
+    if (property->IsCompileTimeValue()) continue;
+
+    if (literal_in_accumulator) {
+      // First non-trivial property: spill the literal to a register.
+      literal = register_allocator()->NewRegister();
+      builder()->StoreAccumulatorInRegister(literal);
+      literal_in_accumulator = false;
+    }
+
+    RegisterAllocationScope inner_register_scope(this);
+    Literal* literal_key = property->key()->AsLiteral();
+    switch (property->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+        UNREACHABLE();
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
+      // Fall through.
+      case ObjectLiteral::Property::COMPUTED: {
+        // It is safe to use [[Put]] here because the boilerplate already
+        // contains computed properties with an uninitialized value.
+        if (literal_key->value()->IsInternalizedString()) {
+          if (property->emit_store()) {
+            VisitForAccumulatorValue(property->value());
+            builder()->StoreNamedProperty(
+                literal, literal_key->AsPropertyName(),
+                feedback_index(property->GetSlot(0)), language_mode());
+          } else {
+            // Value still evaluated for side effects, result dropped.
+            VisitForEffect(property->value());
+          }
+        } else {
+          // Non-internalized key: go through Runtime::kSetProperty, which
+          // expects (literal, key, value, language) in consecutive registers.
+          register_allocator()->PrepareForConsecutiveAllocations(3);
+          Register key = register_allocator()->NextConsecutiveRegister();
+          Register value = register_allocator()->NextConsecutiveRegister();
+          Register language = register_allocator()->NextConsecutiveRegister();
+          // TODO(oth): This is problematic - can't assume contiguous here.
+          // literal is allocated in outer register scope, whereas key, value,
+          // language are in another.
+          DCHECK(Register::AreContiguous(literal, key, value, language));
+          VisitForAccumulatorValue(property->key());
+          builder()->StoreAccumulatorInRegister(key);
+          VisitForAccumulatorValue(property->value());
+          builder()->StoreAccumulatorInRegister(value);
+          if (property->emit_store()) {
+            builder()
+                ->LoadLiteral(Smi::FromInt(SLOPPY))
+                .StoreAccumulatorInRegister(language)
+                .CallRuntime(Runtime::kSetProperty, literal, 4);
+            VisitSetHomeObject(value, literal, property);
+          }
+        }
+        break;
+      }
+      case ObjectLiteral::Property::PROTOTYPE: {
+        register_allocator()->PrepareForConsecutiveAllocations(1);
+        DCHECK(property->emit_store());
+        Register value = register_allocator()->NextConsecutiveRegister();
+        DCHECK(Register::AreContiguous(literal, value));
+        VisitForAccumulatorValue(property->value());
+        builder()->StoreAccumulatorInRegister(value).CallRuntime(
+            Runtime::kInternalSetPrototype, literal, 2);
+        break;
+      }
+      case ObjectLiteral::Property::GETTER:
+        // Accessors are collected and emitted in getter/setter pairs below.
+        if (property->emit_store()) {
+          accessor_table.lookup(literal_key)->second->getter = property;
+        }
+        break;
+      case ObjectLiteral::Property::SETTER:
+        if (property->emit_store()) {
+          accessor_table.lookup(literal_key)->second->setter = property;
+        }
+        break;
+    }
+  }
+
+  // Define accessors, using only a single call to the runtime for each pair of
+  // corresponding getters and setters.
+  for (AccessorTable::Iterator it = accessor_table.begin();
+       it != accessor_table.end(); ++it) {
+    RegisterAllocationScope inner_register_scope(this);
+    register_allocator()->PrepareForConsecutiveAllocations(4);
+    Register name = register_allocator()->NextConsecutiveRegister();
+    Register getter = register_allocator()->NextConsecutiveRegister();
+    Register setter = register_allocator()->NextConsecutiveRegister();
+    Register attr = register_allocator()->NextConsecutiveRegister();
+    DCHECK(Register::AreContiguous(literal, name, getter, setter, attr));
+    VisitForAccumulatorValue(it->first);
+    builder()->StoreAccumulatorInRegister(name);
+    VisitObjectLiteralAccessor(literal, it->second->getter, getter);
+    VisitObjectLiteralAccessor(literal, it->second->setter, setter);
+    builder()
+        ->LoadLiteral(Smi::FromInt(NONE))
+        .StoreAccumulatorInRegister(attr)
+        .CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, literal, 5);
+  }
+
+  // Object literals have two parts. The "static" part on the left contains no
+  // computed property names, and so we can compute its map ahead of time; see
+  // Runtime_CreateObjectLiteralBoilerplate. The second "dynamic" part starts
+  // with the first computed property name and continues with all properties to
+  // its right. All the code from above initializes the static component of the
+  // object literal, and arranges for the map of the result to reflect the
+  // static order in which the keys appear. For the dynamic properties, we
+  // compile them into a series of "SetOwnProperty" runtime calls. This will
+  // preserve insertion order.
+  for (; property_index < expr->properties()->length(); property_index++) {
+    if (literal_in_accumulator) {
+      // Only reached when every static property was a compile-time value.
+      literal = register_allocator()->NewRegister();
+      builder()->StoreAccumulatorInRegister(literal);
+      literal_in_accumulator = false;
+    }
+
+    ObjectLiteral::Property* property = expr->properties()->at(property_index);
+    RegisterAllocationScope inner_register_scope(this);
+    if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
+      DCHECK(property->emit_store());
+      Register value = register_allocator()->NewRegister();
+      DCHECK(Register::AreContiguous(literal, value));
+      VisitForAccumulatorValue(property->value());
+      builder()->StoreAccumulatorInRegister(value).CallRuntime(
+          Runtime::kInternalSetPrototype, literal, 2);
+      continue;
+    }
+
+    // Runtime define-property calls need (literal, key, value, attr) in
+    // consecutive registers.
+    register_allocator()->PrepareForConsecutiveAllocations(3);
+    Register key = register_allocator()->NextConsecutiveRegister();
+    Register value = register_allocator()->NextConsecutiveRegister();
+    Register attr = register_allocator()->NextConsecutiveRegister();
+    DCHECK(Register::AreContiguous(literal, key, value, attr));
+
+    VisitForAccumulatorValue(property->key());
+    builder()->CastAccumulatorToName().StoreAccumulatorInRegister(key);
+    VisitForAccumulatorValue(property->value());
+    builder()->StoreAccumulatorInRegister(value);
+    VisitSetHomeObject(value, literal, property);
+    builder()->LoadLiteral(Smi::FromInt(NONE)).StoreAccumulatorInRegister(attr);
+    Runtime::FunctionId function_id = static_cast<Runtime::FunctionId>(-1);
+    switch (property->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+      case ObjectLiteral::Property::COMPUTED:
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        function_id = Runtime::kDefineDataPropertyUnchecked;
+        break;
+      case ObjectLiteral::Property::PROTOTYPE:
+        UNREACHABLE();  // Handled specially above.
+        break;
+      case ObjectLiteral::Property::GETTER:
+        function_id = Runtime::kDefineGetterPropertyUnchecked;
+        break;
+      case ObjectLiteral::Property::SETTER:
+        function_id = Runtime::kDefineSetterPropertyUnchecked;
+        break;
+    }
+    builder()->CallRuntime(function_id, literal, 4);
+  }
+
+  // Transform literals that contain functions to fast properties.
+  if (expr->has_function()) {
+    DCHECK(!literal_in_accumulator);
+    builder()->CallRuntime(Runtime::kToFastProperties, literal, 1);
+  }
+
+  if (!literal_in_accumulator) {
+    // Restore literal array into accumulator.
+    builder()->LoadAccumulatorWithRegister(literal);
+  }
+  execution_result()->SetResultInAccumulator();
+}
+
+
+// Generates bytecode for an array literal: clones the boilerplate, then
+// stores each non-compile-time element via a keyed store. Like object
+// literals, the literal stays in the accumulator until the first store
+// requires it in a register.
+void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+  // Deep-copy the literal boilerplate.
+  builder()->CreateArrayLiteral(expr->constant_elements(),
+                                expr->literal_index(),
+                                expr->ComputeFlags(true));
+  Register index, literal;
+
+  // Evaluate all the non-constant subexpressions and store them into the
+  // newly cloned array.
+  bool literal_in_accumulator = true;
+  for (int array_index = 0; array_index < expr->values()->length();
+       array_index++) {
+    Expression* subexpr = expr->values()->at(array_index);
+    // Compile-time values are already part of the boilerplate.
+    if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+    if (subexpr->IsSpread()) {
+      // TODO(rmcilroy): Deal with spread expressions.
+      UNIMPLEMENTED();
+    }
+
+    if (literal_in_accumulator) {
+      // First non-constant element: spill the literal to a register.
+      index = register_allocator()->NewRegister();
+      literal = register_allocator()->NewRegister();
+      builder()->StoreAccumulatorInRegister(literal);
+      literal_in_accumulator = false;
+    }
+
+    FeedbackVectorSlot slot = expr->LiteralFeedbackSlot();
+    builder()
+        ->LoadLiteral(Smi::FromInt(array_index))
+        .StoreAccumulatorInRegister(index);
+    VisitForAccumulatorValue(subexpr);
+    builder()->StoreKeyedProperty(literal, index, feedback_index(slot),
+                                  language_mode());
+  }
+
+  if (!literal_in_accumulator) {
+    // Restore literal array into accumulator.
+    builder()->LoadAccumulatorWithRegister(literal);
+  }
+  execution_result()->SetResultInAccumulator();
+}
+
+
+// A variable reference is a load of the proxied variable.
+void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
+  VisitVariableLoad(proxy->var(), proxy->VariableFeedbackSlot());
+}
+
+
+// Generates bytecode loading |variable| into the accumulator, dispatching
+// on where the variable lives: a local register, a parameter, a global, a
+// context slot (possibly up the context chain), or a dynamic lookup.
+void BytecodeGenerator::VisitVariableLoad(Variable* variable,
+                                          FeedbackVectorSlot slot,
+                                          TypeofMode typeof_mode) {
+  switch (variable->location()) {
+    case VariableLocation::LOCAL: {
+      // Locals live directly in interpreter registers.
+      Register source(Register(variable->index()));
+      builder()->LoadAccumulatorWithRegister(source);
+      execution_result()->SetResultInAccumulator();
+      break;
+    }
+    case VariableLocation::PARAMETER: {
+      // The parameter indices are shifted by 1 (receiver is variable
+      // index -1 but is parameter index 0 in BytecodeArrayBuilder).
+      Register source = builder()->Parameter(variable->index() + 1);
+      builder()->LoadAccumulatorWithRegister(source);
+      execution_result()->SetResultInAccumulator();
+      break;
+    }
+    case VariableLocation::GLOBAL:
+    case VariableLocation::UNALLOCATED: {
+      // Globals load through the feedback vector; typeof_mode controls
+      // whether an unresolvable name throws or yields undefined.
+      builder()->LoadGlobal(variable->name(), feedback_index(slot),
+                            language_mode(), typeof_mode);
+      execution_result()->SetResultInAccumulator();
+      break;
+    }
+    case VariableLocation::CONTEXT: {
+      int depth = execution_context()->ContextChainDepth(variable->scope());
+      ContextScope* context = execution_context()->Previous(depth);
+      Register context_reg;
+      if (context) {
+        // The target context is tracked by the generator; use its register.
+        context_reg = context->reg();
+      } else {
+        context_reg = register_allocator()->NewRegister();
+        // Walk the context chain to find the context at the given depth.
+        // TODO(rmcilroy): Perform this work in a bytecode handler once we have
+        // a generic mechanism for performing jumps in interpreter.cc.
+        // TODO(mythria): Also update bytecode graph builder with correct depth
+        // when this changes.
+        builder()
+            ->LoadAccumulatorWithRegister(execution_context()->reg())
+            .StoreAccumulatorInRegister(context_reg);
+        for (int i = 0; i < depth; ++i) {
+          builder()
+              ->LoadContextSlot(context_reg, Context::PREVIOUS_INDEX)
+              .StoreAccumulatorInRegister(context_reg);
+        }
+      }
+      builder()->LoadContextSlot(context_reg, variable->index());
+      execution_result()->SetResultInAccumulator();
+      // TODO(rmcilroy): Perform check for uninitialized legacy const, const and
+      // let variables.
+      break;
+    }
+    case VariableLocation::LOOKUP: {
+      // Dynamically-scoped variables resolve at runtime via lookup slots.
+      builder()->LoadLookupSlot(variable->name(), typeof_mode);
+      execution_result()->SetResultInAccumulator();
+      break;
+    }
+  }
+}
+
+
+// Loads |variable|, forcing the result into the accumulator.
+void BytecodeGenerator::VisitVariableLoadForAccumulatorValue(
+    Variable* variable, FeedbackVectorSlot slot, TypeofMode typeof_mode) {
+  AccumulatorResultScope accumulator_result(this);
+  VisitVariableLoad(variable, slot, typeof_mode);
+}
+
+
+// Loads |variable|, forcing the result into a register, and returns it.
+Register BytecodeGenerator::VisitVariableLoadForRegisterValue(
+    Variable* variable, FeedbackVectorSlot slot, TypeofMode typeof_mode) {
+  RegisterResultScope register_scope(this);
+  VisitVariableLoad(variable, slot, typeof_mode);
+  return register_scope.ResultRegister();
+}
+
+
+// Generates bytecode storing the accumulator into |variable|, dispatching
+// on its location. The accumulator value is preserved across the store
+// (note the value_temp save/restore on the context-chain-walk path).
+void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
+                                                FeedbackVectorSlot slot) {
+  switch (variable->location()) {
+    case VariableLocation::LOCAL: {
+      // TODO(rmcilroy): support const mode initialization.
+      Register destination(variable->index());
+      builder()->StoreAccumulatorInRegister(destination);
+      break;
+    }
+    case VariableLocation::PARAMETER: {
+      // The parameter indices are shifted by 1 (receiver is variable
+      // index -1 but is parameter index 0 in BytecodeArrayBuilder).
+      Register destination(builder()->Parameter(variable->index() + 1));
+      builder()->StoreAccumulatorInRegister(destination);
+      break;
+    }
+    case VariableLocation::GLOBAL:
+    case VariableLocation::UNALLOCATED: {
+      builder()->StoreGlobal(variable->name(), feedback_index(slot),
+                             language_mode());
+      break;
+    }
+    case VariableLocation::CONTEXT: {
+      // TODO(rmcilroy): support const mode initialization.
+      int depth = execution_context()->ContextChainDepth(variable->scope());
+      ContextScope* context = execution_context()->Previous(depth);
+      Register context_reg;
+      if (context) {
+        context_reg = context->reg();
+      } else {
+        // The chain walk clobbers the accumulator, so stash the value first.
+        Register value_temp = register_allocator()->NewRegister();
+        context_reg = register_allocator()->NewRegister();
+        // Walk the context chain to find the context at the given depth.
+        // TODO(rmcilroy): Perform this work in a bytecode handler once we have
+        // a generic mechanism for performing jumps in interpreter.cc.
+        // TODO(mythria): Also update bytecode graph builder with correct depth
+        // when this changes.
+        builder()
+            ->StoreAccumulatorInRegister(value_temp)
+            .LoadAccumulatorWithRegister(execution_context()->reg())
+            .StoreAccumulatorInRegister(context_reg);
+        for (int i = 0; i < depth; ++i) {
+          builder()
+              ->LoadContextSlot(context_reg, Context::PREVIOUS_INDEX)
+              .StoreAccumulatorInRegister(context_reg);
+        }
+        // Restore the value being stored.
+        builder()->LoadAccumulatorWithRegister(value_temp);
+      }
+      builder()->StoreContextSlot(context_reg, variable->index());
+      break;
+    }
+    case VariableLocation::LOOKUP: {
+      builder()->StoreLookupSlot(variable->name(), language_mode());
+      break;
+    }
+  }
+}
+
+
+// Generates bytecode for an assignment expression in three phases:
+// (1) evaluate the LHS's object/key subexpressions, (2) for compound
+// assignments (+=, etc.) load the old value and combine it with the RHS,
+// (3) store the accumulator into the target. The assigned value is left in
+// the accumulator as the expression's result.
+void BytecodeGenerator::VisitAssignment(Assignment* expr) {
+  DCHECK(expr->target()->IsValidReferenceExpression());
+  Register object, key;
+  Handle<String> name;
+
+  // Left-hand side can only be a property, a global or a variable slot.
+  Property* property = expr->target()->AsProperty();
+  LhsKind assign_type = Property::GetAssignType(property);
+
+  // Evaluate LHS expression.
+  switch (assign_type) {
+    case VARIABLE:
+      // Nothing to do to evaluate variable assignment LHS.
+      break;
+    case NAMED_PROPERTY: {
+      object = VisitForRegisterValue(property->obj());
+      name = property->key()->AsLiteral()->AsPropertyName();
+      break;
+    }
+    case KEYED_PROPERTY: {
+      object = VisitForRegisterValue(property->obj());
+      if (expr->is_compound()) {
+        // Use VisitForAccumulator and store to register so that the key is
+        // still in the accumulator for loading the old value below.
+        key = register_allocator()->NewRegister();
+        VisitForAccumulatorValue(property->key());
+        builder()->StoreAccumulatorInRegister(key);
+      } else {
+        key = VisitForRegisterValue(property->key());
+      }
+      break;
+    }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNIMPLEMENTED();
+  }
+
+  // Evaluate the value and potentially handle compound assignments by loading
+  // the left-hand side value and performing a binary operation.
+  if (expr->is_compound()) {
+    Register old_value;
+    switch (assign_type) {
+      case VARIABLE: {
+        VariableProxy* proxy = expr->target()->AsVariableProxy();
+        old_value = VisitVariableLoadForRegisterValue(
+            proxy->var(), proxy->VariableFeedbackSlot());
+        break;
+      }
+      case NAMED_PROPERTY: {
+        FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
+        old_value = register_allocator()->NewRegister();
+        builder()
+            ->LoadNamedProperty(object, name, feedback_index(slot),
+                                language_mode())
+            .StoreAccumulatorInRegister(old_value);
+        break;
+      }
+      case KEYED_PROPERTY: {
+        // Key is already in accumulator at this point due to evaluating the
+        // LHS above.
+        FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
+        old_value = register_allocator()->NewRegister();
+        builder()
+            ->LoadKeyedProperty(object, feedback_index(slot), language_mode())
+            .StoreAccumulatorInRegister(old_value);
+        break;
+      }
+      case NAMED_SUPER_PROPERTY:
+      case KEYED_SUPER_PROPERTY:
+        UNIMPLEMENTED();
+        break;
+    }
+    // Combine old value (register) with the RHS (accumulator).
+    VisitForAccumulatorValue(expr->value());
+    builder()->BinaryOperation(expr->binary_op(), old_value,
+                               language_mode_strength());
+  } else {
+    VisitForAccumulatorValue(expr->value());
+  }
+
+  // Store the value.
+  FeedbackVectorSlot slot = expr->AssignmentSlot();
+  switch (assign_type) {
+    case VARIABLE: {
+      // TODO(oth): The VisitVariableAssignment() call is hard to reason about.
+      // Is the value in the accumulator safe? Yes, but scary.
+      Variable* variable = expr->target()->AsVariableProxy()->var();
+      VisitVariableAssignment(variable, slot);
+      break;
+    }
+    case NAMED_PROPERTY:
+      builder()->StoreNamedProperty(object, name, feedback_index(slot),
+                                    language_mode());
+      break;
+    case KEYED_PROPERTY:
+      builder()->StoreKeyedProperty(object, key, feedback_index(slot),
+                                    language_mode());
+      break;
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNIMPLEMENTED();
+  }
+  execution_result()->SetResultInAccumulator();
+}
+
+
+// yield expressions are not yet supported by the bytecode generator.
+void BytecodeGenerator::VisitYield(Yield* expr) { UNIMPLEMENTED(); }
+
+
+// Evaluates the exception value into the accumulator and emits Throw.
+void BytecodeGenerator::VisitThrow(Throw* expr) {
+  VisitForAccumulatorValue(expr->exception());
+  builder()->Throw();
+}
+
+
+// Generates bytecode loading a property of the object in |obj| into the
+// accumulator, using a named or keyed load as appropriate.
+void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* expr) {
+  LhsKind property_kind = Property::GetAssignType(expr);
+  FeedbackVectorSlot slot = expr->PropertyFeedbackSlot();
+  switch (property_kind) {
+    case VARIABLE:
+      // Variable references are handled by VisitVariableLoad, not here.
+      UNREACHABLE();
+    case NAMED_PROPERTY: {
+      builder()->LoadNamedProperty(obj,
+                                   expr->key()->AsLiteral()->AsPropertyName(),
+                                   feedback_index(slot), language_mode());
+      break;
+    }
+    case KEYED_PROPERTY: {
+      // The key is evaluated into the accumulator for the keyed load.
+      VisitForAccumulatorValue(expr->key());
+      builder()->LoadKeyedProperty(obj, feedback_index(slot), language_mode());
+      break;
+    }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNIMPLEMENTED();
+  }
+  execution_result()->SetResultInAccumulator();
+}
+
+
+// Loads a property of |obj|, forcing the result into the accumulator.
+void BytecodeGenerator::VisitPropertyLoadForAccumulator(Register obj,
+                                                        Property* expr) {
+  AccumulatorResultScope result_scope(this);
+  VisitPropertyLoad(obj, expr);
+}
+
+
+// Property access expression: evaluate the object into a register, then
+// load the property from it.
+void BytecodeGenerator::VisitProperty(Property* expr) {
+  Register obj = VisitForRegisterValue(expr->obj());
+  VisitPropertyLoad(obj, expr);
+}
+
+
+// Evaluates each argument expression and stores the results in a contiguous
+// run of consecutive temporary registers. Returns the register holding the
+// first argument, or a default-constructed Register when |args| is empty.
+Register BytecodeGenerator::VisitArguments(ZoneList<Expression*>* args) {
+  if (args->length() == 0) {
+    return Register();
+  }
+
+  // Visit arguments and place in a contiguous block of temporary
+  // registers.  Return the first temporary register corresponding to
+  // the first argument.
+  //
+  // NB the caller may have already called
+  // PrepareForConsecutiveAllocations() with args->length() + N. The
+  // second call here will be a no-op provided there have been N or
+  // less calls to NextConsecutiveRegister(). Otherwise, the arguments
+  // here will be consecutive, but they will not be consecutive with
+  // earlier consecutive allocations made by the caller.
+  register_allocator()->PrepareForConsecutiveAllocations(args->length());
+
+  // Visit for first argument that goes into returned register
+  Register first_arg = register_allocator()->NextConsecutiveRegister();
+  VisitForAccumulatorValue(args->at(0));
+  builder()->StoreAccumulatorInRegister(first_arg);
+
+  // Visit remaining arguments
+  for (int i = 1; i < static_cast<int>(args->length()); i++) {
+    Register ith_arg = register_allocator()->NextConsecutiveRegister();
+    VisitForAccumulatorValue(args->at(i));
+    builder()->StoreAccumulatorInRegister(ith_arg);
+    // Verify the allocator really handed out consecutive registers.
+    DCHECK(ith_arg.index() - i == first_arg.index());
+  }
+  return first_arg;
+}
+
+
+// Generates bytecode for a function call. The callee and receiver registers
+// are allocated consecutively, followed by the argument registers, since the
+// Call bytecode (and kLoadLookupSlot's register pair) require contiguity.
+// POSSIBLY_EVAL_CALL additionally rewrites the callee through
+// Runtime::kResolvePossiblyDirectEval before the call is emitted.
+void BytecodeGenerator::VisitCall(Call* expr) {
+  Expression* callee_expr = expr->expression();
+  Call::CallType call_type = expr->GetCallType(isolate());
+
+  // Prepare the callee and the receiver to the function call. This depends on
+  // the semantics of the underlying call type.
+
+  // The receiver and arguments need to be allocated consecutively for
+  // Call(). We allocate the callee and receiver consecutively for calls to
+  // kLoadLookupSlot. Future optimizations could avoid this there are no
+  // arguments or the receiver and arguments are already consecutive.
+  ZoneList<Expression*>* args = expr->arguments();
+  register_allocator()->PrepareForConsecutiveAllocations(args->length() + 2);
+  Register callee = register_allocator()->NextConsecutiveRegister();
+  Register receiver = register_allocator()->NextConsecutiveRegister();
+
+  switch (call_type) {
+    case Call::NAMED_PROPERTY_CALL:
+    case Call::KEYED_PROPERTY_CALL: {
+      // Receiver is the object of the property access; callee is the
+      // property value loaded from it.
+      Property* property = callee_expr->AsProperty();
+      VisitForAccumulatorValue(property->obj());
+      builder()->StoreAccumulatorInRegister(receiver);
+      VisitPropertyLoadForAccumulator(receiver, property);
+      builder()->StoreAccumulatorInRegister(callee);
+      break;
+    }
+    case Call::GLOBAL_CALL: {
+      // Receiver is undefined for global calls.
+      builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
+      // Load callee as a global variable.
+      VariableProxy* proxy = callee_expr->AsVariableProxy();
+      VisitVariableLoadForAccumulatorValue(proxy->var(),
+                                           proxy->VariableFeedbackSlot());
+      builder()->StoreAccumulatorInRegister(callee);
+      break;
+    }
+    case Call::LOOKUP_SLOT_CALL:
+    case Call::POSSIBLY_EVAL_CALL: {
+      if (callee_expr->AsVariableProxy()->var()->IsLookupSlot()) {
+        RegisterAllocationScope inner_register_scope(this);
+        register_allocator()->PrepareForConsecutiveAllocations(2);
+        Register context = register_allocator()->NextConsecutiveRegister();
+        Register name = register_allocator()->NextConsecutiveRegister();
+
+        // Call LoadLookupSlot to get the callee and receiver.
+        DCHECK(Register::AreContiguous(callee, receiver));
+        Variable* variable = callee_expr->AsVariableProxy()->var();
+        builder()
+            ->MoveRegister(Register::function_context(), context)
+            .LoadLiteral(variable->name())
+            .StoreAccumulatorInRegister(name)
+            .CallRuntimeForPair(Runtime::kLoadLookupSlot, context, 2, callee);
+        break;
+      }
+      // Fall through.
+      DCHECK_EQ(call_type, Call::POSSIBLY_EVAL_CALL);
+    }
+    case Call::OTHER_CALL: {
+      builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
+      VisitForAccumulatorValue(callee_expr);
+      builder()->StoreAccumulatorInRegister(callee);
+      break;
+    }
+    case Call::NAMED_SUPER_PROPERTY_CALL:
+    case Call::KEYED_SUPER_PROPERTY_CALL:
+    case Call::SUPER_CALL:
+      UNIMPLEMENTED();
+  }
+
+  // Evaluate all arguments to the function call and store in sequential
+  // registers.
+  Register arg = VisitArguments(args);
+  CHECK(args->length() == 0 || arg.index() == receiver.index() + 1);
+
+  // Resolve callee for a potential direct eval call. This block will mutate the
+  // callee value.
+  if (call_type == Call::POSSIBLY_EVAL_CALL && args->length() > 0) {
+    RegisterAllocationScope inner_register_scope(this);
+    register_allocator()->PrepareForConsecutiveAllocations(5);
+    Register callee_for_eval = register_allocator()->NextConsecutiveRegister();
+    Register source = register_allocator()->NextConsecutiveRegister();
+    Register function = register_allocator()->NextConsecutiveRegister();
+    Register language = register_allocator()->NextConsecutiveRegister();
+    Register position = register_allocator()->NextConsecutiveRegister();
+
+    // Set up arguments for ResolvePossiblyDirectEval by copying callee, source
+    // strings and function closure, and loading language and
+    // position.
+    builder()
+        ->MoveRegister(callee, callee_for_eval)
+        .MoveRegister(arg, source)
+        .MoveRegister(Register::function_closure(), function)
+        .LoadLiteral(Smi::FromInt(language_mode()))
+        .StoreAccumulatorInRegister(language)
+        .LoadLiteral(
+            Smi::FromInt(execution_context()->scope()->start_position()))
+        .StoreAccumulatorInRegister(position);
+
+    // Call ResolvePossiblyDirectEval and modify the callee.
+    builder()
+        ->CallRuntime(Runtime::kResolvePossiblyDirectEval, callee_for_eval, 5)
+        .StoreAccumulatorInRegister(callee);
+  }
+
+  // TODO(rmcilroy): Use CallIC to allow call type feedback.
+  builder()->Call(callee, receiver, args->length(),
+                  feedback_index(expr->CallFeedbackICSlot()));
+  execution_result()->SetResultInAccumulator();
+}
+
+
+// Generates bytecode for a 'new' expression: evaluates the constructor into
+// a register, the arguments into consecutive registers, then emits New.
+void BytecodeGenerator::VisitCallNew(CallNew* expr) {
+  Register constructor = register_allocator()->NewRegister();
+  VisitForAccumulatorValue(expr->expression());
+  builder()->StoreAccumulatorInRegister(constructor);
+
+  ZoneList<Expression*>* args = expr->arguments();
+  Register first_arg = VisitArguments(args);
+  builder()->New(constructor, first_arg, args->length());
+  execution_result()->SetResultInAccumulator();
+}
+
+
+// Generates bytecode for a runtime call (%Foo(...) or a JS runtime function).
+// JS runtime calls additionally need an undefined receiver allocated
+// immediately before the arguments.
+void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  Register receiver;
+  if (expr->is_jsruntime()) {
+    // Allocate a register for the receiver and load it with undefined.
+    register_allocator()->PrepareForConsecutiveAllocations(args->length() + 1);
+    receiver = register_allocator()->NextConsecutiveRegister();
+    builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
+  }
+  // Evaluate all arguments to the runtime call.
+  Register first_arg = VisitArguments(args);
+
+  if (expr->is_jsruntime()) {
+    DCHECK(args->length() == 0 || first_arg.index() == receiver.index() + 1);
+    builder()->CallJSRuntime(expr->context_index(), receiver, args->length());
+  } else {
+    Runtime::FunctionId function_id = expr->function()->function_id;
+    builder()->CallRuntime(function_id, first_arg, args->length());
+  }
+  execution_result()->SetResultInAccumulator();
+}
+
+
+// 'void expr': evaluates the operand for its side effects only, then
+// produces undefined in the accumulator.
+void BytecodeGenerator::VisitVoid(UnaryOperation* expr) {
+  VisitForEffect(expr->expression());
+  builder()->LoadUndefined();
+  execution_result()->SetResultInAccumulator();
+}
+
+
+// 'typeof expr': evaluates the operand into the accumulator and emits TypeOf.
+void BytecodeGenerator::VisitTypeOf(UnaryOperation* expr) {
+  if (expr->expression()->IsVariableProxy()) {
+    // Typeof does not throw a reference error on global variables, hence we
+    // perform a non-contextual load in case the operand is a variable proxy.
+    VariableProxy* proxy = expr->expression()->AsVariableProxy();
+    VisitVariableLoadForAccumulatorValue(
+        proxy->var(), proxy->VariableFeedbackSlot(), INSIDE_TYPEOF);
+  } else {
+    VisitForAccumulatorValue(expr->expression());
+  }
+  builder()->TypeOf();
+  execution_result()->SetResultInAccumulator();
+}
+
+
+// '!expr': evaluates the operand into the accumulator and emits LogicalNot.
+void BytecodeGenerator::VisitNot(UnaryOperation* expr) {
+  VisitForAccumulatorValue(expr->expression());
+  builder()->LogicalNot();
+  execution_result()->SetResultInAccumulator();
+}
+
+
+// Dispatches unary operations to the dedicated per-operator visitors.
+// BIT_NOT/ADD/SUB never reach here (the parser rewrites them to binary ops).
+void BytecodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+  switch (expr->op()) {
+    case Token::Value::NOT:
+      VisitNot(expr);
+      break;
+    case Token::Value::TYPEOF:
+      VisitTypeOf(expr);
+      break;
+    case Token::Value::VOID:
+      VisitVoid(expr);
+      break;
+    case Token::Value::DELETE:
+      VisitDelete(expr);
+      break;
+    case Token::Value::BIT_NOT:
+    case Token::Value::ADD:
+    case Token::Value::SUB:
+      // These operators are converted to an equivalent binary operators in
+      // the parser. These operators are not expected to be visited here.
+      UNREACHABLE();
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+// 'delete expr': emits the appropriate bytecode depending on whether the
+// operand is a property access, a variable reference, or something
+// unresolvable. The boolean result ends up in the accumulator.
+void BytecodeGenerator::VisitDelete(UnaryOperation* expr) {
+  if (expr->expression()->IsProperty()) {
+    // Delete of an object property is allowed both in sloppy
+    // and strict modes.
+    Property* property = expr->expression()->AsProperty();
+    Register object = VisitForRegisterValue(property->obj());
+    VisitForAccumulatorValue(property->key());
+    builder()->Delete(object, language_mode());
+  } else if (expr->expression()->IsVariableProxy()) {
+    // Delete of an unqualified identifier is allowed in sloppy mode but is
+    // not allowed in strict mode. Deleting 'this' is allowed in both modes.
+    VariableProxy* proxy = expr->expression()->AsVariableProxy();
+    Variable* variable = proxy->var();
+    DCHECK(is_sloppy(language_mode()) || variable->HasThisName(isolate()));
+    switch (variable->location()) {
+      case VariableLocation::GLOBAL:
+      case VariableLocation::UNALLOCATED: {
+        // Global var, let, const or variables not explicitly declared.
+        // The global object is reached via the native context's EXTENSION
+        // slot; the delete is then performed against it by name.
+        Register native_context = register_allocator()->NewRegister();
+        Register global_object = register_allocator()->NewRegister();
+        builder()
+            ->LoadContextSlot(execution_context()->reg(),
+                              Context::NATIVE_CONTEXT_INDEX)
+            .StoreAccumulatorInRegister(native_context)
+            .LoadContextSlot(native_context, Context::EXTENSION_INDEX)
+            .StoreAccumulatorInRegister(global_object)
+            .LoadLiteral(variable->name())
+            .Delete(global_object, language_mode());
+        break;
+      }
+      case VariableLocation::PARAMETER:
+      case VariableLocation::LOCAL:
+      case VariableLocation::CONTEXT: {
+        // Deleting local var/let/const, context variables, and arguments
+        // does not have any effect.
+        if (variable->HasThisName(isolate())) {
+          builder()->LoadTrue();
+        } else {
+          builder()->LoadFalse();
+        }
+        break;
+      }
+      case VariableLocation::LOOKUP: {
+        builder()->LoadLiteral(variable->name()).DeleteLookupSlot();
+        break;
+      }
+      default:
+        UNREACHABLE();
+    }
+  } else {
+    // Delete of an unresolvable reference returns true.
+    VisitForEffect(expr->expression());
+    builder()->LoadTrue();
+  }
+  execution_result()->SetResultInAccumulator();
+}
+
+
+// Generates bytecode for ++/-- (prefix and postfix). The old value is
+// loaded, converted to a number, optionally saved for postfix results,
+// incremented/decremented, and stored back to the same reference.
+void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
+  DCHECK(expr->expression()->IsValidReferenceExpressionOrThis());
+
+  // Left-hand side can only be a property, a global or a variable slot.
+  Property* property = expr->expression()->AsProperty();
+  LhsKind assign_type = Property::GetAssignType(property);
+
+  // TODO(rmcilroy): Set is_postfix to false if visiting for effect.
+  bool is_postfix = expr->is_postfix();
+
+  // Evaluate LHS expression and get old value.
+  Register obj, key, old_value;
+  Handle<String> name;
+  switch (assign_type) {
+    case VARIABLE: {
+      VariableProxy* proxy = expr->expression()->AsVariableProxy();
+      VisitVariableLoadForAccumulatorValue(proxy->var(),
+                                           proxy->VariableFeedbackSlot());
+      break;
+    }
+    case NAMED_PROPERTY: {
+      FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
+      obj = VisitForRegisterValue(property->obj());
+      name = property->key()->AsLiteral()->AsPropertyName();
+      builder()->LoadNamedProperty(obj, name, feedback_index(slot),
+                                   language_mode());
+      break;
+    }
+    case KEYED_PROPERTY: {
+      FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
+      obj = VisitForRegisterValue(property->obj());
+      // Use visit for accumulator here since we need the key in the accumulator
+      // for the LoadKeyedProperty.
+      key = register_allocator()->NewRegister();
+      VisitForAccumulatorValue(property->key());
+      builder()->StoreAccumulatorInRegister(key).LoadKeyedProperty(
+          obj, feedback_index(slot), language_mode());
+      break;
+    }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNIMPLEMENTED();
+  }
+
+  // Convert old value into a number.
+  if (!is_strong(language_mode())) {
+    builder()->CastAccumulatorToNumber();
+  }
+
+  // Save result for postfix expressions.
+  if (is_postfix) {
+    // Allocated in the outer allocation scope so the register survives
+    // until the enclosing expression consumes it.
+    old_value = register_allocator()->outer()->NewRegister();
+    builder()->StoreAccumulatorInRegister(old_value);
+  }
+
+  // Perform +1/-1 operation.
+  builder()->CountOperation(expr->binary_op(), language_mode_strength());
+
+  // Store the value.
+  FeedbackVectorSlot feedback_slot = expr->CountSlot();
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* variable = expr->expression()->AsVariableProxy()->var();
+      VisitVariableAssignment(variable, feedback_slot);
+      break;
+    }
+    case NAMED_PROPERTY: {
+      builder()->StoreNamedProperty(obj, name, feedback_index(feedback_slot),
+                                    language_mode());
+      break;
+    }
+    case KEYED_PROPERTY: {
+      builder()->StoreKeyedProperty(obj, key, feedback_index(feedback_slot),
+                                    language_mode());
+      break;
+    }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNIMPLEMENTED();
+  }
+
+  // Restore old value for postfix expressions.
+  if (is_postfix) {
+    execution_result()->SetResultInRegister(old_value);
+  } else {
+    execution_result()->SetResultInAccumulator();
+  }
+}
+
+
+// Dispatches binary operations: comma and the short-circuiting logical
+// operators get dedicated visitors; everything else is arithmetic.
+void BytecodeGenerator::VisitBinaryOperation(BinaryOperation* binop) {
+  switch (binop->op()) {
+    case Token::COMMA:
+      VisitCommaExpression(binop);
+      break;
+    case Token::OR:
+      VisitLogicalOrExpression(binop);
+      break;
+    case Token::AND:
+      VisitLogicalAndExpression(binop);
+      break;
+    default:
+      VisitArithmeticExpression(binop);
+      break;
+  }
+}
+
+
+// Comparison: left operand is materialized into a register, right operand
+// into the accumulator, then the compare bytecode combines them.
+void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+  Register lhs = VisitForRegisterValue(expr->left());
+  VisitForAccumulatorValue(expr->right());
+  builder()->CompareOperation(expr->op(), lhs, language_mode_strength());
+  execution_result()->SetResultInAccumulator();
+}
+
+
+// Arithmetic/bitwise binary operation; same register/accumulator layout as
+// VisitCompareOperation.
+void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
+  Register lhs = VisitForRegisterValue(expr->left());
+  VisitForAccumulatorValue(expr->right());
+  builder()->BinaryOperation(expr->op(), lhs, language_mode_strength());
+  execution_result()->SetResultInAccumulator();
+}
+
+
+// Spread expressions are desugared before code generation and never visited.
+void BytecodeGenerator::VisitSpread(Spread* expr) { UNREACHABLE(); }
+
+
+// Placeholder AST node for empty parentheses; never visited.
+void BytecodeGenerator::VisitEmptyParentheses(EmptyParentheses* expr) {
+  UNREACHABLE();
+}
+
+
+// The current function is always available in the closure register; no
+// bytecode is needed.
+void BytecodeGenerator::VisitThisFunction(ThisFunction* expr) {
+  execution_result()->SetResultInRegister(Register::function_closure());
+}
+
+
+void BytecodeGenerator::VisitSuperCallReference(SuperCallReference* expr) {
+  UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitSuperPropertyReference(
+    SuperPropertyReference* expr) {
+  UNIMPLEMENTED();
+}
+
+
+// Comma: evaluate the left operand for effect only, then the right operand
+// under the enclosing result scope.
+void BytecodeGenerator::VisitCommaExpression(BinaryOperation* binop) {
+  VisitForEffect(binop->left());
+  Visit(binop->right());
+}
+
+
+// '||' with short-circuit evaluation: if the left value is truthy, the jump
+// skips evaluation of the right operand, leaving the left value in the
+// accumulator as the result.
+void BytecodeGenerator::VisitLogicalOrExpression(BinaryOperation* binop) {
+  Expression* left = binop->left();
+  Expression* right = binop->right();
+
+  // Short-circuit evaluation- If it is known that left is always true,
+  // no need to visit right
+  if (left->ToBooleanIsTrue()) {
+    VisitForAccumulatorValue(left);
+  } else {
+    BytecodeLabel end_label;
+    VisitForAccumulatorValue(left);
+    builder()->JumpIfTrue(&end_label);
+    VisitForAccumulatorValue(right);
+    builder()->Bind(&end_label);
+  }
+  execution_result()->SetResultInAccumulator();
+}
+
+
+// '&&' with short-circuit evaluation: mirror image of VisitLogicalOrExpression
+// using JumpIfFalse.
+void BytecodeGenerator::VisitLogicalAndExpression(BinaryOperation* binop) {
+  Expression* left = binop->left();
+  Expression* right = binop->right();
+
+  // Short-circuit evaluation- If it is known that left is always false,
+  // no need to visit right
+  if (left->ToBooleanIsFalse()) {
+    VisitForAccumulatorValue(left);
+  } else {
+    BytecodeLabel end_label;
+    VisitForAccumulatorValue(left);
+    builder()->JumpIfFalse(&end_label);
+    VisitForAccumulatorValue(right);
+    builder()->Bind(&end_label);
+  }
+  execution_result()->SetResultInAccumulator();
+}
+
+
+// Transparent wrapper node: simply visits the wrapped expression.
+void BytecodeGenerator::VisitRewritableAssignmentExpression(
+    RewritableAssignmentExpression* expr) {
+  Visit(expr->expression());
+}
+
+
+// Allocates a new local context for the current function (or a script
+// context for script scopes), leaving the new context in the accumulator.
+void BytecodeGenerator::VisitNewLocalFunctionContext() {
+  AccumulatorResultScope accumulator_execution_result(this);
+  Scope* scope = this->scope();
+
+  // Allocate a new local context.
+  if (scope->is_script_scope()) {
+    // Script contexts are created via kNewScriptContext, which takes the
+    // closure and the scope info in two contiguous registers.
+    RegisterAllocationScope register_scope(this);
+    Register closure = register_allocator()->NewRegister();
+    Register scope_info = register_allocator()->NewRegister();
+    DCHECK(Register::AreContiguous(closure, scope_info));
+    builder()
+        ->LoadAccumulatorWithRegister(Register::function_closure())
+        .StoreAccumulatorInRegister(closure)
+        .LoadLiteral(scope->GetScopeInfo(isolate()))
+        .StoreAccumulatorInRegister(scope_info)
+        .CallRuntime(Runtime::kNewScriptContext, closure, 2);
+  } else {
+    builder()->CallRuntime(Runtime::kNewFunctionContext,
+                           Register::function_closure(), 1);
+  }
+  execution_result()->SetResultInAccumulator();
+}
+
+
+// Copies the receiver and any context-allocated parameters from their
+// parameter registers into slots of the freshly created local context.
+void BytecodeGenerator::VisitBuildLocalActivationContext() {
+  Scope* scope = this->scope();
+
+  if (scope->has_this_declaration() && scope->receiver()->IsContextSlot()) {
+    Variable* variable = scope->receiver();
+    Register receiver(builder()->Parameter(0));
+    // Context variable (at bottom of the context chain).
+    DCHECK_EQ(0, scope->ContextChainLength(variable->scope()));
+    builder()->LoadAccumulatorWithRegister(receiver).StoreContextSlot(
+        execution_context()->reg(), variable->index());
+  }
+
+  // Copy parameters into context if necessary.
+  int num_parameters = scope->num_parameters();
+  for (int i = 0; i < num_parameters; i++) {
+    Variable* variable = scope->parameter(i);
+    if (!variable->IsContextSlot()) continue;
+
+    // The parameter indices are shifted by 1 (receiver is variable
+    // index -1 but is parameter index 0 in BytecodeArrayBuilder).
+    Register parameter(builder()->Parameter(i + 1));
+    // Context variable (at bottom of the context chain).
+    DCHECK_EQ(0, scope->ContextChainLength(variable->scope()));
+    builder()->LoadAccumulatorWithRegister(parameter)
+        .StoreContextSlot(execution_context()->reg(), variable->index());
+  }
+}
+
+
+// Allocates a new context for a block scope via Runtime::kPushBlockContext,
+// which takes the scope info and closure in two consecutive registers. The
+// new context is left in the accumulator.
+void BytecodeGenerator::VisitNewLocalBlockContext(Scope* scope) {
+  AccumulatorResultScope accumulator_execution_result(this);
+  DCHECK(scope->is_block_scope());
+
+  // Allocate a new local block context.
+  register_allocator()->PrepareForConsecutiveAllocations(2);
+  Register scope_info = register_allocator()->NextConsecutiveRegister();
+  Register closure = register_allocator()->NextConsecutiveRegister();
+
+  builder()
+      ->LoadLiteral(scope->GetScopeInfo(isolate()))
+      .StoreAccumulatorInRegister(scope_info);
+  VisitFunctionClosureForContext();
+  builder()
+      ->StoreAccumulatorInRegister(closure)
+      .CallRuntime(Runtime::kPushBlockContext, scope_info, 2);
+  execution_result()->SetResultInAccumulator();
+}
+
+
+// Evaluates an object-literal accessor (getter/setter) into |value_out|,
+// storing null when the accessor half is absent.
+void BytecodeGenerator::VisitObjectLiteralAccessor(
+    Register home_object, ObjectLiteralProperty* property, Register value_out) {
+  // TODO(rmcilroy): Replace value_out with VisitForRegister();
+  if (property == nullptr) {
+    builder()->LoadNull().StoreAccumulatorInRegister(value_out);
+  } else {
+    VisitForAccumulatorValue(property->value());
+    builder()->StoreAccumulatorInRegister(value_out);
+    VisitSetHomeObject(value_out, home_object, property);
+  }
+}
+
+
+// Sets the [[HomeObject]] for a function-valued property when required.
+// Only the no-op (no home object needed) path is implemented so far.
+void BytecodeGenerator::VisitSetHomeObject(Register value, Register home_object,
+                                           ObjectLiteralProperty* property,
+                                           int slot_number) {
+  Expression* expr = property->value();
+  if (!FunctionLiteral::NeedsHomeObject(expr)) return;
+
+  UNIMPLEMENTED();
+}
+
+
+// Creates the 'arguments' object (mapped or unmapped depending on language
+// mode and parameter simplicity) and assigns it to |variable|. No-op when
+// the function has no arguments variable.
+void BytecodeGenerator::VisitArgumentsObject(Variable* variable) {
+  if (variable == nullptr) return;
+
+  DCHECK(variable->IsContextSlot() || variable->IsStackAllocated());
+
+  // Allocate and initialize a new arguments object and assign to the
+  // {arguments} variable.
+  CreateArgumentsType type =
+      is_strict(language_mode()) || !info()->has_simple_parameters()
+          ? CreateArgumentsType::kUnmappedArguments
+          : CreateArgumentsType::kMappedArguments;
+  builder()->CreateArguments(type);
+  VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+}
+
+
+// Assigns the current closure to the function-name variable, if present.
+// Currently guarded by UNIMPLEMENTED until the code path is exercised.
+void BytecodeGenerator::VisitThisFunctionVariable(Variable* variable) {
+  if (variable == nullptr) return;
+
+  // TODO(rmcilroy): Remove once we have tests which exercise this code path.
+  UNIMPLEMENTED();
+
+  // Store the closure we were called with in the given variable.
+  builder()->LoadAccumulatorWithRegister(Register::function_closure());
+  VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+}
+
+
+// Assigns the incoming new.target value to |variable|, if present.
+void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
+  if (variable == nullptr) return;
+
+  // Store the new target we were called with in the given variable.
+  builder()->LoadAccumulatorWithRegister(Register::new_target());
+  VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+}
+
+
+// Loads into the accumulator the closure to use when creating a new context:
+// the canonical empty function for script/module scopes, otherwise the
+// current function's closure.
+void BytecodeGenerator::VisitFunctionClosureForContext() {
+  AccumulatorResultScope accumulator_execution_result(this);
+  Scope* closure_scope = execution_context()->scope()->ClosureScope();
+  if (closure_scope->is_script_scope() ||
+      closure_scope->is_module_scope()) {
+    // Contexts nested in the native context have a canonical empty function as
+    // their closure, not the anonymous closure containing the global code.
+    Register native_context = register_allocator()->NewRegister();
+    builder()
+        ->LoadContextSlot(execution_context()->reg(),
+                          Context::NATIVE_CONTEXT_INDEX)
+        .StoreAccumulatorInRegister(native_context)
+        .LoadContextSlot(native_context, Context::CLOSURE_INDEX);
+  } else {
+    DCHECK(closure_scope->is_function_scope());
+    builder()->LoadAccumulatorWithRegister(Register::function_closure());
+  }
+  execution_result()->SetResultInAccumulator();
+}
+
+
+// Visits the expression |expr| and places the result in the accumulator.
+void BytecodeGenerator::VisitForAccumulatorValue(Expression* expr) {
+  AccumulatorResultScope accumulator_scope(this);
+  Visit(expr);
+}
+
+
+// Visits the expression |expr| and discards the result.
+void BytecodeGenerator::VisitForEffect(Expression* expr) {
+  EffectResultScope effect_scope(this);
+  Visit(expr);
+}
+
+
+// Visits the expression |expr| and returns the register containing
+// the expression result. The register is allocated by the result scope,
+// so callers must consume it (MUST_USE_RESULT in the header).
+Register BytecodeGenerator::VisitForRegisterValue(Expression* expr) {
+  RegisterResultScope register_scope(this);
+  Visit(expr);
+  return register_scope.ResultRegister();
+}
+
+
+// Returns the register to hold the next (inner) context: the incoming
+// function context for the outermost scope, otherwise the next register in
+// the builder's dedicated context-register range.
+Register BytecodeGenerator::NextContextRegister() const {
+  if (execution_context() == nullptr) {
+    // Return the incoming function context for the outermost execution context.
+    return Register::function_context();
+  }
+  Register previous = execution_context()->reg();
+  if (previous == Register::function_context()) {
+    // If the previous context was the incoming function context, then the next
+    // context register is the first local context register.
+    return builder_.first_context_register();
+  } else {
+    // Otherwise use the next local context register.
+    DCHECK_LT(previous.index(), builder_.last_context_register().index());
+    return Register(previous.index() + 1);
+  }
+}
+
+
+// Language mode (sloppy/strict/strong) of the function being compiled.
+LanguageMode BytecodeGenerator::language_mode() const {
+  return info()->language_mode();
+}
+
+
+// Strength derived from the current language mode.
+Strength BytecodeGenerator::language_mode_strength() const {
+  return strength(language_mode());
+}
+
+
+// Translates a feedback vector slot into its integer index for bytecodes.
+int BytecodeGenerator::feedback_index(FeedbackVectorSlot slot) const {
+  return info()->feedback_vector()->GetIndex(slot);
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/bytecode-generator.h b/src/interpreter/bytecode-generator.h
new file mode 100644
index 0000000..8bda7be
--- /dev/null
+++ b/src/interpreter/bytecode-generator.h
@@ -0,0 +1,152 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_GENERATOR_H_
+#define V8_INTERPRETER_BYTECODE_GENERATOR_H_
+
+#include "src/ast/ast.h"
+#include "src/interpreter/bytecode-array-builder.h"
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// AST visitor that walks a function's AST and emits interpreter bytecode
+// through a BytecodeArrayBuilder. Expression results are routed via
+// ExpressionResultScope subclasses (accumulator / register / effect), and
+// temporary registers are managed by nested RegisterAllocationScopes.
+class BytecodeGenerator final : public AstVisitor {
+ public:
+  BytecodeGenerator(Isolate* isolate, Zone* zone);
+
+  // Compiles the function described by |info| into a BytecodeArray.
+  Handle<BytecodeArray> MakeBytecode(CompilationInfo* info);
+
+#define DECLARE_VISIT(type) void Visit##type(type* node) override;
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  // Visiting function for declarations list and statements are overridden.
+  void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
+  void VisitStatements(ZoneList<Statement*>* statments) override;
+
+ private:
+  class ContextScope;
+  class ControlScope;
+  class ControlScopeForBreakable;
+  class ControlScopeForIteration;
+  class ExpressionResultScope;
+  class EffectResultScope;
+  class AccumulatorResultScope;
+  class RegisterResultScope;
+  class RegisterAllocationScope;
+
+  void MakeBytecodeBody();
+  // Register for the next (inner) context; see the definition for the rules.
+  Register NextContextRegister() const;
+
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
+  // Dispatched from VisitBinaryOperation.
+  void VisitArithmeticExpression(BinaryOperation* binop);
+  void VisitCommaExpression(BinaryOperation* binop);
+  void VisitLogicalOrExpression(BinaryOperation* binop);
+  void VisitLogicalAndExpression(BinaryOperation* binop);
+
+  // Dispatched from VisitUnaryOperation.
+  void VisitVoid(UnaryOperation* expr);
+  void VisitTypeOf(UnaryOperation* expr);
+  void VisitNot(UnaryOperation* expr);
+  void VisitDelete(UnaryOperation* expr);
+
+  // Used by flow control routines to evaluate loop condition.
+  void VisitCondition(Expression* expr);
+
+  // Helper visitors which perform common operations.
+  // Places arguments in consecutive registers; returns the first register.
+  Register VisitArguments(ZoneList<Expression*>* arguments);
+
+  void VisitPropertyLoad(Register obj, Property* expr);
+  void VisitPropertyLoadForAccumulator(Register obj, Property* expr);
+
+  void VisitVariableLoad(Variable* variable, FeedbackVectorSlot slot,
+                         TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
+  void VisitVariableLoadForAccumulatorValue(
+      Variable* variable, FeedbackVectorSlot slot,
+      TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
+  MUST_USE_RESULT Register
+  VisitVariableLoadForRegisterValue(Variable* variable, FeedbackVectorSlot slot,
+                                    TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
+  void VisitVariableAssignment(Variable* variable, FeedbackVectorSlot slot);
+
+  // Function-prologue helpers for implicit variables and contexts.
+  void VisitArgumentsObject(Variable* variable);
+  void VisitThisFunctionVariable(Variable* variable);
+  void VisitNewTargetVariable(Variable* variable);
+  void VisitNewLocalFunctionContext();
+  void VisitBuildLocalActivationContext();
+  void VisitNewLocalBlockContext(Scope* scope);
+  void VisitFunctionClosureForContext();
+  void VisitSetHomeObject(Register value, Register home_object,
+                          ObjectLiteralProperty* property, int slot_number = 0);
+  void VisitObjectLiteralAccessor(Register home_object,
+                                  ObjectLiteralProperty* property,
+                                  Register value_out);
+  void VisitForInAssignment(Expression* expr, FeedbackVectorSlot slot);
+
+  // Visitors for obtaining expression result in the accumulator, in a
+  // register, or just getting the effect.
+  void VisitForAccumulatorValue(Expression* expression);
+  MUST_USE_RESULT Register VisitForRegisterValue(Expression* expression);
+  void VisitForEffect(Expression* node);
+
+  // Methods for tracking and remapping register.
+  void RecordStoreToRegister(Register reg);
+  Register LoadFromAliasedRegister(Register reg);
+
+  inline BytecodeArrayBuilder* builder() { return &builder_; }
+
+  inline Isolate* isolate() const { return isolate_; }
+  inline Zone* zone() const { return zone_; }
+
+  inline Scope* scope() const { return scope_; }
+  inline void set_scope(Scope* scope) { scope_ = scope; }
+  inline CompilationInfo* info() const { return info_; }
+  inline void set_info(CompilationInfo* info) { info_ = info; }
+
+  inline ControlScope* execution_control() const { return execution_control_; }
+  inline void set_execution_control(ControlScope* scope) {
+    execution_control_ = scope;
+  }
+  inline ContextScope* execution_context() const { return execution_context_; }
+  inline void set_execution_context(ContextScope* context) {
+    execution_context_ = context;
+  }
+  inline void set_execution_result(ExpressionResultScope* execution_result) {
+    execution_result_ = execution_result;
+  }
+  ExpressionResultScope* execution_result() const { return execution_result_; }
+  inline void set_register_allocator(
+      RegisterAllocationScope* register_allocator) {
+    register_allocator_ = register_allocator;
+  }
+  RegisterAllocationScope* register_allocator() const {
+    return register_allocator_;
+  }
+
+  ZoneVector<Handle<Object>>* globals() { return &globals_; }
+  inline LanguageMode language_mode() const;
+  Strength language_mode_strength() const;
+  int feedback_index(FeedbackVectorSlot slot) const;
+
+  Isolate* isolate_;
+  Zone* zone_;
+  BytecodeArrayBuilder builder_;
+  CompilationInfo* info_;
+  Scope* scope_;
+  ZoneVector<Handle<Object>> globals_;
+  ControlScope* execution_control_;
+  ContextScope* execution_context_;
+  ExpressionResultScope* execution_result_;
+  RegisterAllocationScope* register_allocator_;
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_BYTECODE_GENERATOR_H_
diff --git a/src/interpreter/bytecode-register-allocator.cc b/src/interpreter/bytecode-register-allocator.cc
new file mode 100644
index 0000000..4efb612
--- /dev/null
+++ b/src/interpreter/bytecode-register-allocator.cc
@@ -0,0 +1,72 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-register-allocator.h"
+
+#include "src/interpreter/bytecode-array-builder.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// Binds this allocation scope to |builder| and marks the consecutive-run
+// reservation as inactive (-1 sentinels). Registers are borrowed lazily via
+// NewRegister()/NextConsecutiveRegister().
+BytecodeRegisterAllocator::BytecodeRegisterAllocator(
+    BytecodeArrayBuilder* builder)
+    : builder_(builder),
+      allocated_(builder->zone()),
+      next_consecutive_register_(-1),
+      next_consecutive_count_(-1) {}
+
+
+// Returns every register borrowed in this scope back to the builder. The
+// reverse iteration releases them in LIFO order, i.e. the opposite of the
+// order in which they were borrowed.
+BytecodeRegisterAllocator::~BytecodeRegisterAllocator() {
+  for (auto i = allocated_.rbegin(); i != allocated_.rend(); i++) {
+    builder_->ReturnTemporaryRegister(*i);
+  }
+  allocated_.clear();
+}
+
+
+// Borrows a fresh temporary register from the builder for the lifetime of
+// this scope. While a consecutive run is reserved (next_consecutive_count_ >
+// 0), the register is taken from outside that range so the reservation
+// remains intact for NextConsecutiveRegister().
+Register BytecodeRegisterAllocator::NewRegister() {
+  int allocated = -1;
+  if (next_consecutive_count_ <= 0) {
+    allocated = builder_->BorrowTemporaryRegister();
+  } else {
+    // Avoid the reserved range [next_consecutive_register_,
+    // next_consecutive_register_ + next_consecutive_count_ - 1].
+    allocated = builder_->BorrowTemporaryRegisterNotInRange(
+        next_consecutive_register_,
+        next_consecutive_register_ + next_consecutive_count_ - 1);
+  }
+  allocated_.push_back(allocated);
+  return Register(allocated);
+}
+
+
+// Linear scan over the registers borrowed by this scope; returns true iff
+// |reg| was allocated here. Allocation counts per scope are expected to be
+// small, so a vector scan suffices.
+bool BytecodeRegisterAllocator::RegisterIsAllocatedInThisScope(
+    Register reg) const {
+  for (auto i = allocated_.begin(); i != allocated_.end(); i++) {
+    if (*i == reg.index()) return true;
+  }
+  return false;
+}
+
+
+// Reserves a run of |count| consecutive registers to be handed out one at a
+// time by NextConsecutiveRegister(). Only ever grows the reservation:
+// requesting no more registers than currently remain reserved is a no-op.
+void BytecodeRegisterAllocator::PrepareForConsecutiveAllocations(size_t count) {
+  if (static_cast<int>(count) > next_consecutive_count_) {
+    next_consecutive_register_ =
+        builder_->PrepareForConsecutiveTemporaryRegisters(count);
+    next_consecutive_count_ = static_cast<int>(count);
+  }
+}
+
+
+// Hands out the next register from the run reserved by
+// PrepareForConsecutiveAllocations(). Must not be called more times than the
+// reserved count; the DCHECKs guard against calling with no active run.
+Register BytecodeRegisterAllocator::NextConsecutiveRegister() {
+  DCHECK_GE(next_consecutive_register_, 0);
+  DCHECK_GT(next_consecutive_count_, 0);
+  builder_->BorrowConsecutiveTemporaryRegister(next_consecutive_register_);
+  allocated_.push_back(next_consecutive_register_);
+  next_consecutive_count_--;
+  return Register(next_consecutive_register_++);
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/bytecode-register-allocator.h b/src/interpreter/bytecode-register-allocator.h
new file mode 100644
index 0000000..74ab3a4
--- /dev/null
+++ b/src/interpreter/bytecode-register-allocator.h
@@ -0,0 +1,49 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_REGISTER_ALLOCATOR_H_
+#define V8_INTERPRETER_BYTECODE_REGISTER_ALLOCATOR_H_
+
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeArrayBuilder;
+class Register;
+
+// A class that allows the instantiator to allocate temporary registers that
+// are cleaned up (returned to the builder) when the scope is closed.
+class BytecodeRegisterAllocator {
+ public:
+  explicit BytecodeRegisterAllocator(BytecodeArrayBuilder* builder);
+  ~BytecodeRegisterAllocator();
+  // Borrows a single temporary register for the lifetime of this scope.
+  Register NewRegister();
+
+  // Reserves |count| consecutive registers; NextConsecutiveRegister() then
+  // hands them out one at a time.
+  void PrepareForConsecutiveAllocations(size_t count);
+  Register NextConsecutiveRegister();
+
+  bool RegisterIsAllocatedInThisScope(Register reg) const;
+
+  // True while registers remain in the reserved consecutive run.
+  bool HasConsecutiveAllocations() const { return next_consecutive_count_ > 0; }
+
+ private:
+  // Declared but not defined: forbids heap allocation, so instances are
+  // strictly stack-scoped (matching the RAII cleanup in the destructor).
+  void* operator new(size_t size);
+  void operator delete(void* p);
+
+  BytecodeArrayBuilder* builder_;
+  ZoneVector<int> allocated_;      // Indices of registers borrowed so far.
+  int next_consecutive_register_;  // Next index in the reserved run, or -1.
+  int next_consecutive_count_;     // Registers left in the run, or -1.
+
+  DISALLOW_COPY_AND_ASSIGN(BytecodeRegisterAllocator);
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+
+#endif  // V8_INTERPRETER_BYTECODE_REGISTER_ALLOCATOR_H_
diff --git a/src/interpreter/bytecode-traits.h b/src/interpreter/bytecode-traits.h
new file mode 100644
index 0000000..fd778d7
--- /dev/null
+++ b/src/interpreter/bytecode-traits.h
@@ -0,0 +1,180 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_TRAITS_H_
+#define V8_INTERPRETER_BYTECODE_TRAITS_H_
+
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// TODO(rmcilroy): consider simplifying this to avoid the template magic.
+
+// Template helpers to deduce the number of operands each bytecode has.
+// OPERAND_TERM pads a BytecodeTraits operand list with kNone terminators so
+// a single partial specialization can match each operand count.
+#define OPERAND_TERM OperandType::kNone, OperandType::kNone, OperandType::kNone
+
+// Maps an OperandType to its encoded size, both as an OperandSize enum value
+// (kSizeType) and as a byte count (kSize).
+template <OperandType>
+struct OperandTraits {};
+
+#define DECLARE_OPERAND_SIZE(Name, Size)             \
+  template <>                                        \
+  struct OperandTraits<OperandType::k##Name> {       \
+    static const OperandSize kSizeType = Size;       \
+    static const int kSize = static_cast<int>(Size); \
+  };
+OPERAND_TYPE_LIST(DECLARE_OPERAND_SIZE)
+#undef DECLARE_OPERAND_SIZE
+
+
+// Compile-time description of a bytecode's operand list: operand types,
+// their encoded sizes, their byte offsets within the bytecode (operand 0
+// starts at offset 1, just after the bytecode byte itself), and the total
+// encoded size. The OPERAND_TERM padding selects the specialization that
+// matches the operand count; this one handles four operands.
+template <OperandType... Args>
+struct BytecodeTraits {};
+
+template <OperandType operand_0, OperandType operand_1, OperandType operand_2,
+          OperandType operand_3>
+struct BytecodeTraits<operand_0, operand_1, operand_2, operand_3,
+                      OPERAND_TERM> {
+  static OperandType GetOperandType(int i) {
+    DCHECK(0 <= i && i < kOperandCount);
+    const OperandType kOperands[] = {operand_0, operand_1, operand_2,
+                                     operand_3};
+    return kOperands[i];
+  }
+
+  static inline OperandSize GetOperandSize(int i) {
+    DCHECK(0 <= i && i < kOperandCount);
+    const OperandSize kOperandSizes[] =
+        {OperandTraits<operand_0>::kSizeType,
+         OperandTraits<operand_1>::kSizeType,
+         OperandTraits<operand_2>::kSizeType,
+         OperandTraits<operand_3>::kSizeType};
+    return kOperandSizes[i];
+  }
+
+  // Byte offset of operand |i|: one byte for the bytecode itself plus the
+  // cumulative sizes of the preceding operands.
+  static inline int GetOperandOffset(int i) {
+    DCHECK(0 <= i && i < kOperandCount);
+    const int kOffset0 = 1;
+    const int kOffset1 = kOffset0 + OperandTraits<operand_0>::kSize;
+    const int kOffset2 = kOffset1 + OperandTraits<operand_1>::kSize;
+    const int kOffset3 = kOffset2 + OperandTraits<operand_2>::kSize;
+    const int kOperandOffsets[] = {kOffset0, kOffset1, kOffset2, kOffset3};
+    return kOperandOffsets[i];
+  }
+
+  static const int kOperandCount = 4;
+  static const int kSize =
+      1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize +
+      OperandTraits<operand_2>::kSize + OperandTraits<operand_3>::kSize;
+};
+
+
+// Three-operand specialization of BytecodeTraits.
+template <OperandType operand_0, OperandType operand_1, OperandType operand_2>
+struct BytecodeTraits<operand_0, operand_1, operand_2, OPERAND_TERM> {
+  static inline OperandType GetOperandType(int i) {
+    // Equivalent to 0 <= i < kOperandCount (kOperandCount == 3); the sibling
+    // specializations spell the bound via kOperandCount.
+    DCHECK(0 <= i && i <= 2);
+    const OperandType kOperands[] = {operand_0, operand_1, operand_2};
+    return kOperands[i];
+  }
+
+  static inline OperandSize GetOperandSize(int i) {
+    DCHECK(0 <= i && i < kOperandCount);
+    const OperandSize kOperandSizes[] =
+        {OperandTraits<operand_0>::kSizeType,
+         OperandTraits<operand_1>::kSizeType,
+         OperandTraits<operand_2>::kSizeType};
+    return kOperandSizes[i];
+  }
+
+  // Offset of operand |i|: bytecode byte plus preceding operand sizes.
+  static inline int GetOperandOffset(int i) {
+    DCHECK(0 <= i && i < kOperandCount);
+    const int kOffset0 = 1;
+    const int kOffset1 = kOffset0 + OperandTraits<operand_0>::kSize;
+    const int kOffset2 = kOffset1 + OperandTraits<operand_1>::kSize;
+    const int kOperandOffsets[] = {kOffset0, kOffset1, kOffset2};
+    return kOperandOffsets[i];
+  }
+
+  static const int kOperandCount = 3;
+  static const int kSize =
+      1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize +
+      OperandTraits<operand_2>::kSize;
+};
+
+// Two-operand specialization of BytecodeTraits.
+template <OperandType operand_0, OperandType operand_1>
+struct BytecodeTraits<operand_0, operand_1, OPERAND_TERM> {
+  static inline OperandType GetOperandType(int i) {
+    DCHECK(0 <= i && i < kOperandCount);
+    const OperandType kOperands[] = {operand_0, operand_1};
+    return kOperands[i];
+  }
+
+  static inline OperandSize GetOperandSize(int i) {
+    DCHECK(0 <= i && i < kOperandCount);
+    const OperandSize kOperandSizes[] =
+        {OperandTraits<operand_0>::kSizeType,
+         OperandTraits<operand_1>::kSizeType};
+    return kOperandSizes[i];
+  }
+
+  // Offset of operand |i|: bytecode byte plus preceding operand sizes.
+  static inline int GetOperandOffset(int i) {
+    DCHECK(0 <= i && i < kOperandCount);
+    const int kOffset0 = 1;
+    const int kOffset1 = kOffset0 + OperandTraits<operand_0>::kSize;
+    const int kOperandOffsets[] = {kOffset0, kOffset1};
+    return kOperandOffsets[i];
+  }
+
+  static const int kOperandCount = 2;
+  static const int kSize =
+      1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize;
+};
+
+// Single-operand specialization of BytecodeTraits; the only operand sits
+// directly after the bytecode byte (offset 1).
+template <OperandType operand_0>
+struct BytecodeTraits<operand_0, OPERAND_TERM> {
+  static inline OperandType GetOperandType(int i) {
+    DCHECK(i == 0);
+    return operand_0;
+  }
+
+  static inline OperandSize GetOperandSize(int i) {
+    DCHECK(i == 0);
+    return OperandTraits<operand_0>::kSizeType;
+  }
+
+  static inline int GetOperandOffset(int i) {
+    DCHECK(i == 0);
+    return 1;
+  }
+
+  static const int kOperandCount = 1;
+  static const int kSize = 1 + OperandTraits<operand_0>::kSize;
+};
+
+// Zero-operand specialization (a bare OperandType::kNone list). The operand
+// accessors must never be called since there are no operands.
+template <>
+struct BytecodeTraits<OperandType::kNone, OPERAND_TERM> {
+  static inline OperandType GetOperandType(int i) {
+    UNREACHABLE();
+    return OperandType::kNone;
+  }
+
+  static inline OperandSize GetOperandSize(int i) {
+    UNREACHABLE();
+    return OperandSize::kNone;
+  }
+
+  static inline int GetOperandOffset(int i) {
+    UNREACHABLE();
+    return 1;
+  }
+
+  static const int kOperandCount = 0;
+  // NOTE(review): kSize relies on OperandTraits<kNone>::kSize being zero so
+  // the total is just the bytecode byte — confirm OperandSize::kNone == 0.
+  static const int kSize = 1 + OperandTraits<OperandType::kNone>::kSize;
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_BYTECODE_TRAITS_H_
diff --git a/src/interpreter/bytecodes.cc b/src/interpreter/bytecodes.cc
new file mode 100644
index 0000000..2d4406c
--- /dev/null
+++ b/src/interpreter/bytecodes.cc
@@ -0,0 +1,453 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecodes.h"
+
+#include "src/frames.h"
+#include "src/interpreter/bytecode-traits.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+
+// static
+// Returns the printable name of |bytecode|, e.g. "LdaZero".
+const char* Bytecodes::ToString(Bytecode bytecode) {
+  switch (bytecode) {
+#define CASE(Name, ...)   \
+  case Bytecode::k##Name: \
+    return #Name;
+    BYTECODE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return "";
+}
+
+
+// static
+// Returns the printable name of |operand_type|, e.g. "Imm8".
+const char* Bytecodes::OperandTypeToString(OperandType operand_type) {
+  switch (operand_type) {
+#define CASE(Name, _)        \
+  case OperandType::k##Name: \
+    return #Name;
+    OPERAND_TYPE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return "";
+}
+
+
+// static
+// Returns the printable name of |operand_size|.
+const char* Bytecodes::OperandSizeToString(OperandSize operand_size) {
+  switch (operand_size) {
+    case OperandSize::kNone:
+      return "None";
+    case OperandSize::kByte:
+      return "Byte";
+    case OperandSize::kShort:
+      return "Short";
+  }
+  UNREACHABLE();
+  return "";
+}
+
+
+// static
+// Raw byte <-> Bytecode conversions. FromByte checks (in debug builds) that
+// the byte is a valid bytecode value.
+uint8_t Bytecodes::ToByte(Bytecode bytecode) {
+  return static_cast<uint8_t>(bytecode);
+}
+
+
+// static
+Bytecode Bytecodes::FromByte(uint8_t value) {
+  Bytecode bytecode = static_cast<Bytecode>(value);
+  DCHECK(bytecode <= Bytecode::kLast);
+  return bytecode;
+}
+
+
+// static
+// Total encoded size of |bytecode| in bytes, including all of its operands.
+int Bytecodes::Size(Bytecode bytecode) {
+  DCHECK(bytecode <= Bytecode::kLast);
+  switch (bytecode) {
+#define CASE(Name, ...)   \
+  case Bytecode::k##Name: \
+    return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::kSize;
+    BYTECODE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return 0;
+}
+
+
+// static
+// Number of operands |bytecode| takes, resolved via BytecodeTraits.
+int Bytecodes::NumberOfOperands(Bytecode bytecode) {
+  DCHECK(bytecode <= Bytecode::kLast);
+  switch (bytecode) {
+#define CASE(Name, ...)   \
+  case Bytecode::k##Name: \
+    return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::kOperandCount;
+    BYTECODE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return 0;
+}
+
+
+// static
+// OperandType of operand |i| of |bytecode|.
+OperandType Bytecodes::GetOperandType(Bytecode bytecode, int i) {
+  DCHECK(bytecode <= Bytecode::kLast);
+  switch (bytecode) {
+#define CASE(Name, ...)   \
+  case Bytecode::k##Name: \
+    return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::GetOperandType(i);
+    BYTECODE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return OperandType::kNone;
+}
+
+
+// static
+// Encoded size of operand |i| of |bytecode|.
+OperandSize Bytecodes::GetOperandSize(Bytecode bytecode, int i) {
+  DCHECK(bytecode <= Bytecode::kLast);
+  switch (bytecode) {
+#define CASE(Name, ...)   \
+  case Bytecode::k##Name: \
+    return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::GetOperandSize(i);
+    BYTECODE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return OperandSize::kNone;
+}
+
+
+// static
+// Byte offset of operand |i| from the start of |bytecode| (operand 0 starts
+// at offset 1, after the bytecode byte).
+int Bytecodes::GetOperandOffset(Bytecode bytecode, int i) {
+  DCHECK(bytecode <= Bytecode::kLast);
+  switch (bytecode) {
+#define CASE(Name, ...)   \
+  case Bytecode::k##Name: \
+    return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::GetOperandOffset(i);
+    BYTECODE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return 0;
+}
+
+
+// static
+// Size class of a single operand type, as declared in OPERAND_TYPE_LIST.
+OperandSize Bytecodes::SizeOfOperand(OperandType operand_type) {
+  switch (operand_type) {
+#define CASE(Name, Size)     \
+  case OperandType::k##Name: \
+    return Size;
+    OPERAND_TYPE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return OperandSize::kNone;
+}
+
+
+// static
+// Conditional jumps whose operand is an immediate offset.
+bool Bytecodes::IsConditionalJumpImmediate(Bytecode bytecode) {
+  return bytecode == Bytecode::kJumpIfTrue ||
+         bytecode == Bytecode::kJumpIfFalse ||
+         bytecode == Bytecode::kJumpIfToBooleanTrue ||
+         bytecode == Bytecode::kJumpIfToBooleanFalse ||
+         bytecode == Bytecode::kJumpIfNull ||
+         bytecode == Bytecode::kJumpIfUndefined;
+}
+
+
+// static
+// Conditional jumps that take their target via a constant ("Constant"
+// bytecode variants).
+bool Bytecodes::IsConditionalJumpConstant(Bytecode bytecode) {
+  return bytecode == Bytecode::kJumpIfTrueConstant ||
+         bytecode == Bytecode::kJumpIfFalseConstant ||
+         bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
+         bytecode == Bytecode::kJumpIfToBooleanFalseConstant ||
+         bytecode == Bytecode::kJumpIfNullConstant ||
+         bytecode == Bytecode::kJumpIfUndefinedConstant;
+}
+
+
+// static
+// Wide-index variants of the constant conditional jumps.
+bool Bytecodes::IsConditionalJumpConstantWide(Bytecode bytecode) {
+  return bytecode == Bytecode::kJumpIfTrueConstantWide ||
+         bytecode == Bytecode::kJumpIfFalseConstantWide ||
+         bytecode == Bytecode::kJumpIfToBooleanTrueConstantWide ||
+         bytecode == Bytecode::kJumpIfToBooleanFalseConstantWide ||
+         bytecode == Bytecode::kJumpIfNullConstantWide ||
+         bytecode == Bytecode::kJumpIfUndefinedConstantWide;
+}
+
+
+// static
+// Any conditional jump, regardless of operand encoding.
+bool Bytecodes::IsConditionalJump(Bytecode bytecode) {
+  return IsConditionalJumpImmediate(bytecode) ||
+         IsConditionalJumpConstant(bytecode) ||
+         IsConditionalJumpConstantWide(bytecode);
+}
+
+
+// static
+// Unconditional or conditional jump with an immediate operand.
+bool Bytecodes::IsJumpImmediate(Bytecode bytecode) {
+  return bytecode == Bytecode::kJump || IsConditionalJumpImmediate(bytecode);
+}
+
+
+// static
+// Unconditional or conditional jump via a constant.
+bool Bytecodes::IsJumpConstant(Bytecode bytecode) {
+  return bytecode == Bytecode::kJumpConstant ||
+         IsConditionalJumpConstant(bytecode);
+}
+
+
+// static
+// Wide-index variant of IsJumpConstant.
+bool Bytecodes::IsJumpConstantWide(Bytecode bytecode) {
+  return bytecode == Bytecode::kJumpConstantWide ||
+         IsConditionalJumpConstantWide(bytecode);
+}
+
+
+// static
+// Any jump bytecode, in any encoding.
+bool Bytecodes::IsJump(Bytecode bytecode) {
+  return IsJumpImmediate(bytecode) || IsJumpConstant(bytecode) ||
+         IsJumpConstantWide(bytecode);
+}
+
+
+// static
+// Bytecodes that may transfer control somewhere other than the next
+// instruction (any jump, or a return).
+bool Bytecodes::IsJumpOrReturn(Bytecode bytecode) {
+  return bytecode == Bytecode::kReturn || IsJump(bytecode);
+}
+
+
+// static
+// Disassembles a single bytecode at |bytecode_start| to |os|: a hex dump of
+// its raw bytes (padded to a fixed column width), the bytecode name, then
+// each operand formatted according to its operand type. |parameter_count| is
+// needed to print parameter-backed registers as "<this>"/"aN".
+std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
+                                int parameter_count) {
+  Vector<char> buf = Vector<char>::New(50);
+
+  Bytecode bytecode = Bytecodes::FromByte(bytecode_start[0]);
+  int bytecode_size = Bytecodes::Size(bytecode);
+
+  // Hex dump of the raw bytecode bytes.
+  for (int i = 0; i < bytecode_size; i++) {
+    SNPrintF(buf, "%02x ", bytecode_start[i]);
+    os << buf.start();
+  }
+  // Pad short bytecodes so the mnemonic column lines up.
+  const int kBytecodeColumnSize = 6;
+  for (int i = bytecode_size; i < kBytecodeColumnSize; i++) {
+    os << "   ";
+  }
+
+  os << bytecode << " ";
+
+  int number_of_operands = NumberOfOperands(bytecode);
+  for (int i = 0; i < number_of_operands; i++) {
+    OperandType op_type = GetOperandType(bytecode, i);
+    const uint8_t* operand_start =
+        &bytecode_start[GetOperandOffset(bytecode, i)];
+    switch (op_type) {
+      case interpreter::OperandType::kCount8:
+        os << "#" << static_cast<unsigned int>(*operand_start);
+        break;
+      case interpreter::OperandType::kCount16:
+        os << '#' << ReadUnalignedUInt16(operand_start);
+        break;
+      case interpreter::OperandType::kIdx8:
+        os << "[" << static_cast<unsigned int>(*operand_start) << "]";
+        break;
+      case interpreter::OperandType::kIdx16:
+        os << "[" << ReadUnalignedUInt16(operand_start) << "]";
+        break;
+      case interpreter::OperandType::kImm8:
+        // Immediates are signed 8-bit values.
+        os << "#" << static_cast<int>(static_cast<int8_t>(*operand_start));
+        break;
+      case interpreter::OperandType::kReg8:
+      case interpreter::OperandType::kMaybeReg8: {
+        // Registers print as a symbolic name for the implicit frame slots,
+        // "<this>"/"aN" for parameters, or "rN" for other registers.
+        Register reg = Register::FromOperand(*operand_start);
+        if (reg.is_function_context()) {
+          os << "<context>";
+        } else if (reg.is_function_closure()) {
+          os << "<closure>";
+        } else if (reg.is_new_target()) {
+          os << "<new.target>";
+        } else if (reg.is_parameter()) {
+          int parameter_index = reg.ToParameterIndex(parameter_count);
+          if (parameter_index == 0) {
+            os << "<this>";
+          } else {
+            os << "a" << parameter_index - 1;
+          }
+        } else {
+          os << "r" << reg.index();
+        }
+        break;
+      }
+      case interpreter::OperandType::kRegPair8: {
+        // A pair prints as the operand register and its successor.
+        Register reg = Register::FromOperand(*operand_start);
+        if (reg.is_parameter()) {
+          int parameter_index = reg.ToParameterIndex(parameter_count);
+          DCHECK_NE(parameter_index, 0);
+          os << "a" << parameter_index - 1 << "-" << parameter_index;
+        } else {
+          os << "r" << reg.index() << "-" << reg.index() + 1;
+        }
+        break;
+      }
+      case interpreter::OperandType::kReg16: {
+        Register reg =
+            Register::FromWideOperand(ReadUnalignedUInt16(operand_start));
+        if (reg.is_parameter()) {
+          int parameter_index = reg.ToParameterIndex(parameter_count);
+          DCHECK_NE(parameter_index, 0);
+          os << "a" << parameter_index - 1;
+        } else {
+          os << "r" << reg.index();
+        }
+        break;
+      }
+      case interpreter::OperandType::kNone:
+        UNREACHABLE();
+        break;
+    }
+    if (i != number_of_operands - 1) {
+      os << ", ";
+    }
+  }
+  return os;
+}
+
+
+// Stream insertion for the interpreter enums, delegating to the name
+// helpers above.
+std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode) {
+  return os << Bytecodes::ToString(bytecode);
+}
+
+
+std::ostream& operator<<(std::ostream& os, const OperandType& operand_type) {
+  return os << Bytecodes::OperandTypeToString(operand_type);
+}
+
+
+std::ostream& operator<<(std::ostream& os, const OperandSize& operand_size) {
+  return os << Bytecodes::OperandSizeToString(operand_size);
+}
+
+
+// Register indices of the implicit frame slots. The frame constants are byte
+// offsets relative to the register file pointer; negating and dividing by
+// kPointerSize converts each offset into a (negative) register index.
+static const int kLastParamRegisterIndex =
+    -InterpreterFrameConstants::kLastParamFromRegisterPointer / kPointerSize;
+static const int kFunctionClosureRegisterIndex =
+    -InterpreterFrameConstants::kFunctionFromRegisterPointer / kPointerSize;
+static const int kFunctionContextRegisterIndex =
+    -InterpreterFrameConstants::kContextFromRegisterPointer / kPointerSize;
+static const int kNewTargetRegisterIndex =
+    -InterpreterFrameConstants::kNewTargetFromRegisterPointer / kPointerSize;
+
+
+// Registers occupy range 0-127 in 8-bit value leaving 128 unused values.
+// Parameter indices are biased with the negative value kLastParamRegisterIndex
+// for ease of access in the interpreter.
+static const int kMaxParameterIndex = 128 + kLastParamRegisterIndex;
+
+
+// Maps parameter |index| (0 == the receiver) onto its register. Parameter
+// registers have negative indices, biased so the last parameter lands on
+// kLastParamRegisterIndex; the result must fit the signed 8-bit operand
+// encoding (checked via kMinInt8).
+Register Register::FromParameterIndex(int index, int parameter_count) {
+  DCHECK_GE(index, 0);
+  DCHECK_LT(index, parameter_count);
+  DCHECK_LE(parameter_count, kMaxParameterIndex + 1);
+  int register_index = kLastParamRegisterIndex - parameter_count + index + 1;
+  DCHECK_LT(register_index, 0);
+  DCHECK_GE(register_index, kMinInt8);
+  return Register(register_index);
+}
+
+
+// Inverse of FromParameterIndex: recovers the 0-based parameter index (0 is
+// the receiver) from a parameter register.
+int Register::ToParameterIndex(int parameter_count) const {
+  DCHECK(is_parameter());
+  return index() - kLastParamRegisterIndex + parameter_count - 1;
+}
+
+
+// Accessors and predicates for the implicit frame registers (closure,
+// context, new.target), plus the parameter index limit.
+Register Register::function_closure() {
+  return Register(kFunctionClosureRegisterIndex);
+}
+
+
+bool Register::is_function_closure() const {
+  return index() == kFunctionClosureRegisterIndex;
+}
+
+
+Register Register::function_context() {
+  return Register(kFunctionContextRegisterIndex);
+}
+
+
+bool Register::is_function_context() const {
+  return index() == kFunctionContextRegisterIndex;
+}
+
+
+Register Register::new_target() { return Register(kNewTargetRegisterIndex); }
+
+
+bool Register::is_new_target() const {
+  return index() == kNewTargetRegisterIndex;
+}
+
+
+int Register::MaxParameterIndex() { return kMaxParameterIndex; }
+
+
+// Register operands are stored negated: negative (parameter/implicit frame)
+// indices encode to positive values and vice versa.
+uint8_t Register::ToOperand() const {
+  DCHECK_GE(index_, kMinInt8);
+  DCHECK_LE(index_, kMaxInt8);
+  return static_cast<uint8_t>(-index_);
+}
+
+
+Register Register::FromOperand(uint8_t operand) {
+  return Register(-static_cast<int8_t>(operand));
+}
+
+
+// Wide (16-bit) variants of the operand conversions above.
+uint16_t Register::ToWideOperand() const {
+  DCHECK_GE(index_, kMinInt16);
+  DCHECK_LE(index_, kMaxInt16);
+  return static_cast<uint16_t>(-index_);
+}
+
+
+Register Register::FromWideOperand(uint16_t operand) {
+  return Register(-static_cast<int16_t>(operand));
+}
+
+
+// Returns true if the valid prefix of reg1..reg5 occupies consecutive
+// register indices. reg1 and reg2 are always checked; reg3-reg5 are
+// optional trailing registers.
+// NOTE(review): each test reads the previous register's index, so an invalid
+// register is assumed to appear only at the tail of the argument list —
+// confirm callers never pass a valid register after an invalid one.
+bool Register::AreContiguous(Register reg1, Register reg2, Register reg3,
+                             Register reg4, Register reg5) {
+  if (reg1.index() + 1 != reg2.index()) {
+    return false;
+  }
+  if (reg3.is_valid() && reg2.index() + 1 != reg3.index()) {
+    return false;
+  }
+  if (reg4.is_valid() && reg3.index() + 1 != reg4.index()) {
+    return false;
+  }
+  if (reg5.is_valid() && reg4.index() + 1 != reg5.index()) {
+    return false;
+  }
+  return true;
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/bytecodes.h b/src/interpreter/bytecodes.h
new file mode 100644
index 0000000..a9beb6c
--- /dev/null
+++ b/src/interpreter/bytecodes.h
@@ -0,0 +1,419 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODES_H_
+#define V8_INTERPRETER_BYTECODES_H_
+
+#include <iosfwd>
+
+// Clients of this interface shouldn't depend on lots of interpreter internals.
+// Do not include anything from src/interpreter here!
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// The list of operand types used by bytecodes.
+// Shape: V(OperandTypeName, OperandSize) -- the size class gives the
+// operand's width in the bytecode stream (see enum class OperandSize).
+#define OPERAND_TYPE_LIST(V)       \
+                                   \
+  /* None operand. */              \
+  V(None, OperandSize::kNone)      \
+                                   \
+  /* Byte operands. */             \
+  V(Count8, OperandSize::kByte)    \
+  V(Imm8, OperandSize::kByte)      \
+  V(Idx8, OperandSize::kByte)      \
+  V(MaybeReg8, OperandSize::kByte) \
+  V(Reg8, OperandSize::kByte)      \
+  V(RegPair8, OperandSize::kByte)  \
+                                   \
+  /* Short operands. */            \
+  V(Count16, OperandSize::kShort)  \
+  V(Idx16, OperandSize::kShort)    \
+  V(Reg16, OperandSize::kShort)
+
+// The list of bytecodes which are interpreted by the interpreter.
+// Shape: V(BytecodeName, OperandType...) with operands in bytecode-stream
+// order. "Wide" variants take 16-bit operands where the base bytecode
+// takes 8-bit ones.
+#define BYTECODE_LIST(V)                                                       \
+                                                                               \
+  /* Loading the accumulator */                                                \
+  V(LdaZero, OperandType::kNone)                                               \
+  V(LdaSmi8, OperandType::kImm8)                                               \
+  V(LdaUndefined, OperandType::kNone)                                          \
+  V(LdaNull, OperandType::kNone)                                               \
+  V(LdaTheHole, OperandType::kNone)                                            \
+  V(LdaTrue, OperandType::kNone)                                               \
+  V(LdaFalse, OperandType::kNone)                                              \
+  V(LdaConstant, OperandType::kIdx8)                                           \
+  V(LdaConstantWide, OperandType::kIdx16)                                      \
+                                                                               \
+  /* Globals */                                                                \
+  V(LdaGlobalSloppy, OperandType::kIdx8, OperandType::kIdx8)                   \
+  V(LdaGlobalStrict, OperandType::kIdx8, OperandType::kIdx8)                   \
+  V(LdaGlobalInsideTypeofSloppy, OperandType::kIdx8, OperandType::kIdx8)       \
+  V(LdaGlobalInsideTypeofStrict, OperandType::kIdx8, OperandType::kIdx8)       \
+  V(LdaGlobalSloppyWide, OperandType::kIdx16, OperandType::kIdx16)             \
+  V(LdaGlobalStrictWide, OperandType::kIdx16, OperandType::kIdx16)             \
+  V(LdaGlobalInsideTypeofSloppyWide, OperandType::kIdx16, OperandType::kIdx16) \
+  V(LdaGlobalInsideTypeofStrictWide, OperandType::kIdx16, OperandType::kIdx16) \
+  V(StaGlobalSloppy, OperandType::kIdx8, OperandType::kIdx8)                   \
+  V(StaGlobalStrict, OperandType::kIdx8, OperandType::kIdx8)                   \
+  V(StaGlobalSloppyWide, OperandType::kIdx16, OperandType::kIdx16)             \
+  V(StaGlobalStrictWide, OperandType::kIdx16, OperandType::kIdx16)             \
+                                                                               \
+  /* Context operations */                                                     \
+  V(PushContext, OperandType::kReg8)                                           \
+  V(PopContext, OperandType::kReg8)                                            \
+  V(LdaContextSlot, OperandType::kReg8, OperandType::kIdx8)                    \
+  V(StaContextSlot, OperandType::kReg8, OperandType::kIdx8)                    \
+  V(LdaContextSlotWide, OperandType::kReg8, OperandType::kIdx16)               \
+  V(StaContextSlotWide, OperandType::kReg8, OperandType::kIdx16)               \
+                                                                               \
+  /* Load-Store lookup slots */                                                \
+  V(LdaLookupSlot, OperandType::kIdx8)                                         \
+  V(LdaLookupSlotInsideTypeof, OperandType::kIdx8)                             \
+  V(LdaLookupSlotWide, OperandType::kIdx16)                                    \
+  V(LdaLookupSlotInsideTypeofWide, OperandType::kIdx16)                        \
+  V(StaLookupSlotSloppy, OperandType::kIdx8)                                   \
+  V(StaLookupSlotStrict, OperandType::kIdx8)                                   \
+  V(StaLookupSlotSloppyWide, OperandType::kIdx16)                              \
+  V(StaLookupSlotStrictWide, OperandType::kIdx16)                              \
+                                                                               \
+  /* Register-accumulator transfers */                                         \
+  V(Ldar, OperandType::kReg8)                                                  \
+  V(Star, OperandType::kReg8)                                                  \
+                                                                               \
+  /* Register-register transfers */                                            \
+  V(Mov, OperandType::kReg8, OperandType::kReg8)                               \
+  V(Exchange, OperandType::kReg8, OperandType::kReg16)                         \
+  V(ExchangeWide, OperandType::kReg16, OperandType::kReg16)                    \
+                                                                               \
+  /* LoadIC operations */                                                      \
+  V(LoadICSloppy, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8)  \
+  V(LoadICStrict, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8)  \
+  V(KeyedLoadICSloppy, OperandType::kReg8, OperandType::kIdx8)                 \
+  V(KeyedLoadICStrict, OperandType::kReg8, OperandType::kIdx8)                 \
+  /* TODO(rmcilroy): Wide register operands too? */                            \
+  V(LoadICSloppyWide, OperandType::kReg8, OperandType::kIdx16,                 \
+    OperandType::kIdx16)                                                       \
+  V(LoadICStrictWide, OperandType::kReg8, OperandType::kIdx16,                 \
+    OperandType::kIdx16)                                                       \
+  V(KeyedLoadICSloppyWide, OperandType::kReg8, OperandType::kIdx16)            \
+  V(KeyedLoadICStrictWide, OperandType::kReg8, OperandType::kIdx16)            \
+                                                                               \
+  /* StoreIC operations */                                                     \
+  V(StoreICSloppy, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
+  V(StoreICStrict, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
+  V(KeyedStoreICSloppy, OperandType::kReg8, OperandType::kReg8,                \
+    OperandType::kIdx8)                                                        \
+  V(KeyedStoreICStrict, OperandType::kReg8, OperandType::kReg8,                \
+    OperandType::kIdx8)                                                        \
+  /* TODO(rmcilroy): Wide register operands too? */                            \
+  V(StoreICSloppyWide, OperandType::kReg8, OperandType::kIdx16,                \
+    OperandType::kIdx16)                                                       \
+  V(StoreICStrictWide, OperandType::kReg8, OperandType::kIdx16,                \
+    OperandType::kIdx16)                                                       \
+  V(KeyedStoreICSloppyWide, OperandType::kReg8, OperandType::kReg8,            \
+    OperandType::kIdx16)                                                       \
+  V(KeyedStoreICStrictWide, OperandType::kReg8, OperandType::kReg8,            \
+    OperandType::kIdx16)                                                       \
+                                                                               \
+  /* Binary Operators */                                                       \
+  V(Add, OperandType::kReg8)                                                   \
+  V(Sub, OperandType::kReg8)                                                   \
+  V(Mul, OperandType::kReg8)                                                   \
+  V(Div, OperandType::kReg8)                                                   \
+  V(Mod, OperandType::kReg8)                                                   \
+  V(BitwiseOr, OperandType::kReg8)                                             \
+  V(BitwiseXor, OperandType::kReg8)                                            \
+  V(BitwiseAnd, OperandType::kReg8)                                            \
+  V(ShiftLeft, OperandType::kReg8)                                             \
+  V(ShiftRight, OperandType::kReg8)                                            \
+  V(ShiftRightLogical, OperandType::kReg8)                                     \
+                                                                               \
+  /* Unary Operators */                                                        \
+  V(Inc, OperandType::kNone)                                                   \
+  V(Dec, OperandType::kNone)                                                   \
+  V(LogicalNot, OperandType::kNone)                                            \
+  V(TypeOf, OperandType::kNone)                                                \
+  V(DeletePropertyStrict, OperandType::kReg8)                                  \
+  V(DeletePropertySloppy, OperandType::kReg8)                                  \
+  V(DeleteLookupSlot, OperandType::kNone)                                      \
+                                                                               \
+  /* Call operations */                                                        \
+  V(Call, OperandType::kReg8, OperandType::kReg8, OperandType::kCount8,        \
+    OperandType::kIdx8)                                                        \
+  V(CallWide, OperandType::kReg8, OperandType::kReg8, OperandType::kCount16,   \
+    OperandType::kIdx16)                                                       \
+  V(CallRuntime, OperandType::kIdx16, OperandType::kMaybeReg8,                 \
+    OperandType::kCount8)                                                      \
+  V(CallRuntimeForPair, OperandType::kIdx16, OperandType::kMaybeReg8,          \
+    OperandType::kCount8, OperandType::kRegPair8)                              \
+  V(CallJSRuntime, OperandType::kIdx16, OperandType::kReg8,                    \
+    OperandType::kCount8)                                                      \
+                                                                               \
+  /* New operator */                                                           \
+  V(New, OperandType::kReg8, OperandType::kMaybeReg8, OperandType::kCount8)    \
+                                                                               \
+  /* Test Operators */                                                         \
+  V(TestEqual, OperandType::kReg8)                                             \
+  V(TestNotEqual, OperandType::kReg8)                                          \
+  V(TestEqualStrict, OperandType::kReg8)                                       \
+  V(TestNotEqualStrict, OperandType::kReg8)                                    \
+  V(TestLessThan, OperandType::kReg8)                                          \
+  V(TestGreaterThan, OperandType::kReg8)                                       \
+  V(TestLessThanOrEqual, OperandType::kReg8)                                   \
+  V(TestGreaterThanOrEqual, OperandType::kReg8)                                \
+  V(TestInstanceOf, OperandType::kReg8)                                        \
+  V(TestIn, OperandType::kReg8)                                                \
+                                                                               \
+  /* Cast operators */                                                         \
+  V(ToName, OperandType::kNone)                                                \
+  V(ToNumber, OperandType::kNone)                                              \
+  V(ToObject, OperandType::kNone)                                              \
+                                                                               \
+  /* Literals */                                                               \
+  V(CreateRegExpLiteral, OperandType::kIdx8, OperandType::kIdx8,               \
+    OperandType::kImm8)                                                        \
+  V(CreateArrayLiteral, OperandType::kIdx8, OperandType::kIdx8,                \
+    OperandType::kImm8)                                                        \
+  V(CreateObjectLiteral, OperandType::kIdx8, OperandType::kIdx8,               \
+    OperandType::kImm8)                                                        \
+  V(CreateRegExpLiteralWide, OperandType::kIdx16, OperandType::kIdx16,         \
+    OperandType::kImm8)                                                        \
+  V(CreateArrayLiteralWide, OperandType::kIdx16, OperandType::kIdx16,          \
+    OperandType::kImm8)                                                        \
+  V(CreateObjectLiteralWide, OperandType::kIdx16, OperandType::kIdx16,         \
+    OperandType::kImm8)                                                        \
+                                                                               \
+  /* Closure allocation */                                                     \
+  V(CreateClosure, OperandType::kIdx8, OperandType::kImm8)                     \
+  V(CreateClosureWide, OperandType::kIdx16, OperandType::kImm8)                \
+                                                                               \
+  /* Arguments allocation */                                                   \
+  V(CreateMappedArguments, OperandType::kNone)                                 \
+  V(CreateUnmappedArguments, OperandType::kNone)                               \
+                                                                               \
+  /* Control Flow */                                                           \
+  V(Jump, OperandType::kImm8)                                                  \
+  V(JumpConstant, OperandType::kIdx8)                                          \
+  V(JumpConstantWide, OperandType::kIdx16)                                     \
+  V(JumpIfTrue, OperandType::kImm8)                                            \
+  V(JumpIfTrueConstant, OperandType::kIdx8)                                    \
+  V(JumpIfTrueConstantWide, OperandType::kIdx16)                               \
+  V(JumpIfFalse, OperandType::kImm8)                                           \
+  V(JumpIfFalseConstant, OperandType::kIdx8)                                   \
+  V(JumpIfFalseConstantWide, OperandType::kIdx16)                              \
+  V(JumpIfToBooleanTrue, OperandType::kImm8)                                   \
+  V(JumpIfToBooleanTrueConstant, OperandType::kIdx8)                           \
+  V(JumpIfToBooleanTrueConstantWide, OperandType::kIdx16)                      \
+  V(JumpIfToBooleanFalse, OperandType::kImm8)                                  \
+  V(JumpIfToBooleanFalseConstant, OperandType::kIdx8)                          \
+  V(JumpIfToBooleanFalseConstantWide, OperandType::kIdx16)                     \
+  V(JumpIfNull, OperandType::kImm8)                                            \
+  V(JumpIfNullConstant, OperandType::kIdx8)                                    \
+  V(JumpIfNullConstantWide, OperandType::kIdx16)                               \
+  V(JumpIfUndefined, OperandType::kImm8)                                       \
+  V(JumpIfUndefinedConstant, OperandType::kIdx8)                               \
+  V(JumpIfUndefinedConstantWide, OperandType::kIdx16)                          \
+                                                                               \
+  /* Complex flow control For..in */                                           \
+  V(ForInPrepare, OperandType::kReg8, OperandType::kReg8, OperandType::kReg8)  \
+  V(ForInDone, OperandType::kReg8, OperandType::kReg8)                         \
+  V(ForInNext, OperandType::kReg8, OperandType::kReg8, OperandType::kReg8,     \
+    OperandType::kReg8)                                                        \
+  V(ForInStep, OperandType::kReg8)                                             \
+                                                                               \
+  /* Non-local flow control */                                                 \
+  V(Throw, OperandType::kNone)                                                 \
+  V(Return, OperandType::kNone)
+
+
+// Enumeration of the size classes of operand types used by bytecodes.
+// The numeric value is the operand's width in bytes in the bytecode stream.
+enum class OperandSize : uint8_t {
+  kNone = 0,
+  kByte = 1,
+  kShort = 2,
+};
+
+
+// Enumeration of operand types used by bytecodes: one enumerator per entry
+// in OPERAND_TYPE_LIST, plus a kLast sentinel equal to the final entry.
+enum class OperandType : uint8_t {
+#define DECLARE_OPERAND_TYPE(Name, _) k##Name,
+  OPERAND_TYPE_LIST(DECLARE_OPERAND_TYPE)
+#undef DECLARE_OPERAND_TYPE
+#define COUNT_OPERAND_TYPES(x, _) +1
+  // The COUNT_OPERAND macro will turn this into kLast = -1 +1 +1... which will
+  // evaluate to the same value as the last operand.
+  kLast = -1 OPERAND_TYPE_LIST(COUNT_OPERAND_TYPES)
+#undef COUNT_OPERAND_TYPES
+};
+
+
+// Enumeration of interpreter bytecodes: one enumerator per entry in
+// BYTECODE_LIST, plus a kLast sentinel equal to the final real bytecode.
+enum class Bytecode : uint8_t {
+#define DECLARE_BYTECODE(Name, ...) k##Name,
+  BYTECODE_LIST(DECLARE_BYTECODE)
+#undef DECLARE_BYTECODE
+#define COUNT_BYTECODE(x, ...) +1
+  // The COUNT_BYTECODE macro will turn this into kLast = -1 +1 +1... which will
+  // evaluate to the same value as the last real bytecode.
+  kLast = -1 BYTECODE_LIST(COUNT_BYTECODE)
+#undef COUNT_BYTECODE
+};
+
+
+// An interpreter Register which is located in the function's Register file
+// in its stack-frame. Registers hold parameters, this, and expression
+// values. Parameter registers use negative indices; locals use indices
+// >= 0 (see is_parameter()).
+class Register {
+ public:
+  // Default constructor yields an invalid register (see is_valid()).
+  Register() : index_(kIllegalIndex) {}
+
+  explicit Register(int index) : index_(index) {}
+
+  int index() const {
+    DCHECK(index_ != kIllegalIndex);
+    return index_;
+  }
+  bool is_parameter() const { return index() < 0; }
+  bool is_valid() const { return index_ != kIllegalIndex; }
+
+  // Conversions between register indices and parameter indices.
+  static Register FromParameterIndex(int index, int parameter_count);
+  int ToParameterIndex(int parameter_count) const;
+  static int MaxParameterIndex();
+
+  // Returns the register for the function's closure object.
+  static Register function_closure();
+  bool is_function_closure() const;
+
+  // Returns the register for the function's outer context.
+  static Register function_context();
+  bool is_function_context() const;
+
+  // Returns the register for the incoming new target value.
+  static Register new_target();
+  bool is_new_target() const;
+
+  // Conversions to/from 8-bit bytecode operands (index stored negated).
+  static Register FromOperand(uint8_t operand);
+  uint8_t ToOperand() const;
+
+  // Conversions to/from 16-bit ("wide") bytecode operands.
+  static Register FromWideOperand(uint16_t operand);
+  uint16_t ToWideOperand() const;
+
+  // True if the valid registers among reg1..reg5 have contiguous ascending
+  // indices; invalid registers are only permitted as a trailing suffix.
+  static bool AreContiguous(Register reg1, Register reg2,
+                            Register reg3 = Register(),
+                            Register reg4 = Register(),
+                            Register reg5 = Register());
+
+  bool operator==(const Register& other) const {
+    return index() == other.index();
+  }
+  bool operator!=(const Register& other) const {
+    return index() != other.index();
+  }
+  bool operator<(const Register& other) const {
+    return index() < other.index();
+  }
+  bool operator<=(const Register& other) const {
+    return index() <= other.index();
+  }
+
+ private:
+  // Sentinel index marking a default-constructed, invalid register.
+  static const int kIllegalIndex = kMaxInt;
+
+  // Declared private and (presumably) left unimplemented to prevent heap
+  // allocation of Registers -- pre-C++11 equivalent of "= delete".
+  void* operator new(size_t size);
+  void operator delete(void* p);
+
+  int index_;
+};
+
+
+// Static-only collection of accessors and predicates over the bytecodes
+// declared in BYTECODE_LIST; instantiation is disallowed (see below).
+class Bytecodes {
+ public:
+  // Returns string representation of |bytecode|.
+  static const char* ToString(Bytecode bytecode);
+
+  // Returns string representation of |operand_type|.
+  static const char* OperandTypeToString(OperandType operand_type);
+
+  // Returns string representation of |operand_size|.
+  static const char* OperandSizeToString(OperandSize operand_size);
+
+  // Returns byte value of bytecode.
+  static uint8_t ToByte(Bytecode bytecode);
+
+  // Returns bytecode for |value|.
+  static Bytecode FromByte(uint8_t value);
+
+  // Returns the number of operands expected by |bytecode|.
+  static int NumberOfOperands(Bytecode bytecode);
+
+  // Return the i-th operand of |bytecode|.
+  static OperandType GetOperandType(Bytecode bytecode, int i);
+
+  // Return the size of the i-th operand of |bytecode|.
+  static OperandSize GetOperandSize(Bytecode bytecode, int i);
+
+  // Returns the offset of the i-th operand of |bytecode| relative to the start
+  // of the bytecode.
+  static int GetOperandOffset(Bytecode bytecode, int i);
+
+  // Returns the size of the bytecode including its operands.
+  static int Size(Bytecode bytecode);
+
+  // Returns the size of |operand|.
+  static OperandSize SizeOfOperand(OperandType operand);
+
+  // Return true if the bytecode is a conditional jump taking
+  // an immediate byte operand (OperandType::kImm8).
+  static bool IsConditionalJumpImmediate(Bytecode bytecode);
+
+  // Return true if the bytecode is a conditional jump taking
+  // a constant pool entry (OperandType::kIdx8).
+  static bool IsConditionalJumpConstant(Bytecode bytecode);
+
+  // Return true if the bytecode is a conditional jump taking
+  // a constant pool entry (OperandType::kIdx16).
+  static bool IsConditionalJumpConstantWide(Bytecode bytecode);
+
+  // Return true if the bytecode is a conditional jump taking
+  // any kind of operand.
+  static bool IsConditionalJump(Bytecode bytecode);
+
+  // Return true if the bytecode is a jump or a conditional jump taking
+  // an immediate byte operand (OperandType::kImm8).
+  static bool IsJumpImmediate(Bytecode bytecode);
+
+  // Return true if the bytecode is a jump or conditional jump taking a
+  // constant pool entry (OperandType::kIdx8).
+  static bool IsJumpConstant(Bytecode bytecode);
+
+  // Return true if the bytecode is a jump or conditional jump taking a
+  // constant pool entry (OperandType::kIdx16).
+  static bool IsJumpConstantWide(Bytecode bytecode);
+
+  // Return true if the bytecode is a jump or conditional jump taking
+  // any kind of operand.
+  static bool IsJump(Bytecode bytecode);
+
+  // Return true if the bytecode is a conditional jump, a jump, or a return.
+  static bool IsJumpOrReturn(Bytecode bytecode);
+
+  // Decode a single bytecode and operands to |os|.
+  static std::ostream& Decode(std::ostream& os, const uint8_t* bytecode_start,
+                              int number_of_parameters);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Bytecodes);
+};
+
+// Stream insertion operators for the interpreter enums, for logging and
+// debugging output.
+std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode);
+std::ostream& operator<<(std::ostream& os, const OperandType& operand_type);
+std::ostream& operator<<(std::ostream& os, const OperandSize& operand_type);
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_BYTECODES_H_
diff --git a/src/interpreter/constant-array-builder.cc b/src/interpreter/constant-array-builder.cc
new file mode 100644
index 0000000..2586e1f
--- /dev/null
+++ b/src/interpreter/constant-array-builder.cc
@@ -0,0 +1,174 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/constant-array-builder.h"
+
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// A slice of the constant array covering the contiguous index range
+// [start_index, start_index + capacity).
+ConstantArrayBuilder::ConstantArraySlice::ConstantArraySlice(Zone* zone,
+                                                             size_t start_index,
+                                                             size_t capacity)
+    : start_index_(start_index),
+      capacity_(capacity),
+      reserved_(0),
+      constants_(zone) {}
+
+
+// Sets aside one slot of this slice for a later commit/discard without
+// allocating an index yet.
+void ConstantArrayBuilder::ConstantArraySlice::Reserve() {
+  DCHECK_GT(available(), 0u);
+  reserved_++;
+  DCHECK_LE(reserved_, capacity() - constants_.size());
+}
+
+
+// Releases one previously Reserve()d slot.
+void ConstantArrayBuilder::ConstantArraySlice::Unreserve() {
+  DCHECK_GT(reserved_, 0u);
+  reserved_--;
+}
+
+
+// Appends |object| to this slice and returns its absolute constant-pool
+// index (the slice-relative position plus start_index()).
+size_t ConstantArrayBuilder::ConstantArraySlice::Allocate(
+    Handle<Object> object) {
+  DCHECK_GT(available(), 0u);
+  size_t index = constants_.size();
+  DCHECK_LT(index, capacity());
+  constants_.push_back(object);
+  return index + start_index();
+}
+
+
+// Returns the constant stored at absolute constant-pool index |index|.
+Handle<Object> ConstantArrayBuilder::ConstantArraySlice::At(
+    size_t index) const {
+  return constants_[index - start_index()];
+}
+
+
+// Out-of-line definitions for the class-scope capacity constants.
+STATIC_CONST_MEMBER_DEFINITION const size_t ConstantArrayBuilder::kMaxCapacity;
+STATIC_CONST_MEMBER_DEFINITION const size_t ConstantArrayBuilder::kLowCapacity;
+
+
+// The builder maintains two slices: indices [0, kLowCapacity) are reachable
+// by 8-bit operands, indices [kLowCapacity, kMaxCapacity) only by 16-bit
+// operands.
+ConstantArrayBuilder::ConstantArrayBuilder(Isolate* isolate, Zone* zone)
+    : isolate_(isolate),
+      idx8_slice_(zone, 0, kLowCapacity),
+      idx16_slice_(zone, kLowCapacity, kHighCapacity),
+      constants_map_(isolate->heap(), zone) {
+  STATIC_ASSERT(kMaxCapacity == static_cast<size_t>(kMaxUInt16 + 1));
+  DCHECK_EQ(idx8_slice_.start_index(), 0u);
+  DCHECK_EQ(idx8_slice_.capacity(), kLowCapacity);
+  DCHECK_EQ(idx16_slice_.start_index(), kLowCapacity);
+  DCHECK_EQ(idx16_slice_.capacity(), kMaxCapacity - kLowCapacity);
+}
+
+
+// Number of elements the final constant array will have. Once the 16-bit
+// slice is in use, any unfilled tail of the 8-bit slice is counted too;
+// those gap indices read as the-hole (see At()).
+size_t ConstantArrayBuilder::size() const {
+  if (idx16_slice_.size() > 0) {
+    return idx16_slice_.start_index() + idx16_slice_.size();
+  } else {
+    return idx8_slice_.size();
+  }
+}
+
+
+// Returns the constant at |index|, or the-hole for an index in the
+// unallocated gap between the 8-bit slice's contents and the start of the
+// 16-bit slice.
+Handle<Object> ConstantArrayBuilder::At(size_t index) const {
+  if (index >= idx16_slice_.start_index()) {
+    return idx16_slice_.At(index);
+  } else if (index < idx8_slice_.size()) {
+    return idx8_slice_.At(index);
+  } else {
+    return isolate_->factory()->the_hole_value();
+  }
+}
+
+
+// Materializes the accumulated constants as a tenured FixedArray.
+Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Factory* factory) const {
+  Handle<FixedArray> fixed_array =
+      factory->NewFixedArray(static_cast<int>(size()), PretenureFlag::TENURED);
+  for (int i = 0; i < fixed_array->length(); i++) {
+    fixed_array->set(i, *At(static_cast<size_t>(i)));
+  }
+  return fixed_array;
+}
+
+
+// Returns the constant-pool index for |object|, reusing the existing entry
+// when the object was inserted before.
+size_t ConstantArrayBuilder::Insert(Handle<Object> object) {
+  index_t* entry = constants_map_.Find(object);
+  return (entry == nullptr) ? AllocateEntry(object) : *entry;
+}
+
+
+// Allocates a fresh constant-pool slot for |object|, preferring the 8-bit
+// addressable slice while it has room, and records the index in the
+// identity map so later Insert() calls reuse it. NOTE(review): Get() is
+// presumably find-or-insert on the IdentityMap -- confirm against
+// src/identity-map.h.
+ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateEntry(
+    Handle<Object> object) {
+  DCHECK(!object->IsOddball());
+  size_t index;
+  index_t* entry = constants_map_.Get(object);
+  if (idx8_slice_.available() > 0) {
+    index = idx8_slice_.Allocate(object);
+  } else {
+    index = idx16_slice_.Allocate(object);
+  }
+  CHECK_LT(index, kMaxCapacity);
+  *entry = static_cast<index_t>(index);
+  return *entry;
+}
+
+
+// Reserves capacity for one future constant and reports the operand size
+// needed to address it: kByte while the 8-bit slice has room, otherwise
+// kShort. Must be paired with CommitReservedEntry() or
+// DiscardReservedEntry().
+OperandSize ConstantArrayBuilder::CreateReservedEntry() {
+  if (idx8_slice_.available() > 0) {
+    idx8_slice_.Reserve();
+    return OperandSize::kByte;
+  } else if (idx16_slice_.available() > 0) {
+    idx16_slice_.Reserve();
+    return OperandSize::kShort;
+  } else {
+    UNREACHABLE();
+    return OperandSize::kNone;
+  }
+}
+
+
+// Converts a reservation made with CreateReservedEntry() into a real entry
+// for |object| and returns its constant-pool index. The reservation is
+// released up front so the allocation below can use the freed capacity.
+size_t ConstantArrayBuilder::CommitReservedEntry(OperandSize operand_size,
+                                                 Handle<Object> object) {
+  DiscardReservedEntry(operand_size);
+  size_t index;
+  index_t* entry = constants_map_.Find(object);
+  if (nullptr == entry) {
+    index = AllocateEntry(object);
+  } else {
+    if (operand_size == OperandSize::kByte &&
+        *entry >= idx8_slice_.capacity()) {
+      // The object is already in the constant array, but has an index
+      // outside the range of an idx8 operand so we need to create a
+      // duplicate entry in the idx8 operand range to satisfy the
+      // commitment.
+      *entry = static_cast<index_t>(idx8_slice_.Allocate(object));
+    }
+    index = *entry;
+  }
+  DCHECK(operand_size == OperandSize::kShort || index < idx8_slice_.capacity());
+  DCHECK_LT(index, kMaxCapacity);
+  return index;
+}
+
+
+// Releases a reservation made with CreateReservedEntry() without
+// allocating an entry.
+void ConstantArrayBuilder::DiscardReservedEntry(OperandSize operand_size) {
+  switch (operand_size) {
+    case OperandSize::kByte:
+      idx8_slice_.Unreserve();
+      return;
+    case OperandSize::kShort:
+      idx16_slice_.Unreserve();
+      return;
+    default:
+      UNREACHABLE();
+  }
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/constant-array-builder.h b/src/interpreter/constant-array-builder.h
new file mode 100644
index 0000000..c882b1d
--- /dev/null
+++ b/src/interpreter/constant-array-builder.h
@@ -0,0 +1,97 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_CONSTANT_ARRAY_BUILDER_H_
+#define V8_INTERPRETER_CONSTANT_ARRAY_BUILDER_H_
+
+#include "src/identity-map.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class Factory;
+class Isolate;
+
+namespace interpreter {
+
+// A helper class for constructing constant arrays for the interpreter.
+// Constants are deduplicated on insert and stored in one of two slices:
+// indices below kLowCapacity are addressable by 8-bit operands, the rest
+// only by 16-bit operands.
+class ConstantArrayBuilder final : public ZoneObject {
+ public:
+  // Capacity of the 8-bit operand slice.
+  static const size_t kLowCapacity = 1u << kBitsPerByte;
+
+  // Capacity of the combined 8-bit and 16-bit operand slices.
+  static const size_t kMaxCapacity = 1u << (2 * kBitsPerByte);
+
+  // Capacity of the 16-bit operand slice.
+  static const size_t kHighCapacity = kMaxCapacity - kLowCapacity;
+
+  ConstantArrayBuilder(Isolate* isolate, Zone* zone);
+
+  // Generate a fixed array of constants based on inserted objects.
+  Handle<FixedArray> ToFixedArray(Factory* factory) const;
+
+  // Returns the object in the constant pool array that is at index
+  // |index|.
+  Handle<Object> At(size_t index) const;
+
+  // Returns the number of elements in the array.
+  size_t size() const;
+
+  // Insert an object into the constants array if it is not already
+  // present. Returns the array index associated with the object.
+  size_t Insert(Handle<Object> object);
+
+  // Creates a reserved entry in the constant pool and returns
+  // the size of the operand that'll be required to hold the entry
+  // when committed.
+  OperandSize CreateReservedEntry();
+
+  // Commit reserved entry and returns the constant pool index for the
+  // object.
+  size_t CommitReservedEntry(OperandSize operand_size, Handle<Object> object);
+
+  // Discards constant pool reservation.
+  void DiscardReservedEntry(OperandSize operand_size);
+
+ private:
+  typedef uint16_t index_t;
+
+  index_t AllocateEntry(Handle<Object> object);
+
+  // A contiguous sub-range of the constant pool; tracks committed entries
+  // plus slots reserved for later commit (see the .cc file).
+  struct ConstantArraySlice final {
+    ConstantArraySlice(Zone* zone, size_t start_index, size_t capacity);
+    void Reserve();
+    void Unreserve();
+    size_t Allocate(Handle<Object> object);
+    Handle<Object> At(size_t index) const;
+
+    inline size_t available() const { return capacity() - reserved() - size(); }
+    inline size_t reserved() const { return reserved_; }
+    inline size_t capacity() const { return capacity_; }
+    inline size_t size() const { return constants_.size(); }
+    inline size_t start_index() const { return start_index_; }
+
+   private:
+    const size_t start_index_;  // First constant-pool index covered.
+    const size_t capacity_;     // Maximum number of entries in this slice.
+    size_t reserved_;           // Slots reserved but not yet allocated.
+    ZoneVector<Handle<Object>> constants_;
+
+    DISALLOW_COPY_AND_ASSIGN(ConstantArraySlice);
+  };
+
+  Isolate* isolate_;
+  ConstantArraySlice idx8_slice_;   // Entries addressable by 8-bit operands.
+  ConstantArraySlice idx16_slice_;  // Entries needing 16-bit operands.
+  IdentityMap<index_t> constants_map_;  // Object identity -> pool index.
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_CONSTANT_ARRAY_BUILDER_H_
diff --git a/src/interpreter/control-flow-builders.cc b/src/interpreter/control-flow-builders.cc
new file mode 100644
index 0000000..99066e8
--- /dev/null
+++ b/src/interpreter/control-flow-builders.cc
@@ -0,0 +1,142 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/control-flow-builders.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+
+BreakableControlFlowBuilder::~BreakableControlFlowBuilder() {
+  DCHECK(break_sites_.empty());
+}
+
+
+void BreakableControlFlowBuilder::SetBreakTarget(const BytecodeLabel& target) {
+  BindLabels(target, &break_sites_);
+}
+
+
+void BreakableControlFlowBuilder::EmitJump(ZoneVector<BytecodeLabel>* sites) {
+  sites->push_back(BytecodeLabel());
+  builder()->Jump(&sites->back());
+}
+
+
+void BreakableControlFlowBuilder::EmitJumpIfTrue(
+    ZoneVector<BytecodeLabel>* sites) {
+  sites->push_back(BytecodeLabel());
+  builder()->JumpIfTrue(&sites->back());
+}
+
+
+void BreakableControlFlowBuilder::EmitJumpIfFalse(
+    ZoneVector<BytecodeLabel>* sites) {
+  sites->push_back(BytecodeLabel());
+  builder()->JumpIfFalse(&sites->back());
+}
+
+
+void BreakableControlFlowBuilder::EmitJumpIfUndefined(
+    ZoneVector<BytecodeLabel>* sites) {
+  sites->push_back(BytecodeLabel());
+  builder()->JumpIfUndefined(&sites->back());
+}
+
+
+void BreakableControlFlowBuilder::EmitJumpIfNull(
+    ZoneVector<BytecodeLabel>* sites) {
+  sites->push_back(BytecodeLabel());
+  builder()->JumpIfNull(&sites->back());
+}
+
+
+void BreakableControlFlowBuilder::EmitJump(ZoneVector<BytecodeLabel>* sites,
+                                           int index) {
+  builder()->Jump(&sites->at(index));
+}
+
+
+void BreakableControlFlowBuilder::EmitJumpIfTrue(
+    ZoneVector<BytecodeLabel>* sites, int index) {
+  builder()->JumpIfTrue(&sites->at(index));
+}
+
+
+void BreakableControlFlowBuilder::EmitJumpIfFalse(
+    ZoneVector<BytecodeLabel>* sites, int index) {
+  builder()->JumpIfFalse(&sites->at(index));
+}
+
+
+void BreakableControlFlowBuilder::BindLabels(const BytecodeLabel& target,
+                                             ZoneVector<BytecodeLabel>* sites) {
+  for (size_t i = 0; i < sites->size(); i++) {
+    BytecodeLabel& site = sites->at(i);
+    builder()->Bind(target, &site);
+  }
+  sites->clear();
+}
+
+
+void BlockBuilder::EndBlock() {
+  builder()->Bind(&block_end_);
+  SetBreakTarget(block_end_);
+}
+
+
+LoopBuilder::~LoopBuilder() { DCHECK(continue_sites_.empty()); }
+
+
+void LoopBuilder::LoopHeader() {
+  // Jumps from before the loop header into the loop violate ordering
+  // requirements of bytecode basic blocks. The only entry into a loop
+  // must be the loop header. Surely breaks is okay? Not if nested
+  // and misplaced between the headers.
+  DCHECK(break_sites_.empty() && continue_sites_.empty());
+  builder()->Bind(&loop_header_);
+}
+
+
+void LoopBuilder::EndLoop() {
+  // Loop must have closed form, i.e. all loop elements are within the loop,
+  // the loop header precedes the body and next elements in the loop.
+  DCHECK(loop_header_.is_bound());
+  builder()->Bind(&loop_end_);
+  SetBreakTarget(loop_end_);
+  if (next_.is_bound()) {
+    DCHECK(!condition_.is_bound() || next_.offset() >= condition_.offset());
+    SetContinueTarget(next_);
+  } else {
+    DCHECK(condition_.is_bound());
+    DCHECK_GE(condition_.offset(), loop_header_.offset());
+    DCHECK_LE(condition_.offset(), loop_end_.offset());
+    SetContinueTarget(condition_);
+  }
+}
+
+
+void LoopBuilder::SetContinueTarget(const BytecodeLabel& target) {
+  BindLabels(target, &continue_sites_);
+}
+
+
+SwitchBuilder::~SwitchBuilder() {
+#ifdef DEBUG
+  for (auto site : case_sites_) {
+    DCHECK(site.is_bound());
+  }
+#endif
+}
+
+
+void SwitchBuilder::SetCaseTarget(int index) {
+  BytecodeLabel& site = case_sites_.at(index);
+  builder()->Bind(&site);
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/control-flow-builders.h b/src/interpreter/control-flow-builders.h
new file mode 100644
index 0000000..24a7dfe
--- /dev/null
+++ b/src/interpreter/control-flow-builders.h
@@ -0,0 +1,151 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_CONTROL_FLOW_BUILDERS_H_
+#define V8_INTERPRETER_CONTROL_FLOW_BUILDERS_H_
+
+#include "src/interpreter/bytecode-array-builder.h"
+
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class ControlFlowBuilder BASE_EMBEDDED {
+ public:
+  explicit ControlFlowBuilder(BytecodeArrayBuilder* builder)
+      : builder_(builder) {}
+  virtual ~ControlFlowBuilder() {}
+
+ protected:
+  BytecodeArrayBuilder* builder() const { return builder_; }
+
+ private:
+  BytecodeArrayBuilder* builder_;
+
+  DISALLOW_COPY_AND_ASSIGN(ControlFlowBuilder);
+};
+
+class BreakableControlFlowBuilder : public ControlFlowBuilder {
+ public:
+  explicit BreakableControlFlowBuilder(BytecodeArrayBuilder* builder)
+      : ControlFlowBuilder(builder),
+        break_sites_(builder->zone()) {}
+  virtual ~BreakableControlFlowBuilder();
+
+  // This method should be called by the control flow owner before
+  // destruction to update sites that emit jumps for break.
+  void SetBreakTarget(const BytecodeLabel& break_target);
+
+  // This method is called when visiting break statements in the AST.
+  // Inserts a jump to an unbound label that is patched when the corresponding
+  // SetBreakTarget is called.
+  void Break() { EmitJump(&break_sites_); }
+  void BreakIfTrue() { EmitJumpIfTrue(&break_sites_); }
+  void BreakIfFalse() { EmitJumpIfFalse(&break_sites_); }
+  void BreakIfUndefined() { EmitJumpIfUndefined(&break_sites_); }
+  void BreakIfNull() { EmitJumpIfNull(&break_sites_); }
+
+ protected:
+  void EmitJump(ZoneVector<BytecodeLabel>* labels);
+  void EmitJump(ZoneVector<BytecodeLabel>* labels, int index);
+  void EmitJumpIfTrue(ZoneVector<BytecodeLabel>* labels);
+  void EmitJumpIfTrue(ZoneVector<BytecodeLabel>* labels, int index);
+  void EmitJumpIfFalse(ZoneVector<BytecodeLabel>* labels);
+  void EmitJumpIfFalse(ZoneVector<BytecodeLabel>* labels, int index);
+  void EmitJumpIfUndefined(ZoneVector<BytecodeLabel>* labels);
+  void EmitJumpIfNull(ZoneVector<BytecodeLabel>* labels);
+
+  void BindLabels(const BytecodeLabel& target, ZoneVector<BytecodeLabel>* site);
+
+  // Unbound labels that identify jumps for break statements in the code.
+  ZoneVector<BytecodeLabel> break_sites_;
+};
+
+
+// Class to track control flow for block statements (which can break in JS).
+class BlockBuilder final : public BreakableControlFlowBuilder {
+ public:
+  explicit BlockBuilder(BytecodeArrayBuilder* builder)
+      : BreakableControlFlowBuilder(builder) {}
+
+  void EndBlock();
+
+ private:
+  BytecodeLabel block_end_;
+};
+
+
+// A class to help with co-ordinating break and continue statements with
+// their loop.
+class LoopBuilder final : public BreakableControlFlowBuilder {
+ public:
+  explicit LoopBuilder(BytecodeArrayBuilder* builder)
+      : BreakableControlFlowBuilder(builder),
+        continue_sites_(builder->zone()) {}
+  ~LoopBuilder();
+
+  void LoopHeader();
+  void Condition() { builder()->Bind(&condition_); }
+  void Next() { builder()->Bind(&next_); }
+  void JumpToHeader() { builder()->Jump(&loop_header_); }
+  void JumpToHeaderIfTrue() { builder()->JumpIfTrue(&loop_header_); }
+  void EndLoop();
+
+  // This method is called when visiting continue statements in the AST.
+  // Inserts a jump to an unbound label that is patched when the corresponding
+  // SetContinueTarget is called.
+  void Continue() { EmitJump(&continue_sites_); }
+  void ContinueIfTrue() { EmitJumpIfTrue(&continue_sites_); }
+  void ContinueIfUndefined() { EmitJumpIfUndefined(&continue_sites_); }
+  void ContinueIfNull() { EmitJumpIfNull(&continue_sites_); }
+
+ private:
+  void SetContinueTarget(const BytecodeLabel& continue_target);
+
+  BytecodeLabel loop_header_;
+  BytecodeLabel condition_;
+  BytecodeLabel next_;
+  BytecodeLabel loop_end_;
+
+  // Unbound labels that identify jumps for continue statements in the code.
+  ZoneVector<BytecodeLabel> continue_sites_;
+};
+
+
+// A class to help with co-ordinating break statements with their switch.
+class SwitchBuilder final : public BreakableControlFlowBuilder {
+ public:
+  explicit SwitchBuilder(BytecodeArrayBuilder* builder, int number_of_cases)
+      : BreakableControlFlowBuilder(builder),
+        case_sites_(builder->zone()) {
+    case_sites_.resize(number_of_cases);
+  }
+  ~SwitchBuilder();
+
+  // This method should be called by the SwitchBuilder owner when the case
+  // statement with |index| is emitted to update the case jump site.
+  void SetCaseTarget(int index);
+
+  // This method is called when visiting case comparison operation for |index|.
+  // Inserts a JumpIfTrue to an unbound label that is patched when the
+  // corresponding SetCaseTarget is called.
+  void Case(int index) { EmitJumpIfTrue(&case_sites_, index); }
+
+  // This method is called when all cases comparisons have been emitted if there
+  // is a default case statement. Inserts a Jump to an unbound label that is
+  // patched when the corresponding SetCaseTarget is called.
+  void DefaultAt(int index) { EmitJump(&case_sites_, index); }
+
+ private:
+  // Unbound labels that identify jumps for case statements in the code.
+  ZoneVector<BytecodeLabel> case_sites_;
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_CONTROL_FLOW_BUILDERS_H_
diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc
new file mode 100644
index 0000000..574602b
--- /dev/null
+++ b/src/interpreter/interpreter.cc
@@ -0,0 +1,1780 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/interpreter.h"
+
+#include "src/code-factory.h"
+#include "src/compiler.h"
+#include "src/compiler/interpreter-assembler.h"
+#include "src/factory.h"
+#include "src/interpreter/bytecode-generator.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+using compiler::Node;
+
+#define __ assembler->
+
+
+Interpreter::Interpreter(Isolate* isolate)
+    : isolate_(isolate) {}
+
+
+// static
+Handle<FixedArray> Interpreter::CreateUninitializedInterpreterTable(
+    Isolate* isolate) {
+  Handle<FixedArray> handler_table = isolate->factory()->NewFixedArray(
+      static_cast<int>(Bytecode::kLast) + 1, TENURED);
+  // We rely on the interpreter handler table being immovable, so check that
+  // it was allocated on the first page (which is always immovable).
+  DCHECK(isolate->heap()->old_space()->FirstPage()->Contains(
+      handler_table->address()));
+  return handler_table;
+}
+
+
+void Interpreter::Initialize() {
+  DCHECK(FLAG_ignition);
+  Handle<FixedArray> handler_table = isolate_->factory()->interpreter_table();
+  if (!IsInterpreterTableInitialized(handler_table)) {
+    Zone zone;
+    HandleScope scope(isolate_);
+
+#define GENERATE_CODE(Name, ...)                                      \
+    {                                                                 \
+      compiler::InterpreterAssembler assembler(isolate_, &zone,       \
+                                               Bytecode::k##Name);    \
+      Do##Name(&assembler);                                           \
+      Handle<Code> code = assembler.GenerateCode();                   \
+      handler_table->set(static_cast<int>(Bytecode::k##Name), *code); \
+    }
+    BYTECODE_LIST(GENERATE_CODE)
+#undef GENERATE_CODE
+  }
+}
+
+
+bool Interpreter::MakeBytecode(CompilationInfo* info) {
+  BytecodeGenerator generator(info->isolate(), info->zone());
+  info->EnsureFeedbackVector();
+  Handle<BytecodeArray> bytecodes = generator.MakeBytecode(info);
+  if (FLAG_print_bytecode) {
+    OFStream os(stdout);
+    os << "Function: " << info->GetDebugName().get() << std::endl;
+    bytecodes->Print(os);
+    os << std::flush;
+  }
+
+  info->SetBytecodeArray(bytecodes);
+  info->SetCode(info->isolate()->builtins()->InterpreterEntryTrampoline());
+  return true;
+}
+
+
+bool Interpreter::IsInterpreterTableInitialized(
+    Handle<FixedArray> handler_table) {
+  DCHECK(handler_table->length() == static_cast<int>(Bytecode::kLast) + 1);
+  return handler_table->get(0) != isolate_->heap()->undefined_value();
+}
+
+
+// LdaZero
+//
+// Load literal '0' into the accumulator.
+void Interpreter::DoLdaZero(compiler::InterpreterAssembler* assembler) {
+  Node* zero_value = __ NumberConstant(0.0);
+  __ SetAccumulator(zero_value);
+  __ Dispatch();
+}
+
+
+// LdaSmi8 <imm8>
+//
+// Load an 8-bit integer literal into the accumulator as a Smi.
+void Interpreter::DoLdaSmi8(compiler::InterpreterAssembler* assembler) {
+  Node* raw_int = __ BytecodeOperandImm(0);
+  Node* smi_int = __ SmiTag(raw_int);
+  __ SetAccumulator(smi_int);
+  __ Dispatch();
+}
+
+
+void Interpreter::DoLoadConstant(compiler::InterpreterAssembler* assembler) {
+  Node* index = __ BytecodeOperandIdx(0);
+  Node* constant = __ LoadConstantPoolEntry(index);
+  __ SetAccumulator(constant);
+  __ Dispatch();
+}
+
+
+// LdaConstant <idx>
+//
+// Load constant literal at |idx| in the constant pool into the accumulator.
+void Interpreter::DoLdaConstant(compiler::InterpreterAssembler* assembler) {
+  DoLoadConstant(assembler);
+}
+
+
+// LdaConstantWide <idx>
+//
+// Load constant literal at |idx| in the constant pool into the accumulator.
+void Interpreter::DoLdaConstantWide(compiler::InterpreterAssembler* assembler) {
+  DoLoadConstant(assembler);
+}
+
+
+// LdaUndefined
+//
+// Load Undefined into the accumulator.
+void Interpreter::DoLdaUndefined(compiler::InterpreterAssembler* assembler) {
+  Node* undefined_value =
+      __ HeapConstant(isolate_->factory()->undefined_value());
+  __ SetAccumulator(undefined_value);
+  __ Dispatch();
+}
+
+
+// LdaNull
+//
+// Load Null into the accumulator.
+void Interpreter::DoLdaNull(compiler::InterpreterAssembler* assembler) {
+  Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
+  __ SetAccumulator(null_value);
+  __ Dispatch();
+}
+
+
+// LdaTheHole
+//
+// Load TheHole into the accumulator.
+void Interpreter::DoLdaTheHole(compiler::InterpreterAssembler* assembler) {
+  Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
+  __ SetAccumulator(the_hole_value);
+  __ Dispatch();
+}
+
+
+// LdaTrue
+//
+// Load True into the accumulator.
+void Interpreter::DoLdaTrue(compiler::InterpreterAssembler* assembler) {
+  Node* true_value = __ HeapConstant(isolate_->factory()->true_value());
+  __ SetAccumulator(true_value);
+  __ Dispatch();
+}
+
+
+// LdaFalse
+//
+// Load False into the accumulator.
+void Interpreter::DoLdaFalse(compiler::InterpreterAssembler* assembler) {
+  Node* false_value = __ HeapConstant(isolate_->factory()->false_value());
+  __ SetAccumulator(false_value);
+  __ Dispatch();
+}
+
+
+// Ldar <src>
+//
+// Load accumulator with value from register <src>.
+void Interpreter::DoLdar(compiler::InterpreterAssembler* assembler) {
+  Node* reg_index = __ BytecodeOperandReg(0);
+  Node* value = __ LoadRegister(reg_index);
+  __ SetAccumulator(value);
+  __ Dispatch();
+}
+
+
+// Star <dst>
+//
+// Store accumulator to register <dst>.
+void Interpreter::DoStar(compiler::InterpreterAssembler* assembler) {
+  Node* reg_index = __ BytecodeOperandReg(0);
+  Node* accumulator = __ GetAccumulator();
+  __ StoreRegister(accumulator, reg_index);
+  __ Dispatch();
+}
+
+
+// Exchange <reg8> <reg16>
+//
+// Exchange two registers.
+void Interpreter::DoExchange(compiler::InterpreterAssembler* assembler) {
+  Node* reg0_index = __ BytecodeOperandReg(0);
+  Node* reg1_index = __ BytecodeOperandReg(1);
+  Node* reg0_value = __ LoadRegister(reg0_index);
+  Node* reg1_value = __ LoadRegister(reg1_index);
+  __ StoreRegister(reg1_value, reg0_index);
+  __ StoreRegister(reg0_value, reg1_index);
+  __ Dispatch();
+}
+
+
+// ExchangeWide <reg16> <reg16>
+//
+// Exchange two registers.
+void Interpreter::DoExchangeWide(compiler::InterpreterAssembler* assembler) {
+  return DoExchange(assembler);
+}
+
+
+// Mov <src> <dst>
+//
+// Stores the value of register <src> to register <dst>.
+void Interpreter::DoMov(compiler::InterpreterAssembler* assembler) {
+  Node* src_index = __ BytecodeOperandReg(0);
+  Node* src_value = __ LoadRegister(src_index);
+  Node* dst_index = __ BytecodeOperandReg(1);
+  __ StoreRegister(src_value, dst_index);
+  __ Dispatch();
+}
+
+
+void Interpreter::DoLoadGlobal(Callable ic,
+                               compiler::InterpreterAssembler* assembler) {
+  // Get the global object.
+  Node* context = __ GetContext();
+  Node* native_context =
+      __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
+  Node* global = __ LoadContextSlot(native_context, Context::EXTENSION_INDEX);
+
+  // Load the global via the LoadIC.
+  Node* code_target = __ HeapConstant(ic.code());
+  Node* constant_index = __ BytecodeOperandIdx(0);
+  Node* name = __ LoadConstantPoolEntry(constant_index);
+  Node* raw_slot = __ BytecodeOperandIdx(1);
+  Node* smi_slot = __ SmiTag(raw_slot);
+  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+  Node* result = __ CallIC(ic.descriptor(), code_target, global, name, smi_slot,
+                           type_feedback_vector);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// LdaGlobalSloppy <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in sloppy mode.
+void Interpreter::DoLdaGlobalSloppy(compiler::InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+                                                   SLOPPY, UNINITIALIZED);
+  DoLoadGlobal(ic, assembler);
+}
+
+
+// LdaGlobalStrict <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in strict mode.
+void Interpreter::DoLdaGlobalStrict(compiler::InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+                                                   STRICT, UNINITIALIZED);
+  DoLoadGlobal(ic, assembler);
+}
+
+
+// LdaGlobalInsideTypeofSloppy <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in sloppy mode.
+void Interpreter::DoLdaGlobalInsideTypeofSloppy(
+    compiler::InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
+                                                   SLOPPY, UNINITIALIZED);
+  DoLoadGlobal(ic, assembler);
+}
+
+
+// LdaGlobalInsideTypeofStrict <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in strict mode.
+void Interpreter::DoLdaGlobalInsideTypeofStrict(
+    compiler::InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
+                                                   STRICT, UNINITIALIZED);
+  DoLoadGlobal(ic, assembler);
+}
+
+
+// LdaGlobalSloppyWide <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in sloppy mode.
+void Interpreter::DoLdaGlobalSloppyWide(
+    compiler::InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+                                                   SLOPPY, UNINITIALIZED);
+  DoLoadGlobal(ic, assembler);
+}
+
+
+// LdaGlobalStrictWide <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in strict mode.
+void Interpreter::DoLdaGlobalStrictWide(
+    compiler::InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+                                                   STRICT, UNINITIALIZED);
+  DoLoadGlobal(ic, assembler);
+}
+
+
+// LdaGlobalInsideTypeofSloppyWide <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in sloppy mode.
+void Interpreter::DoLdaGlobalInsideTypeofSloppyWide(
+    compiler::InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
+                                                   SLOPPY, UNINITIALIZED);
+  DoLoadGlobal(ic, assembler);
+}
+
+
+// LdaGlobalInsideTypeofStrictWide <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in strict mode.
+void Interpreter::DoLdaGlobalInsideTypeofStrictWide(
+    compiler::InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
+                                                   STRICT, UNINITIALIZED);
+  DoLoadGlobal(ic, assembler);
+}
+
+
+void Interpreter::DoStoreGlobal(Callable ic,
+                                compiler::InterpreterAssembler* assembler) {
+  // Get the global object.
+  Node* context = __ GetContext();
+  Node* native_context =
+      __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
+  Node* global = __ LoadContextSlot(native_context, Context::EXTENSION_INDEX);
+
+  // Store the global via the StoreIC.
+  Node* code_target = __ HeapConstant(ic.code());
+  Node* constant_index = __ BytecodeOperandIdx(0);
+  Node* name = __ LoadConstantPoolEntry(constant_index);
+  Node* value = __ GetAccumulator();
+  Node* raw_slot = __ BytecodeOperandIdx(1);
+  Node* smi_slot = __ SmiTag(raw_slot);
+  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+  __ CallIC(ic.descriptor(), code_target, global, name, value, smi_slot,
+            type_feedback_vector);
+
+  __ Dispatch();
+}
+
+
+// StaGlobalSloppy <name_index> <slot>
+//
+// Store the value in the accumulator into the global with name in constant pool
+// entry <name_index> using FeedBackVector slot <slot> in sloppy mode.
+void Interpreter::DoStaGlobalSloppy(compiler::InterpreterAssembler* assembler) {
+  Callable ic =
+      CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+  DoStoreGlobal(ic, assembler);
+}
+
+
+// StaGlobalStrict <name_index> <slot>
+//
+// Store the value in the accumulator into the global with name in constant pool
+// entry <name_index> using FeedBackVector slot <slot> in strict mode.
+void Interpreter::DoStaGlobalStrict(compiler::InterpreterAssembler* assembler) {
+  Callable ic =
+      CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+  DoStoreGlobal(ic, assembler);
+}
+
+
+// StaGlobalSloppyWide <name_index> <slot>
+//
+// Store the value in the accumulator into the global with name in constant pool
+// entry <name_index> using FeedBackVector slot <slot> in sloppy mode.
+void Interpreter::DoStaGlobalSloppyWide(
+    compiler::InterpreterAssembler* assembler) {
+  Callable ic =
+      CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+  DoStoreGlobal(ic, assembler);
+}
+
+
+// StaGlobalStrictWide <name_index> <slot>
+//
+// Store the value in the accumulator into the global with name in constant pool
+// entry <name_index> using FeedBackVector slot <slot> in strict mode.
+void Interpreter::DoStaGlobalStrictWide(
+    compiler::InterpreterAssembler* assembler) {
+  Callable ic =
+      CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+  DoStoreGlobal(ic, assembler);
+}
+
+
+// LdaContextSlot <context> <slot_index>
+//
+// Load the object in |slot_index| of |context| into the accumulator.
+void Interpreter::DoLdaContextSlot(compiler::InterpreterAssembler* assembler) {
+  Node* reg_index = __ BytecodeOperandReg(0);
+  Node* context = __ LoadRegister(reg_index);
+  Node* slot_index = __ BytecodeOperandIdx(1);
+  Node* result = __ LoadContextSlot(context, slot_index);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// LdaContextSlotWide <context> <slot_index>
+//
+// Load the object in |slot_index| of |context| into the accumulator.
+void Interpreter::DoLdaContextSlotWide(
+    compiler::InterpreterAssembler* assembler) {
+  DoLdaContextSlot(assembler);
+}
+
+
+// StaContextSlot <context> <slot_index>
+//
+// Stores the object in the accumulator into |slot_index| of |context|.
+void Interpreter::DoStaContextSlot(compiler::InterpreterAssembler* assembler) {
+  Node* value = __ GetAccumulator();
+  Node* reg_index = __ BytecodeOperandReg(0);
+  Node* context = __ LoadRegister(reg_index);
+  Node* slot_index = __ BytecodeOperandIdx(1);
+  __ StoreContextSlot(context, slot_index, value);
+  __ Dispatch();
+}
+
+
+// StaContextSlotWide <context> <slot_index>
+//
+// Stores the object in the accumulator into |slot_index| of |context|.
+void Interpreter::DoStaContextSlotWide(
+    compiler::InterpreterAssembler* assembler) {
+  DoStaContextSlot(assembler);
+}
+
+
+void Interpreter::DoLoadLookupSlot(Runtime::FunctionId function_id,
+                                   compiler::InterpreterAssembler* assembler) {
+  Node* index = __ BytecodeOperandIdx(0);
+  Node* name = __ LoadConstantPoolEntry(index);
+  Node* context = __ GetContext();
+  Node* result_pair = __ CallRuntime(function_id, context, name);
+  Node* result = __ Projection(0, result_pair);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// LdaLookupSlot <name_index>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically.
+void Interpreter::DoLdaLookupSlot(compiler::InterpreterAssembler* assembler) {
+  DoLoadLookupSlot(Runtime::kLoadLookupSlot, assembler);
+}
+
+
+// LdaLookupSlotInsideTypeof <name_index>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically without causing a NoReferenceError.
+void Interpreter::DoLdaLookupSlotInsideTypeof(
+    compiler::InterpreterAssembler* assembler) {
+  DoLoadLookupSlot(Runtime::kLoadLookupSlotNoReferenceError, assembler);
+}
+
+
+// LdaLookupSlotWide <name_index>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically.
+void Interpreter::DoLdaLookupSlotWide(
+    compiler::InterpreterAssembler* assembler) {
+  DoLdaLookupSlot(assembler);
+}
+
+
+// LdaLookupSlotInsideTypeofWide <name_index>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically without causing a NoReferenceError.
+void Interpreter::DoLdaLookupSlotInsideTypeofWide(
+    compiler::InterpreterAssembler* assembler) {
+  DoLdaLookupSlotInsideTypeof(assembler);
+}
+
+
+void Interpreter::DoStoreLookupSlot(LanguageMode language_mode,
+                                    compiler::InterpreterAssembler* assembler) {
+  Node* value = __ GetAccumulator();
+  Node* index = __ BytecodeOperandIdx(0);
+  Node* name = __ LoadConstantPoolEntry(index);
+  Node* context = __ GetContext();
+  Node* language_mode_node = __ NumberConstant(language_mode);
+  Node* result = __ CallRuntime(Runtime::kStoreLookupSlot, value, context, name,
+                                language_mode_node);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// StaLookupSlotSloppy <name_index>
+//
+// Store the object in accumulator to the object with the name in constant
+// pool entry |name_index| in sloppy mode.
+void Interpreter::DoStaLookupSlotSloppy(
+    compiler::InterpreterAssembler* assembler) {
+  DoStoreLookupSlot(LanguageMode::SLOPPY, assembler);
+}
+
+
+// StaLookupSlotStrict <name_index>
+//
+// Store the object in accumulator to the object with the name in constant
+// pool entry |name_index| in strict mode.
+void Interpreter::DoStaLookupSlotStrict(
+    compiler::InterpreterAssembler* assembler) {
+  DoStoreLookupSlot(LanguageMode::STRICT, assembler);
+}
+
+
+// StaLookupSlotSloppyWide <name_index>
+//
+// Store the object in accumulator to the object with the name in constant
+// pool entry |name_index| in sloppy mode.
+void Interpreter::DoStaLookupSlotSloppyWide(
+    compiler::InterpreterAssembler* assembler) {
+  DoStaLookupSlotSloppy(assembler);
+}
+
+
+// StaLookupSlotStrictWide <name_index>
+//
+// Store the object in accumulator to the object with the name in constant
+// pool entry |name_index| in strict mode.
+void Interpreter::DoStaLookupSlotStrictWide(
+    compiler::InterpreterAssembler* assembler) {
+  DoStaLookupSlotStrict(assembler);
+}
+
+
+void Interpreter::DoLoadIC(Callable ic,
+                           compiler::InterpreterAssembler* assembler) {
+  Node* code_target = __ HeapConstant(ic.code());
+  Node* register_index = __ BytecodeOperandReg(0);
+  Node* object = __ LoadRegister(register_index);
+  Node* constant_index = __ BytecodeOperandIdx(1);
+  Node* name = __ LoadConstantPoolEntry(constant_index);
+  Node* raw_slot = __ BytecodeOperandIdx(2);
+  Node* smi_slot = __ SmiTag(raw_slot);
+  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+  Node* result = __ CallIC(ic.descriptor(), code_target, object, name, smi_slot,
+                           type_feedback_vector);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// LoadICSloppy <object> <name_index> <slot>
+//
+// Calls the sloppy mode LoadIC at FeedBackVector slot <slot> for <object> and
+// the name at constant pool entry <name_index>.
+void Interpreter::DoLoadICSloppy(compiler::InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+                                                   SLOPPY, UNINITIALIZED);
+  DoLoadIC(ic, assembler);
+}
+
+
+// LoadICStrict <object> <name_index> <slot>
+//
+// Calls the strict mode LoadIC at FeedBackVector slot <slot> for <object> and
+// the name at constant pool entry <name_index>.
+void Interpreter::DoLoadICStrict(compiler::InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+                                                   STRICT, UNINITIALIZED);
+  DoLoadIC(ic, assembler);
+}
+
+
+// LoadICSloppyWide <object> <name_index> <slot>
+//
+// Calls the sloppy mode LoadIC at FeedBackVector slot <slot> for <object> and
+// the name at constant pool entry <name_index>.
+void Interpreter::DoLoadICSloppyWide(
+    compiler::InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+                                                   SLOPPY, UNINITIALIZED);
+  DoLoadIC(ic, assembler);
+}
+
+
+// LoadICStrictWide <object> <name_index> <slot>
+//
+// Calls the strict mode LoadIC at FeedBackVector slot <slot> for <object> and
+// the name at constant pool entry <name_index>.
+void Interpreter::DoLoadICStrictWide(
+    compiler::InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+                                                   STRICT, UNINITIALIZED);
+  DoLoadIC(ic, assembler);
+}
+
+
+// Common dispatch helper for the KeyedLoadIC bytecodes: calls |ic| with the
+// object from register operand 0, the key from the accumulator and the
+// feedback slot from operand 1, then stores the result in the accumulator.
+void Interpreter::DoKeyedLoadIC(Callable ic,
+                                compiler::InterpreterAssembler* assembler) {
+  Node* code_target = __ HeapConstant(ic.code());
+  Node* reg_index = __ BytecodeOperandReg(0);
+  Node* object = __ LoadRegister(reg_index);
+  Node* name = __ GetAccumulator();
+  Node* raw_slot = __ BytecodeOperandIdx(1);
+  Node* smi_slot = __ SmiTag(raw_slot);
+  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+  Node* result = __ CallIC(ic.descriptor(), code_target, object, name, smi_slot,
+                           type_feedback_vector);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// KeyedLoadICSloppy <object> <slot>
+//
+// Calls the sloppy mode KeyedLoadIC at FeedbackVector slot <slot> for <object>
+// and the key in the accumulator.
+void Interpreter::DoKeyedLoadICSloppy(
+    compiler::InterpreterAssembler* assembler) {
+  Callable ic =
+      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+  DoKeyedLoadIC(ic, assembler);
+}
+
+
+// KeyedLoadICStrict <object> <slot>
+//
+// Calls the strict mode KeyedLoadIC at FeedbackVector slot <slot> for <object>
+// and the key in the accumulator.
+void Interpreter::DoKeyedLoadICStrict(
+    compiler::InterpreterAssembler* assembler) {
+  Callable ic =
+      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+  DoKeyedLoadIC(ic, assembler);
+}
+
+
+// KeyedLoadICSloppyWide <object> <slot>
+//
+// Calls the sloppy mode KeyedLoadIC at FeedbackVector slot <slot> for <object>
+// and the key in the accumulator.
+void Interpreter::DoKeyedLoadICSloppyWide(
+    compiler::InterpreterAssembler* assembler) {
+  Callable ic =
+      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+  DoKeyedLoadIC(ic, assembler);
+}
+
+
+// KeyedLoadICStrictWide <object> <slot>
+//
+// Calls the strict mode KeyedLoadIC at FeedbackVector slot <slot> for <object>
+// and the key in the accumulator.
+void Interpreter::DoKeyedLoadICStrictWide(
+    compiler::InterpreterAssembler* assembler) {
+  Callable ic =
+      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+  DoKeyedLoadIC(ic, assembler);
+}
+
+
+// Common dispatch helper for the StoreIC bytecodes: calls |ic| with the
+// object from register operand 0, the name from the constant pool entry at
+// operand 1, the value from the accumulator and the feedback slot from
+// operand 2.
+void Interpreter::DoStoreIC(Callable ic,
+                            compiler::InterpreterAssembler* assembler) {
+  Node* code_target = __ HeapConstant(ic.code());
+  Node* object_reg_index = __ BytecodeOperandReg(0);
+  Node* object = __ LoadRegister(object_reg_index);
+  Node* constant_index = __ BytecodeOperandIdx(1);
+  Node* name = __ LoadConstantPoolEntry(constant_index);
+  Node* value = __ GetAccumulator();
+  Node* raw_slot = __ BytecodeOperandIdx(2);
+  Node* smi_slot = __ SmiTag(raw_slot);
+  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+  __ CallIC(ic.descriptor(), code_target, object, name, value, smi_slot,
+            type_feedback_vector);
+  __ Dispatch();
+}
+
+
+// StoreICSloppy <object> <name_index> <slot>
+//
+// Calls the sloppy mode StoreIC at FeedbackVector slot <slot> for <object> and
+// the name in constant pool entry <name_index> with the value in the
+// accumulator.
+void Interpreter::DoStoreICSloppy(compiler::InterpreterAssembler* assembler) {
+  Callable ic =
+      CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+  DoStoreIC(ic, assembler);
+}
+
+
+// StoreICStrict <object> <name_index> <slot>
+//
+// Calls the strict mode StoreIC at FeedbackVector slot <slot> for <object> and
+// the name in constant pool entry <name_index> with the value in the
+// accumulator.
+void Interpreter::DoStoreICStrict(compiler::InterpreterAssembler* assembler) {
+  Callable ic =
+      CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+  DoStoreIC(ic, assembler);
+}
+
+
+// StoreICSloppyWide <object> <name_index> <slot>
+//
+// Calls the sloppy mode StoreIC at FeedbackVector slot <slot> for <object> and
+// the name in constant pool entry <name_index> with the value in the
+// accumulator.
+void Interpreter::DoStoreICSloppyWide(
+    compiler::InterpreterAssembler* assembler) {
+  Callable ic =
+      CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+  DoStoreIC(ic, assembler);
+}
+
+
+// StoreICStrictWide <object> <name_index> <slot>
+//
+// Calls the strict mode StoreIC at FeedbackVector slot <slot> for <object> and
+// the name in constant pool entry <name_index> with the value in the
+// accumulator.
+void Interpreter::DoStoreICStrictWide(
+    compiler::InterpreterAssembler* assembler) {
+  Callable ic =
+      CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+  DoStoreIC(ic, assembler);
+}
+
+
+// Common dispatch helper for the KeyedStoreIC bytecodes: calls |ic| with the
+// object from register operand 0, the key from register operand 1, the value
+// from the accumulator and the feedback slot from operand 2.
+void Interpreter::DoKeyedStoreIC(Callable ic,
+                                 compiler::InterpreterAssembler* assembler) {
+  Node* code_target = __ HeapConstant(ic.code());
+  Node* object_reg_index = __ BytecodeOperandReg(0);
+  Node* object = __ LoadRegister(object_reg_index);
+  Node* name_reg_index = __ BytecodeOperandReg(1);
+  Node* name = __ LoadRegister(name_reg_index);
+  Node* value = __ GetAccumulator();
+  Node* raw_slot = __ BytecodeOperandIdx(2);
+  Node* smi_slot = __ SmiTag(raw_slot);
+  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+  __ CallIC(ic.descriptor(), code_target, object, name, value, smi_slot,
+            type_feedback_vector);
+  __ Dispatch();
+}
+
+
+// KeyedStoreICSloppy <object> <key> <slot>
+//
+// Calls the sloppy mode KeyedStoreIC at FeedbackVector slot <slot> for
+// <object> and the key <key> with the value in the accumulator.
+void Interpreter::DoKeyedStoreICSloppy(
+    compiler::InterpreterAssembler* assembler) {
+  Callable ic =
+      CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+  DoKeyedStoreIC(ic, assembler);
+}
+
+
+// KeyedStoreICStrict <object> <key> <slot>
+//
+// Calls the strict mode KeyedStoreIC at FeedbackVector slot <slot> for
+// <object> and the key <key> with the value in the accumulator.
+void Interpreter::DoKeyedStoreICStrict(
+    compiler::InterpreterAssembler* assembler) {
+  Callable ic =
+      CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+  DoKeyedStoreIC(ic, assembler);
+}
+
+
+// KeyedStoreICSloppyWide <object> <key> <slot>
+//
+// Calls the sloppy mode KeyedStoreIC at FeedbackVector slot <slot> for
+// <object> and the key <key> with the value in the accumulator.
+void Interpreter::DoKeyedStoreICSloppyWide(
+    compiler::InterpreterAssembler* assembler) {
+  Callable ic =
+      CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+  DoKeyedStoreIC(ic, assembler);
+}
+
+
+// KeyedStoreICStrictWide <object> <key> <slot>
+//
+// Calls the strict mode KeyedStoreIC at FeedbackVector slot <slot> for
+// <object> and the key <key> with the value in the accumulator.
+void Interpreter::DoKeyedStoreICStrictWide(
+    compiler::InterpreterAssembler* assembler) {
+  Callable ic =
+      CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+  DoKeyedStoreIC(ic, assembler);
+}
+
+
+// PushContext <context>
+//
+// Sets the accumulator as the new current context and saves it in <context>.
+void Interpreter::DoPushContext(compiler::InterpreterAssembler* assembler) {
+  Node* reg_index = __ BytecodeOperandReg(0);
+  Node* context = __ GetAccumulator();
+  __ SetContext(context);
+  __ StoreRegister(context, reg_index);
+  __ Dispatch();
+}
+
+
+// PopContext <context>
+//
+// Pops the current context and sets <context> as the new context.
+void Interpreter::DoPopContext(compiler::InterpreterAssembler* assembler) {
+  Node* reg_index = __ BytecodeOperandReg(0);
+  Node* context = __ LoadRegister(reg_index);
+  __ SetContext(context);
+  __ Dispatch();
+}
+
+
+// Common helper for binary-op bytecodes: applies |function_id| to the lhs in
+// register operand 0 and the rhs in the accumulator, leaving the result in
+// the accumulator.
+void Interpreter::DoBinaryOp(Runtime::FunctionId function_id,
+                             compiler::InterpreterAssembler* assembler) {
+  // TODO(rmcilroy): Call ICs which back-patch bytecode with type specialized
+  // operations, instead of calling builtins directly.
+  Node* reg_index = __ BytecodeOperandReg(0);
+  Node* lhs = __ LoadRegister(reg_index);
+  Node* rhs = __ GetAccumulator();
+  Node* result = __ CallRuntime(function_id, lhs, rhs);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// Add <src>
+//
+// Add register <src> to accumulator.
+void Interpreter::DoAdd(compiler::InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kAdd, assembler);
+}
+
+
+// Sub <src>
+//
+// Subtract the accumulator from register <src> (i.e. <src> - accumulator).
+void Interpreter::DoSub(compiler::InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kSubtract, assembler);
+}
+
+
+// Mul <src>
+//
+// Multiply accumulator by register <src>.
+void Interpreter::DoMul(compiler::InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kMultiply, assembler);
+}
+
+
+// Div <src>
+//
+// Divide register <src> by accumulator.
+void Interpreter::DoDiv(compiler::InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kDivide, assembler);
+}
+
+
+// Mod <src>
+//
+// Modulo register <src> by accumulator.
+void Interpreter::DoMod(compiler::InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kModulus, assembler);
+}
+
+
+// BitwiseOr <src>
+//
+// BitwiseOr register <src> to accumulator.
+void Interpreter::DoBitwiseOr(compiler::InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kBitwiseOr, assembler);
+}
+
+
+// BitwiseXor <src>
+//
+// BitwiseXor register <src> to accumulator.
+void Interpreter::DoBitwiseXor(compiler::InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kBitwiseXor, assembler);
+}
+
+
+// BitwiseAnd <src>
+//
+// BitwiseAnd register <src> to accumulator.
+void Interpreter::DoBitwiseAnd(compiler::InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kBitwiseAnd, assembler);
+}
+
+
+// ShiftLeft <src>
+//
+// Left shifts register <src> by the count specified in the accumulator.
+// Register <src> is converted to an int32 and the accumulator to uint32
+// before the operation. 5 lsb bits from the accumulator are used as count
+// i.e. <src> << (accumulator & 0x1F).
+void Interpreter::DoShiftLeft(compiler::InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kShiftLeft, assembler);
+}
+
+
+// ShiftRight <src>
+//
+// Right shifts register <src> by the count specified in the accumulator.
+// Result is sign extended. Register <src> is converted to an int32 and the
+// accumulator to uint32 before the operation. 5 lsb bits from the accumulator
+// are used as count i.e. <src> >> (accumulator & 0x1F).
+void Interpreter::DoShiftRight(compiler::InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kShiftRight, assembler);
+}
+
+
+// ShiftRightLogical <src>
+//
+// Right Shifts register <src> by the count specified in the accumulator.
+// Result is zero-filled. The accumulator and register <src> are converted to
+// uint32 before the operation. 5 lsb bits from the accumulator are used as
+// count i.e. <src> >>> (accumulator & 0x1F).
+void Interpreter::DoShiftRightLogical(
+    compiler::InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kShiftRightLogical, assembler);
+}
+
+
+// Common helper for the Inc/Dec bytecodes: applies |function_id| to the
+// accumulator and the constant 1, leaving the result in the accumulator.
+void Interpreter::DoCountOp(Runtime::FunctionId function_id,
+                            compiler::InterpreterAssembler* assembler) {
+  Node* value = __ GetAccumulator();
+  Node* one = __ NumberConstant(1);
+  Node* result = __ CallRuntime(function_id, value, one);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// Inc
+//
+// Increments the value in the accumulator by one.
+void Interpreter::DoInc(compiler::InterpreterAssembler* assembler) {
+  DoCountOp(Runtime::kAdd, assembler);
+}
+
+
+// Dec
+//
+// Decrements the value in the accumulator by one.
+void Interpreter::DoDec(compiler::InterpreterAssembler* assembler) {
+  DoCountOp(Runtime::kSubtract, assembler);
+}
+
+
+// LogicalNot
+//
+// Perform logical-not on the accumulator, first casting the
+// accumulator to a boolean value if required.
+void Interpreter::DoLogicalNot(compiler::InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* result = __ CallRuntime(Runtime::kInterpreterLogicalNot, accumulator);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// TypeOf
+//
+// Load the accumulator with the string representing the type of the
+// object in the accumulator.
+void Interpreter::DoTypeOf(compiler::InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* result = __ CallRuntime(Runtime::kInterpreterTypeOf, accumulator);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// Common helper for the DeleteProperty bytecodes: calls |function_id| with
+// the object from register operand 0 and the key from the accumulator,
+// leaving the result in the accumulator.
+void Interpreter::DoDelete(Runtime::FunctionId function_id,
+                           compiler::InterpreterAssembler* assembler) {
+  Node* reg_index = __ BytecodeOperandReg(0);
+  Node* object = __ LoadRegister(reg_index);
+  Node* key = __ GetAccumulator();
+  Node* result = __ CallRuntime(function_id, object, key);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// DeletePropertyStrict
+//
+// Delete the property specified in the accumulator from the object
+// referenced by the register operand following strict mode semantics.
+void Interpreter::DoDeletePropertyStrict(
+    compiler::InterpreterAssembler* assembler) {
+  DoDelete(Runtime::kDeleteProperty_Strict, assembler);
+}
+
+
+// DeletePropertySloppy
+//
+// Delete the property specified in the accumulator from the object
+// referenced by the register operand following sloppy mode semantics.
+void Interpreter::DoDeletePropertySloppy(
+    compiler::InterpreterAssembler* assembler) {
+  DoDelete(Runtime::kDeleteProperty_Sloppy, assembler);
+}
+
+
+// DeleteLookupSlot
+//
+// Delete the variable with the name specified in the accumulator by dynamically
+// looking it up.
+void Interpreter::DoDeleteLookupSlot(
+    compiler::InterpreterAssembler* assembler) {
+  Node* name = __ GetAccumulator();
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(Runtime::kDeleteLookupSlot, context, name);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// Common helper for the Call bytecodes: calls the function in register
+// operand 0 with the receiver in register operand 1 and operand-2 arguments,
+// leaving the result in the accumulator.
+void Interpreter::DoJSCall(compiler::InterpreterAssembler* assembler) {
+  Node* function_reg = __ BytecodeOperandReg(0);
+  Node* function = __ LoadRegister(function_reg);
+  Node* receiver_reg = __ BytecodeOperandReg(1);
+  Node* first_arg = __ RegisterLocation(receiver_reg);
+  Node* args_count = __ BytecodeOperandCount(2);
+  // TODO(rmcilroy): Use the call type feedback slot to call via CallIC.
+  Node* result = __ CallJS(function, first_arg, args_count);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// Call <callable> <receiver> <arg_count>
+//
+// Call a JSFunction or Callable in |callable| with the |receiver| and
+// |arg_count| arguments in subsequent registers.
+void Interpreter::DoCall(compiler::InterpreterAssembler* assembler) {
+  DoJSCall(assembler);
+}
+
+
+// CallWide <callable> <receiver> <arg_count>
+//
+// Call a JSFunction or Callable in |callable| with the |receiver| and
+// |arg_count| arguments in subsequent registers.
+void Interpreter::DoCallWide(compiler::InterpreterAssembler* assembler) {
+  DoJSCall(assembler);
+}
+
+
+// CallRuntime <function_id> <first_arg> <arg_count>
+//
+// Call the runtime function |function_id| with the first argument in
+// register |first_arg| and |arg_count| arguments in subsequent
+// registers.
+void Interpreter::DoCallRuntime(compiler::InterpreterAssembler* assembler) {
+  Node* function_id = __ BytecodeOperandIdx(0);
+  Node* first_arg_reg = __ BytecodeOperandReg(1);
+  Node* first_arg = __ RegisterLocation(first_arg_reg);
+  Node* args_count = __ BytecodeOperandCount(2);
+  Node* result = __ CallRuntime(function_id, first_arg, args_count);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// CallRuntimeForPair <function_id> <first_arg> <arg_count> <first_return>
+//
+// Call the runtime function |function_id| which returns a pair, with the
+// first argument in register |first_arg| and |arg_count| arguments in
+// subsequent registers. Returns the result in <first_return> and
+// <first_return + 1>.
+void Interpreter::DoCallRuntimeForPair(
+    compiler::InterpreterAssembler* assembler) {
+  // Call the runtime function.
+  Node* function_id = __ BytecodeOperandIdx(0);
+  Node* first_arg_reg = __ BytecodeOperandReg(1);
+  Node* first_arg = __ RegisterLocation(first_arg_reg);
+  Node* args_count = __ BytecodeOperandCount(2);
+  Node* result_pair = __ CallRuntime(function_id, first_arg, args_count, 2);
+
+  // Store the results in <first_return> and <first_return + 1>
+  Node* first_return_reg = __ BytecodeOperandReg(3);
+  Node* second_return_reg = __ NextRegister(first_return_reg);
+  Node* result0 = __ Projection(0, result_pair);
+  Node* result1 = __ Projection(1, result_pair);
+  __ StoreRegister(result0, first_return_reg);
+  __ StoreRegister(result1, second_return_reg);
+
+  __ Dispatch();
+}
+
+
+// CallJSRuntime <context_index> <receiver> <arg_count>
+//
+// Call the JS runtime function at |context_index| of the native context, with
+// the receiver in |receiver| and |arg_count| arguments in subsequent registers.
+void Interpreter::DoCallJSRuntime(compiler::InterpreterAssembler* assembler) {
+  Node* context_index = __ BytecodeOperandIdx(0);
+  Node* receiver_reg = __ BytecodeOperandReg(1);
+  Node* first_arg = __ RegisterLocation(receiver_reg);
+  Node* args_count = __ BytecodeOperandCount(2);
+
+  // Get the function to call from the native context.
+  Node* context = __ GetContext();
+  Node* native_context =
+      __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
+  Node* function = __ LoadContextSlot(native_context, context_index);
+
+  // Call the function.
+  Node* result = __ CallJS(function, first_arg, args_count);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// New <constructor> <first_arg> <arg_count>
+//
+// Call operator new with |constructor| and the first argument in
+// register |first_arg| and |arg_count| arguments in subsequent
+// registers.
+void Interpreter::DoNew(compiler::InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
+  Node* constructor_reg = __ BytecodeOperandReg(0);
+  Node* constructor = __ LoadRegister(constructor_reg);
+  Node* first_arg_reg = __ BytecodeOperandReg(1);
+  Node* first_arg = __ RegisterLocation(first_arg_reg);
+  Node* args_count = __ BytecodeOperandCount(2);
+  Node* result =
+      __ CallConstruct(constructor, constructor, first_arg, args_count);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// TestEqual <src>
+//
+// Test if the value in the <src> register is equal to the accumulator.
+void Interpreter::DoTestEqual(compiler::InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kInterpreterEquals, assembler);
+}
+
+
+// TestNotEqual <src>
+//
+// Test if the value in the <src> register is not equal to the accumulator.
+void Interpreter::DoTestNotEqual(compiler::InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kInterpreterNotEquals, assembler);
+}
+
+
+// TestEqualStrict <src>
+//
+// Test if the value in the <src> register is strictly equal to the accumulator.
+void Interpreter::DoTestEqualStrict(compiler::InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kInterpreterStrictEquals, assembler);
+}
+
+
+// TestNotEqualStrict <src>
+//
+// Test if the value in the <src> register is not strictly equal to the
+// accumulator.
+void Interpreter::DoTestNotEqualStrict(
+    compiler::InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kInterpreterStrictNotEquals, assembler);
+}
+
+
+// TestLessThan <src>
+//
+// Test if the value in the <src> register is less than the accumulator.
+void Interpreter::DoTestLessThan(compiler::InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kInterpreterLessThan, assembler);
+}
+
+
+// TestGreaterThan <src>
+//
+// Test if the value in the <src> register is greater than the accumulator.
+void Interpreter::DoTestGreaterThan(compiler::InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kInterpreterGreaterThan, assembler);
+}
+
+
+// TestLessThanOrEqual <src>
+//
+// Test if the value in the <src> register is less than or equal to the
+// accumulator.
+void Interpreter::DoTestLessThanOrEqual(
+    compiler::InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kInterpreterLessThanOrEqual, assembler);
+}
+
+
+// TestGreaterThanOrEqual <src>
+//
+// Test if the value in the <src> register is greater than or equal to the
+// accumulator.
+void Interpreter::DoTestGreaterThanOrEqual(
+    compiler::InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kInterpreterGreaterThanOrEqual, assembler);
+}
+
+
+// TestIn <src>
+//
+// Test if the object referenced by the register operand is a property of the
+// object referenced by the accumulator.
+void Interpreter::DoTestIn(compiler::InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kHasProperty, assembler);
+}
+
+
+// TestInstanceOf <src>
+//
+// Test if the object referenced by the <src> register is an instance of the
+// type referenced by the accumulator.
+void Interpreter::DoTestInstanceOf(compiler::InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kInstanceOf, assembler);
+}
+
+
+// ToName
+//
+// Convert the object referenced by the accumulator to a name.
+void Interpreter::DoToName(compiler::InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* result = __ CallRuntime(Runtime::kToName, accumulator);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// ToNumber
+//
+// Convert the object referenced by the accumulator to a number.
+void Interpreter::DoToNumber(compiler::InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* result = __ CallRuntime(Runtime::kToNumber, accumulator);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// ToObject
+//
+// Convert the object referenced by the accumulator to a JSObject.
+void Interpreter::DoToObject(compiler::InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* result = __ CallRuntime(Runtime::kToObject, accumulator);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// Jump <imm8>
+//
+// Jump by the number of bytes represented by the immediate operand |imm8|.
+void Interpreter::DoJump(compiler::InterpreterAssembler* assembler) {
+  Node* relative_jump = __ BytecodeOperandImm(0);
+  __ Jump(relative_jump);
+}
+
+
+// JumpConstant <idx8>
+//
+// Jump by the number of bytes in the Smi in the |idx8| entry in the constant
+// pool.
+void Interpreter::DoJumpConstant(compiler::InterpreterAssembler* assembler) {
+  Node* index = __ BytecodeOperandIdx(0);
+  Node* constant = __ LoadConstantPoolEntry(index);
+  Node* relative_jump = __ SmiUntag(constant);
+  __ Jump(relative_jump);
+}
+
+
+// JumpConstantWide <idx16>
+//
+// Jump by the number of bytes in the Smi in the |idx16| entry in the
+// constant pool.
+void Interpreter::DoJumpConstantWide(
+    compiler::InterpreterAssembler* assembler) {
+  DoJumpConstant(assembler);
+}
+
+
+// JumpIfTrue <imm8>
+//
+// Jump by the number of bytes represented by an immediate operand if the
+// accumulator contains true.
+void Interpreter::DoJumpIfTrue(compiler::InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* true_value = __ BooleanConstant(true);
+  __ JumpIfWordEqual(accumulator, true_value, relative_jump);
+}
+
+
+// JumpIfTrueConstant <idx8>
+//
+// Jump by the number of bytes in the Smi in the |idx8| entry in the constant
+// pool if the accumulator contains true.
+void Interpreter::DoJumpIfTrueConstant(
+    compiler::InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* index = __ BytecodeOperandIdx(0);
+  Node* constant = __ LoadConstantPoolEntry(index);
+  Node* relative_jump = __ SmiUntag(constant);
+  Node* true_value = __ BooleanConstant(true);
+  __ JumpIfWordEqual(accumulator, true_value, relative_jump);
+}
+
+
+// JumpIfTrueConstantWide <idx16>
+//
+// Jump by the number of bytes in the Smi in the |idx16| entry in the constant
+// pool if the accumulator contains true.
+void Interpreter::DoJumpIfTrueConstantWide(
+    compiler::InterpreterAssembler* assembler) {
+  DoJumpIfTrueConstant(assembler);
+}
+
+
+// JumpIfFalse <imm8>
+//
+// Jump by the number of bytes represented by an immediate operand if the
+// accumulator contains false.
+void Interpreter::DoJumpIfFalse(compiler::InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* false_value = __ BooleanConstant(false);
+  __ JumpIfWordEqual(accumulator, false_value, relative_jump);
+}
+
+
+// JumpIfFalseConstant <idx8>
+//
+// Jump by the number of bytes in the Smi in the |idx8| entry in the constant
+// pool if the accumulator contains false.
+void Interpreter::DoJumpIfFalseConstant(
+    compiler::InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* index = __ BytecodeOperandIdx(0);
+  Node* constant = __ LoadConstantPoolEntry(index);
+  Node* relative_jump = __ SmiUntag(constant);
+  Node* false_value = __ BooleanConstant(false);
+  __ JumpIfWordEqual(accumulator, false_value, relative_jump);
+}
+
+
+// JumpIfFalseConstantWide <idx16>
+//
+// Jump by the number of bytes in the Smi in the |idx16| entry in the constant
+// pool if the accumulator contains false.
+void Interpreter::DoJumpIfFalseConstantWide(
+    compiler::InterpreterAssembler* assembler) {
+  DoJumpIfFalseConstant(assembler);
+}
+
+
+// JumpIfToBooleanTrue <imm8>
+//
+// Jump by the number of bytes represented by an immediate operand if the object
+// referenced by the accumulator is true when the object is cast to boolean.
+void Interpreter::DoJumpIfToBooleanTrue(
+    compiler::InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* to_boolean_value =
+      __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* true_value = __ BooleanConstant(true);
+  __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
+}
+
+
+// JumpIfToBooleanTrueConstant <idx8>
+//
+// Jump by the number of bytes in the Smi in the |idx8| entry in the constant
+// pool if the object referenced by the accumulator is true when the object is
+// cast to boolean.
+void Interpreter::DoJumpIfToBooleanTrueConstant(
+    compiler::InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* to_boolean_value =
+      __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+  Node* index = __ BytecodeOperandIdx(0);
+  Node* constant = __ LoadConstantPoolEntry(index);
+  Node* relative_jump = __ SmiUntag(constant);
+  Node* true_value = __ BooleanConstant(true);
+  __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
+}
+
+
+// JumpIfToBooleanTrueConstantWide <idx16>
+//
+// Jump by the number of bytes in the Smi in the |idx16| entry in the constant
+// pool if the object referenced by the accumulator is true when the object is
+// cast to boolean.
+void Interpreter::DoJumpIfToBooleanTrueConstantWide(
+    compiler::InterpreterAssembler* assembler) {
+  DoJumpIfToBooleanTrueConstant(assembler);
+}
+
+
+// JumpIfToBooleanFalse <imm8>
+//
+// Jump by the number of bytes represented by an immediate operand if the object
+// referenced by the accumulator is false when the object is cast to boolean.
+void Interpreter::DoJumpIfToBooleanFalse(
+    compiler::InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* to_boolean_value =
+      __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* false_value = __ BooleanConstant(false);
+  __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
+}
+
+
+// JumpIfToBooleanFalseConstant <idx8>
+//
+// Jump by the number of bytes in the Smi in the |idx8| entry in the constant
+// pool if the object referenced by the accumulator is false when the object is
+// cast to boolean.
+void Interpreter::DoJumpIfToBooleanFalseConstant(
+    compiler::InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* to_boolean_value =
+      __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+  Node* index = __ BytecodeOperandIdx(0);
+  Node* constant = __ LoadConstantPoolEntry(index);
+  Node* relative_jump = __ SmiUntag(constant);
+  Node* false_value = __ BooleanConstant(false);
+  __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
+}
+
+
+// JumpIfToBooleanFalseConstantWide <idx16>
+//
+// Jump by the number of bytes in the Smi in the |idx16| entry in the constant
+// pool if the object referenced by the accumulator is false when the object is
+// cast to boolean.
+void Interpreter::DoJumpIfToBooleanFalseConstantWide(
+    compiler::InterpreterAssembler* assembler) {
+  DoJumpIfToBooleanFalseConstant(assembler);
+}
+
+
+// JumpIfNull <imm8>
+//
+// Jump by the number of bytes represented by an immediate operand if the object
+// referenced by the accumulator is the null constant.
+void Interpreter::DoJumpIfNull(compiler::InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
+  Node* relative_jump = __ BytecodeOperandImm(0);
+  __ JumpIfWordEqual(accumulator, null_value, relative_jump);
+}
+
+
+// JumpIfNullConstant <idx8>
+//
+// Jump by the number of bytes in the Smi in the |idx8| entry in the constant
+// pool if the object referenced by the accumulator is the null constant.
+void Interpreter::DoJumpIfNullConstant(
+    compiler::InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
+  Node* index = __ BytecodeOperandIdx(0);
+  Node* constant = __ LoadConstantPoolEntry(index);
+  Node* relative_jump = __ SmiUntag(constant);
+  __ JumpIfWordEqual(accumulator, null_value, relative_jump);
+}
+
+
+// JumpIfNullConstantWide <idx16>
+//
+// Jump by the number of bytes in the Smi in the |idx16| entry in the constant
+// pool if the object referenced by the accumulator is the null constant.
+void Interpreter::DoJumpIfNullConstantWide(
+    compiler::InterpreterAssembler* assembler) {
+  DoJumpIfNullConstant(assembler);
+}
+
+
+// JumpIfUndefined <imm8>
+//
+// Jump by the number of bytes represented by an immediate operand if the object
+// referenced by the accumulator is the undefined constant.
+void Interpreter::DoJumpIfUndefined(compiler::InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* undefined_value =
+      __ HeapConstant(isolate_->factory()->undefined_value());
+  Node* relative_jump = __ BytecodeOperandImm(0);
+  __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
+}
+
+
+// JumpIfUndefinedConstant <idx8>
+//
+// Jump by the number of bytes in the Smi in the |idx8| entry in the constant
+// pool if the object referenced by the accumulator is the undefined constant.
+void Interpreter::DoJumpIfUndefinedConstant(
+    compiler::InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* undefined_value =
+      __ HeapConstant(isolate_->factory()->undefined_value());
+  Node* index = __ BytecodeOperandIdx(0);
+  Node* constant = __ LoadConstantPoolEntry(index);
+  Node* relative_jump = __ SmiUntag(constant);
+  __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
+}
+
+
+// JumpIfUndefinedConstantWide <idx16>
+//
+// Jump by the number of bytes in the Smi in the |idx16| entry in the constant
+// pool if the object referenced by the accumulator is the undefined constant.
+void Interpreter::DoJumpIfUndefinedConstantWide(
+    compiler::InterpreterAssembler* assembler) {
+  DoJumpIfUndefinedConstant(assembler);
+}
+
+
+// Common helper for the CreateXLiteral bytecodes: calls |function_id| with
+// the function closure, the literal index from operand 1, the constant
+// elements from the constant pool entry at operand 0 and the flags from
+// operand 2, leaving the result in the accumulator.
+void Interpreter::DoCreateLiteral(Runtime::FunctionId function_id,
+                                  compiler::InterpreterAssembler* assembler) {
+  Node* index = __ BytecodeOperandIdx(0);
+  Node* constant_elements = __ LoadConstantPoolEntry(index);
+  Node* literal_index_raw = __ BytecodeOperandIdx(1);
+  Node* literal_index = __ SmiTag(literal_index_raw);
+  Node* flags_raw = __ BytecodeOperandImm(2);
+  Node* flags = __ SmiTag(flags_raw);
+  Node* closure = __ LoadRegister(Register::function_closure());
+  Node* result = __ CallRuntime(function_id, closure, literal_index,
+                                constant_elements, flags);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
// CreateRegExpLiteral <pattern_idx> <literal_idx> <flags>
//
// Creates a regular expression literal for literal index <literal_idx> with
// <flags> and the pattern in <pattern_idx>.
void Interpreter::DoCreateRegExpLiteral(
    compiler::InterpreterAssembler* assembler) {
  // Delegates to the shared literal-creation generator with the regexp
  // runtime function.
  DoCreateLiteral(Runtime::kCreateRegExpLiteral, assembler);
}
+
+
// CreateRegExpLiteralWide <pattern_idx> <literal_idx> <flags>
//
// Creates a regular expression literal for literal index <literal_idx> with
// <flags> and the pattern in <pattern_idx>.
void Interpreter::DoCreateRegExpLiteralWide(
    compiler::InterpreterAssembler* assembler) {
  // Wide variant shares the handler body with the non-wide bytecode.
  DoCreateLiteral(Runtime::kCreateRegExpLiteral, assembler);
}
+
+
// CreateArrayLiteral <element_idx> <literal_idx> <flags>
//
// Creates an array literal for literal index <literal_idx> with flags <flags>
// and constant elements in <element_idx>.
void Interpreter::DoCreateArrayLiteral(
    compiler::InterpreterAssembler* assembler) {
  // Delegates to the shared literal-creation generator with the array
  // runtime function.
  DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
}
+
+
// CreateArrayLiteralWide <element_idx> <literal_idx> <flags>
//
// Creates an array literal for literal index <literal_idx> with flags <flags>
// and constant elements in <element_idx>.
void Interpreter::DoCreateArrayLiteralWide(
    compiler::InterpreterAssembler* assembler) {
  // Wide variant shares the handler body with the non-wide bytecode.
  DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
}
+
+
// CreateObjectLiteral <element_idx> <literal_idx> <flags>
//
// Creates an object literal for literal index <literal_idx> with flags <flags>
// and constant elements in <element_idx>.
void Interpreter::DoCreateObjectLiteral(
    compiler::InterpreterAssembler* assembler) {
  // Delegates to the shared literal-creation generator with the object
  // runtime function.
  DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
}
+
+
// CreateObjectLiteralWide <element_idx> <literal_idx> <flags>
//
// Creates an object literal for literal index <literal_idx> with flags <flags>
// and constant elements in <element_idx>.
void Interpreter::DoCreateObjectLiteralWide(
    compiler::InterpreterAssembler* assembler) {
  // Wide variant shares the handler body with the non-wide bytecode.
  DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
}
+
+
+// CreateClosure <index> <tenured>
+//
+// Creates a new closure for SharedFunctionInfo at position |index| in the
+// constant pool and with the PretenureFlag <tenured>.
+void Interpreter::DoCreateClosure(compiler::InterpreterAssembler* assembler) {
+  // TODO(rmcilroy): Possibly call FastNewClosureStub when possible instead of
+  // calling into the runtime.
+  Node* index = __ BytecodeOperandIdx(0);
+  Node* shared = __ LoadConstantPoolEntry(index);
+  Node* tenured_raw = __ BytecodeOperandImm(1);
+  Node* tenured = __ SmiTag(tenured_raw);
+  Node* result =
+      __ CallRuntime(Runtime::kInterpreterNewClosure, shared, tenured);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// CreateClosureWide <index> <tenured>
+//
+// Creates a new closure for SharedFunctionInfo at position |index| in the
+// constant pool and with the PretenureFlag <tenured>.
+void Interpreter::DoCreateClosureWide(
+    compiler::InterpreterAssembler* assembler) {
+  return DoCreateClosure(assembler);
+}
+
+
+// CreateMappedArguments
+//
+// Creates a new mapped arguments object.
+void Interpreter::DoCreateMappedArguments(
+    compiler::InterpreterAssembler* assembler) {
+  Node* closure = __ LoadRegister(Register::function_closure());
+  Node* result = __ CallRuntime(Runtime::kNewSloppyArguments_Generic, closure);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// CreateUnmappedArguments
+//
+// Creates a new unmapped arguments object.
+void Interpreter::DoCreateUnmappedArguments(
+    compiler::InterpreterAssembler* assembler) {
+  Node* closure = __ LoadRegister(Register::function_closure());
+  Node* result = __ CallRuntime(Runtime::kNewStrictArguments_Generic, closure);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
// Throw
//
// Throws the exception in the accumulator.
void Interpreter::DoThrow(compiler::InterpreterAssembler* assembler) {
  Node* exception = __ GetAccumulator();
  __ CallRuntime(Runtime::kThrow, exception);
  // We shouldn't ever return from a throw, so no Dispatch() is emitted;
  // abort to catch any runtime that does fall through.
  __ Abort(kUnexpectedReturnFromThrow);
}
+
+
// Return
//
// Return the value in the accumulator.
void Interpreter::DoReturn(compiler::Interpreter Assembler* assembler) {
  // Ends the handler's control flow; no Dispatch() follows a Return.
  __ Return();
}
+
+
+// ForInPrepare <cache_type> <cache_array> <cache_length>
+//
+// Returns state for for..in loop execution based on the object in the
+// accumulator. The registers |cache_type|, |cache_array|, and
+// |cache_length| represent output parameters.
+void Interpreter::DoForInPrepare(compiler::InterpreterAssembler* assembler) {
+  Node* object = __ GetAccumulator();
+  Node* result = __ CallRuntime(Runtime::kInterpreterForInPrepare, object);
+  for (int i = 0; i < 3; i++) {
+    // 0 == cache_type, 1 == cache_array, 2 == cache_length
+    Node* cache_info = __ LoadFixedArrayElement(result, i);
+    Node* cache_info_reg = __ BytecodeOperandReg(i);
+    __ StoreRegister(cache_info, cache_info_reg);
+  }
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// ForInNext <receiver> <cache_type> <cache_array> <index>
+//
+// Returns the next enumerable property in the the accumulator.
+void Interpreter::DoForInNext(compiler::InterpreterAssembler* assembler) {
+  Node* receiver_reg = __ BytecodeOperandReg(0);
+  Node* receiver = __ LoadRegister(receiver_reg);
+  Node* cache_type_reg = __ BytecodeOperandReg(1);
+  Node* cache_type = __ LoadRegister(cache_type_reg);
+  Node* cache_array_reg = __ BytecodeOperandReg(2);
+  Node* cache_array = __ LoadRegister(cache_array_reg);
+  Node* index_reg = __ BytecodeOperandReg(3);
+  Node* index = __ LoadRegister(index_reg);
+  Node* result = __ CallRuntime(Runtime::kForInNext, receiver, cache_array,
+                                cache_type, index);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// ForInDone <index> <cache_length>
+//
+// Returns true if the end of the enumerable properties has been reached.
+void Interpreter::DoForInDone(compiler::InterpreterAssembler* assembler) {
+  // TODO(oth): Implement directly rather than making a runtime call.
+  Node* index_reg = __ BytecodeOperandReg(0);
+  Node* index = __ LoadRegister(index_reg);
+  Node* cache_length_reg = __ BytecodeOperandReg(1);
+  Node* cache_length = __ LoadRegister(cache_length_reg);
+  Node* result = __ CallRuntime(Runtime::kForInDone, index, cache_length);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// ForInStep <index>
+//
+// Increments the loop counter in register |index| and stores the result
+// in the accumulator.
+void Interpreter::DoForInStep(compiler::InterpreterAssembler* assembler) {
+  // TODO(oth): Implement directly rather than making a runtime call.
+  Node* index_reg = __ BytecodeOperandReg(0);
+  Node* index = __ LoadRegister(index_reg);
+  Node* result = __ CallRuntime(Runtime::kForInStep, index);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/interpreter.h b/src/interpreter/interpreter.h
new file mode 100644
index 0000000..ef9b5d1
--- /dev/null
+++ b/src/interpreter/interpreter.h
@@ -0,0 +1,117 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_INTERPRETER_H_
+#define V8_INTERPRETER_INTERPRETER_H_
+
+// Clients of this interface shouldn't depend on lots of interpreter internals.
+// Do not include anything from src/interpreter other than
+// src/interpreter/bytecodes.h here!
+#include "src/base/macros.h"
+#include "src/builtins.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/parsing/token.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class Callable;
+class CompilationInfo;
+
+namespace compiler {
+class InterpreterAssembler;
+}
+
+namespace interpreter {
+
// The interpreter generates, installs and owns the dispatch table of
// bytecode handlers. Each Do<Bytecode> member emits the handler's code via
// an InterpreterAssembler.
class Interpreter {
 public:
  explicit Interpreter(Isolate* isolate);
  virtual ~Interpreter() {}

  // Creates an uninitialized interpreter handler table, where each handler
  // points to the Illegal builtin.
  static Handle<FixedArray> CreateUninitializedInterpreterTable(
      Isolate* isolate);

  // Initializes the interpreter.
  void Initialize();

  // Generate bytecode for |info|.
  static bool MakeBytecode(CompilationInfo* info);

 private:
// Bytecode handler generator functions.
// One Do<Name> generator is declared per entry in BYTECODE_LIST.
#define DECLARE_BYTECODE_HANDLER_GENERATOR(Name, ...) \
  void Do##Name(compiler::InterpreterAssembler* assembler);
  BYTECODE_LIST(DECLARE_BYTECODE_HANDLER_GENERATOR)
#undef DECLARE_BYTECODE_HANDLER_GENERATOR

  // Generates code to perform the binary operations via |function_id|.
  void DoBinaryOp(Runtime::FunctionId function_id,
                  compiler::InterpreterAssembler* assembler);

  // Generates code to perform the count operations via |function_id|.
  void DoCountOp(Runtime::FunctionId function_id,
                 compiler::InterpreterAssembler* assembler);

  // Generates code to perform the comparison operation associated with
  // |compare_op|.
  void DoCompareOp(Token::Value compare_op,
                   compiler::InterpreterAssembler* assembler);

  // Generates code to load a constant from the constant pool.
  void DoLoadConstant(compiler::InterpreterAssembler* assembler);

  // Generates code to perform a global load via |ic|.
  void DoLoadGlobal(Callable ic, compiler::InterpreterAssembler* assembler);

  // Generates code to perform a global store via |ic|.
  void DoStoreGlobal(Callable ic, compiler::InterpreterAssembler* assembler);

  // Generates code to perform a named property load via |ic|.
  void DoLoadIC(Callable ic, compiler::InterpreterAssembler* assembler);

  // Generates code to perform a keyed property load via |ic|.
  void DoKeyedLoadIC(Callable ic, compiler::InterpreterAssembler* assembler);

  // Generates code to perform a named property store via |ic|.
  void DoStoreIC(Callable ic, compiler::InterpreterAssembler* assembler);

  // Generates code to perform a keyed property store via |ic|.
  void DoKeyedStoreIC(Callable ic, compiler::InterpreterAssembler* assembler);

  // Generates code to perform a JS call.
  void DoJSCall(compiler::InterpreterAssembler* assembler);

  // Generates code to create a literal via |function_id|.
  void DoCreateLiteral(Runtime::FunctionId function_id,
                       compiler::InterpreterAssembler* assembler);

  // Generates code to perform delete via function_id.
  void DoDelete(Runtime::FunctionId function_id,
                compiler::InterpreterAssembler* assembler);

  // Generates code to perform a lookup slot load via |function_id|.
  void DoLoadLookupSlot(Runtime::FunctionId function_id,
                        compiler::InterpreterAssembler* assembler);

  // Generates code to perform a lookup slot store depending on |language_mode|.
  void DoStoreLookupSlot(LanguageMode language_mode,
                         compiler::InterpreterAssembler* assembler);

  // Returns whether |handler_table| has already been populated with
  // generated handlers.
  bool IsInterpreterTableInitialized(Handle<FixedArray> handler_table);

  // The isolate this interpreter belongs to; used when generating handlers.
  Isolate* isolate_;

  DISALLOW_COPY_AND_ASSIGN(Interpreter);
};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_INTERPRETER_H_