Merge V8 5.3.332.45.  DO NOT MERGE

Test: Manual

FPIIM-449

Change-Id: Id3254828b068abdea3cb10442e0172a8c9a98e03
(cherry picked from commit 13e2dadd00298019ed862f2b2fc5068bba730bcf)
diff --git a/src/interpreter/bytecode-array-builder.cc b/src/interpreter/bytecode-array-builder.cc
index 75bf631..c74fe7e 100644
--- a/src/interpreter/bytecode-array-builder.cc
+++ b/src/interpreter/bytecode-array-builder.cc
@@ -6,7 +6,10 @@
 
 #include "src/compiler.h"
 #include "src/interpreter/bytecode-array-writer.h"
+#include "src/interpreter/bytecode-dead-code-optimizer.h"
+#include "src/interpreter/bytecode-label.h"
 #include "src/interpreter/bytecode-peephole-optimizer.h"
+#include "src/interpreter/bytecode-register-optimizer.h"
 #include "src/interpreter/interpreter-intrinsics.h"
 
 namespace v8 {
@@ -22,29 +25,34 @@
       bytecode_generated_(false),
       constant_array_builder_(isolate, zone),
       handler_table_builder_(isolate, zone),
-      source_position_table_builder_(isolate, zone),
-      exit_seen_in_block_(false),
-      unbound_jumps_(0),
+      return_seen_in_block_(false),
       parameter_count_(parameter_count),
       local_register_count_(locals_count),
       context_register_count_(context_count),
       temporary_allocator_(zone, fixed_register_count()),
-      bytecode_array_writer_(zone, &source_position_table_builder_),
+      bytecode_array_writer_(isolate, zone, &constant_array_builder_),
       pipeline_(&bytecode_array_writer_) {
   DCHECK_GE(parameter_count_, 0);
   DCHECK_GE(context_register_count_, 0);
   DCHECK_GE(local_register_count_, 0);
 
+  if (FLAG_ignition_deadcode) {
+    pipeline_ = new (zone) BytecodeDeadCodeOptimizer(pipeline_);
+  }
+
   if (FLAG_ignition_peephole) {
     pipeline_ = new (zone)
         BytecodePeepholeOptimizer(&constant_array_builder_, pipeline_);
   }
 
+  if (FLAG_ignition_reo) {
+    pipeline_ = new (zone) BytecodeRegisterOptimizer(
+        zone, &temporary_allocator_, parameter_count, pipeline_);
+  }
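+
+  // Note: the stages above are chained back to front. Each optimizer wraps
+  // the previously constructed stage, so bytecodes written by the builder
+  // pass through the register optimizer, the peephole optimizer, and the
+  // dead-code optimizer before reaching the bytecode array writer.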
+
   return_position_ =
       literal ? std::max(literal->start_position(), literal->end_position() - 1)
               : RelocInfo::kNoPosition;
-  LOG_CODE_EVENT(isolate_, CodeStartLinePosInfoRecordEvent(
-                               source_position_table_builder()));
 }
 
 Register BytecodeArrayBuilder::first_context_register() const {
@@ -52,132 +60,98 @@
   return Register(local_register_count_);
 }
 
-
 Register BytecodeArrayBuilder::last_context_register() const {
   DCHECK_GT(context_register_count_, 0);
   return Register(local_register_count_ + context_register_count_ - 1);
 }
 
-
 Register BytecodeArrayBuilder::Parameter(int parameter_index) const {
   DCHECK_GE(parameter_index, 0);
   return Register::FromParameterIndex(parameter_index, parameter_count());
 }
 
-
 bool BytecodeArrayBuilder::RegisterIsParameterOrLocal(Register reg) const {
   return reg.is_parameter() || reg.index() < locals_count();
 }
 
-
 Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray() {
-  DCHECK_EQ(0, unbound_jumps_);
-  DCHECK_EQ(bytecode_generated_, false);
-  DCHECK(exit_seen_in_block_);
-
-  pipeline()->FlushBasicBlock();
-  const ZoneVector<uint8_t>* bytecodes = bytecode_array_writer()->bytecodes();
-
-  int bytecode_size = static_cast<int>(bytecodes->size());
-
-  // All locals need a frame slot for the debugger, but may not be
-  // present in generated code.
-  int frame_size_for_locals = fixed_register_count() * kPointerSize;
-  int frame_size_used = bytecode_array_writer()->GetMaximumFrameSizeUsed();
-  int frame_size = std::max(frame_size_for_locals, frame_size_used);
-  Handle<FixedArray> constant_pool = constant_array_builder()->ToFixedArray();
-  Handle<FixedArray> handler_table = handler_table_builder()->ToHandlerTable();
-  Handle<ByteArray> source_position_table =
-      source_position_table_builder()->ToSourcePositionTable();
-  Handle<BytecodeArray> bytecode_array = isolate_->factory()->NewBytecodeArray(
-      bytecode_size, &bytecodes->front(), frame_size, parameter_count(),
-      constant_pool);
-  bytecode_array->set_handler_table(*handler_table);
-  bytecode_array->set_source_position_table(*source_position_table);
-
-  void* line_info = source_position_table_builder()->DetachJITHandlerData();
-  LOG_CODE_EVENT(isolate_, CodeEndLinePosInfoRecordEvent(
-                               AbstractCode::cast(*bytecode_array), line_info));
-
+  DCHECK(return_seen_in_block_);
+  DCHECK(!bytecode_generated_);
   bytecode_generated_ = true;
-  return bytecode_array;
+
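+  // The pipeline now assembles the BytecodeArray itself (including the
+  // constant pool and the source position table) from the nodes written
+  // to it.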
+  Handle<FixedArray> handler_table = handler_table_builder()->ToHandlerTable();
+  return pipeline_->ToBytecodeArray(fixed_register_count(), parameter_count(),
+                                    handler_table);
 }
 
+namespace {
+
+static bool ExpressionPositionIsNeeded(Bytecode bytecode) {
+  // An expression position is always needed if filtering is turned
+  // off. Otherwise an expression position is only needed if the
+  // bytecode has external side effects.
+  return !FLAG_ignition_filter_expression_positions ||
+         !Bytecodes::IsWithoutExternalSideEffects(bytecode);
+}
+
+}  // namespace
+
 void BytecodeArrayBuilder::AttachSourceInfo(BytecodeNode* node) {
   if (latest_source_info_.is_valid()) {
-    node->source_info().Update(latest_source_info_);
-    latest_source_info_.set_invalid();
+    // Statement positions need to be emitted immediately.  Expression
+    // positions can be pushed back until a bytecode is found that can
+    // throw. Hence we only invalidate the existing source position
+    // information if it is used.
+    if (latest_source_info_.is_statement() ||
+        ExpressionPositionIsNeeded(node->bytecode())) {
+      node->source_info().Clone(latest_source_info_);
+      latest_source_info_.set_invalid();
+    }
   }
 }
 
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
+                                  uint32_t operand1, uint32_t operand2,
+                                  uint32_t operand3) {
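+  // Unlike the former OutputScaled() variants, no operand scale is chosen
+  // here; operand sizes are determined later in the pipeline when the node
+  // is written out.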
+  DCHECK(OperandsAreValid(bytecode, 4, operand0, operand1, operand2, operand3));
+  BytecodeNode node(bytecode, operand0, operand1, operand2, operand3);
+  AttachSourceInfo(&node);
+  pipeline()->Write(&node);
+}
+
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
+                                  uint32_t operand1, uint32_t operand2) {
+  DCHECK(OperandsAreValid(bytecode, 3, operand0, operand1, operand2));
+  BytecodeNode node(bytecode, operand0, operand1, operand2);
+  AttachSourceInfo(&node);
+  pipeline()->Write(&node);
+}
+
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
+                                  uint32_t operand1) {
+  DCHECK(OperandsAreValid(bytecode, 2, operand0, operand1));
+  BytecodeNode node(bytecode, operand0, operand1);
+  AttachSourceInfo(&node);
+  pipeline()->Write(&node);
+}
+
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0) {
+  DCHECK(OperandsAreValid(bytecode, 1, operand0));
+  BytecodeNode node(bytecode, operand0);
+  AttachSourceInfo(&node);
+  pipeline()->Write(&node);
+}
+
 void BytecodeArrayBuilder::Output(Bytecode bytecode) {
-  // Don't output dead code.
-  if (exit_seen_in_block_) return;
-
+  DCHECK(OperandsAreValid(bytecode, 0));
   BytecodeNode node(bytecode);
   AttachSourceInfo(&node);
   pipeline()->Write(&node);
 }
 
-void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
-                                        OperandScale operand_scale,
-                                        uint32_t operand0, uint32_t operand1,
-                                        uint32_t operand2, uint32_t operand3) {
-  // Don't output dead code.
-  if (exit_seen_in_block_) return;
-  DCHECK(OperandIsValid(bytecode, operand_scale, 0, operand0));
-  DCHECK(OperandIsValid(bytecode, operand_scale, 1, operand1));
-  DCHECK(OperandIsValid(bytecode, operand_scale, 2, operand2));
-  DCHECK(OperandIsValid(bytecode, operand_scale, 3, operand3));
-  BytecodeNode node(bytecode, operand0, operand1, operand2, operand3,
-                    operand_scale);
-  AttachSourceInfo(&node);
-  pipeline()->Write(&node);
-}
-
-void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
-                                        OperandScale operand_scale,
-                                        uint32_t operand0, uint32_t operand1,
-                                        uint32_t operand2) {
-  // Don't output dead code.
-  if (exit_seen_in_block_) return;
-  DCHECK(OperandIsValid(bytecode, operand_scale, 0, operand0));
-  DCHECK(OperandIsValid(bytecode, operand_scale, 1, operand1));
-  DCHECK(OperandIsValid(bytecode, operand_scale, 2, operand2));
-  BytecodeNode node(bytecode, operand0, operand1, operand2, operand_scale);
-  AttachSourceInfo(&node);
-  pipeline()->Write(&node);
-}
-
-void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
-                                        OperandScale operand_scale,
-                                        uint32_t operand0, uint32_t operand1) {
-  // Don't output dead code.
-  if (exit_seen_in_block_) return;
-  DCHECK(OperandIsValid(bytecode, operand_scale, 0, operand0));
-  DCHECK(OperandIsValid(bytecode, operand_scale, 1, operand1));
-  BytecodeNode node(bytecode, operand0, operand1, operand_scale);
-  AttachSourceInfo(&node);
-  pipeline()->Write(&node);
-}
-
-void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
-                                        OperandScale operand_scale,
-                                        uint32_t operand0) {
-  // Don't output dead code.
-  if (exit_seen_in_block_) return;
-  DCHECK(OperandIsValid(bytecode, operand_scale, 0, operand0));
-  BytecodeNode node(bytecode, operand0, operand_scale);
-  AttachSourceInfo(&node);
-  pipeline()->Write(&node);
-}
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
                                                             Register reg) {
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(reg.SizeOfOperand());
-  OutputScaled(BytecodeForBinaryOperation(op), operand_scale,
-               RegisterOperand(reg));
+  Output(BytecodeForBinaryOperation(op), RegisterOperand(reg));
   return *this;
 }
 
@@ -186,7 +160,6 @@
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LogicalNot() {
   Output(Bytecode::kToBooleanLogicalNot);
   return *this;
@@ -200,62 +173,47 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(Token::Value op,
                                                              Register reg) {
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(reg.SizeOfOperand());
-  OutputScaled(BytecodeForCompareOperation(op), operand_scale,
-               RegisterOperand(reg));
+  Output(BytecodeForCompareOperation(op), RegisterOperand(reg));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
     v8::internal::Smi* smi) {
   int32_t raw_smi = smi->value();
   if (raw_smi == 0) {
     Output(Bytecode::kLdaZero);
   } else {
-    OperandSize operand_size = Bytecodes::SizeForSignedOperand(raw_smi);
-    OperandScale operand_scale = Bytecodes::OperandSizesToScale(operand_size);
-    OutputScaled(Bytecode::kLdaSmi, operand_scale,
-                 SignedOperand(raw_smi, operand_size));
+    Output(Bytecode::kLdaSmi, SignedOperand(raw_smi));
   }
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(Handle<Object> object) {
   size_t entry = GetConstantPoolEntry(object);
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(Bytecodes::SizeForUnsignedOperand(entry));
-  OutputScaled(Bytecode::kLdaConstant, operand_scale, UnsignedOperand(entry));
+  Output(Bytecode::kLdaConstant, UnsignedOperand(entry));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadUndefined() {
   Output(Bytecode::kLdaUndefined);
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNull() {
   Output(Bytecode::kLdaNull);
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadTheHole() {
   Output(Bytecode::kLdaTheHole);
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadTrue() {
   Output(Bytecode::kLdaTrue);
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadFalse() {
   Output(Bytecode::kLdaFalse);
   return *this;
@@ -263,43 +221,29 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
     Register reg) {
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(reg.SizeOfOperand());
-  OutputScaled(Bytecode::kLdar, operand_scale, RegisterOperand(reg));
+  Output(Bytecode::kLdar, RegisterOperand(reg));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
     Register reg) {
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(reg.SizeOfOperand());
-  OutputScaled(Bytecode::kStar, operand_scale, RegisterOperand(reg));
+  Output(Bytecode::kStar, RegisterOperand(reg));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::MoveRegister(Register from,
                                                          Register to) {
   DCHECK(from != to);
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(from.SizeOfOperand(), to.SizeOfOperand());
-  OutputScaled(Bytecode::kMov, operand_scale, RegisterOperand(from),
-               RegisterOperand(to));
+  Output(Bytecode::kMov, RegisterOperand(from), RegisterOperand(to));
   return *this;
 }
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(
-    const Handle<String> name, int feedback_slot, TypeofMode typeof_mode) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(int feedback_slot,
+                                                       TypeofMode typeof_mode) {
   // TODO(rmcilroy): Potentially store typeof information in an
   // operand rather than having extra bytecodes.
   Bytecode bytecode = BytecodeForLoadGlobal(typeof_mode);
-  size_t name_index = GetConstantPoolEntry(name);
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      Bytecodes::SizeForUnsignedOperand(name_index),
-      Bytecodes::SizeForUnsignedOperand(feedback_slot));
-  OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index),
-               UnsignedOperand(feedback_slot));
+  Output(bytecode, UnsignedOperand(feedback_slot));
   return *this;
 }
 
@@ -307,31 +251,21 @@
     const Handle<String> name, int feedback_slot, LanguageMode language_mode) {
   Bytecode bytecode = BytecodeForStoreGlobal(language_mode);
   size_t name_index = GetConstantPoolEntry(name);
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      Bytecodes::SizeForUnsignedOperand(name_index),
-      Bytecodes::SizeForUnsignedOperand(feedback_slot));
-  OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index),
-               UnsignedOperand(feedback_slot));
+  Output(bytecode, UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadContextSlot(Register context,
                                                             int slot_index) {
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      context.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(slot_index));
-  OutputScaled(Bytecode::kLdaContextSlot, operand_scale,
-               RegisterOperand(context), UnsignedOperand(slot_index));
+  Output(Bytecode::kLdaContextSlot, RegisterOperand(context),
+         UnsignedOperand(slot_index));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreContextSlot(Register context,
                                                              int slot_index) {
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      context.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(slot_index));
-  OutputScaled(Bytecode::kStaContextSlot, operand_scale,
-               RegisterOperand(context), UnsignedOperand(slot_index));
+  Output(Bytecode::kStaContextSlot, RegisterOperand(context),
+         UnsignedOperand(slot_index));
   return *this;
 }
 
@@ -341,9 +275,7 @@
                           ? Bytecode::kLdaLookupSlotInsideTypeof
                           : Bytecode::kLdaLookupSlot;
   size_t name_index = GetConstantPoolEntry(name);
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      Bytecodes::SizeForUnsignedOperand(name_index));
-  OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index));
+  Output(bytecode, UnsignedOperand(name_index));
   return *this;
 }
 
@@ -351,70 +283,52 @@
     const Handle<String> name, LanguageMode language_mode) {
   Bytecode bytecode = BytecodeForStoreLookupSlot(language_mode);
   size_t name_index = GetConstantPoolEntry(name);
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      Bytecodes::SizeForUnsignedOperand(name_index));
-  OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index));
+  Output(bytecode, UnsignedOperand(name_index));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
     Register object, const Handle<Name> name, int feedback_slot) {
   size_t name_index = GetConstantPoolEntry(name);
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      object.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(name_index),
-      Bytecodes::SizeForUnsignedOperand(feedback_slot));
-  OutputScaled(Bytecode::kLoadIC, operand_scale, RegisterOperand(object),
-               UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
+  Output(Bytecode::kLdaNamedProperty, RegisterOperand(object),
+         UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
     Register object, int feedback_slot) {
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      object.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(feedback_slot));
-  OutputScaled(Bytecode::kKeyedLoadIC, operand_scale, RegisterOperand(object),
-               UnsignedOperand(feedback_slot));
+  Output(Bytecode::kLdaKeyedProperty, RegisterOperand(object),
+         UnsignedOperand(feedback_slot));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
     Register object, const Handle<Name> name, int feedback_slot,
     LanguageMode language_mode) {
-  Bytecode bytecode = BytecodeForStoreIC(language_mode);
+  Bytecode bytecode = BytecodeForStoreNamedProperty(language_mode);
   size_t name_index = GetConstantPoolEntry(name);
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      object.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(name_index),
-      Bytecodes::SizeForUnsignedOperand(feedback_slot));
-  OutputScaled(bytecode, operand_scale, RegisterOperand(object),
-               UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
+  Output(bytecode, RegisterOperand(object), UnsignedOperand(name_index),
+         UnsignedOperand(feedback_slot));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
     Register object, Register key, int feedback_slot,
     LanguageMode language_mode) {
-  Bytecode bytecode = BytecodeForKeyedStoreIC(language_mode);
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      object.SizeOfOperand(), key.SizeOfOperand(),
-      Bytecodes::SizeForUnsignedOperand(feedback_slot));
-  OutputScaled(bytecode, operand_scale, RegisterOperand(object),
-               RegisterOperand(key), UnsignedOperand(feedback_slot));
+  Bytecode bytecode = BytecodeForStoreKeyedProperty(language_mode);
+  Output(bytecode, RegisterOperand(object), RegisterOperand(key),
+         UnsignedOperand(feedback_slot));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(
     Handle<SharedFunctionInfo> shared_info, PretenureFlag tenured) {
   size_t entry = GetConstantPoolEntry(shared_info);
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(Bytecodes::SizeForUnsignedOperand(entry));
-  OutputScaled(Bytecode::kCreateClosure, operand_scale, UnsignedOperand(entry),
-               UnsignedOperand(static_cast<size_t>(tenured)));
+  Output(Bytecode::kCreateClosure, UnsignedOperand(entry),
+         UnsignedOperand(static_cast<size_t>(tenured)));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArguments(
     CreateArgumentsType type) {
   // TODO(rmcilroy): Consider passing the type as a bytecode operand rather
@@ -425,71 +339,47 @@
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateRegExpLiteral(
     Handle<String> pattern, int literal_index, int flags) {
   size_t pattern_entry = GetConstantPoolEntry(pattern);
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      Bytecodes::SizeForUnsignedOperand(pattern_entry),
-      Bytecodes::SizeForUnsignedOperand(literal_index),
-      Bytecodes::SizeForUnsignedOperand(flags));
-  OutputScaled(Bytecode::kCreateRegExpLiteral, operand_scale,
-               UnsignedOperand(pattern_entry), UnsignedOperand(literal_index),
-               UnsignedOperand(flags));
+  Output(Bytecode::kCreateRegExpLiteral, UnsignedOperand(pattern_entry),
+         UnsignedOperand(literal_index), UnsignedOperand(flags));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayLiteral(
     Handle<FixedArray> constant_elements, int literal_index, int flags) {
   size_t constant_elements_entry = GetConstantPoolEntry(constant_elements);
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      Bytecodes::SizeForUnsignedOperand(constant_elements_entry),
-      Bytecodes::SizeForUnsignedOperand(literal_index),
-      Bytecodes::SizeForUnsignedOperand(flags));
-  OutputScaled(Bytecode::kCreateArrayLiteral, operand_scale,
-               UnsignedOperand(constant_elements_entry),
-               UnsignedOperand(literal_index), UnsignedOperand(flags));
+  Output(Bytecode::kCreateArrayLiteral,
+         UnsignedOperand(constant_elements_entry),
+         UnsignedOperand(literal_index), UnsignedOperand(flags));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateObjectLiteral(
     Handle<FixedArray> constant_properties, int literal_index, int flags) {
   size_t constant_properties_entry = GetConstantPoolEntry(constant_properties);
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      Bytecodes::SizeForUnsignedOperand(constant_properties_entry),
-      Bytecodes::SizeForUnsignedOperand(literal_index),
-      Bytecodes::SizeForUnsignedOperand(flags));
-  OutputScaled(Bytecode::kCreateObjectLiteral, operand_scale,
-               UnsignedOperand(constant_properties_entry),
-               UnsignedOperand(literal_index), UnsignedOperand(flags));
+  Output(Bytecode::kCreateObjectLiteral,
+         UnsignedOperand(constant_properties_entry),
+         UnsignedOperand(literal_index), UnsignedOperand(flags));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::PushContext(Register context) {
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(context.SizeOfOperand());
-  OutputScaled(Bytecode::kPushContext, operand_scale, RegisterOperand(context));
+  Output(Bytecode::kPushContext, RegisterOperand(context));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::PopContext(Register context) {
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(context.SizeOfOperand());
-  OutputScaled(Bytecode::kPopContext, operand_scale, RegisterOperand(context));
+  Output(Bytecode::kPopContext, RegisterOperand(context));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToJSObject() {
   Output(Bytecode::kToObject);
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToName() {
   Output(Bytecode::kToName);
   return *this;
@@ -500,207 +390,24 @@
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeLabel* label) {
-  size_t current_offset = pipeline()->FlushForOffset();
-  if (label->is_forward_target()) {
-    // An earlier jump instruction refers to this label. Update it's location.
-    PatchJump(current_offset, label->offset());
-    // Now treat as if the label will only be back referred to.
-  }
-  label->bind_to(current_offset);
+  pipeline_->BindLabel(label);
   LeaveBasicBlock();
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(const BytecodeLabel& target,
                                                  BytecodeLabel* label) {
-  DCHECK(!label->is_bound());
-  DCHECK(target.is_bound());
-  // There is no need to flush the pipeline here, it will have been
-  // flushed when |target| was bound.
-  if (label->is_forward_target()) {
-    // An earlier jump instruction refers to this label. Update it's location.
-    PatchJump(target.offset(), label->offset());
-    // Now treat as if the label will only be back referred to.
-  }
-  label->bind_to(target.offset());
+  pipeline_->BindLabel(target, label);
   LeaveBasicBlock();
   return *this;
 }
 
-
-// static
-Bytecode BytecodeArrayBuilder::GetJumpWithConstantOperand(
-    Bytecode jump_bytecode) {
-  switch (jump_bytecode) {
-    case Bytecode::kJump:
-      return Bytecode::kJumpConstant;
-    case Bytecode::kJumpIfTrue:
-      return Bytecode::kJumpIfTrueConstant;
-    case Bytecode::kJumpIfFalse:
-      return Bytecode::kJumpIfFalseConstant;
-    case Bytecode::kJumpIfToBooleanTrue:
-      return Bytecode::kJumpIfToBooleanTrueConstant;
-    case Bytecode::kJumpIfToBooleanFalse:
-      return Bytecode::kJumpIfToBooleanFalseConstant;
-    case Bytecode::kJumpIfNotHole:
-      return Bytecode::kJumpIfNotHoleConstant;
-    case Bytecode::kJumpIfNull:
-      return Bytecode::kJumpIfNullConstant;
-    case Bytecode::kJumpIfUndefined:
-      return Bytecode::kJumpIfUndefinedConstant;
-    default:
-      UNREACHABLE();
-      return Bytecode::kIllegal;
-  }
-}
-
-void BytecodeArrayBuilder::PatchJumpWith8BitOperand(
-    ZoneVector<uint8_t>* bytecodes, size_t jump_location, int delta) {
-  Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes->at(jump_location));
-  DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
-  size_t operand_location = jump_location + 1;
-  DCHECK_EQ(bytecodes->at(operand_location), 0);
-  if (Bytecodes::SizeForSignedOperand(delta) == OperandSize::kByte) {
-    // The jump fits within the range of an Imm operand, so cancel
-    // the reservation and jump directly.
-    constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
-    bytecodes->at(operand_location) = static_cast<uint8_t>(delta);
-  } else {
-    // The jump does not fit within the range of an Imm operand, so
-    // commit reservation putting the offset into the constant pool,
-    // and update the jump instruction and operand.
-    size_t entry = constant_array_builder()->CommitReservedEntry(
-        OperandSize::kByte, handle(Smi::FromInt(delta), isolate()));
-    DCHECK(Bytecodes::SizeForUnsignedOperand(entry) == OperandSize::kByte);
-    jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
-    bytecodes->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
-    bytecodes->at(operand_location) = static_cast<uint8_t>(entry);
-  }
-}
-
-void BytecodeArrayBuilder::PatchJumpWith16BitOperand(
-    ZoneVector<uint8_t>* bytecodes, size_t jump_location, int delta) {
-  Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes->at(jump_location));
-  DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
-  size_t operand_location = jump_location + 1;
-  uint8_t operand_bytes[2];
-  if (Bytecodes::SizeForSignedOperand(delta) <= OperandSize::kShort) {
-    constant_array_builder()->DiscardReservedEntry(OperandSize::kShort);
-    WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(delta));
-  } else {
-    jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
-    bytecodes->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
-    size_t entry = constant_array_builder()->CommitReservedEntry(
-        OperandSize::kShort, handle(Smi::FromInt(delta), isolate()));
-    WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
-  }
-  DCHECK(bytecodes->at(operand_location) == 0 &&
-         bytecodes->at(operand_location + 1) == 0);
-  bytecodes->at(operand_location++) = operand_bytes[0];
-  bytecodes->at(operand_location) = operand_bytes[1];
-}
-
-void BytecodeArrayBuilder::PatchJumpWith32BitOperand(
-    ZoneVector<uint8_t>* bytecodes, size_t jump_location, int delta) {
-  DCHECK(Bytecodes::IsJumpImmediate(
-      Bytecodes::FromByte(bytecodes->at(jump_location))));
-  constant_array_builder()->DiscardReservedEntry(OperandSize::kQuad);
-  uint8_t operand_bytes[4];
-  WriteUnalignedUInt32(operand_bytes, static_cast<uint32_t>(delta));
-  size_t operand_location = jump_location + 1;
-  DCHECK(bytecodes->at(operand_location) == 0 &&
-         bytecodes->at(operand_location + 1) == 0 &&
-         bytecodes->at(operand_location + 2) == 0 &&
-         bytecodes->at(operand_location + 3) == 0);
-  bytecodes->at(operand_location++) = operand_bytes[0];
-  bytecodes->at(operand_location++) = operand_bytes[1];
-  bytecodes->at(operand_location++) = operand_bytes[2];
-  bytecodes->at(operand_location) = operand_bytes[3];
-}
-
-void BytecodeArrayBuilder::PatchJump(size_t jump_target, size_t jump_location) {
-  ZoneVector<uint8_t>* bytecodes = bytecode_array_writer()->bytecodes();
-  Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes->at(jump_location));
-  int delta = static_cast<int>(jump_target - jump_location);
-  int prefix_offset = 0;
-  OperandScale operand_scale = OperandScale::kSingle;
-  if (Bytecodes::IsPrefixScalingBytecode(jump_bytecode)) {
-    // If a prefix scaling bytecode is emitted the target offset is one
-    // less than the case of no prefix scaling bytecode.
-    delta -= 1;
-    prefix_offset = 1;
-    operand_scale = Bytecodes::PrefixBytecodeToOperandScale(jump_bytecode);
-    jump_bytecode =
-        Bytecodes::FromByte(bytecodes->at(jump_location + prefix_offset));
-  }
-
-  DCHECK(Bytecodes::IsJump(jump_bytecode));
-  switch (operand_scale) {
-    case OperandScale::kSingle:
-      PatchJumpWith8BitOperand(bytecodes, jump_location, delta);
-      break;
-    case OperandScale::kDouble:
-      PatchJumpWith16BitOperand(bytecodes, jump_location + prefix_offset,
-                                delta);
-      break;
-    case OperandScale::kQuadruple:
-      PatchJumpWith32BitOperand(bytecodes, jump_location + prefix_offset,
-                                delta);
-      break;
-    default:
-      UNREACHABLE();
-  }
-  unbound_jumps_--;
-}
-
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::OutputJump(Bytecode jump_bytecode,
                                                        BytecodeLabel* label) {
-  // Don't emit dead code.
-  if (exit_seen_in_block_) return *this;
-
-  if (label->is_bound()) {
-    // Label has been bound already so this is a backwards jump.
-    size_t current_offset = pipeline()->FlushForOffset();
-    CHECK_GE(current_offset, label->offset());
-    CHECK_LE(current_offset, static_cast<size_t>(kMaxInt));
-    size_t abs_delta = current_offset - label->offset();
-    int delta = -static_cast<int>(abs_delta);
-    OperandSize operand_size = Bytecodes::SizeForSignedOperand(delta);
-    if (operand_size > OperandSize::kByte) {
-      // Adjust for scaling byte prefix for wide jump offset.
-      DCHECK_LE(delta, 0);
-      delta -= 1;
-    }
-    OutputScaled(jump_bytecode, Bytecodes::OperandSizesToScale(operand_size),
-                 SignedOperand(delta, operand_size));
-  } else {
-    // The label has not yet been bound so this is a forward reference
-    // that will be patched when the label is bound. We create a
-    // reservation in the constant pool so the jump can be patched
-    // when the label is bound. The reservation means the maximum size
-    // of the operand for the constant is known and the jump can
-    // be emitted into the bytecode stream with space for the operand.
-    unbound_jumps_++;
-    OperandSize reserved_operand_size =
-        constant_array_builder()->CreateReservedEntry();
-    OutputScaled(jump_bytecode,
-                 Bytecodes::OperandSizesToScale(reserved_operand_size), 0);
-
-    // Calculate the label position by flushing for offset after emitting the
-    // jump bytecode.
-    size_t offset = pipeline()->FlushForOffset();
-    OperandScale operand_scale =
-        Bytecodes::OperandSizesToScale(reserved_operand_size);
-    offset -= Bytecodes::Size(jump_bytecode, operand_scale);
-    if (Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale)) {
-      offset -= 1;
-    }
-    label->set_referrer(offset);
-  }
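+  // The jump distance is not known yet; emit a placeholder operand of zero
+  // that the pipeline patches once |label| is bound.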
+  BytecodeNode node(jump_bytecode, 0);
+  AttachSourceInfo(&node);
+  pipeline_->WriteJump(&node, label);
   LeaveBasicBlock();
   return *this;
 }
@@ -730,39 +437,43 @@
   return OutputJump(Bytecode::kJumpIfUndefined, label);
 }
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck(int position) {
-  if (position != RelocInfo::kNoPosition) {
-    // We need to attach a non-breakable source position to a stack check,
-    // so we simply add it as expression position.
-    latest_source_info_.Update({position, false});
-  }
-  Output(Bytecode::kStackCheck);
-  return *this;
-}
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNotHole(
     BytecodeLabel* label) {
   return OutputJump(Bytecode::kJumpIfNotHole, label);
 }
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::Throw() {
-  Output(Bytecode::kThrow);
-  exit_seen_in_block_ = true;
+BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck(int position) {
+  if (position != RelocInfo::kNoPosition) {
+    // We need to attach a non-breakable source position to a stack
+    // check, so we simply add it as an expression position. There can
+    // be a prior statement position from constructs like:
+    //
+    //    do var x;  while (false);
+    //
+    // A Nop could be inserted for empty statements, but since no code
+    // is associated with these positions, we instead force the stack
+    // check's expression position, which eliminates the empty
+    // statement's position.
+    latest_source_info_.ForceExpressionPosition(position);
+  }
+  Output(Bytecode::kStackCheck);
   return *this;
 }
 
+BytecodeArrayBuilder& BytecodeArrayBuilder::Throw() {
+  Output(Bytecode::kThrow);
+  return *this;
+}
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ReThrow() {
   Output(Bytecode::kReThrow);
-  exit_seen_in_block_ = true;
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::Return() {
   SetReturnPosition();
   Output(Bytecode::kReturn);
-  exit_seen_in_block_ = true;
+  return_seen_in_block_ = true;
   return *this;
 }
 
@@ -773,100 +484,74 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(
     Register cache_info_triple) {
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(cache_info_triple.SizeOfOperand());
-  OutputScaled(Bytecode::kForInPrepare, operand_scale,
-               RegisterOperand(cache_info_triple));
+  Output(Bytecode::kForInPrepare, RegisterOperand(cache_info_triple));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInDone(Register index,
                                                       Register cache_length) {
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      index.SizeOfOperand(), cache_length.SizeOfOperand());
-  OutputScaled(Bytecode::kForInDone, operand_scale, RegisterOperand(index),
-               RegisterOperand(cache_length));
+  Output(Bytecode::kForInDone, RegisterOperand(index),
+         RegisterOperand(cache_length));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(
     Register receiver, Register index, Register cache_type_array_pair,
     int feedback_slot) {
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      receiver.SizeOfOperand(), index.SizeOfOperand(),
-      cache_type_array_pair.SizeOfOperand(),
-      Bytecodes::SizeForUnsignedOperand(feedback_slot));
-  OutputScaled(Bytecode::kForInNext, operand_scale, RegisterOperand(receiver),
-               RegisterOperand(index), RegisterOperand(cache_type_array_pair),
-               UnsignedOperand(feedback_slot));
+  Output(Bytecode::kForInNext, RegisterOperand(receiver),
+         RegisterOperand(index), RegisterOperand(cache_type_array_pair),
+         UnsignedOperand(feedback_slot));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInStep(Register index) {
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(index.SizeOfOperand());
-  OutputScaled(Bytecode::kForInStep, operand_scale, RegisterOperand(index));
+  Output(Bytecode::kForInStep, RegisterOperand(index));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::SuspendGenerator(
     Register generator) {
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(generator.SizeOfOperand());
-  OutputScaled(Bytecode::kSuspendGenerator, operand_scale,
-               RegisterOperand(generator));
+  Output(Bytecode::kSuspendGenerator, RegisterOperand(generator));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::ResumeGenerator(
     Register generator) {
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(generator.SizeOfOperand());
-  OutputScaled(Bytecode::kResumeGenerator, operand_scale,
-               RegisterOperand(generator));
+  Output(Bytecode::kResumeGenerator, RegisterOperand(generator));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::MarkHandler(int handler_id,
                                                         bool will_catch) {
-  size_t offset = pipeline()->FlushForOffset();
-  handler_table_builder()->SetHandlerTarget(handler_id, offset);
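+  // A label is bound at the current position because only the pipeline
+  // knows the final bytecode offset of the handler target.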
+  BytecodeLabel handler;
+  Bind(&handler);
+  handler_table_builder()->SetHandlerTarget(handler_id, handler.offset());
   handler_table_builder()->SetPrediction(handler_id, will_catch);
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryBegin(int handler_id,
                                                          Register context) {
-  size_t offset = pipeline()->FlushForOffset();
-  handler_table_builder()->SetTryRegionStart(handler_id, offset);
+  BytecodeLabel try_begin;
+  Bind(&try_begin);
+  handler_table_builder()->SetTryRegionStart(handler_id, try_begin.offset());
   handler_table_builder()->SetContextRegister(handler_id, context);
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryEnd(int handler_id) {
-  size_t offset = pipeline()->FlushForOffset();
-  handler_table_builder()->SetTryRegionEnd(handler_id, offset);
+  BytecodeLabel try_end;
+  Bind(&try_end);
+  handler_table_builder()->SetTryRegionEnd(handler_id, try_end.offset());
   return *this;
 }
 
-
-void BytecodeArrayBuilder::LeaveBasicBlock() {
-  exit_seen_in_block_ = false;
-  pipeline()->FlushBasicBlock();
-}
-
 void BytecodeArrayBuilder::EnsureReturn() {
-  if (!exit_seen_in_block_) {
+  if (!return_seen_in_block_) {
     LoadUndefined();
     Return();
   }
-  DCHECK(exit_seen_in_block_);
+  DCHECK(return_seen_in_block_);
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
@@ -875,14 +560,8 @@
                                                  int feedback_slot,
                                                  TailCallMode tail_call_mode) {
   Bytecode bytecode = BytecodeForCall(tail_call_mode);
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      callable.SizeOfOperand(), receiver_args.SizeOfOperand(),
-      Bytecodes::SizeForUnsignedOperand(receiver_args_count),
-      Bytecodes::SizeForUnsignedOperand(feedback_slot));
-  OutputScaled(bytecode, operand_scale, RegisterOperand(callable),
-               RegisterOperand(receiver_args),
-               UnsignedOperand(receiver_args_count),
-               UnsignedOperand(feedback_slot));
+  Output(bytecode, RegisterOperand(callable), RegisterOperand(receiver_args),
+         UnsignedOperand(receiver_args_count), UnsignedOperand(feedback_slot));
   return *this;
 }
 
@@ -893,15 +572,11 @@
     DCHECK_EQ(0u, arg_count);
     first_arg = Register(0);
   }
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      constructor.SizeOfOperand(), first_arg.SizeOfOperand(),
-      Bytecodes::SizeForUnsignedOperand(arg_count));
-  OutputScaled(Bytecode::kNew, operand_scale, RegisterOperand(constructor),
-               RegisterOperand(first_arg), UnsignedOperand(arg_count));
+  Output(Bytecode::kNew, RegisterOperand(constructor),
+         RegisterOperand(first_arg), UnsignedOperand(arg_count));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
     Runtime::FunctionId function_id, Register first_arg, size_t arg_count) {
   DCHECK_EQ(1, Runtime::FunctionForId(function_id)->result_size);
@@ -910,17 +585,19 @@
     DCHECK_EQ(0u, arg_count);
     first_arg = Register(0);
   }
-  Bytecode bytecode = IntrinsicsHelper::IsSupported(function_id)
-                          ? Bytecode::kInvokeIntrinsic
-                          : Bytecode::kCallRuntime;
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      first_arg.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(arg_count));
-  OutputScaled(bytecode, operand_scale, static_cast<uint16_t>(function_id),
-               RegisterOperand(first_arg), UnsignedOperand(arg_count));
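+  // Runtime functions with an intrinsic implementation dispatch through
+  // InvokeIntrinsic using the intrinsic id; all others go through
+  // CallRuntime with the runtime function id.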
+  Bytecode bytecode;
+  uint32_t id;
+  if (IntrinsicsHelper::IsSupported(function_id)) {
+    bytecode = Bytecode::kInvokeIntrinsic;
+    id = static_cast<uint32_t>(IntrinsicsHelper::FromRuntimeId(function_id));
+  } else {
+    bytecode = Bytecode::kCallRuntime;
+    id = static_cast<uint32_t>(function_id);
+  }
+  Output(bytecode, id, RegisterOperand(first_arg), UnsignedOperand(arg_count));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntimeForPair(
     Runtime::FunctionId function_id, Register first_arg, size_t arg_count,
     Register first_return) {
@@ -930,34 +607,22 @@
     DCHECK_EQ(0u, arg_count);
     first_arg = Register(0);
   }
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      first_arg.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(arg_count),
-      first_return.SizeOfOperand());
-  OutputScaled(Bytecode::kCallRuntimeForPair, operand_scale,
-               static_cast<uint16_t>(function_id), RegisterOperand(first_arg),
-               UnsignedOperand(arg_count), RegisterOperand(first_return));
+  Output(Bytecode::kCallRuntimeForPair, static_cast<uint16_t>(function_id),
+         RegisterOperand(first_arg), UnsignedOperand(arg_count),
+         RegisterOperand(first_return));
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(
     int context_index, Register receiver_args, size_t receiver_args_count) {
-  OperandScale operand_scale = Bytecodes::OperandSizesToScale(
-      Bytecodes::SizeForUnsignedOperand(context_index),
-      receiver_args.SizeOfOperand(),
-      Bytecodes::SizeForUnsignedOperand(receiver_args_count));
-  OutputScaled(Bytecode::kCallJSRuntime, operand_scale,
-               UnsignedOperand(context_index), RegisterOperand(receiver_args),
-               UnsignedOperand(receiver_args_count));
+  Output(Bytecode::kCallJSRuntime, UnsignedOperand(context_index),
+         RegisterOperand(receiver_args), UnsignedOperand(receiver_args_count));
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
                                                    LanguageMode language_mode) {
-  OperandScale operand_scale =
-      Bytecodes::OperandSizesToScale(object.SizeOfOperand());
-  OutputScaled(BytecodeForDelete(language_mode), operand_scale,
-               RegisterOperand(object));
+  Output(BytecodeForDelete(language_mode), RegisterOperand(object));
   return *this;
 }
 
@@ -967,101 +632,37 @@
 
 void BytecodeArrayBuilder::SetReturnPosition() {
   if (return_position_ == RelocInfo::kNoPosition) return;
-  if (exit_seen_in_block_) return;
-  latest_source_info_.Update({return_position_, true});
+  latest_source_info_.MakeStatementPosition(return_position_);
 }
 
 void BytecodeArrayBuilder::SetStatementPosition(Statement* stmt) {
   if (stmt->position() == RelocInfo::kNoPosition) return;
-  if (exit_seen_in_block_) return;
-  latest_source_info_.Update({stmt->position(), true});
+  latest_source_info_.MakeStatementPosition(stmt->position());
 }
 
 void BytecodeArrayBuilder::SetExpressionPosition(Expression* expr) {
   if (expr->position() == RelocInfo::kNoPosition) return;
-  if (exit_seen_in_block_) return;
-  latest_source_info_.Update({expr->position(), false});
+  if (!latest_source_info_.is_statement()) {
+    // Ensure the current expression position is overwritten with the
+    // latest value.
+    latest_source_info_.MakeExpressionPosition(expr->position());
+  }
 }
 
 void BytecodeArrayBuilder::SetExpressionAsStatementPosition(Expression* expr) {
   if (expr->position() == RelocInfo::kNoPosition) return;
-  if (exit_seen_in_block_) return;
-  latest_source_info_.Update({expr->position(), true});
+  latest_source_info_.MakeStatementPosition(expr->position());
 }
 
 bool BytecodeArrayBuilder::TemporaryRegisterIsLive(Register reg) const {
   return temporary_register_allocator()->RegisterIsLive(reg);
 }
 
-bool BytecodeArrayBuilder::OperandIsValid(Bytecode bytecode,
-                                          OperandScale operand_scale,
-                                          int operand_index,
-                                          uint32_t operand_value) const {
-  OperandSize operand_size =
-      Bytecodes::GetOperandSize(bytecode, operand_index, operand_scale);
-  OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
-  switch (operand_type) {
-    case OperandType::kNone:
-      return false;
-    case OperandType::kRegCount: {
-      if (operand_index > 0) {
-        OperandType previous_operand_type =
-            Bytecodes::GetOperandType(bytecode, operand_index - 1);
-        if (previous_operand_type != OperandType::kMaybeReg &&
-            previous_operand_type != OperandType::kReg) {
-          return false;
-        }
-      }
-    }  // Fall-through
-    case OperandType::kFlag8:
-    case OperandType::kIdx:
-    case OperandType::kRuntimeId:
-    case OperandType::kImm: {
-      size_t unsigned_value = static_cast<size_t>(operand_value);
-      return Bytecodes::SizeForUnsignedOperand(unsigned_value) <= operand_size;
-    }
-    case OperandType::kMaybeReg:
-      if (RegisterFromOperand(operand_value) == Register(0)) {
-        return true;
-      }
-    // Fall-through to kReg case.
-    case OperandType::kReg:
-    case OperandType::kRegOut: {
-      Register reg = RegisterFromOperand(operand_value);
-      return RegisterIsValid(reg, operand_size);
-    }
-    case OperandType::kRegOutPair:
-    case OperandType::kRegPair: {
-      Register reg0 = RegisterFromOperand(operand_value);
-      Register reg1 = Register(reg0.index() + 1);
-      // The size of reg1 is immaterial.
-      return RegisterIsValid(reg0, operand_size) &&
-             RegisterIsValid(reg1, OperandSize::kQuad);
-    }
-    case OperandType::kRegOutTriple: {
-      Register reg0 = RegisterFromOperand(operand_value);
-      Register reg1 = Register(reg0.index() + 1);
-      Register reg2 = Register(reg0.index() + 2);
-      // The size of reg1 and reg2 is immaterial.
-      return RegisterIsValid(reg0, operand_size) &&
-             RegisterIsValid(reg1, OperandSize::kQuad) &&
-             RegisterIsValid(reg2, OperandSize::kQuad);
-    }
-  }
-  UNREACHABLE();
-  return false;
-}
-
-bool BytecodeArrayBuilder::RegisterIsValid(Register reg,
-                                           OperandSize reg_size) const {
+bool BytecodeArrayBuilder::RegisterIsValid(Register reg) const {
   if (!reg.is_valid()) {
     return false;
   }
 
-  if (reg.SizeOfOperand() > reg_size) {
-    return false;
-  }
-
   if (reg.is_current_context() || reg.is_function_closure() ||
       reg.is_new_target()) {
     return true;
@@ -1075,6 +676,90 @@
   }
 }
 
+bool BytecodeArrayBuilder::OperandsAreValid(
+    Bytecode bytecode, int operand_count, uint32_t operand0, uint32_t operand1,
+    uint32_t operand2, uint32_t operand3) const {
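+  // Invoked from DCHECKs in the Output() methods; checks each operand
+  // against the bytecode's expected operand types.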
+  if (Bytecodes::NumberOfOperands(bytecode) != operand_count) {
+    return false;
+  }
+
+  uint32_t operands[] = {operand0, operand1, operand2, operand3};
+  const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
+  for (int i = 0; i < operand_count; ++i) {
+    switch (operand_types[i]) {
+      case OperandType::kNone:
+        return false;
+      case OperandType::kRegCount: {
+        CHECK_NE(i, 0);
+        CHECK(operand_types[i - 1] == OperandType::kMaybeReg ||
+              operand_types[i - 1] == OperandType::kReg);
+        if (i > 0 && operands[i] > 0) {
+          Register start = Register::FromOperand(operands[i - 1]);
+          Register end(start.index() + static_cast<int>(operands[i]) - 1);
+          if (!RegisterIsValid(start) || !RegisterIsValid(end) || start > end) {
+            return false;
+          }
+        }
+        break;
+      }
+      case OperandType::kFlag8:
+      case OperandType::kIntrinsicId:
+        if (Bytecodes::SizeForUnsignedOperand(operands[i]) >
+            OperandSize::kByte) {
+          return false;
+        }
+        break;
+      case OperandType::kRuntimeId:
+        if (Bytecodes::SizeForUnsignedOperand(operands[i]) >
+            OperandSize::kShort) {
+          return false;
+        }
+        break;
+      case OperandType::kIdx:
+        // TODO(oth): Consider splitting OperandType::kIdx into two
+        // operand types: one for constant pool indices that can be
+        // checked, and one for arbitrary unsigned values.
+        break;
+      case OperandType::kImm:
+        break;
+      case OperandType::kMaybeReg:
+        if (Register::FromOperand(operands[i]) == Register(0)) {
+          break;
+        }
+      // Fall-through to kReg case.
+      case OperandType::kReg:
+      case OperandType::kRegOut: {
+        Register reg = Register::FromOperand(operands[i]);
+        if (!RegisterIsValid(reg)) {
+          return false;
+        }
+        break;
+      }
+      case OperandType::kRegOutPair:
+      case OperandType::kRegPair: {
+        Register reg0 = Register::FromOperand(operands[i]);
+        Register reg1 = Register(reg0.index() + 1);
+        if (!RegisterIsValid(reg0) || !RegisterIsValid(reg1)) {
+          return false;
+        }
+        break;
+      }
+      case OperandType::kRegOutTriple: {
+        Register reg0 = Register::FromOperand(operands[i]);
+        Register reg1 = Register(reg0.index() + 1);
+        Register reg2 = Register(reg0.index() + 2);
+        if (!RegisterIsValid(reg0) || !RegisterIsValid(reg1) ||
+            !RegisterIsValid(reg2)) {
+          return false;
+        }
+        break;
+      }
+    }
+  }
+
+  return true;
+}
+
 // static
 Bytecode BytecodeArrayBuilder::BytecodeForBinaryOperation(Token::Value op) {
   switch (op) {
@@ -1106,7 +791,6 @@
   }
 }
 
-
 // static
 Bytecode BytecodeArrayBuilder::BytecodeForCountOperation(Token::Value op) {
   switch (op) {
@@ -1120,7 +804,6 @@
   }
 }
 
-
 // static
 Bytecode BytecodeArrayBuilder::BytecodeForCompareOperation(Token::Value op) {
   switch (op) {
@@ -1148,35 +831,33 @@
   }
 }
 
-
 // static
-Bytecode BytecodeArrayBuilder::BytecodeForStoreIC(LanguageMode language_mode) {
-  switch (language_mode) {
-    case SLOPPY:
-      return Bytecode::kStoreICSloppy;
-    case STRICT:
-      return Bytecode::kStoreICStrict;
-    default:
-      UNREACHABLE();
-  }
-  return Bytecode::kIllegal;
-}
-
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForKeyedStoreIC(
+Bytecode BytecodeArrayBuilder::BytecodeForStoreNamedProperty(
     LanguageMode language_mode) {
   switch (language_mode) {
     case SLOPPY:
-      return Bytecode::kKeyedStoreICSloppy;
+      return Bytecode::kStaNamedPropertySloppy;
     case STRICT:
-      return Bytecode::kKeyedStoreICStrict;
+      return Bytecode::kStaNamedPropertyStrict;
     default:
       UNREACHABLE();
   }
   return Bytecode::kIllegal;
 }
 
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForStoreKeyedProperty(
+    LanguageMode language_mode) {
+  switch (language_mode) {
+    case SLOPPY:
+      return Bytecode::kStaKeyedPropertySloppy;
+    case STRICT:
+      return Bytecode::kStaKeyedPropertyStrict;
+    default:
+      UNREACHABLE();
+  }
+  return Bytecode::kIllegal;
+}
 
 // static
 Bytecode BytecodeArrayBuilder::BytecodeForLoadGlobal(TypeofMode typeof_mode) {
@@ -1184,7 +865,6 @@
                                       : Bytecode::kLdaGlobal;
 }
 
-
 // static
 Bytecode BytecodeArrayBuilder::BytecodeForStoreGlobal(
     LanguageMode language_mode) {
@@ -1199,7 +879,6 @@
   return Bytecode::kIllegal;
 }
 
-
 // static
 Bytecode BytecodeArrayBuilder::BytecodeForStoreLookupSlot(
     LanguageMode language_mode) {
@@ -1229,7 +908,6 @@
   return Bytecode::kIllegal;
 }
 
-
 // static
 Bytecode BytecodeArrayBuilder::BytecodeForDelete(LanguageMode language_mode) {
   switch (language_mode) {
@@ -1256,38 +934,6 @@
   return Bytecode::kIllegal;
 }
 
-uint32_t BytecodeArrayBuilder::RegisterOperand(Register reg) {
-  return static_cast<uint32_t>(reg.ToOperand());
-}
-
-Register BytecodeArrayBuilder::RegisterFromOperand(uint32_t operand) {
-  return Register::FromOperand(static_cast<int32_t>(operand));
-}
-
-uint32_t BytecodeArrayBuilder::SignedOperand(int value, OperandSize size) {
-  switch (size) {
-    case OperandSize::kByte:
-      return static_cast<uint8_t>(value & 0xff);
-    case OperandSize::kShort:
-      return static_cast<uint16_t>(value & 0xffff);
-    case OperandSize::kQuad:
-      return static_cast<uint32_t>(value);
-    case OperandSize::kNone:
-      UNREACHABLE();
-  }
-  return 0;
-}
-
-uint32_t BytecodeArrayBuilder::UnsignedOperand(int value) {
-  DCHECK_GE(value, 0);
-  return static_cast<uint32_t>(value);
-}
-
-uint32_t BytecodeArrayBuilder::UnsignedOperand(size_t value) {
-  DCHECK_LE(value, kMaxUInt32);
-  return static_cast<uint32_t>(value);
-}
-
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
diff --git a/src/interpreter/bytecode-array-builder.h b/src/interpreter/bytecode-array-builder.h
index 3930a06..8a10973 100644
--- a/src/interpreter/bytecode-array-builder.h
+++ b/src/interpreter/bytecode-array-builder.h
@@ -11,7 +11,6 @@
 #include "src/interpreter/bytecodes.h"
 #include "src/interpreter/constant-array-builder.h"
 #include "src/interpreter/handler-table-builder.h"
-#include "src/interpreter/source-position-table.h"
 #include "src/zone-containers.h"
 
 namespace v8 {
@@ -86,8 +85,7 @@
   BytecodeArrayBuilder& LoadFalse();
 
   // Global loads to the accumulator and stores from the accumulator.
-  BytecodeArrayBuilder& LoadGlobal(const Handle<String> name, int feedback_slot,
-                                   TypeofMode typeof_mode);
+  BytecodeArrayBuilder& LoadGlobal(int feedback_slot, TypeofMode typeof_mode);
   BytecodeArrayBuilder& StoreGlobal(const Handle<String> name,
                                     int feedback_slot,
                                     LanguageMode language_mode);
@@ -273,11 +271,23 @@
 
   void EnsureReturn();
 
-  static uint32_t RegisterOperand(Register reg);
-  static Register RegisterFromOperand(uint32_t operand);
-  static uint32_t SignedOperand(int value, OperandSize size);
-  static uint32_t UnsignedOperand(int value);
-  static uint32_t UnsignedOperand(size_t value);
+  static uint32_t RegisterOperand(Register reg) {
+    return static_cast<uint32_t>(reg.ToOperand());
+  }
+
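+  // Signed operands are passed through at full width; any narrowing to
+  // the final operand size happens later when the bytecode is written.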
+  static uint32_t SignedOperand(int value) {
+    return static_cast<uint32_t>(value);
+  }
+
+  static uint32_t UnsignedOperand(int value) {
+    DCHECK_GE(value, 0);
+    return static_cast<uint32_t>(value);
+  }
+
+  static uint32_t UnsignedOperand(size_t value) {
+    DCHECK_LE(value, kMaxUInt32);
+    return static_cast<uint32_t>(value);
+  }
 
  private:
   friend class BytecodeRegisterAllocator;
@@ -285,8 +295,8 @@
   static Bytecode BytecodeForBinaryOperation(Token::Value op);
   static Bytecode BytecodeForCountOperation(Token::Value op);
   static Bytecode BytecodeForCompareOperation(Token::Value op);
-  static Bytecode BytecodeForStoreIC(LanguageMode language_mode);
-  static Bytecode BytecodeForKeyedStoreIC(LanguageMode language_mode);
+  static Bytecode BytecodeForStoreNamedProperty(LanguageMode language_mode);
+  static Bytecode BytecodeForStoreKeyedProperty(LanguageMode language_mode);
   static Bytecode BytecodeForLoadGlobal(TypeofMode typeof_mode);
   static Bytecode BytecodeForStoreGlobal(LanguageMode language_mode);
   static Bytecode BytecodeForStoreLookupSlot(LanguageMode language_mode);
@@ -294,34 +304,21 @@
   static Bytecode BytecodeForDelete(LanguageMode language_mode);
   static Bytecode BytecodeForCall(TailCallMode tail_call_mode);
 
-  static Bytecode GetJumpWithConstantOperand(Bytecode jump_smi8_operand);
-
+  void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+              uint32_t operand2, uint32_t operand3);
+  void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+              uint32_t operand2);
+  void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1);
+  void Output(Bytecode bytecode, uint32_t operand0);
   void Output(Bytecode bytecode);
-  void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
-                    uint32_t operand0, uint32_t operand1, uint32_t operand2,
-                    uint32_t operand3);
-  void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
-                    uint32_t operand0, uint32_t operand1, uint32_t operand2);
-  void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
-                    uint32_t operand0, uint32_t operand1);
-  void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
-                    uint32_t operand0);
 
   BytecodeArrayBuilder& OutputJump(Bytecode jump_bytecode,
                                    BytecodeLabel* label);
-  void PatchJump(size_t jump_target, size_t jump_location);
-  void PatchJumpWith8BitOperand(ZoneVector<uint8_t>* bytecodes,
-                                size_t jump_location, int delta);
-  void PatchJumpWith16BitOperand(ZoneVector<uint8_t>* bytecodes,
-                                 size_t jump_location, int delta);
-  void PatchJumpWith32BitOperand(ZoneVector<uint8_t>* bytecodes,
-                                 size_t jump_location, int delta);
 
-  void LeaveBasicBlock();
-
-  bool OperandIsValid(Bytecode bytecode, OperandScale operand_scale,
-                      int operand_index, uint32_t operand_value) const;
-  bool RegisterIsValid(Register reg, OperandSize reg_size) const;
+  bool RegisterIsValid(Register reg) const;
+  bool OperandsAreValid(Bytecode bytecode, int operand_count,
+                        uint32_t operand0 = 0, uint32_t operand1 = 0,
+                        uint32_t operand2 = 0, uint32_t operand3 = 0) const;
 
   // Attach latest source position to |node|.
   void AttachSourceInfo(BytecodeNode* node);
@@ -337,6 +334,8 @@
   // during bytecode generation.
   BytecodeArrayBuilder& Illegal();
 
+  void LeaveBasicBlock() { return_seen_in_block_ = false; }
+
   Isolate* isolate() const { return isolate_; }
   BytecodeArrayWriter* bytecode_array_writer() {
     return &bytecode_array_writer_;
@@ -351,18 +350,13 @@
   HandlerTableBuilder* handler_table_builder() {
     return &handler_table_builder_;
   }
-  SourcePositionTableBuilder* source_position_table_builder() {
-    return &source_position_table_builder_;
-  }
 
   Isolate* isolate_;
   Zone* zone_;
   bool bytecode_generated_;
   ConstantArrayBuilder constant_array_builder_;
   HandlerTableBuilder handler_table_builder_;
-  SourcePositionTableBuilder source_position_table_builder_;
-  bool exit_seen_in_block_;
-  int unbound_jumps_;
+  bool return_seen_in_block_;
   int parameter_count_;
   int local_register_count_;
   int context_register_count_;
@@ -375,47 +369,6 @@
   DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBuilder);
 };
 
-
-// A label representing a branch target in a bytecode array. When a
-// label is bound, it represents a known position in the bytecode
-// array. For labels that are forward references there can be at most
-// one reference whilst it is unbound.
-class BytecodeLabel final {
- public:
-  BytecodeLabel() : bound_(false), offset_(kInvalidOffset) {}
-
-  bool is_bound() const { return bound_; }
-  size_t offset() const { return offset_; }
-
- private:
-  static const size_t kInvalidOffset = static_cast<size_t>(-1);
-
-  void bind_to(size_t offset) {
-    DCHECK(!bound_ && offset != kInvalidOffset);
-    offset_ = offset;
-    bound_ = true;
-  }
-
-  void set_referrer(size_t offset) {
-    DCHECK(!bound_ && offset != kInvalidOffset && offset_ == kInvalidOffset);
-    offset_ = offset;
-  }
-
-  bool is_forward_target() const {
-    return offset() != kInvalidOffset && !is_bound();
-  }
-
-  // There are three states for a label:
-  //                    bound_   offset_
-  //  UNSET             false    kInvalidOffset
-  //  FORWARD_TARGET    false    Offset of referring jump
-  //  BACKWARD_TARGET    true    Offset of label in bytecode array when bound
-  bool bound_;
-  size_t offset_;
-
-  friend class BytecodeArrayBuilder;
-};
-
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
diff --git a/src/interpreter/bytecode-array-iterator.cc b/src/interpreter/bytecode-array-iterator.cc
index 319d2a0..a4a8f79 100644
--- a/src/interpreter/bytecode-array-iterator.cc
+++ b/src/interpreter/bytecode-array-iterator.cc
@@ -4,6 +4,7 @@
 
 #include "src/interpreter/bytecode-array-iterator.h"
 
+#include "src/interpreter/interpreter-intrinsics.h"
 #include "src/objects-inl.h"
 
 namespace v8 {
@@ -140,11 +141,23 @@
   }
 }
 
-uint32_t BytecodeArrayIterator::GetRuntimeIdOperand(int operand_index) const {
+Runtime::FunctionId BytecodeArrayIterator::GetRuntimeIdOperand(
+    int operand_index) const {
   OperandType operand_type =
       Bytecodes::GetOperandType(current_bytecode(), operand_index);
   DCHECK(operand_type == OperandType::kRuntimeId);
-  return GetUnsignedOperand(operand_index, operand_type);
+  uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
+  return static_cast<Runtime::FunctionId>(raw_id);
+}
+
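+// The raw operand is an IntrinsicsHelper::IntrinsicId, a compact encoding of
+// the runtime functions the interpreter supports as intrinsics; map it back
+// to the canonical Runtime::FunctionId.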
+Runtime::FunctionId BytecodeArrayIterator::GetIntrinsicIdOperand(
+    int operand_index) const {
+  OperandType operand_type =
+      Bytecodes::GetOperandType(current_bytecode(), operand_index);
+  DCHECK(operand_type == OperandType::kIntrinsicId);
+  uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
+  return IntrinsicsHelper::ToRuntimeId(
+      static_cast<IntrinsicsHelper::IntrinsicId>(raw_id));
 }
 
 Handle<Object> BytecodeArrayIterator::GetConstantForIndexOperand(
diff --git a/src/interpreter/bytecode-array-iterator.h b/src/interpreter/bytecode-array-iterator.h
index b372894..90001ef 100644
--- a/src/interpreter/bytecode-array-iterator.h
+++ b/src/interpreter/bytecode-array-iterator.h
@@ -8,6 +8,7 @@
 #include "src/handles.h"
 #include "src/interpreter/bytecodes.h"
 #include "src/objects.h"
+#include "src/runtime/runtime.h"
 
 namespace v8 {
 namespace internal {
@@ -34,7 +35,8 @@
   uint32_t GetRegisterCountOperand(int operand_index) const;
   Register GetRegisterOperand(int operand_index) const;
   int GetRegisterOperandRange(int operand_index) const;
-  uint32_t GetRuntimeIdOperand(int operand_index) const;
+  Runtime::FunctionId GetRuntimeIdOperand(int operand_index) const;
+  Runtime::FunctionId GetIntrinsicIdOperand(int operand_index) const;
   Handle<Object> GetConstantForIndexOperand(int operand_index) const;
 
   // Returns the absolute offset of the branch target at the current
diff --git a/src/interpreter/bytecode-array-writer.cc b/src/interpreter/bytecode-array-writer.cc
index 029688e..c476042 100644
--- a/src/interpreter/bytecode-array-writer.cc
+++ b/src/interpreter/bytecode-array-writer.cc
@@ -4,46 +4,165 @@
 
 #include "src/interpreter/bytecode-array-writer.h"
 
-#include <iomanip>
-#include "src/interpreter/source-position-table.h"
+#include "src/api.h"
+#include "src/interpreter/bytecode-label.h"
+#include "src/interpreter/constant-array-builder.h"
+#include "src/log.h"
 
 namespace v8 {
 namespace internal {
 namespace interpreter {
 
 BytecodeArrayWriter::BytecodeArrayWriter(
-    Zone* zone, SourcePositionTableBuilder* source_position_table_builder)
-    : bytecodes_(zone),
+    Isolate* isolate, Zone* zone, ConstantArrayBuilder* constant_array_builder)
+    : isolate_(isolate),
+      bytecodes_(zone),
       max_register_count_(0),
-      source_position_table_builder_(source_position_table_builder) {}
+      unbound_jumps_(0),
+      source_position_table_builder_(isolate, zone),
+      constant_array_builder_(constant_array_builder) {
+  LOG_CODE_EVENT(isolate_, CodeStartLinePosInfoRecordEvent(
+                               source_position_table_builder()));
+}
 
 // override
 BytecodeArrayWriter::~BytecodeArrayWriter() {}
 
 // override
-size_t BytecodeArrayWriter::FlushForOffset() { return bytecodes()->size(); }
+Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
+    int fixed_register_count, int parameter_count,
+    Handle<FixedArray> handler_table) {
+  DCHECK_EQ(0, unbound_jumps_);
+
+  int bytecode_size = static_cast<int>(bytecodes()->size());
+
+  // All locals need a frame slot for the debugger, even if they are never
+  // used in the generated code.
+  int frame_size_for_locals = fixed_register_count * kPointerSize;
+  int frame_size_used = max_register_count() * kPointerSize;
+  int frame_size = std::max(frame_size_for_locals, frame_size_used);
+  Handle<FixedArray> constant_pool = constant_array_builder()->ToFixedArray();
+  Handle<ByteArray> source_position_table =
+      source_position_table_builder()->ToSourcePositionTable();
+  Handle<BytecodeArray> bytecode_array = isolate_->factory()->NewBytecodeArray(
+      bytecode_size, &bytecodes()->front(), frame_size, parameter_count,
+      constant_pool);
+  bytecode_array->set_handler_table(*handler_table);
+  bytecode_array->set_source_position_table(*source_position_table);
+
+  void* line_info = source_position_table_builder()->DetachJITHandlerData();
+  LOG_CODE_EVENT(isolate_, CodeEndLinePosInfoRecordEvent(
+                               AbstractCode::cast(*bytecode_array), line_info));
+  return bytecode_array;
+}
 
 // override
 void BytecodeArrayWriter::Write(BytecodeNode* node) {
+  DCHECK(!Bytecodes::IsJump(node->bytecode()));
   UpdateSourcePositionTable(node);
   EmitBytecode(node);
 }
 
+// override
+void BytecodeArrayWriter::WriteJump(BytecodeNode* node, BytecodeLabel* label) {
+  DCHECK(Bytecodes::IsJump(node->bytecode()));
+  UpdateSourcePositionTable(node);
+  EmitJump(node, label);
+}
+
+// override
+void BytecodeArrayWriter::BindLabel(BytecodeLabel* label) {
+  size_t current_offset = bytecodes()->size();
+  if (label->is_forward_target()) {
+    // An earlier jump instruction refers to this label. Update its location.
+    PatchJump(current_offset, label->offset());
+    // From now on the label can only be referred to by backward jumps.
+  }
+  label->bind_to(current_offset);
+}
+
+// override
+void BytecodeArrayWriter::BindLabel(const BytecodeLabel& target,
+                                    BytecodeLabel* label) {
+  DCHECK(!label->is_bound());
+  DCHECK(target.is_bound());
+  if (label->is_forward_target()) {
+    // An earlier jump instruction refers to this label. Update its location.
+    PatchJump(target.offset(), label->offset());
+    // From now on the label can only be referred to by backward jumps.
+  }
+  label->bind_to(target.offset());
+}
+
 void BytecodeArrayWriter::UpdateSourcePositionTable(
     const BytecodeNode* const node) {
   int bytecode_offset = static_cast<int>(bytecodes()->size());
   const BytecodeSourceInfo& source_info = node->source_info();
   if (source_info.is_valid()) {
-    source_position_table_builder_->AddPosition(bytecode_offset,
-                                                source_info.source_position(),
-                                                source_info.is_statement());
+    source_position_table_builder()->AddPosition(bytecode_offset,
+                                                 source_info.source_position(),
+                                                 source_info.is_statement());
   }
 }
 
+namespace {
+
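+// Returns the operand scale that widens a one-byte operand to
+// |operand_size|. The STATIC_ASSERTs guarantee the two enums stay
+// numerically aligned, so the cast is safe.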
+OperandScale ScaleForScalableByteOperand(OperandSize operand_size) {
+  STATIC_ASSERT(static_cast<int>(OperandSize::kByte) ==
+                static_cast<int>(OperandScale::kSingle));
+  STATIC_ASSERT(static_cast<int>(OperandSize::kShort) ==
+                static_cast<int>(OperandScale::kDouble));
+  STATIC_ASSERT(static_cast<int>(OperandSize::kQuad) ==
+                static_cast<int>(OperandScale::kQuadruple));
+  return static_cast<OperandScale>(operand_size);
+}
+
+OperandScale OperandScaleForScalableSignedByte(uint32_t operand_value) {
+  int32_t signed_operand = static_cast<int32_t>(operand_value);
+  OperandSize bytes_required = Bytecodes::SizeForSignedOperand(signed_operand);
+  return ScaleForScalableByteOperand(bytes_required);
+}
+
+OperandScale OperandScaleForScalableUnsignedByte(uint32_t operand_value) {
+  OperandSize bytes_required = Bytecodes::SizeForUnsignedOperand(operand_value);
+  return ScaleForScalableByteOperand(bytes_required);
+}
+
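+// Returns the widest operand scale required by any scalable operand of
+// |node|. Fixed-size operands never influence the scale.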
+OperandScale GetOperandScale(const BytecodeNode* const node) {
+  const OperandTypeInfo* operand_type_infos =
+      Bytecodes::GetOperandTypeInfos(node->bytecode());
+  OperandScale operand_scale = OperandScale::kSingle;
+  for (int i = 0; i < node->operand_count(); ++i) {
+    switch (operand_type_infos[i]) {
+      case OperandTypeInfo::kScalableSignedByte: {
+        uint32_t operand = node->operand(i);
+        operand_scale =
+            std::max(operand_scale, OperandScaleForScalableSignedByte(operand));
+        break;
+      }
+      case OperandTypeInfo::kScalableUnsignedByte: {
+        uint32_t operand = node->operand(i);
+        operand_scale = std::max(operand_scale,
+                                 OperandScaleForScalableUnsignedByte(operand));
+        break;
+      }
+      case OperandTypeInfo::kFixedUnsignedByte:
+      case OperandTypeInfo::kFixedUnsignedShort:
+        break;
+      case OperandTypeInfo::kNone:
+        UNREACHABLE();
+        break;
+    }
+  }
+  return operand_scale;
+}
+
+}  // namespace
+
 void BytecodeArrayWriter::EmitBytecode(const BytecodeNode* const node) {
   DCHECK_NE(node->bytecode(), Bytecode::kIllegal);
 
-  OperandScale operand_scale = node->operand_scale();
+  OperandScale operand_scale = GetOperandScale(node);
   if (operand_scale != OperandScale::kSingle) {
     Bytecode prefix = Bytecodes::OperandScaleToPrefixBytecode(operand_scale);
     bytecodes()->push_back(Bytecodes::ToByte(prefix));
@@ -54,10 +173,12 @@
 
   int register_operand_bitmap = Bytecodes::GetRegisterOperandBitmap(bytecode);
   const uint32_t* const operands = node->operands();
+  const OperandSize* operand_sizes =
+      Bytecodes::GetOperandSizes(bytecode, operand_scale);
   const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
   for (int i = 0; operand_types[i] != OperandType::kNone; ++i) {
     OperandType operand_type = operand_types[i];
-    switch (Bytecodes::SizeOfOperand(operand_type, operand_scale)) {
+    switch (operand_sizes[i]) {
       case OperandSize::kNone:
         UNREACHABLE();
         break;
@@ -93,11 +214,175 @@
   }
 }
 
-// override
-void BytecodeArrayWriter::FlushBasicBlock() {}
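+// Maps an immediate-operand jump bytecode to its constant pool variant,
+// used when a jump offset cannot be encoded in the immediate operand and
+// must be routed through a constant pool entry instead.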
+// static
+Bytecode GetJumpWithConstantOperand(Bytecode jump_bytecode) {
+  switch (jump_bytecode) {
+    case Bytecode::kJump:
+      return Bytecode::kJumpConstant;
+    case Bytecode::kJumpIfTrue:
+      return Bytecode::kJumpIfTrueConstant;
+    case Bytecode::kJumpIfFalse:
+      return Bytecode::kJumpIfFalseConstant;
+    case Bytecode::kJumpIfToBooleanTrue:
+      return Bytecode::kJumpIfToBooleanTrueConstant;
+    case Bytecode::kJumpIfToBooleanFalse:
+      return Bytecode::kJumpIfToBooleanFalseConstant;
+    case Bytecode::kJumpIfNotHole:
+      return Bytecode::kJumpIfNotHoleConstant;
+    case Bytecode::kJumpIfNull:
+      return Bytecode::kJumpIfNullConstant;
+    case Bytecode::kJumpIfUndefined:
+      return Bytecode::kJumpIfUndefinedConstant;
+    default:
+      UNREACHABLE();
+      return Bytecode::kIllegal;
+  }
+}
 
-int BytecodeArrayWriter::GetMaximumFrameSizeUsed() {
-  return max_register_count_ * kPointerSize;
+void BytecodeArrayWriter::PatchJumpWith8BitOperand(size_t jump_location,
+                                                   int delta) {
+  Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
+  DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
+  size_t operand_location = jump_location + 1;
+  DCHECK_EQ(bytecodes()->at(operand_location), k8BitJumpPlaceholder);
+  if (Bytecodes::SizeForSignedOperand(delta) == OperandSize::kByte) {
+    // The jump fits within the range of an Imm operand, so cancel
+    // the reservation and jump directly.
+    constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
+    bytecodes()->at(operand_location) = static_cast<uint8_t>(delta);
+  } else {
+    // The jump does not fit within the range of an Imm operand, so
+    // commit reservation putting the offset into the constant pool,
+    // and update the jump instruction and operand.
+    size_t entry = constant_array_builder()->CommitReservedEntry(
+        OperandSize::kByte, handle(Smi::FromInt(delta), isolate()));
+    DCHECK_LE(entry, kMaxUInt32);
+    DCHECK_EQ(Bytecodes::SizeForUnsignedOperand(static_cast<uint32_t>(entry)),
+              OperandSize::kByte);
+    jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
+    bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
+    bytecodes()->at(operand_location) = static_cast<uint8_t>(entry);
+  }
+}
+
+void BytecodeArrayWriter::PatchJumpWith16BitOperand(size_t jump_location,
+                                                    int delta) {
+  Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
+  DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
+  size_t operand_location = jump_location + 1;
+  uint8_t operand_bytes[2];
+  if (Bytecodes::SizeForSignedOperand(delta) <= OperandSize::kShort) {
+    constant_array_builder()->DiscardReservedEntry(OperandSize::kShort);
+    WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(delta));
+  } else {
+    jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
+    bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
+    size_t entry = constant_array_builder()->CommitReservedEntry(
+        OperandSize::kShort, handle(Smi::FromInt(delta), isolate()));
+    WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
+  }
+  DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder &&
+         bytecodes()->at(operand_location + 1) == k8BitJumpPlaceholder);
+  bytecodes()->at(operand_location++) = operand_bytes[0];
+  bytecodes()->at(operand_location) = operand_bytes[1];
+}
+
+void BytecodeArrayWriter::PatchJumpWith32BitOperand(size_t jump_location,
+                                                    int delta) {
+  DCHECK(Bytecodes::IsJumpImmediate(
+      Bytecodes::FromByte(bytecodes()->at(jump_location))));
+  constant_array_builder()->DiscardReservedEntry(OperandSize::kQuad);
+  uint8_t operand_bytes[4];
+  WriteUnalignedUInt32(operand_bytes, static_cast<uint32_t>(delta));
+  size_t operand_location = jump_location + 1;
+  DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder &&
+         bytecodes()->at(operand_location + 1) == k8BitJumpPlaceholder &&
+         bytecodes()->at(operand_location + 2) == k8BitJumpPlaceholder &&
+         bytecodes()->at(operand_location + 3) == k8BitJumpPlaceholder);
+  bytecodes()->at(operand_location++) = operand_bytes[0];
+  bytecodes()->at(operand_location++) = operand_bytes[1];
+  bytecodes()->at(operand_location++) = operand_bytes[2];
+  bytecodes()->at(operand_location) = operand_bytes[3];
+}
+
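+// Patches the jump emitted at |jump_location| to target |jump_target|,
+// skipping over any Wide/ExtraWide prefix and dispatching on the operand
+// scale the jump was emitted with.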
+void BytecodeArrayWriter::PatchJump(size_t jump_target, size_t jump_location) {
+  Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
+  int delta = static_cast<int>(jump_target - jump_location);
+  int prefix_offset = 0;
+  OperandScale operand_scale = OperandScale::kSingle;
+  if (Bytecodes::IsPrefixScalingBytecode(jump_bytecode)) {
+    // If a prefix scaling bytecode is emitted, the target offset is one
+    // less than in the case of no prefix scaling bytecode.
+    delta -= 1;
+    prefix_offset = 1;
+    operand_scale = Bytecodes::PrefixBytecodeToOperandScale(jump_bytecode);
+    jump_bytecode =
+        Bytecodes::FromByte(bytecodes()->at(jump_location + prefix_offset));
+  }
+
+  DCHECK(Bytecodes::IsJump(jump_bytecode));
+  switch (operand_scale) {
+    case OperandScale::kSingle:
+      PatchJumpWith8BitOperand(jump_location, delta);
+      break;
+    case OperandScale::kDouble:
+      PatchJumpWith16BitOperand(jump_location + prefix_offset, delta);
+      break;
+    case OperandScale::kQuadruple:
+      PatchJumpWith32BitOperand(jump_location + prefix_offset, delta);
+      break;
+    default:
+      UNREACHABLE();
+  }
+  unbound_jumps_--;
+}
+
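+// Emits a jump bytecode. A jump to an already-bound label is a backward
+// jump and is resolved immediately; a forward jump gets a placeholder
+// operand plus a constant pool reservation and is patched when the label
+// is bound.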
+void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
+  DCHECK(Bytecodes::IsJump(node->bytecode()));
+  DCHECK_EQ(0, node->operand(0));
+
+  size_t current_offset = bytecodes()->size();
+
+  if (label->is_bound()) {
+    CHECK_GE(current_offset, label->offset());
+    CHECK_LE(current_offset, static_cast<size_t>(kMaxInt));
+    // Label has been bound already, so this is a backwards jump.
+    size_t abs_delta = current_offset - label->offset();
+    int delta = -static_cast<int>(abs_delta);
+    OperandSize operand_size = Bytecodes::SizeForSignedOperand(delta);
+    if (operand_size > OperandSize::kByte) {
+      // Adjust for scaling byte prefix for wide jump offset.
+      DCHECK_LE(delta, 0);
+      delta -= 1;
+    }
+    node->set_bytecode(node->bytecode(), delta);
+  } else {
+    // The label has not yet been bound, so this is a forward reference
+    // that will be patched once the label is bound. We create a
+    // reservation in the constant pool now; the reservation fixes the
+    // maximum size of the operand for the constant, so the jump can be
+    // emitted into the bytecode stream with space for the operand.
+    unbound_jumps_++;
+    label->set_referrer(current_offset);
+    OperandSize reserved_operand_size =
+        constant_array_builder()->CreateReservedEntry();
+    switch (reserved_operand_size) {
+      case OperandSize::kNone:
+        UNREACHABLE();
+        break;
+      case OperandSize::kByte:
+        node->set_bytecode(node->bytecode(), k8BitJumpPlaceholder);
+        break;
+      case OperandSize::kShort:
+        node->set_bytecode(node->bytecode(), k16BitJumpPlaceholder);
+        break;
+      case OperandSize::kQuad:
+        node->set_bytecode(node->bytecode(), k32BitJumpPlaceholder);
+        break;
+    }
+  }
+  EmitBytecode(node);
 }
 
 }  // namespace interpreter
diff --git a/src/interpreter/bytecode-array-writer.h b/src/interpreter/bytecode-array-writer.h
index b1303c9..76d881e 100644
--- a/src/interpreter/bytecode-array-writer.h
+++ b/src/interpreter/bytecode-array-writer.h
@@ -6,40 +6,70 @@
 #define V8_INTERPRETER_BYTECODE_ARRAY_WRITER_H_
 
 #include "src/interpreter/bytecode-pipeline.h"
+#include "src/interpreter/source-position-table.h"
 
 namespace v8 {
 namespace internal {
 namespace interpreter {
 
+class BytecodeLabel;
 class SourcePositionTableBuilder;
+class ConstantArrayBuilder;
 
 // Class for emitting bytecode as the final stage of the bytecode
 // generation pipeline.
 class BytecodeArrayWriter final : public BytecodePipelineStage {
  public:
-  BytecodeArrayWriter(
-      Zone* zone, SourcePositionTableBuilder* source_position_table_builder);
+  BytecodeArrayWriter(Isolate* isolate, Zone* zone,
+                      ConstantArrayBuilder* constant_array_builder);
   virtual ~BytecodeArrayWriter();
 
+  // BytecodePipelineStage interface.
   void Write(BytecodeNode* node) override;
-  size_t FlushForOffset() override;
-  void FlushBasicBlock() override;
-
-  // Get the bytecode vector.
-  ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
-
-  // Returns the size in bytes of the frame associated with the
-  // bytecode written.
-  int GetMaximumFrameSizeUsed();
+  void WriteJump(BytecodeNode* node, BytecodeLabel* label) override;
+  void BindLabel(BytecodeLabel* label) override;
+  void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
+  Handle<BytecodeArray> ToBytecodeArray(
+      int fixed_register_count, int parameter_count,
+      Handle<FixedArray> handler_table) override;
 
  private:
+  // Constants that act as placeholders for jump operands to be
+  // patched. These have operand sizes that match the sizes of
+  // reserved constant pool entries.
+  const uint32_t k8BitJumpPlaceholder = 0x7f;
+  const uint32_t k16BitJumpPlaceholder =
+      k8BitJumpPlaceholder | (k8BitJumpPlaceholder << 8);
+  const uint32_t k32BitJumpPlaceholder =
+      k16BitJumpPlaceholder | (k16BitJumpPlaceholder << 16);
+
+  void PatchJump(size_t jump_target, size_t jump_location);
+  void PatchJumpWith8BitOperand(size_t jump_location, int delta);
+  void PatchJumpWith16BitOperand(size_t jump_location, int delta);
+  void PatchJumpWith32BitOperand(size_t jump_location, int delta);
+
   void EmitBytecode(const BytecodeNode* const node);
+  void EmitJump(BytecodeNode* node, BytecodeLabel* label);
   void UpdateSourcePositionTable(const BytecodeNode* const node);
 
+  Isolate* isolate() { return isolate_; }
+  ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
+  SourcePositionTableBuilder* source_position_table_builder() {
+    return &source_position_table_builder_;
+  }
+  ConstantArrayBuilder* constant_array_builder() {
+    return constant_array_builder_;
+  }
+  int max_register_count() { return max_register_count_; }
+
+  Isolate* isolate_;
   ZoneVector<uint8_t> bytecodes_;
   int max_register_count_;
-  SourcePositionTableBuilder* source_position_table_builder_;
+  int unbound_jumps_;
+  SourcePositionTableBuilder source_position_table_builder_;
+  ConstantArrayBuilder* constant_array_builder_;
 
+  friend class BytecodeArrayWriterUnittest;
   DISALLOW_COPY_AND_ASSIGN(BytecodeArrayWriter);
 };
 
diff --git a/src/interpreter/bytecode-dead-code-optimizer.cc b/src/interpreter/bytecode-dead-code-optimizer.cc
new file mode 100644
index 0000000..964d2a8
--- /dev/null
+++ b/src/interpreter/bytecode-dead-code-optimizer.cc
@@ -0,0 +1,77 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-dead-code-optimizer.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodeDeadCodeOptimizer::BytecodeDeadCodeOptimizer(
+    BytecodePipelineStage* next_stage)
+    : next_stage_(next_stage), exit_seen_in_block_(false) {}
+
+// override
+Handle<BytecodeArray> BytecodeDeadCodeOptimizer::ToBytecodeArray(
+    int fixed_register_count, int parameter_count,
+    Handle<FixedArray> handler_table) {
+  return next_stage_->ToBytecodeArray(fixed_register_count, parameter_count,
+                                      handler_table);
+}
+
+// override
+void BytecodeDeadCodeOptimizer::Write(BytecodeNode* node) {
+  // Don't emit dead code.
+  if (exit_seen_in_block_) return;
+
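+  // Returns and throws terminate the current basic block; any bytecode
+  // written after them and before the next bound label is unreachable.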
+  switch (node->bytecode()) {
+    case Bytecode::kReturn:
+    case Bytecode::kThrow:
+    case Bytecode::kReThrow:
+      exit_seen_in_block_ = true;
+      break;
+    default:
+      break;
+  }
+
+  next_stage_->Write(node);
+}
+
+// override
+void BytecodeDeadCodeOptimizer::WriteJump(BytecodeNode* node,
+                                          BytecodeLabel* label) {
+  // Don't emit dead code.
+  // TODO(rmcilroy): For forward jumps we could mark the label as dead, thereby
+  // avoiding emitting dead code when we bind the label.
+  if (exit_seen_in_block_) return;
+
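+  // Only unconditional jumps terminate the basic block; conditional jumps
+  // can fall through, so the block stays live after them.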
+  switch (node->bytecode()) {
+    case Bytecode::kJump:
+    case Bytecode::kJumpConstant:
+      exit_seen_in_block_ = true;
+      break;
+    default:
+      break;
+  }
+
+  next_stage_->WriteJump(node, label);
+}
+
+// override
+void BytecodeDeadCodeOptimizer::BindLabel(BytecodeLabel* label) {
+  next_stage_->BindLabel(label);
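+  // The label is a potential jump target, so code following it is
+  // reachable again.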
+  exit_seen_in_block_ = false;
+}
+
+// override
+void BytecodeDeadCodeOptimizer::BindLabel(const BytecodeLabel& target,
+                                          BytecodeLabel* label) {
+  next_stage_->BindLabel(target, label);
+  // exit_seen_in_block_ was reset when |target| was bound, so it shouldn't
+  // be changed here.
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/bytecode-dead-code-optimizer.h b/src/interpreter/bytecode-dead-code-optimizer.h
new file mode 100644
index 0000000..8d68e54
--- /dev/null
+++ b/src/interpreter/bytecode-dead-code-optimizer.h
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_DEAD_CODE_OPTIMIZER_H_
+#define V8_INTERPRETER_BYTECODE_DEAD_CODE_OPTIMIZER_H_
+
+#include "src/interpreter/bytecode-pipeline.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// An optimization stage for eliminating obviously dead code in bytecode
+// generation.
+class BytecodeDeadCodeOptimizer final : public BytecodePipelineStage,
+                                        public ZoneObject {
+ public:
+  explicit BytecodeDeadCodeOptimizer(BytecodePipelineStage* next_stage);
+
+  // BytecodePipelineStage interface.
+  void Write(BytecodeNode* node) override;
+  void WriteJump(BytecodeNode* node, BytecodeLabel* label) override;
+  void BindLabel(BytecodeLabel* label) override;
+  void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
+  Handle<BytecodeArray> ToBytecodeArray(
+      int fixed_register_count, int parameter_count,
+      Handle<FixedArray> handler_table) override;
+
+ private:
+  BytecodePipelineStage* next_stage_;
+  bool exit_seen_in_block_;
+
+  DISALLOW_COPY_AND_ASSIGN(BytecodeDeadCodeOptimizer);
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_BYTECODE_DEAD_CODE_OPTIMIZER_H_
diff --git a/src/interpreter/bytecode-generator.cc b/src/interpreter/bytecode-generator.cc
index 650234a..b7cfd49 100644
--- a/src/interpreter/bytecode-generator.cc
+++ b/src/interpreter/bytecode-generator.cc
@@ -17,7 +17,6 @@
 namespace internal {
 namespace interpreter {
 
-
 // Scoped class tracking context objects created by the visitor. Represents
 // mutations of the context chain within the function body, allowing pushing and
 // popping of the current {context_register} during visitation.
@@ -88,7 +87,6 @@
   bool should_pop_context_;
 };
 
-
 // Scoped class for tracking control statements entered by the
 // visitor. The pattern derives AstGraphBuilder::ControlScope.
 class BytecodeGenerator::ControlScope BASE_EMBEDDED {
@@ -124,7 +122,6 @@
   DISALLOW_COPY_AND_ASSIGN(ControlScope);
 };
 
-
 // Helper class for a try-finally control scope. It can record intercepted
 // control-flow commands that cause entry into a finally-block, and re-apply
 // them after again leaving that block. Special tokens are used to identify
@@ -203,7 +200,6 @@
   Register result_register_;
 };
 
-
 // Scoped class for dealing with control flow reaching the function level.
 class BytecodeGenerator::ControlScopeForTopLevel final
     : public BytecodeGenerator::ControlScope {
@@ -228,7 +224,6 @@
   }
 };
 
-
 // Scoped class for enabling break inside blocks and switch blocks.
 class BytecodeGenerator::ControlScopeForBreakable final
     : public BytecodeGenerator::ControlScope {
@@ -260,7 +255,6 @@
   BreakableControlFlowBuilder* control_builder_;
 };
 
-
 // Scoped class for enabling 'break' and 'continue' in iteration
 // constructs, e.g. do...while, while..., for...
 class BytecodeGenerator::ControlScopeForIteration final
@@ -295,7 +289,6 @@
   LoopBuilder* loop_builder_;
 };
 
-
 // Scoped class for enabling 'throw' in try-catch constructs.
 class BytecodeGenerator::ControlScopeForTryCatch final
     : public BytecodeGenerator::ControlScope {
@@ -324,7 +317,6 @@
   }
 };
 
-
 // Scoped class for enabling control flow through try-finally constructs.
 class BytecodeGenerator::ControlScopeForTryFinally final
     : public BytecodeGenerator::ControlScope {
@@ -360,7 +352,6 @@
   DeferredCommands* commands_;
 };
 
-
 void BytecodeGenerator::ControlScope::PerformCommand(Command command,
                                                      Statement* statement) {
   ControlScope* current = this;
@@ -383,7 +374,6 @@
   UNREACHABLE();
 }
 
-
 class BytecodeGenerator::RegisterAllocationScope {
  public:
   explicit RegisterAllocationScope(BytecodeGenerator* generator)
@@ -441,7 +431,6 @@
   DISALLOW_COPY_AND_ASSIGN(RegisterAllocationScope);
 };
 
-
 // Scoped base class for determining where the result of an expression
 // is stored.
 class BytecodeGenerator::ExpressionResultScope {
@@ -489,7 +478,6 @@
   DISALLOW_COPY_AND_ASSIGN(ExpressionResultScope);
 };
 
-
 // Scoped class used when the result of the current expression is not
 // expected to produce a result.
 class BytecodeGenerator::EffectResultScope final
@@ -504,7 +492,6 @@
   virtual void SetResultInRegister(Register reg) {}
 };
 
-
 // Scoped class used when the result of the current expression to be
 // evaluated should go into the interpreter's accumulator register.
 class BytecodeGenerator::AccumulatorResultScope final
@@ -521,7 +508,6 @@
   }
 };
 
-
 // Scoped class used when the result of the current expression to be
 // evaluated should go into an interpreter register.
 class BytecodeGenerator::RegisterResultScope final
@@ -585,7 +571,7 @@
 
   RegisterAllocationScope register_scope(this);
 
-  if (IsGeneratorFunction(info()->literal()->kind())) {
+  if (IsResumableFunction(info()->literal()->kind())) {
     generator_state_ = register_allocator()->NewRegister();
     VisitGeneratorPrologue();
   }
@@ -613,7 +599,6 @@
   return builder()->ToBytecodeArray();
 }
 
-
 void BytecodeGenerator::MakeBytecodeBody() {
   // Build the arguments object if it is used.
   VisitArgumentsObject(scope()->arguments());
@@ -656,13 +641,7 @@
         .JumpIfTrue(&(targets[i]));
   }
 
-  RegisterAllocationScope register_scope(this);
-  Register reason = register_allocator()->NewRegister();
-  BailoutReason bailout_reason = BailoutReason::kInvalidJumpTableIndex;
-  builder()
-      ->LoadLiteral(Smi::FromInt(static_cast<int>(bailout_reason)))
-      .StoreAccumulatorInRegister(reason)
-      .CallRuntime(Runtime::kAbort, reason, 1);
+  BuildAbort(BailoutReason::kInvalidJumpTableIndex);
 }
 
 void BytecodeGenerator::VisitIterationHeader(IterationStatement* stmt,
@@ -717,10 +696,13 @@
   BuildIndexedJump(generator_state_, 0, generator_resume_points_.size(),
                    generator_resume_points_);
 
-  builder()->Bind(&regular_call);
+  builder()
+      ->Bind(&regular_call)
+      .LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
+      .StoreAccumulatorInRegister(generator_state_);
   // This is a regular call. Fall through to the ordinary function prologue,
-  // after which we will run into the generator object creation and the initial
-  // yield (both inserted by the parser).
+  // after which we will run into the generator object creation and other extra
+  // code inserted by the parser.
 }
 
 void BytecodeGenerator::VisitBlock(Block* stmt) {
@@ -734,7 +716,6 @@
   }
 }
 
-
 void BytecodeGenerator::VisitBlockDeclarationsAndStatements(Block* stmt) {
   BlockBuilder block_builder(builder());
   ControlScopeForBreakable execution_control(this, stmt, &block_builder);
@@ -745,7 +726,6 @@
   if (stmt->labels() != nullptr) block_builder.EndBlock();
 }
 
-
 void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
   Variable* variable = decl->proxy()->var();
   VariableMode mode = decl->mode();
@@ -780,34 +760,20 @@
       }
       break;
     case VariableLocation::LOOKUP: {
-      DCHECK(IsDeclaredVariableMode(mode));
+      DCHECK_EQ(VAR, mode);
+      DCHECK(!hole_init);
 
-      register_allocator()->PrepareForConsecutiveAllocations(3);
-      Register name = register_allocator()->NextConsecutiveRegister();
-      Register init_value = register_allocator()->NextConsecutiveRegister();
-      Register attributes = register_allocator()->NextConsecutiveRegister();
+      Register name = register_allocator()->NewRegister();
 
-      builder()->LoadLiteral(variable->name()).StoreAccumulatorInRegister(name);
-      if (hole_init) {
-        builder()->LoadTheHole().StoreAccumulatorInRegister(init_value);
-      } else {
-        // For variables, we must not use an initial value (such as 'undefined')
-        // because we may have a (legal) redeclaration and we must not destroy
-        // the current value.
-        builder()
-            ->LoadLiteral(Smi::FromInt(0))
-            .StoreAccumulatorInRegister(init_value);
-      }
       builder()
-          ->LoadLiteral(Smi::FromInt(variable->DeclarationPropertyAttributes()))
-          .StoreAccumulatorInRegister(attributes)
-          .CallRuntime(Runtime::kDeclareLookupSlot, name, 3);
+          ->LoadLiteral(variable->name())
+          .StoreAccumulatorInRegister(name)
+          .CallRuntime(Runtime::kDeclareEvalVar, name, 1);
       break;
     }
   }
 }
 
-
 void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
   Variable* variable = decl->proxy()->var();
   switch (variable->location()) {
@@ -838,33 +804,26 @@
       break;
     }
     case VariableLocation::LOOKUP: {
-      register_allocator()->PrepareForConsecutiveAllocations(3);
+      register_allocator()->PrepareForConsecutiveAllocations(2);
       Register name = register_allocator()->NextConsecutiveRegister();
       Register literal = register_allocator()->NextConsecutiveRegister();
-      Register attributes = register_allocator()->NextConsecutiveRegister();
       builder()->LoadLiteral(variable->name()).StoreAccumulatorInRegister(name);
 
       VisitForAccumulatorValue(decl->fun());
-      builder()
-          ->StoreAccumulatorInRegister(literal)
-          .LoadLiteral(Smi::FromInt(variable->DeclarationPropertyAttributes()))
-          .StoreAccumulatorInRegister(attributes)
-          .CallRuntime(Runtime::kDeclareLookupSlot, name, 3);
+      builder()->StoreAccumulatorInRegister(literal).CallRuntime(
+          Runtime::kDeclareEvalFunction, name, 2);
     }
   }
 }
 
-
 void BytecodeGenerator::VisitImportDeclaration(ImportDeclaration* decl) {
   UNIMPLEMENTED();
 }
 
-
 void BytecodeGenerator::VisitExportDeclaration(ExportDeclaration* decl) {
   UNIMPLEMENTED();
 }
 
-
 void BytecodeGenerator::VisitDeclarations(
     ZoneList<Declaration*>* declarations) {
   RegisterAllocationScope register_scope(this);
@@ -893,7 +852,6 @@
   globals()->clear();
 }
 
-
 void BytecodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
   for (int i = 0; i < statements->length(); i++) {
     // Allocate an outer register allocations scope for the statement.
@@ -904,17 +862,14 @@
   }
 }
 
-
 void BytecodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
   builder()->SetStatementPosition(stmt);
   VisitForEffect(stmt->expression());
 }
 
-
 void BytecodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
 }
 
-
 void BytecodeGenerator::VisitIfStatement(IfStatement* stmt) {
   builder()->SetStatementPosition(stmt);
   BytecodeLabel else_label, end_label;
@@ -944,32 +899,27 @@
   }
 }
 
-
 void BytecodeGenerator::VisitSloppyBlockFunctionStatement(
     SloppyBlockFunctionStatement* stmt) {
   Visit(stmt->statement());
 }
 
-
 void BytecodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
   builder()->SetStatementPosition(stmt);
   execution_control()->Continue(stmt->target());
 }
 
-
 void BytecodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
   builder()->SetStatementPosition(stmt);
   execution_control()->Break(stmt->target());
 }
 
-
 void BytecodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
   builder()->SetStatementPosition(stmt);
   VisitForAccumulatorValue(stmt->expression());
   execution_control()->ReturnAccumulator();
 }
 
-
 void BytecodeGenerator::VisitWithStatement(WithStatement* stmt) {
   builder()->SetStatementPosition(stmt);
   VisitForAccumulatorValue(stmt->expression());
@@ -978,7 +928,6 @@
   VisitInScope(stmt->statement(), stmt->scope());
 }
 
-
 void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
   // We need this scope because we visit for register values. We have to
   // maintain an execution result scope where registers can be allocated.
@@ -1029,7 +978,6 @@
   switch_builder.SetBreakTarget(done_label);
 }
 
-
 void BytecodeGenerator::VisitCaseClause(CaseClause* clause) {
   // Handled entirely in VisitSwitchStatement.
   UNREACHABLE();
@@ -1078,7 +1026,6 @@
   loop_builder.EndLoop();
 }
 
-
 void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
   if (stmt->init() != nullptr) {
     Visit(stmt->init());
@@ -1105,7 +1052,6 @@
   loop_builder.EndLoop();
 }
 
-
 void BytecodeGenerator::VisitForInAssignment(Expression* expr,
                                              FeedbackVectorSlot slot) {
   DCHECK(expr->IsValidReferenceExpression());
@@ -1179,7 +1125,6 @@
   }
 }
 
-
 void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   if (stmt->subject()->IsNullLiteral() ||
       stmt->subject()->IsUndefinedLiteral()) {
@@ -1231,7 +1176,6 @@
   builder()->Bind(&subject_undefined_label);
 }
 
-
 void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
   LoopBuilder loop_builder(builder());
   ControlScopeForIteration control_scope(this, stmt, &loop_builder);
@@ -1251,7 +1195,6 @@
   loop_builder.EndLoop();
 }
 
-
 void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
   TryCatchBuilder try_control_builder(builder());
   Register no_reg;
@@ -1288,7 +1231,6 @@
   try_control_builder.EndCatch();
 }
 
-
 void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
   TryFinallyBuilder try_control_builder(builder(), IsInsideTryCatch());
   Register no_reg;
@@ -1353,13 +1295,11 @@
   commands.ApplyDeferredCommands();
 }
 
-
 void BytecodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
   builder()->SetStatementPosition(stmt);
   builder()->Debugger();
 }
 
-
 void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
   // Find or build a shared function info.
   Handle<SharedFunctionInfo> shared_info =
@@ -1372,7 +1312,6 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
   if (expr->scope()->ContextLocalCount() > 0) {
     VisitNewLocalBlockContext(expr->scope());
@@ -1530,13 +1469,11 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitDoExpression(DoExpression* expr) {
   VisitBlock(expr->block());
   VisitVariableProxy(expr->result());
 }
 
-
 void BytecodeGenerator::VisitConditional(Conditional* expr) {
   // TODO(rmcilroy): Spot easy cases where the code would not need to
   // emit the then block or the else block, e.g. condition is
@@ -1557,21 +1494,20 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitLiteral(Literal* expr) {
   if (!execution_result()->IsEffect()) {
     Handle<Object> value = expr->value();
     if (value->IsSmi()) {
       builder()->LoadLiteral(Smi::cast(*value));
-    } else if (value->IsUndefined()) {
+    } else if (value->IsUndefined(isolate())) {
       builder()->LoadUndefined();
-    } else if (value->IsTrue()) {
+    } else if (value->IsTrue(isolate())) {
       builder()->LoadTrue();
-    } else if (value->IsFalse()) {
+    } else if (value->IsFalse(isolate())) {
       builder()->LoadFalse();
-    } else if (value->IsNull()) {
+    } else if (value->IsNull(isolate())) {
       builder()->LoadNull();
-    } else if (value->IsTheHole()) {
+    } else if (value->IsTheHole(isolate())) {
       builder()->LoadTheHole();
     } else {
       builder()->LoadLiteral(value);
@@ -1580,7 +1516,6 @@
   }
 }
 
-
 void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
   // Materialize a regular expression literal.
   builder()->CreateRegExpLiteral(expr->pattern(), expr->literal_index(),
@@ -1588,7 +1523,6 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   // Copy the literal boilerplate.
   int fast_clone_properties_count = 0;
@@ -1792,7 +1726,6 @@
   execution_result()->SetResultInRegister(literal);
 }
 
-
 void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   // Deep-copy the literal boilerplate.
   builder()->CreateArrayLiteral(expr->constant_elements(),
@@ -1832,7 +1765,6 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
   builder()->SetExpressionPosition(proxy);
   VisitVariableLoad(proxy->var(), proxy->VariableFeedbackSlot());
@@ -1868,8 +1800,7 @@
     }
     case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
-      builder()->LoadGlobal(variable->name(), feedback_index(slot),
-                            typeof_mode);
+      builder()->LoadGlobal(feedback_index(slot), typeof_mode);
       execution_result()->SetResultInAccumulator();
       break;
     }
@@ -1958,6 +1889,15 @@
   builder()->CallRuntime(function_id, receiver, 4);
 }
 
+void BytecodeGenerator::BuildAbort(BailoutReason bailout_reason) {
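+  // Loads |bailout_reason| into a register and calls Runtime::kAbort;
+  // this replaces the sequence previously open-coded at each abort site.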
+  RegisterAllocationScope register_scope(this);
+  Register reason = register_allocator()->NewRegister();
+  builder()
+      ->LoadLiteral(Smi::FromInt(static_cast<int>(bailout_reason)))
+      .StoreAccumulatorInRegister(reason)
+      .CallRuntime(Runtime::kAbort, reason, 1);
+}
+
 void BytecodeGenerator::BuildThrowReferenceError(Handle<String> name) {
   RegisterAllocationScope register_scope(this);
   Register name_reg = register_allocator()->NewRegister();
@@ -2125,7 +2065,6 @@
   }
 }
 
-
 void BytecodeGenerator::VisitAssignment(Assignment* expr) {
   DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
   Register object, key, home_object, value;
@@ -2297,12 +2236,12 @@
 
     Register input = register_allocator()->NewRegister();
     builder()
-        ->CallRuntime(Runtime::kGeneratorGetInput, generator, 1)
+        ->CallRuntime(Runtime::kInlineGeneratorGetInputOrDebugPos, generator, 1)
         .StoreAccumulatorInRegister(input);
 
     Register resume_mode = register_allocator()->NewRegister();
     builder()
-        ->CallRuntime(Runtime::kGeneratorGetResumeMode, generator, 1)
+        ->CallRuntime(Runtime::kInlineGeneratorGetResumeMode, generator, 1)
         .StoreAccumulatorInRegister(resume_mode);
 
     // Now dispatch on resume mode.
@@ -2329,14 +2268,13 @@
           ->MoveRegister(input, value)
           .LoadTrue()
           .StoreAccumulatorInRegister(done)
-          .CallRuntime(Runtime::kCreateIterResultObject, value, 2);
+          .CallRuntime(Runtime::kInlineCreateIterResultObject, value, 2);
       execution_control()->ReturnAccumulator();
     }
 
     builder()->Bind(&resume_with_throw);
-    builder()
-        ->LoadAccumulatorWithRegister(input)
-        .Throw();
+    builder()->SetExpressionPosition(expr);
+    builder()->LoadAccumulatorWithRegister(input).Throw();
 
     builder()->Bind(&resume_with_next);
     builder()->LoadAccumulatorWithRegister(input);
@@ -2355,7 +2293,6 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* expr) {
   LhsKind property_kind = Property::GetAssignType(expr);
   FeedbackVectorSlot slot = expr->PropertyFeedbackSlot();
@@ -2649,7 +2586,6 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   if (expr->is_jsruntime()) {
@@ -2670,14 +2606,12 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitVoid(UnaryOperation* expr) {
   VisitForEffect(expr->expression());
   builder()->LoadUndefined();
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitTypeOf(UnaryOperation* expr) {
   if (expr->expression()->IsVariableProxy()) {
     // Typeof does not throw a reference error on global variables, hence we
@@ -2692,14 +2626,12 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitNot(UnaryOperation* expr) {
   VisitForAccumulatorValue(expr->expression());
   builder()->LogicalNot();
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
   switch (expr->op()) {
     case Token::Value::NOT:
@@ -2725,7 +2657,6 @@
   }
 }
 
-
 void BytecodeGenerator::VisitDelete(UnaryOperation* expr) {
   if (expr->expression()->IsProperty()) {
     // Delete of an object property is allowed both in sloppy
@@ -2787,7 +2718,6 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
   DCHECK(expr->expression()->IsValidReferenceExpressionOrThis());
 
@@ -2909,7 +2839,6 @@
   }
 }
 
-
 void BytecodeGenerator::VisitBinaryOperation(BinaryOperation* binop) {
   switch (binop->op()) {
     case Token::COMMA:
@@ -2927,7 +2856,6 @@
   }
 }
 
-
 void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Register lhs = VisitForRegisterValue(expr->left());
   VisitForAccumulatorValue(expr->right());
@@ -2936,7 +2864,6 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
   Register lhs = VisitForRegisterValue(expr->left());
   VisitForAccumulatorValue(expr->right());
@@ -2944,39 +2871,32 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitSpread(Spread* expr) { UNREACHABLE(); }
 
-
 void BytecodeGenerator::VisitEmptyParentheses(EmptyParentheses* expr) {
   UNREACHABLE();
 }
 
-
 void BytecodeGenerator::VisitThisFunction(ThisFunction* expr) {
   execution_result()->SetResultInRegister(Register::function_closure());
 }
 
-
 void BytecodeGenerator::VisitSuperCallReference(SuperCallReference* expr) {
   // Handled by VisitCall().
   UNREACHABLE();
 }
 
-
 void BytecodeGenerator::VisitSuperPropertyReference(
     SuperPropertyReference* expr) {
   builder()->CallRuntime(Runtime::kThrowUnsupportedSuperError, Register(0), 0);
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitCommaExpression(BinaryOperation* binop) {
   VisitForEffect(binop->left());
   Visit(binop->right());
 }
 
-
 void BytecodeGenerator::VisitLogicalOrExpression(BinaryOperation* binop) {
   Expression* left = binop->left();
   Expression* right = binop->right();
@@ -2995,7 +2915,6 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitLogicalAndExpression(BinaryOperation* binop) {
   Expression* left = binop->left();
   Expression* right = binop->right();
@@ -3014,12 +2933,10 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitRewritableExpression(RewritableExpression* expr) {
   Visit(expr->expression());
 }
 
-
 void BytecodeGenerator::VisitNewLocalFunctionContext() {
   AccumulatorResultScope accumulator_execution_result(this);
   Scope* scope = this->scope();
@@ -3043,7 +2960,6 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitBuildLocalActivationContext() {
   Scope* scope = this->scope();
 
@@ -3072,7 +2988,6 @@
   }
 }
 
-
 void BytecodeGenerator::VisitNewLocalBlockContext(Scope* scope) {
   AccumulatorResultScope accumulator_execution_result(this);
   DCHECK(scope->is_block_scope());
@@ -3126,7 +3041,6 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitObjectLiteralAccessor(
     Register home_object, ObjectLiteralProperty* property, Register value_out) {
   // TODO(rmcilroy): Replace value_out with VisitForRegister();
@@ -3152,7 +3066,6 @@
   }
 }
 
-
 void BytecodeGenerator::VisitArgumentsObject(Variable* variable) {
   if (variable == nullptr) return;
 
@@ -3187,7 +3100,6 @@
   VisitVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid());
 }
 
-
 void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
   if (variable == nullptr) return;
 
@@ -3196,7 +3108,6 @@
   VisitVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid());
 }
 
-
 void BytecodeGenerator::VisitFunctionClosureForContext() {
   AccumulatorResultScope accumulator_execution_result(this);
   Scope* closure_scope = execution_context()->scope()->ClosureScope();
@@ -3223,7 +3134,6 @@
   execution_result()->SetResultInAccumulator();
 }
 
-
 // Visits the expression |expr| and places the result in the accumulator.
 void BytecodeGenerator::VisitForAccumulatorValue(Expression* expr) {
   AccumulatorResultScope accumulator_scope(this);
@@ -3244,7 +3154,6 @@
   Visit(expr);
 }
 
-
 // Visits the expression |expr| and returns the register containing
 // the expression result.
 Register BytecodeGenerator::VisitForRegisterValue(Expression* expr) {
@@ -3268,14 +3177,12 @@
   Visit(stmt);
 }
 
-
 LanguageMode BytecodeGenerator::language_mode() const {
   return execution_context()->scope()->language_mode();
 }
 
-
 int BytecodeGenerator::feedback_index(FeedbackVectorSlot slot) const {
-  return info()->shared_info()->feedback_vector()->GetIndex(slot);
+  return TypeFeedbackVector::GetIndex(slot);
 }
 
 }  // namespace interpreter
diff --git a/src/interpreter/bytecode-generator.h b/src/interpreter/bytecode-generator.h
index 0dcc9be..3adca6b 100644
--- a/src/interpreter/bytecode-generator.h
+++ b/src/interpreter/bytecode-generator.h
@@ -7,6 +7,7 @@
 
 #include "src/ast/ast.h"
 #include "src/interpreter/bytecode-array-builder.h"
+#include "src/interpreter/bytecode-label.h"
 #include "src/interpreter/bytecodes.h"
 
 namespace v8 {
@@ -105,6 +106,7 @@
   void BuildKeyedSuperPropertyLoad(Register receiver, Register home_object,
                                    Register key);
 
+  void BuildAbort(BailoutReason bailout_reason);
   void BuildThrowIfHole(Handle<String> name);
   void BuildThrowIfNotHole(Handle<String> name);
   void BuildThrowReassignConstant(Handle<String> name);
diff --git a/src/interpreter/bytecode-label.h b/src/interpreter/bytecode-label.h
new file mode 100644
index 0000000..2f89c48
--- /dev/null
+++ b/src/interpreter/bytecode-label.h
@@ -0,0 +1,56 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_LABEL_H_
+#define V8_INTERPRETER_BYTECODE_LABEL_H_
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// A label representing a branch target in a bytecode array. When a
+// label is bound, it represents a known position in the bytecode
+// array. For labels that are forward references there can be at most
+// one reference whilst they are unbound.
+class BytecodeLabel final {
+ public:
+  BytecodeLabel() : bound_(false), offset_(kInvalidOffset) {}
+
+  bool is_bound() const { return bound_; }
+  size_t offset() const { return offset_; }
+
+ private:
+  static const size_t kInvalidOffset = static_cast<size_t>(-1);
+
+  void bind_to(size_t offset) {
+    DCHECK(!bound_ && offset != kInvalidOffset);
+    offset_ = offset;
+    bound_ = true;
+  }
+
+  void set_referrer(size_t offset) {
+    DCHECK(!bound_ && offset != kInvalidOffset && offset_ == kInvalidOffset);
+    offset_ = offset;
+  }
+
+  bool is_forward_target() const {
+    return offset() != kInvalidOffset && !is_bound();
+  }
+
+  // There are three states for a label:
+  //                    bound_   offset_
+  //  UNSET             false    kInvalidOffset
+  //  FORWARD_TARGET    false    Offset of referring jump
+  //  BACKWARD_TARGET    true    Offset of label in bytecode array when bound
+  bool bound_;
+  size_t offset_;
+
+  friend class BytecodeArrayWriter;
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_BYTECODE_LABEL_H_
diff --git a/src/interpreter/bytecode-peephole-optimizer.cc b/src/interpreter/bytecode-peephole-optimizer.cc
index 803fc23..1108d83 100644
--- a/src/interpreter/bytecode-peephole-optimizer.cc
+++ b/src/interpreter/bytecode-peephole-optimizer.cc
@@ -15,12 +15,57 @@
 BytecodePeepholeOptimizer::BytecodePeepholeOptimizer(
     ConstantArrayBuilder* constant_array_builder,
     BytecodePipelineStage* next_stage)
-    : constant_array_builder_(constant_array_builder),
-      next_stage_(next_stage),
-      last_is_discardable_(false) {
+    : constant_array_builder_(constant_array_builder), next_stage_(next_stage) {
   InvalidateLast();
 }
 
+// override
+Handle<BytecodeArray> BytecodePeepholeOptimizer::ToBytecodeArray(
+    int fixed_register_count, int parameter_count,
+    Handle<FixedArray> handler_table) {
+  Flush();
+  return next_stage_->ToBytecodeArray(fixed_register_count, parameter_count,
+                                      handler_table);
+}
+
+// override
+void BytecodePeepholeOptimizer::Write(BytecodeNode* node) {
+  node = OptimizeAndEmitLast(node);
+  if (node != nullptr) {
+    SetLast(node);
+  }
+}
+
+// override
+void BytecodePeepholeOptimizer::WriteJump(BytecodeNode* node,
+                                          BytecodeLabel* label) {
+  node = OptimizeAndEmitLast(node);
+  next_stage_->WriteJump(node, label);
+}
+
+// override
+void BytecodePeepholeOptimizer::BindLabel(BytecodeLabel* label) {
+  Flush();
+  next_stage_->BindLabel(label);
+}
+
+// override
+void BytecodePeepholeOptimizer::BindLabel(const BytecodeLabel& target,
+                                          BytecodeLabel* label) {
+  // There is no need to flush here; it will have been flushed when |target|
+  // was bound.
+  next_stage_->BindLabel(target, label);
+}
+
+void BytecodePeepholeOptimizer::Flush() {
+  // TODO(oth/rmcilroy): We could check CanElideLast() here to potentially
+  // eliminate last rather than writing it.
+  if (LastIsValid()) {
+    next_stage_->Write(&last_);
+    InvalidateLast();
+  }
+}
+
 void BytecodePeepholeOptimizer::InvalidateLast() {
   last_.set_bytecode(Bytecode::kIllegal);
 }
@@ -31,51 +76,6 @@
 
 void BytecodePeepholeOptimizer::SetLast(const BytecodeNode* const node) {
   last_.Clone(node);
-  last_is_discardable_ = true;
-}
-
-// override
-size_t BytecodePeepholeOptimizer::FlushForOffset() {
-  size_t buffered_size = next_stage_->FlushForOffset();
-  if (LastIsValid()) {
-    if (last_.bytecode() == Bytecode::kNop &&
-        !last_.source_info().is_statement()) {
-      // The Nop can be dropped as it doesn't have a statement
-      // position for the debugger and doesn't have any effects by
-      // definition.
-      InvalidateLast();
-    } else {
-      buffered_size += last_.Size();
-      last_is_discardable_ = false;
-    }
-  }
-  return buffered_size;
-}
-
-// override
-void BytecodePeepholeOptimizer::FlushBasicBlock() {
-  if (LastIsValid()) {
-    next_stage_->Write(&last_);
-    InvalidateLast();
-  }
-  next_stage_->FlushBasicBlock();
-}
-
-// override
-void BytecodePeepholeOptimizer::Write(BytecodeNode* node) {
-  // Attempt optimization if there is an earlier node to optimize with.
-  if (LastIsValid()) {
-    node = Optimize(node);
-    // Only output the last node if it wasn't invalidated by the optimization.
-    if (LastIsValid()) {
-      next_stage_->Write(&last_);
-      InvalidateLast();
-    }
-  }
-
-  if (node != nullptr) {
-    SetLast(node);
-  }
 }
 
 Handle<Object> BytecodePeepholeOptimizer::GetConstantForIndexOperand(
@@ -94,22 +94,18 @@
            GetConstantForIndexOperand(&last_, 0)->IsName()));
 }
 
-void BytecodePeepholeOptimizer::UpdateCurrentBytecode(BytecodeNode* current) {
-  if (Bytecodes::IsJumpIfToBoolean(current->bytecode()) &&
-      Bytecodes::WritesBooleanToAccumulator(last_.bytecode())) {
-    // Conditional jumps with boolean conditions are emitted in
-    // ToBoolean form by the bytecode array builder,
-    // i.e. JumpIfToBooleanTrue rather JumpIfTrue. The ToBoolean element
-    // can be removed if the previous bytecode put a boolean value in
-    // the accumulator.
-    Bytecode jump = Bytecodes::GetJumpWithoutToBoolean(current->bytecode());
-    current->set_bytecode(jump, current->operand(0), current->operand_scale());
-  } else if (current->bytecode() == Bytecode::kToBooleanLogicalNot &&
-             Bytecodes::WritesBooleanToAccumulator(last_.bytecode())) {
-    // Logical-nots are emitted in ToBoolean form by the bytecode array
-    // builder, The ToBoolean element can be removed if the previous bytecode
-    // put a boolean value in the accumulator.
-    current->set_bytecode(Bytecode::kLogicalNot);
+void BytecodePeepholeOptimizer::TryToRemoveLastExpressionPosition(
+    const BytecodeNode* const current) {
+  if (current->source_info().is_valid() &&
+      last_.source_info().is_expression() &&
+      Bytecodes::IsWithoutExternalSideEffects(last_.bytecode())) {
+    // The last bytecode has been marked as an expression position. It
+    // has no external side effects, so it cannot throw, and the current
+    // bytecode carries its own source position. Remove the expression
+    // position from the last bytecode to open up potential peephole
+    // optimizations and to save the memory and perf cost of storing the
+    // unneeded expression position.
+    last_.source_info().set_invalid();
   }
 }
 
@@ -134,15 +130,135 @@
   }
 }
 
+bool BytecodePeepholeOptimizer::CanElideLastBasedOnSourcePosition(
+    const BytecodeNode* const current) const {
+  //
+  // The rules for allowing the elision of the last bytecode based
+  // on source position are:
+  //
+  //                     C U R R E N T
+  //              +--------+--------+--------+
+  //              |  None  |  Expr  |  Stmt  |
+  //  L  +--------+--------+--------+--------+
+  //     |  None  |  YES   |  YES   |  YES   |
+  //  A  +--------+--------+--------+--------+
+  //     |  Expr  |  YES   | MAYBE  |  MAYBE |
+  //  S  +--------+--------+--------+--------+
+  //     |  Stmt  |  YES   |   NO   |   NO   |
+  //  T  +--------+--------+--------+--------+
+  //
+  // The goal is not to lose any statement positions and not to lose
+  // useful expression positions. Whenever the last bytecode is elided,
+  // its source position information is applied to the current node,
+  // updating it if necessary.
+  //
+  // The last bytecode can be elided for the MAYBE cases if the last
+  // bytecode is known not to throw. If it throws, the system would
+  // not have correct stack trace information. The appropriate check
+  // for this would be Bytecodes::IsWithoutExternalSideEffects(),
+  // which is checked in
+  // BytecodePeepholeOptimizer::TransformLastAndCurrentBytecodes() to
+  // keep the check here simple.
+  //
+  // In rare cases, bytecode generation produces consecutive bytecodes
+  // with the same expression position. In principle, the latter of
+  // these could be elided, but doing so would make this function more
+  // expensive.
+  //
+  return (!last_.source_info().is_valid() ||
+          !current->source_info().is_valid());
+}
+
+namespace {
+
+void TransformLdaStarToLdrLdar(Bytecode new_bytecode, BytecodeNode* const last,
+                               BytecodeNode* const current) {
+  DCHECK_EQ(current->bytecode(), Bytecode::kStar);
+
+  //
+  // An example transformation here would be:
+  //
+  //   LdaGlobal i0, i1  ____\  LdrGlobal i0, i1, R
+  //   Star R            ====/  Ldar R
+  //
+  // which loads a global value into both a register and the
+  // accumulator. However, in the second form the Ldar can often be
+  // peephole optimized away unlike the Star in the first form.
+  //
+  last->Transform(new_bytecode, current->operand(0));
+  current->set_bytecode(Bytecode::kLdar, current->operand(0));
+}
+
+}  // namespace
+
+bool BytecodePeepholeOptimizer::TransformLastAndCurrentBytecodes(
+    BytecodeNode* const current) {
+  if (current->bytecode() == Bytecode::kStar &&
+      !current->source_info().is_statement()) {
+    // Note: If the Star is tagged with a statement position, we can't
+    // perform this transform as the store to the register will
+    // have the wrong ordering for stepping in the debugger.
+    switch (last_.bytecode()) {
+      case Bytecode::kLdaNamedProperty:
+        TransformLdaStarToLdrLdar(Bytecode::kLdrNamedProperty, &last_, current);
+        return true;
+      case Bytecode::kLdaKeyedProperty:
+        TransformLdaStarToLdrLdar(Bytecode::kLdrKeyedProperty, &last_, current);
+        return true;
+      case Bytecode::kLdaGlobal:
+        TransformLdaStarToLdrLdar(Bytecode::kLdrGlobal, &last_, current);
+        return true;
+      case Bytecode::kLdaContextSlot:
+        TransformLdaStarToLdrLdar(Bytecode::kLdrContextSlot, &last_, current);
+        return true;
+      case Bytecode::kLdaUndefined:
+        TransformLdaStarToLdrLdar(Bytecode::kLdrUndefined, &last_, current);
+        return true;
+      default:
+        break;
+    }
+  }
+  return false;
+}
+
+bool BytecodePeepholeOptimizer::RemoveToBooleanFromJump(
+    BytecodeNode* const current) {
+  bool can_remove = Bytecodes::IsJumpIfToBoolean(current->bytecode()) &&
+                    Bytecodes::WritesBooleanToAccumulator(last_.bytecode());
+  if (can_remove) {
+    // Conditional jumps with boolean conditions are emitted in
+    // ToBoolean form by the bytecode array builder,
+    // i.e. JumpIfToBooleanTrue rather than JumpIfTrue. The ToBoolean
+    // element can be removed if the previous bytecode put a boolean
+    // value in the accumulator.
+    Bytecode jump = Bytecodes::GetJumpWithoutToBoolean(current->bytecode());
+    current->set_bytecode(jump, current->operand(0));
+  }
+  return can_remove;
+}
+
+bool BytecodePeepholeOptimizer::RemoveToBooleanFromLogicalNot(
+    BytecodeNode* const current) {
+  bool can_remove = current->bytecode() == Bytecode::kToBooleanLogicalNot &&
+                    Bytecodes::WritesBooleanToAccumulator(last_.bytecode());
+  if (can_remove) {
+    // Logical-nots are emitted in ToBoolean form by the bytecode array
+    // builder. The ToBoolean element can be removed if the previous bytecode
+    // put a boolean value in the accumulator.
+    current->set_bytecode(Bytecode::kLogicalNot);
+  }
+  return can_remove;
+}
+
+bool BytecodePeepholeOptimizer::TransformCurrentBytecode(
+    BytecodeNode* const current) {
+  return RemoveToBooleanFromJump(current) ||
+         RemoveToBooleanFromLogicalNot(current);
+}
+
 bool BytecodePeepholeOptimizer::CanElideLast(
     const BytecodeNode* const current) const {
-  if (!last_is_discardable_) {
-    return false;
-  }
-
   if (last_.bytecode() == Bytecode::kNop) {
-    // Nop are placeholders for holding source position information
-    // and can be elided.
+    // Nops are placeholders for holding source position information.
     return true;
   } else if (Bytecodes::IsAccumulatorLoadWithoutEffects(current->bytecode()) &&
              Bytecodes::IsAccumulatorLoadWithoutEffects(last_.bytecode())) {
@@ -150,25 +266,58 @@
     // consecutive accumulator loads (that don't have side effects) then only
     // the final load is potentially visible.
     return true;
+  } else if (Bytecodes::GetAccumulatorUse(current->bytecode()) ==
+                 AccumulatorUse::kWrite &&
+             Bytecodes::IsAccumulatorLoadWithoutEffects(last_.bytecode())) {
+    // The current instruction clobbers the accumulator without reading it. The
+    // load in the last instruction can be elided as it has no effect.
+    return true;
   } else {
     return false;
   }
 }
 
 BytecodeNode* BytecodePeepholeOptimizer::Optimize(BytecodeNode* current) {
-  UpdateCurrentBytecode(current);
+  TryToRemoveLastExpressionPosition(current);
+
+  if (TransformCurrentBytecode(current) ||
+      TransformLastAndCurrentBytecodes(current)) {
+    return current;
+  }
 
   if (CanElideCurrent(current)) {
     if (current->source_info().is_valid()) {
+      // Preserve the source information by replacing the current bytecode
+      // with a Nop bytecode.
       current->set_bytecode(Bytecode::kNop);
     } else {
       current = nullptr;
     }
-  } else if (CanElideLast(current)) {
+    return current;
+  }
+
+  if (CanElideLast(current) && CanElideLastBasedOnSourcePosition(current)) {
     if (last_.source_info().is_valid()) {
-      current->source_info().Update(last_.source_info());
+      // Current cannot have a valid source position here per
+      // CanElideLastBasedOnSourcePosition().
+      current->source_info().Clone(last_.source_info());
     }
     InvalidateLast();
+    return current;
+  }
+
+  return current;
+}
+
+BytecodeNode* BytecodePeepholeOptimizer::OptimizeAndEmitLast(
+    BytecodeNode* current) {
+  // Attempt optimization if there is an earlier node to optimize with.
+  if (LastIsValid()) {
+    current = Optimize(current);
+    // Only output the last node if it wasn't invalidated by the optimization.
+    if (LastIsValid()) {
+      next_stage_->Write(&last_);
+      InvalidateLast();
+    }
   }
   return current;
 }
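
The control flow above reduces to a one-bytecode lookback buffer: Write()
optimizes the incoming node against the buffered |last_| node, emits |last_|
if it survived, and buffers the newcomer, while Flush() drains the buffer at
basic-block boundaries. A minimal standalone sketch of that pattern, with a
single illustrative elision rule (plain C++, not the V8 classes):

#include <iostream>
#include <string>
#include <vector>

struct Node { std::string name; };

class PeepholeSketch {
 public:
  explicit PeepholeSketch(std::vector<Node>* out) : out_(out) {}

  void Write(Node node) {
    if (have_last_) {
      // Example rule: for two consecutive accumulator loads without
      // effects, only the final load is visible, so drop the first.
      if (IsPureLoad(last_) && IsPureLoad(node)) {
        last_ = node;  // elide the earlier load
        return;
      }
      out_->push_back(last_);
    }
    last_ = node;
    have_last_ = true;
  }

  void Flush() {  // called when a label is bound or a jump is written
    if (have_last_) {
      out_->push_back(last_);
      have_last_ = false;
    }
  }

 private:
  static bool IsPureLoad(const Node& n) {
    return n.name == "LdaUndefined" || n.name == "LdaZero";
  }

  std::vector<Node>* out_;
  Node last_;
  bool have_last_ = false;
};

int main() {
  std::vector<Node> out;
  PeepholeSketch peephole(&out);
  peephole.Write({"LdaZero"});
  peephole.Write({"LdaUndefined"});  // elides the LdaZero before it
  peephole.Write({"Star"});
  peephole.Flush();
  for (const Node& n : out) std::cout << n.name << '\n';
  // Prints LdaUndefined then Star; the dead LdaZero was removed.
}
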
diff --git a/src/interpreter/bytecode-peephole-optimizer.h b/src/interpreter/bytecode-peephole-optimizer.h
index 1981395..e6ada2a 100644
--- a/src/interpreter/bytecode-peephole-optimizer.h
+++ b/src/interpreter/bytecode-peephole-optimizer.h
@@ -22,16 +22,31 @@
   BytecodePeepholeOptimizer(ConstantArrayBuilder* constant_array_builder,
                             BytecodePipelineStage* next_stage);
 
+  // BytecodePipelineStage interface.
   void Write(BytecodeNode* node) override;
-  size_t FlushForOffset() override;
-  void FlushBasicBlock() override;
+  void WriteJump(BytecodeNode* node, BytecodeLabel* label) override;
+  void BindLabel(BytecodeLabel* label) override;
+  void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
+  Handle<BytecodeArray> ToBytecodeArray(
+      int fixed_register_count, int parameter_count,
+      Handle<FixedArray> handler_table) override;
 
  private:
+  BytecodeNode* OptimizeAndEmitLast(BytecodeNode* current);
   BytecodeNode* Optimize(BytecodeNode* current);
+  void Flush();
 
-  void UpdateCurrentBytecode(BytecodeNode* const current);
+  void TryToRemoveLastExpressionPosition(const BytecodeNode* const current);
+  bool TransformCurrentBytecode(BytecodeNode* const current);
+  bool TransformLastAndCurrentBytecodes(BytecodeNode* const current);
   bool CanElideCurrent(const BytecodeNode* const current) const;
   bool CanElideLast(const BytecodeNode* const current) const;
+  bool CanElideLastBasedOnSourcePosition(
+      const BytecodeNode* const current) const;
+
+  // Simple substitution methods.
+  bool RemoveToBooleanFromJump(BytecodeNode* const current);
+  bool RemoveToBooleanFromLogicalNot(BytecodeNode* const current);
 
   void InvalidateLast();
   bool LastIsValid() const;
@@ -45,7 +60,6 @@
   ConstantArrayBuilder* constant_array_builder_;
   BytecodePipelineStage* next_stage_;
   BytecodeNode last_;
-  bool last_is_discardable_;
 
   DISALLOW_COPY_AND_ASSIGN(BytecodePeepholeOptimizer);
 };
diff --git a/src/interpreter/bytecode-pipeline.cc b/src/interpreter/bytecode-pipeline.cc
index 7bfb815..58ade92 100644
--- a/src/interpreter/bytecode-pipeline.cc
+++ b/src/interpreter/bytecode-pipeline.cc
@@ -11,104 +11,74 @@
 namespace internal {
 namespace interpreter {
 
-void BytecodeSourceInfo::Update(const BytecodeSourceInfo& entry) {
-  DCHECK(entry.is_valid());
-  if (!is_valid() || (entry.is_statement() && !is_statement()) ||
-      (entry.is_statement() && is_statement() &&
-       entry.source_position() > source_position())) {
-    // Position is updated if any of the following conditions are met:
-    //   (1) there is no existing position.
-    //   (2) the incoming position is a statement and the current position
-    //       is an expression.
-    //   (3) the existing position is a statement and the incoming
-    //       statement has a later source position.
-    // Condition 3 is needed for the first statement in a function which
-    // may end up with later statement positions being added during bytecode
-    // generation.
-    source_position_ = entry.source_position_;
-    is_statement_ = entry.is_statement_;
-  }
-}
-
 BytecodeNode::BytecodeNode(Bytecode bytecode) {
   DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
   bytecode_ = bytecode;
-  operand_scale_ = OperandScale::kSingle;
 }
 
-BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
-                           OperandScale operand_scale) {
+BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0) {
   DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
   bytecode_ = bytecode;
   operands_[0] = operand0;
-  operand_scale_ = operand_scale;
 }
 
 BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
-                           uint32_t operand1, OperandScale operand_scale) {
+                           uint32_t operand1) {
   DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 2);
   bytecode_ = bytecode;
   operands_[0] = operand0;
   operands_[1] = operand1;
-  operand_scale_ = operand_scale;
 }
 
 BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
-                           uint32_t operand1, uint32_t operand2,
-                           OperandScale operand_scale) {
+                           uint32_t operand1, uint32_t operand2) {
   DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 3);
   bytecode_ = bytecode;
   operands_[0] = operand0;
   operands_[1] = operand1;
   operands_[2] = operand2;
-  operand_scale_ = operand_scale;
 }
 
 BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
                            uint32_t operand1, uint32_t operand2,
-                           uint32_t operand3, OperandScale operand_scale) {
+                           uint32_t operand3) {
   DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 4);
   bytecode_ = bytecode;
   operands_[0] = operand0;
   operands_[1] = operand1;
   operands_[2] = operand2;
   operands_[3] = operand3;
-  operand_scale_ = operand_scale;
+}
+
+BytecodeNode::BytecodeNode(const BytecodeNode& other) {
+  memcpy(this, &other, sizeof(other));
+}
+
+BytecodeNode& BytecodeNode::operator=(const BytecodeNode& other) {
+  memcpy(this, &other, sizeof(other));
+  return *this;
 }
 
 void BytecodeNode::set_bytecode(Bytecode bytecode) {
   DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
   bytecode_ = bytecode;
-  operand_scale_ = OperandScale::kSingle;
 }
 
-void BytecodeNode::set_bytecode(Bytecode bytecode, uint32_t operand0,
-                                OperandScale operand_scale) {
+void BytecodeNode::set_bytecode(Bytecode bytecode, uint32_t operand0) {
   DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
   bytecode_ = bytecode;
   operands_[0] = operand0;
-  operand_scale_ = operand_scale;
 }
 
-size_t BytecodeNode::Size() const {
-  size_t size = Bytecodes::Size(bytecode_, operand_scale_);
-  if (Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale_)) {
-    size += 1;
-  }
-  return size;
+void BytecodeNode::Clone(const BytecodeNode* const other) {
+  memcpy(this, other, sizeof(*other));
 }
 
 void BytecodeNode::Print(std::ostream& os) const {
 #ifdef DEBUG
   std::ios saved_state(nullptr);
   saved_state.copyfmt(os);
-
   os << Bytecodes::ToString(bytecode_);
-  if (Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale_)) {
-    Bytecode scale_prefix =
-        Bytecodes::OperandScaleToPrefixBytecode(operand_scale_);
-    os << '.' << Bytecodes::ToString(scale_prefix);
-  }
 
   for (int i = 0; i < operand_count(); ++i) {
     os << ' ' << std::setw(8) << std::setfill('0') << std::hex << operands_[i];
@@ -116,7 +86,7 @@
   os.copyfmt(saved_state);
 
   if (source_info_.is_valid()) {
-    os << source_info_;
+    os << ' ' << source_info_;
   }
   os << '\n';
 #else
@@ -124,8 +94,21 @@
 #endif  // DEBUG
 }
 
-void BytecodeNode::Clone(const BytecodeNode* const other) {
-  memcpy(this, other, sizeof(*other));
+void BytecodeNode::Transform(Bytecode new_bytecode, uint32_t extra_operand) {
+  DCHECK_EQ(Bytecodes::NumberOfOperands(new_bytecode),
+            Bytecodes::NumberOfOperands(bytecode()) + 1);
+  DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 1 ||
+         Bytecodes::GetOperandType(new_bytecode, 0) ==
+             Bytecodes::GetOperandType(bytecode(), 0));
+  DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 2 ||
+         Bytecodes::GetOperandType(new_bytecode, 1) ==
+             Bytecodes::GetOperandType(bytecode(), 1));
+  DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 3 ||
+         Bytecodes::GetOperandType(new_bytecode, 2) ==
+             Bytecodes::GetOperandType(bytecode(), 2));
+  DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 4);
+  operands_[operand_count()] = extra_operand;
+  bytecode_ = new_bytecode;
 }
 
 bool BytecodeNode::operator==(const BytecodeNode& other) const {
@@ -144,11 +127,6 @@
   return true;
 }
 
-std::ostream& operator<<(std::ostream& os, const BytecodeNode& node) {
-  node.Print(os);
-  return os;
-}
-
 std::ostream& operator<<(std::ostream& os, const BytecodeSourceInfo& info) {
   if (info.is_valid()) {
     char description = info.is_statement() ? 'S' : 'E';
@@ -157,6 +135,11 @@
   return os;
 }
 
+std::ostream& operator<<(std::ostream& os, const BytecodeNode& node) {
+  node.Print(os);
+  return os;
+}
+
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
diff --git a/src/interpreter/bytecode-pipeline.h b/src/interpreter/bytecode-pipeline.h
index ade712c..e2beff2 100644
--- a/src/interpreter/bytecode-pipeline.h
+++ b/src/interpreter/bytecode-pipeline.h
@@ -13,6 +13,7 @@
 namespace internal {
 namespace interpreter {
 
+class BytecodeLabel;
 class BytecodeNode;
 class BytecodeSourceInfo;
 
@@ -26,12 +27,26 @@
   // deferring Write() to the next stage.
   virtual void Write(BytecodeNode* node) = 0;
 
-  // Flush state for bytecode array offset calculation. Returns the
-  // current size of bytecode array.
-  virtual size_t FlushForOffset() = 0;
+  // Write jump bytecode node |node|, which jumps to |label|, into the
+  // pipeline. The node and label are only valid for the duration of the
+  // call. This call implicitly ends the current basic block, so it should
+  // always write to the next stage.
+  virtual void WriteJump(BytecodeNode* node, BytecodeLabel* label) = 0;
 
-  // Flush state to terminate basic block.
-  virtual void FlushBasicBlock() = 0;
+  // Binds |label| to the current bytecode location. This call implicitly
+  // ends the current basic block and so any deferred bytecodes should be
+  // written to the next stage.
+  virtual void BindLabel(BytecodeLabel* label) = 0;
+
+  // Binds |label| to the location of |target|. This call implicitly
+  // ends the current basic block and so any deferred bytecodes should be
+  // written to the next stage.
+  virtual void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) = 0;
+
+  // Flush the pipeline and generate a bytecode array.
+  virtual Handle<BytecodeArray> ToBytecodeArray(
+      int fixed_register_count, int parameter_count,
+      Handle<FixedArray> handler_table) = 0;
 };
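
Each implementation of this interface wraps a |next_stage_| and forwards
(possibly transformed) events down the chain, with the writer at the end. A
minimal standalone sketch of that chaining, reduced to Write() only
(illustrative names, not the V8 classes):

#include <iostream>
#include <string>

class StageSketch {
 public:
  virtual ~StageSketch() {}
  virtual void Write(const std::string& node) = 0;
};

class WriterSketch final : public StageSketch {
 public:
  void Write(const std::string& node) override {
    std::cout << "emit " << node << '\n';  // terminal stage: serialize
  }
};

class FilterSketch final : public StageSketch {
 public:
  explicit FilterSketch(StageSketch* next) : next_(next) {}
  void Write(const std::string& node) override {
    if (node == "Nop") return;  // an optimizing stage may drop nodes
    next_->Write(node);         // otherwise defer to the next stage
  }

 private:
  StageSketch* next_;
};

int main() {
  WriterSketch writer;
  FilterSketch pipeline(&writer);  // optimizer stage wraps the writer
  pipeline.Write("LdaZero");
  pipeline.Write("Nop");   // swallowed by the optimizing stage
  pipeline.Write("Star");
}
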
 
 // Source code position information.
@@ -39,35 +54,84 @@
  public:
   static const int kUninitializedPosition = -1;
 
-  BytecodeSourceInfo(int position = kUninitializedPosition,
-                     bool is_statement = false)
-      : source_position_(position), is_statement_(is_statement) {}
+  BytecodeSourceInfo()
+      : position_type_(PositionType::kNone),
+        source_position_(kUninitializedPosition) {}
 
-  // Combine later source info with current.
-  void Update(const BytecodeSourceInfo& entry);
+  BytecodeSourceInfo(int source_position, bool is_statement)
+      : position_type_(is_statement ? PositionType::kStatement
+                                    : PositionType::kExpression),
+        source_position_(source_position) {
+    DCHECK_GE(source_position, 0);
+  }
+
+  // Makes instance into a statement position.
+  void MakeStatementPosition(int source_position) {
+    // Statement positions can be replaced by other statement
+    // positions. For example, "for (x = 0; x < 3; ++x) 7;" has a
+    // statement position associated with 7 but no bytecode associated
+    // with it. Then Next is emitted after the body with a statement
+    // position that overrides the existing one.
+    position_type_ = PositionType::kStatement;
+    source_position_ = source_position;
+  }
+
+  // Makes instance into an expression position. The instance should
+  // not be a statement position; otherwise the statement position could
+  // be lost and impair the debugging experience.
+  void MakeExpressionPosition(int source_position) {
+    DCHECK(!is_statement());
+    position_type_ = PositionType::kExpression;
+    source_position_ = source_position;
+  }
+
+  // Forces an instance into an expression position.
+  void ForceExpressionPosition(int source_position) {
+    position_type_ = PositionType::kExpression;
+    source_position_ = source_position;
+  }
+
+  // Clones a source position. The current instance is expected to be
+  // invalid.
+  void Clone(const BytecodeSourceInfo& other) {
+    DCHECK(!is_valid());
+    position_type_ = other.position_type_;
+    source_position_ = other.source_position_;
+  }
 
   int source_position() const {
     DCHECK(is_valid());
     return source_position_;
   }
 
-  bool is_statement() const { return is_valid() && is_statement_; }
+  bool is_statement() const {
+    return position_type_ == PositionType::kStatement;
+  }
+  bool is_expression() const {
+    return position_type_ == PositionType::kExpression;
+  }
 
-  bool is_valid() const { return source_position_ != kUninitializedPosition; }
-  void set_invalid() { source_position_ = kUninitializedPosition; }
+  bool is_valid() const { return position_type_ != PositionType::kNone; }
+  void set_invalid() {
+    position_type_ = PositionType::kNone;
+    source_position_ = kUninitializedPosition;
+  }
 
   bool operator==(const BytecodeSourceInfo& other) const {
-    return source_position_ == other.source_position_ &&
-           is_statement_ == other.is_statement_;
+    return position_type_ == other.position_type_ &&
+           source_position_ == other.source_position_;
   }
+
   bool operator!=(const BytecodeSourceInfo& other) const {
-    return source_position_ != other.source_position_ ||
-           is_statement_ != other.is_statement_;
+    return position_type_ != other.position_type_ ||
+           source_position_ != other.source_position_;
   }
 
  private:
+  enum class PositionType : uint8_t { kNone, kExpression, kStatement };
+
+  PositionType position_type_;
   int source_position_;
-  bool is_statement_;
 
   DISALLOW_COPY_AND_ASSIGN(BytecodeSourceInfo);
 };
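
The tri-state above encodes an asymmetry: a statement position may replace
any position, but an expression position must never silently replace a
statement position, or the debugger would lose a step location. A standalone
sketch of just that rule (plain C++, assert() in place of DCHECK):

#include <cassert>

enum class PosType { kNone, kExpression, kStatement };

struct SourceInfoSketch {
  PosType type = PosType::kNone;
  int position = -1;

  void MakeStatementPosition(int p) {  // statements may replace anything
    type = PosType::kStatement;
    position = p;
  }
  void MakeExpressionPosition(int p) {
    assert(type != PosType::kStatement);  // never demote a statement
    type = PosType::kExpression;
    position = p;
  }
  bool is_valid() const { return type != PosType::kNone; }
};

int main() {
  SourceInfoSketch info;
  info.MakeExpressionPosition(7);
  info.MakeStatementPosition(9);  // ok: statement overrides expression
  assert(info.is_valid() && info.position == 9);
}
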
@@ -77,19 +141,18 @@
 class BytecodeNode final : ZoneObject {
  public:
   explicit BytecodeNode(Bytecode bytecode = Bytecode::kIllegal);
-  BytecodeNode(Bytecode bytecode, uint32_t operand0,
-               OperandScale operand_scale);
+  BytecodeNode(Bytecode bytecode, uint32_t operand0);
+  BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1);
   BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
-               OperandScale operand_scale);
+               uint32_t operand2);
   BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
-               uint32_t operand2, OperandScale operand_scale);
-  BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
-               uint32_t operand2, uint32_t operand3,
-               OperandScale operand_scale);
+               uint32_t operand2, uint32_t operand3);
+
+  BytecodeNode(const BytecodeNode& other);
+  BytecodeNode& operator=(const BytecodeNode& other);
 
   void set_bytecode(Bytecode bytecode);
-  void set_bytecode(Bytecode bytecode, uint32_t operand0,
-                    OperandScale operand_scale);
+  void set_bytecode(Bytecode bytecode, uint32_t operand0);
 
   // Clone |other|.
   void Clone(const BytecodeNode* const other);
@@ -97,8 +160,9 @@
   // Print to stream |os|.
   void Print(std::ostream& os) const;
 
-  // Return the size when this node is serialized to a bytecode array.
-  size_t Size() const;
+  // Transform to a node representing |new_bytecode| which has one
+  // operand more than the current bytecode.
+  void Transform(Bytecode new_bytecode, uint32_t extra_operand);
 
   Bytecode bytecode() const { return bytecode_; }
 
@@ -110,7 +174,6 @@
   const uint32_t* operands() const { return operands_; }
 
   int operand_count() const { return Bytecodes::NumberOfOperands(bytecode_); }
-  OperandScale operand_scale() const { return operand_scale_; }
 
   const BytecodeSourceInfo& source_info() const { return source_info_; }
   BytecodeSourceInfo& source_info() { return source_info_; }
@@ -124,7 +187,6 @@
 
   Bytecode bytecode_;
   uint32_t operands_[kMaxOperands];
-  OperandScale operand_scale_;
   BytecodeSourceInfo source_info_;
 };
 
diff --git a/src/interpreter/bytecode-register-allocator.cc b/src/interpreter/bytecode-register-allocator.cc
index 9bdde9a..10afcdc 100644
--- a/src/interpreter/bytecode-register-allocator.cc
+++ b/src/interpreter/bytecode-register-allocator.cc
@@ -14,7 +14,8 @@
                                                        int allocation_base)
     : free_temporaries_(zone),
       allocation_base_(allocation_base),
-      allocation_count_(0) {}
+      allocation_count_(0),
+      observer_(nullptr) {}
 
 Register TemporaryRegisterAllocator::first_temporary_register() const {
   DCHECK(allocation_count() > 0);
@@ -26,6 +27,12 @@
   return Register(allocation_base() + allocation_count() - 1);
 }
 
+void TemporaryRegisterAllocator::set_observer(
+    TemporaryRegisterObserver* observer) {
+  DCHECK(observer_ == nullptr);
+  observer_ = observer;
+}
+
 int TemporaryRegisterAllocator::AllocateTemporaryRegister() {
   allocation_count_ += 1;
   return allocation_base() + allocation_count() - 1;
@@ -140,6 +147,9 @@
 void TemporaryRegisterAllocator::ReturnTemporaryRegister(int reg_index) {
   DCHECK(free_temporaries_.find(reg_index) == free_temporaries_.end());
   free_temporaries_.insert(reg_index);
+  if (observer_) {
+    observer_->TemporaryRegisterFreeEvent(Register(reg_index));
+  }
 }
 
 BytecodeRegisterAllocator::BytecodeRegisterAllocator(
@@ -156,7 +166,6 @@
   allocated_.clear();
 }
 
-
 Register BytecodeRegisterAllocator::NewRegister() {
   int allocated = -1;
   if (next_consecutive_count_ <= 0) {
@@ -170,7 +179,6 @@
   return Register(allocated);
 }
 
-
 bool BytecodeRegisterAllocator::RegisterIsAllocatedInThisScope(
     Register reg) const {
   for (auto i = allocated_.begin(); i != allocated_.end(); i++) {
@@ -179,7 +187,6 @@
   return false;
 }
 
-
 void BytecodeRegisterAllocator::PrepareForConsecutiveAllocations(size_t count) {
   if (static_cast<int>(count) > next_consecutive_count_) {
     next_consecutive_register_ =
@@ -188,7 +195,6 @@
   }
 }
 
-
 Register BytecodeRegisterAllocator::NextConsecutiveRegister() {
   DCHECK_GE(next_consecutive_register_, 0);
   DCHECK_GT(next_consecutive_count_, 0);
diff --git a/src/interpreter/bytecode-register-allocator.h b/src/interpreter/bytecode-register-allocator.h
index a4f6845..b8f737b 100644
--- a/src/interpreter/bytecode-register-allocator.h
+++ b/src/interpreter/bytecode-register-allocator.h
@@ -14,6 +14,7 @@
 
 class BytecodeArrayBuilder;
 class Register;
+class TemporaryRegisterObserver;
 
 class TemporaryRegisterAllocator final {
  public:
@@ -54,6 +55,9 @@
   // Returns the number of temporary register allocations made.
   int allocation_count() const { return allocation_count_; }
 
+  // Sets an observer for temporary register events.
+  void set_observer(TemporaryRegisterObserver* observer);
+
  private:
   // Allocate a temporary register.
   int AllocateTemporaryRegister();
@@ -61,10 +65,17 @@
   ZoneSet<int> free_temporaries_;
   int allocation_base_;
   int allocation_count_;
+  TemporaryRegisterObserver* observer_;
 
   DISALLOW_COPY_AND_ASSIGN(TemporaryRegisterAllocator);
 };
 
+class TemporaryRegisterObserver {
+ public:
+  virtual ~TemporaryRegisterObserver() {}
+  virtual void TemporaryRegisterFreeEvent(Register reg) = 0;
+};
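
The observer gives a downstream consumer a hook into register lifetime:
whenever the allocator returns a temporary to the free pool, the registered
observer is notified so it can retire any per-register state. A standalone
sketch of the wiring (illustrative names, plain ints in place of Register):

#include <iostream>
#include <set>

class FreeObserverSketch {
 public:
  virtual ~FreeObserverSketch() {}
  virtual void TemporaryRegisterFreeEvent(int reg_index) = 0;
};

class AllocatorSketch {
 public:
  void set_observer(FreeObserverSketch* observer) { observer_ = observer; }

  int Allocate() { return next_++; }

  void Return(int reg_index) {
    free_.insert(reg_index);
    if (observer_) observer_->TemporaryRegisterFreeEvent(reg_index);
  }

 private:
  std::set<int> free_;
  int next_ = 0;
  FreeObserverSketch* observer_ = nullptr;
};

class OptimizerSketch final : public FreeObserverSketch {
 public:
  void TemporaryRegisterFreeEvent(int reg_index) override {
    // A register optimizer would drop equivalence state for |reg_index|.
    std::cout << "r" << reg_index << " freed\n";
  }
};

int main() {
  AllocatorSketch allocator;
  OptimizerSketch optimizer;
  allocator.set_observer(&optimizer);
  int r = allocator.Allocate();
  allocator.Return(r);  // prints "r0 freed"
}
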
+
 // A class that allows the instantiator to allocate temporary registers that are
 // cleaned up when scope is closed.
 class BytecodeRegisterAllocator final {
diff --git a/src/interpreter/bytecode-register-optimizer.cc b/src/interpreter/bytecode-register-optimizer.cc
new file mode 100644
index 0000000..ab25f95
--- /dev/null
+++ b/src/interpreter/bytecode-register-optimizer.cc
@@ -0,0 +1,630 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-register-optimizer.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+const uint32_t BytecodeRegisterOptimizer::kInvalidEquivalenceId;
+
+// A class for tracking the state of a register. This class tracks
+// which equivalence set a register is a member of and also whether a
+// register is materialized in the bytecode stream.
+class BytecodeRegisterOptimizer::RegisterInfo final : public ZoneObject {
+ public:
+  RegisterInfo(Register reg, uint32_t equivalence_id, bool materialized)
+      : register_(reg),
+        equivalence_id_(equivalence_id),
+        materialized_(materialized),
+        next_(this),
+        prev_(this) {}
+
+  void AddToEquivalenceSetOf(RegisterInfo* info);
+  void MoveToNewEquivalenceSet(uint32_t equivalence_id, bool materialized);
+  bool IsOnlyMemberOfEquivalenceSet() const;
+  bool IsOnlyMaterializedMemberOfEquivalenceSet() const;
+  bool IsInSameEquivalenceSet(RegisterInfo* info) const;
+
+  // Get a member of this register's equivalence set that is
+  // materialized. The materialized equivalent will be this register
+  // if it is materialized. Returns nullptr if no materialized
+  // equivalent exists.
+  RegisterInfo* GetMaterializedEquivalent();
+
+  // Get a member of this register's equivalence set that is
+  // materialized and not register |reg|. The materialized equivalent
+  // will be this register if it is materialized. Returns nullptr if
+  // no materialized equivalent exists.
+  RegisterInfo* GetMaterializedEquivalentOtherThan(Register reg);
+
+  // Get a member of this register's equivalence set that is intended
+  // to be materialized in place of this register (which is currently
+  // materialized). The best candidate is deemed to be the register
+  // with the lowest index as this permits temporary registers to be
+  // removed from the bytecode stream. Returns nullptr if no candidate
+  // exists.
+  RegisterInfo* GetEquivalentToMaterialize();
+
+  // Get an equivalent register. Returns this if none exists.
+  RegisterInfo* GetEquivalent();
+
+  Register register_value() const { return register_; }
+  bool materialized() const { return materialized_; }
+  void set_materialized(bool materialized) { materialized_ = materialized; }
+  void set_equivalence_id(uint32_t equivalence_id) {
+    equivalence_id_ = equivalence_id;
+  }
+  uint32_t equivalence_id() const { return equivalence_id_; }
+
+ private:
+  Register register_;
+  uint32_t equivalence_id_;
+  bool materialized_;
+
+  // Equivalence set pointers.
+  RegisterInfo* next_;
+  RegisterInfo* prev_;
+
+  DISALLOW_COPY_AND_ASSIGN(RegisterInfo);
+};
+
+void BytecodeRegisterOptimizer::RegisterInfo::AddToEquivalenceSetOf(
+    RegisterInfo* info) {
+  DCHECK_NE(kInvalidEquivalenceId, info->equivalence_id());
+  // Unlink from the old list.
+  next_->prev_ = prev_;
+  prev_->next_ = next_;
+  // Add to new list.
+  next_ = info->next_;
+  prev_ = info;
+  prev_->next_ = this;
+  next_->prev_ = this;
+  set_equivalence_id(info->equivalence_id());
+  set_materialized(false);
+}
+
+void BytecodeRegisterOptimizer::RegisterInfo::MoveToNewEquivalenceSet(
+    uint32_t equivalence_id, bool materialized) {
+  next_->prev_ = prev_;
+  prev_->next_ = next_;
+  next_ = prev_ = this;
+  equivalence_id_ = equivalence_id;
+  materialized_ = materialized;
+}
+
+bool BytecodeRegisterOptimizer::RegisterInfo::IsOnlyMemberOfEquivalenceSet()
+    const {
+  return this->next_ == this;
+}
+
+bool BytecodeRegisterOptimizer::RegisterInfo::
+    IsOnlyMaterializedMemberOfEquivalenceSet() const {
+  DCHECK(materialized());
+
+  const RegisterInfo* visitor = this->next_;
+  while (visitor != this) {
+    if (visitor->materialized()) {
+      return false;
+    }
+    visitor = visitor->next_;
+  }
+  return true;
+}
+
+bool BytecodeRegisterOptimizer::RegisterInfo::IsInSameEquivalenceSet(
+    RegisterInfo* info) const {
+  return equivalence_id() == info->equivalence_id();
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::RegisterInfo::GetMaterializedEquivalent() {
+  RegisterInfo* visitor = this;
+  do {
+    if (visitor->materialized()) {
+      return visitor;
+    }
+    visitor = visitor->next_;
+  } while (visitor != this);
+
+  return nullptr;
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::RegisterInfo::GetMaterializedEquivalentOtherThan(
+    Register reg) {
+  RegisterInfo* visitor = this;
+  do {
+    if (visitor->materialized() && visitor->register_value() != reg) {
+      return visitor;
+    }
+    visitor = visitor->next_;
+  } while (visitor != this);
+
+  return nullptr;
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::RegisterInfo::GetEquivalentToMaterialize() {
+  DCHECK(this->materialized());
+  RegisterInfo* visitor = this->next_;
+  RegisterInfo* best_info = nullptr;
+  while (visitor != this) {
+    if (visitor->materialized()) {
+      return nullptr;
+    }
+    if (best_info == nullptr ||
+        visitor->register_value() < best_info->register_value()) {
+      best_info = visitor;
+    }
+    visitor = visitor->next_;
+  }
+  return best_info;
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::RegisterInfo::GetEquivalent() {
+  return next_;
+}
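
RegisterInfo keeps each equivalence set as a circular doubly-linked list in
which every node starts as a singleton (next_ == prev_ == this), so joining
and leaving a set are O(1) splices with no separate set object. A standalone
sketch of those two splices (illustrative, not the V8 class):

#include <cassert>

struct NodeSketch {
  int reg;
  NodeSketch* next;
  NodeSketch* prev;

  explicit NodeSketch(int r) : reg(r), next(this), prev(this) {}

  // Unlink from the current set and splice in after |other|
  // (cf. AddToEquivalenceSetOf).
  void JoinSetOf(NodeSketch* other) {
    next->prev = prev;  // unlink from the old list
    prev->next = next;
    next = other->next;  // splice into the new list
    prev = other;
    prev->next = this;
    next->prev = this;
  }

  // Unlink back into a singleton set (cf. MoveToNewEquivalenceSet).
  void LeaveSet() {
    next->prev = prev;
    prev->next = next;
    next = prev = this;
  }

  bool IsSingleton() const { return next == this; }
};

int main() {
  NodeSketch a(0), b(1), c(2);
  b.JoinSetOf(&a);  // set {a, b}
  c.JoinSetOf(&a);  // set {a, c, b}
  assert(!a.IsSingleton());
  b.LeaveSet();     // b is its own set again; {a, c} survives
  assert(b.IsSingleton() && a.next == &c && c.next == &a);
}
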
+
+BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
+    Zone* zone, TemporaryRegisterAllocator* register_allocator,
+    int parameter_count, BytecodePipelineStage* next_stage)
+    : accumulator_(Register::virtual_accumulator()),
+      temporary_base_(register_allocator->allocation_base()),
+      register_info_table_(zone),
+      equivalence_id_(0),
+      next_stage_(next_stage),
+      flush_required_(false),
+      zone_(zone) {
+  register_allocator->set_observer(this);
+
+  // Calculate offset so register index values can be mapped into
+  // a vector of register metadata.
+  if (parameter_count != 0) {
+    register_info_table_offset_ =
+        -Register::FromParameterIndex(0, parameter_count).index();
+  } else {
+    // TODO(oth): This path shouldn't be necessary in bytecode generated
+    // from JavaScript, but a set of tests does not include the JS receiver.
+    register_info_table_offset_ = -accumulator_.index();
+  }
+
+  // Initialize register map for parameters, locals, and the
+  // accumulator.
+  register_info_table_.resize(register_info_table_offset_ +
+                              static_cast<size_t>(temporary_base_.index()));
+  for (size_t i = 0; i < register_info_table_.size(); ++i) {
+    register_info_table_[i] = new (zone) RegisterInfo(
+        RegisterFromRegisterInfoTableIndex(i), NextEquivalenceId(), true);
+    DCHECK_EQ(register_info_table_[i]->register_value().index(),
+              RegisterFromRegisterInfoTableIndex(i).index());
+  }
+  accumulator_info_ = GetRegisterInfo(accumulator_);
+  DCHECK(accumulator_info_->register_value() == accumulator_);
+}
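
The offset computed above exists because parameters have negative register
indices; adding the offset maps every register index into a dense,
non-negative slot in |register_info_table_|. A standalone sketch of the
round trip (the parameter count of 3 and the index layout are illustrative,
loosely modeling Register::FromParameterIndex):

#include <cassert>

int main() {
  const int parameter_count = 3;
  // In this sketch, parameters occupy indices -3..-1 and locals 0..N.
  const int first_parameter_index = -parameter_count;
  const int table_offset = -first_parameter_index;  // == 3

  // Map a register index into the metadata table and back.
  auto to_table = [&](int reg_index) { return reg_index + table_offset; };
  auto from_table = [&](int table_index) { return table_index - table_offset; };

  assert(to_table(-3) == 0);             // first parameter -> slot 0
  assert(to_table(0) == 3);              // first local -> slot 3
  assert(from_table(to_table(5)) == 5);  // round trip
}
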
+
+// override
+Handle<BytecodeArray> BytecodeRegisterOptimizer::ToBytecodeArray(
+    int fixed_register_count, int parameter_count,
+    Handle<FixedArray> handler_table) {
+  FlushState();
+  return next_stage_->ToBytecodeArray(fixed_register_count, parameter_count,
+                                      handler_table);
+}
+
+// override
+void BytecodeRegisterOptimizer::Write(BytecodeNode* node) {
+  //
+  // Transfers with observable registers as the destination will be
+  // immediately materialized so the source position information will
+  // be ordered correctly.
+  //
+  // Transfers without observable destination registers will initially
+  // be emitted as Nops with the source position. They may or may
+  // not be materialized by the optimizer. However, the source
+  // position is not lost and being attached to a Nop is fine as the
+  // destination register is not observable in the debugger.
+  //
+  switch (node->bytecode()) {
+    case Bytecode::kLdar: {
+      DoLdar(node);
+      return;
+    }
+    case Bytecode::kStar: {
+      DoStar(node);
+      return;
+    }
+    case Bytecode::kMov: {
+      DoMov(node);
+      return;
+    }
+    default:
+      break;
+  }
+
+  if (Bytecodes::IsJump(node->bytecode()) ||
+      node->bytecode() == Bytecode::kDebugger ||
+      node->bytecode() == Bytecode::kSuspendGenerator) {
+    // All state must be flushed before emitting
+    // - a jump (due to how bytecode offsets for jumps are evaluated),
+    // - a call to the debugger (as it can manipulate locals and parameters),
+    // - a generator suspend (as this involves saving all registers).
+    FlushState();
+  }
+
+  PrepareOperands(node);
+  WriteToNextStage(node);
+}
+
+// override
+void BytecodeRegisterOptimizer::WriteJump(BytecodeNode* node,
+                                          BytecodeLabel* label) {
+  FlushState();
+  next_stage_->WriteJump(node, label);
+}
+
+// override
+void BytecodeRegisterOptimizer::BindLabel(BytecodeLabel* label) {
+  FlushState();
+  next_stage_->BindLabel(label);
+}
+
+// override
+void BytecodeRegisterOptimizer::BindLabel(const BytecodeLabel& target,
+                                          BytecodeLabel* label) {
+  // There is no need to flush here; it will have been flushed when |target|
+  // was bound.
+  next_stage_->BindLabel(target, label);
+}
+
+void BytecodeRegisterOptimizer::FlushState() {
+  if (!flush_required_) {
+    return;
+  }
+
+  // Materialize all live registers and break equivalences.
+  size_t count = register_info_table_.size();
+  for (size_t i = 0; i < count; ++i) {
+    RegisterInfo* reg_info = register_info_table_[i];
+    if (reg_info->materialized()) {
+      // Walk equivalents of materialized registers, materializing
+      // each equivalent register as necessary and placing each in
+      // its own equivalence set.
+      RegisterInfo* equivalent;
+      while ((equivalent = reg_info->GetEquivalent()) != reg_info) {
+        if (!equivalent->materialized()) {
+          OutputRegisterTransfer(reg_info, equivalent);
+        }
+        equivalent->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
+      }
+    }
+  }
+
+  flush_required_ = false;
+}
+
+void BytecodeRegisterOptimizer::WriteToNextStage(BytecodeNode* node) const {
+  next_stage_->Write(node);
+}
+
+void BytecodeRegisterOptimizer::WriteToNextStage(
+    BytecodeNode* node, const BytecodeSourceInfo& source_info) const {
+  if (source_info.is_valid()) {
+    node->source_info().Clone(source_info);
+  }
+  next_stage_->Write(node);
+}
+
+void BytecodeRegisterOptimizer::OutputRegisterTransfer(
+    RegisterInfo* input_info, RegisterInfo* output_info,
+    const BytecodeSourceInfo& source_info) {
+  Register input = input_info->register_value();
+  Register output = output_info->register_value();
+  DCHECK_NE(input.index(), output.index());
+
+  if (input == accumulator_) {
+    uint32_t operand = static_cast<uint32_t>(output.ToOperand());
+    BytecodeNode node(Bytecode::kStar, operand);
+    WriteToNextStage(&node, source_info);
+  } else if (output == accumulator_) {
+    uint32_t operand = static_cast<uint32_t>(input.ToOperand());
+    BytecodeNode node(Bytecode::kLdar, operand);
+    WriteToNextStage(&node, source_info);
+  } else {
+    uint32_t operand0 = static_cast<uint32_t>(input.ToOperand());
+    uint32_t operand1 = static_cast<uint32_t>(output.ToOperand());
+    BytecodeNode node(Bytecode::kMov, operand0, operand1);
+    WriteToNextStage(&node, source_info);
+  }
+  output_info->set_materialized(true);
+}
+
+void BytecodeRegisterOptimizer::CreateMaterializedEquivalent(
+    RegisterInfo* info) {
+  DCHECK(info->materialized());
+  RegisterInfo* unmaterialized = info->GetEquivalentToMaterialize();
+  if (unmaterialized) {
+    OutputRegisterTransfer(info, unmaterialized);
+  }
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::GetMaterializedEquivalent(RegisterInfo* info) {
+  return info->materialized() ? info : info->GetMaterializedEquivalent();
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::GetMaterializedEquivalentNotAccumulator(
+    RegisterInfo* info) {
+  if (info->materialized()) {
+    return info;
+  }
+
+  RegisterInfo* result = info->GetMaterializedEquivalentOtherThan(accumulator_);
+  if (result == nullptr) {
+    Materialize(info);
+    result = info;
+  }
+  DCHECK(result->register_value() != accumulator_);
+  return result;
+}
+
+void BytecodeRegisterOptimizer::Materialize(RegisterInfo* info) {
+  if (!info->materialized()) {
+    RegisterInfo* materialized = info->GetMaterializedEquivalent();
+    OutputRegisterTransfer(materialized, info);
+  }
+}
+
+void BytecodeRegisterOptimizer::AddToEquivalenceSet(
+    RegisterInfo* set_member, RegisterInfo* non_set_member) {
+  non_set_member->AddToEquivalenceSetOf(set_member);
+  // Flushing is only required when two or more registers are placed
+  // in the same equivalence set.
+  flush_required_ = true;
+}
+
+void BytecodeRegisterOptimizer::RegisterTransfer(
+    RegisterInfo* input_info, RegisterInfo* output_info,
+    const BytecodeSourceInfo& source_info) {
+  // Materialize an alternate in the equivalence set that
+  // |output_info| is leaving.
+  if (output_info->materialized()) {
+    CreateMaterializedEquivalent(output_info);
+  }
+
+  // Add |output_info| to new equivalence set.
+  if (!output_info->IsInSameEquivalenceSet(input_info)) {
+    AddToEquivalenceSet(input_info, output_info);
+  }
+
+  bool output_is_observable =
+      RegisterIsObservable(output_info->register_value());
+  if (output_is_observable) {
+    // Force store to be emitted when register is observable.
+    output_info->set_materialized(false);
+    RegisterInfo* materialized_info = input_info->GetMaterializedEquivalent();
+    OutputRegisterTransfer(materialized_info, output_info, source_info);
+  } else if (source_info.is_valid()) {
+    // Emit a placeholder nop to maintain source position info.
+    EmitNopForSourceInfo(source_info);
+  }
+}
+
+void BytecodeRegisterOptimizer::EmitNopForSourceInfo(
+    const BytecodeSourceInfo& source_info) const {
+  DCHECK(source_info.is_valid());
+  BytecodeNode nop(Bytecode::kNop);
+  nop.source_info().Clone(source_info);
+  WriteToNextStage(&nop);
+}
+
+void BytecodeRegisterOptimizer::DoLdar(const BytecodeNode* const node) {
+  Register input = GetRegisterInputOperand(
+      0, node->bytecode(), node->operands(), node->operand_count());
+  RegisterInfo* input_info = GetRegisterInfo(input);
+  RegisterTransfer(input_info, accumulator_info_, node->source_info());
+}
+
+void BytecodeRegisterOptimizer::DoMov(const BytecodeNode* const node) {
+  Register input = GetRegisterInputOperand(
+      0, node->bytecode(), node->operands(), node->operand_count());
+  RegisterInfo* input_info = GetRegisterInfo(input);
+  Register output = GetRegisterOutputOperand(
+      1, node->bytecode(), node->operands(), node->operand_count());
+  RegisterInfo* output_info = GetOrCreateRegisterInfo(output);
+  RegisterTransfer(input_info, output_info, node->source_info());
+}
+
+void BytecodeRegisterOptimizer::DoStar(const BytecodeNode* const node) {
+  Register output = GetRegisterOutputOperand(
+      0, node->bytecode(), node->operands(), node->operand_count());
+  RegisterInfo* output_info = GetOrCreateRegisterInfo(output);
+  RegisterTransfer(accumulator_info_, output_info, node->source_info());
+}
+
+void BytecodeRegisterOptimizer::PrepareRegisterOutputOperand(
+    RegisterInfo* reg_info) {
+  if (reg_info->materialized()) {
+    CreateMaterializedEquivalent(reg_info);
+  }
+  reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
+}
+
+void BytecodeRegisterOptimizer::PrepareRegisterRangeOutputOperand(
+    Register start, int count) {
+  for (int i = 0; i < count; ++i) {
+    Register reg(start.index() + i);
+    RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg);
+    PrepareRegisterOutputOperand(reg_info);
+  }
+}
+
+Register BytecodeRegisterOptimizer::GetEquivalentRegisterForInputOperand(
+    Register reg) {
+  // For a temporary register, RegInfo state may need to be created. For
+  // locals and parameters, the RegInfo state is created in the
+  // BytecodeRegisterOptimizer constructor.
+  RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg);
+  if (reg_info->materialized()) {
+    return reg;
+  } else {
+    RegisterInfo* equivalent_info =
+        GetMaterializedEquivalentNotAccumulator(reg_info);
+    return equivalent_info->register_value();
+  }
+}
+
+void BytecodeRegisterOptimizer::PrepareRegisterInputOperand(
+    BytecodeNode* const node, Register reg, int operand_index) {
+  Register equivalent = GetEquivalentRegisterForInputOperand(reg);
+  node->operands()[operand_index] =
+      static_cast<uint32_t>(equivalent.ToOperand());
+}
+
+void BytecodeRegisterOptimizer::PrepareRegisterRangeInputOperand(Register start,
+                                                                 int count) {
+  for (int i = 0; i < count; ++i) {
+    Register current(start.index() + i);
+    RegisterInfo* input_info = GetRegisterInfo(current);
+    Materialize(input_info);
+  }
+}
+
+void BytecodeRegisterOptimizer::PrepareRegisterOperands(
+    BytecodeNode* const node) {
+  //
+  // For each input operand, get a materialized equivalent if it is
+  // just a single register, otherwise materialize the register range.
+  //
+  // For each output register about to be clobbered, materialize an
+  // equivalent if it exists. Put each register in its own equivalence
+  // set.
+  //
+  int register_operand_bitmap =
+      Bytecodes::GetRegisterOperandBitmap(node->bytecode());
+  const OperandType* operand_types =
+      Bytecodes::GetOperandTypes(node->bytecode());
+  uint32_t* operands = node->operands();
+  for (int i = 0; register_operand_bitmap != 0;
+       ++i, register_operand_bitmap >>= 1) {
+    if ((register_operand_bitmap & 1) == 0) {
+      continue;
+    }
+    OperandType operand_type = operand_types[i];
+    int count = 0;
+    if (operand_types[i + 1] == OperandType::kRegCount) {
+      count = static_cast<int>(operands[i + 1]);
+      if (count == 0) {
+        continue;
+      }
+    } else {
+      count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
+    }
+
+    Register reg = Register::FromOperand(static_cast<int32_t>(operands[i]));
+    if (Bytecodes::IsRegisterInputOperandType(operand_type)) {
+      if (count == 1) {
+        PrepareRegisterInputOperand(node, reg, i);
+      } else if (count > 1) {
+        PrepareRegisterRangeInputOperand(reg, count);
+      }
+    } else if (Bytecodes::IsRegisterOutputOperandType(operand_type)) {
+      PrepareRegisterRangeOutputOperand(reg, count);
+    }
+  }
+}
+
+void BytecodeRegisterOptimizer::PrepareAccumulator(BytecodeNode* const node) {
+  // Materialize the accumulator if it is read by the bytecode. The
+  // accumulator is special and no other register can be materialized
+  // in its place.
+  if (Bytecodes::ReadsAccumulator(node->bytecode()) &&
+      !accumulator_info_->materialized()) {
+    Materialize(accumulator_info_);
+  }
+
+  // Materialize an equivalent to the accumulator if it will be
+  // clobbered when the bytecode is dispatched.
+  if (Bytecodes::WritesAccumulator(node->bytecode())) {
+    PrepareRegisterOutputOperand(accumulator_info_);
+  }
+}
+
+void BytecodeRegisterOptimizer::PrepareOperands(BytecodeNode* const node) {
+  PrepareAccumulator(node);
+  PrepareRegisterOperands(node);
+}
+
+// static
+Register BytecodeRegisterOptimizer::GetRegisterInputOperand(
+    int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
+  DCHECK_LT(index, operand_count);
+  DCHECK(Bytecodes::IsRegisterInputOperandType(
+      Bytecodes::GetOperandType(bytecode, index)));
+  return OperandToRegister(operands[index]);
+}
+
+// static
+Register BytecodeRegisterOptimizer::GetRegisterOutputOperand(
+    int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
+  DCHECK_LT(index, operand_count);
+  DCHECK(Bytecodes::IsRegisterOutputOperandType(
+      Bytecodes::GetOperandType(bytecode, index)));
+  return OperandToRegister(operands[index]);
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::GetRegisterInfo(Register reg) {
+  size_t index = GetRegisterInfoTableIndex(reg);
+  return (index < register_info_table_.size()) ? register_info_table_[index]
+                                               : nullptr;
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::GetOrCreateRegisterInfo(Register reg) {
+  size_t index = GetRegisterInfoTableIndex(reg);
+  return index < register_info_table_.size() ? register_info_table_[index]
+                                             : NewRegisterInfo(reg);
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::NewRegisterInfo(Register reg) {
+  size_t index = GetRegisterInfoTableIndex(reg);
+  DCHECK_GE(index, register_info_table_.size());
+  GrowRegisterMap(reg);
+  return register_info_table_[index];
+}
+
+void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) {
+  DCHECK(RegisterIsTemporary(reg));
+  size_t index = GetRegisterInfoTableIndex(reg);
+  DCHECK_GE(index, register_info_table_.size());
+  size_t new_size = index + 1;
+  size_t old_size = register_info_table_.size();
+  register_info_table_.resize(new_size);
+  for (size_t i = old_size; i < new_size; ++i) {
+    register_info_table_[i] = new (zone()) RegisterInfo(
+        RegisterFromRegisterInfoTableIndex(i), NextEquivalenceId(), false);
+  }
+}
+
+void BytecodeRegisterOptimizer::TemporaryRegisterFreeEvent(Register reg) {
+  RegisterInfo* info = GetRegisterInfo(reg);
+  if (info != nullptr) {
+    // If register is materialized and part of equivalence set, make
+    // sure another member of the set holds the value before the
+    // temporary register is removed.
+    if (info->materialized()) {
+      CreateMaterializedEquivalent(info);
+    }
+    info->MoveToNewEquivalenceSet(kInvalidEquivalenceId, false);
+  }
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
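
End to end, the stage turns conservative spill-through-temporary sequences
into direct ones: a Star into an unobservable temporary only extends an
equivalence set, and a later Ldar from that set becomes a no-op. A sketch of
the before/after for one such sequence (the bytecode names are real, the
listing itself is illustrative):

#include <iostream>
#include <string>
#include <vector>

int main() {
  // Before: the generator spills the accumulator through temporary r5.
  std::vector<std::string> before = {"LdaSmi 1", "Star r5", "Ldar r5",
                                     "Return"};
  // After: r5 is never observable, so Star only records an equivalence
  // and the Ldar from the same set is dropped entirely.
  std::vector<std::string> after = {"LdaSmi 1", "Return"};
  for (const std::string& b : before) std::cout << "before: " << b << '\n';
  for (const std::string& a : after) std::cout << " after: " << a << '\n';
}
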
diff --git a/src/interpreter/bytecode-register-optimizer.h b/src/interpreter/bytecode-register-optimizer.h
new file mode 100644
index 0000000..4229610
--- /dev/null
+++ b/src/interpreter/bytecode-register-optimizer.h
@@ -0,0 +1,155 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_REGISTER_OPTIMIZER_H_
+#define V8_INTERPRETER_BYTECODE_REGISTER_OPTIMIZER_H_
+
+#include "src/interpreter/bytecode-pipeline.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// An optimization stage for eliminating unnecessary transfers between
+// registers. The bytecode generator uses temporary registers
+// liberally for correctness and convenience, and this stage removes
+// transfers that are not required while preserving correctness.
+class BytecodeRegisterOptimizer final : public BytecodePipelineStage,
+                                        public TemporaryRegisterObserver,
+                                        public ZoneObject {
+ public:
+  BytecodeRegisterOptimizer(Zone* zone,
+                            TemporaryRegisterAllocator* register_allocator,
+                            int parameter_count,
+                            BytecodePipelineStage* next_stage);
+  virtual ~BytecodeRegisterOptimizer() {}
+
+  // BytecodePipelineStage interface.
+  void Write(BytecodeNode* node) override;
+  void WriteJump(BytecodeNode* node, BytecodeLabel* label) override;
+  void BindLabel(BytecodeLabel* label) override;
+  void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
+  Handle<BytecodeArray> ToBytecodeArray(
+      int fixed_register_count, int parameter_count,
+      Handle<FixedArray> handler_table) override;
+
+ private:
+  static const uint32_t kInvalidEquivalenceId = kMaxUInt32;
+
+  class RegisterInfo;
+
+  // TemporaryRegisterObserver interface.
+  void TemporaryRegisterFreeEvent(Register reg) override;
+
+  // Helpers for BytecodePipelineStage interface.
+  void FlushState();
+  void WriteToNextStage(BytecodeNode* node) const;
+  void WriteToNextStage(BytecodeNode* node,
+                        const BytecodeSourceInfo& output_info) const;
+
+  // Update internal state for a register transfer from |input| to
+  // |output|, using |source_info| as the source position information
+  // if any bytecodes are emitted due to the transfer.
+  void RegisterTransfer(RegisterInfo* input, RegisterInfo* output,
+                        const BytecodeSourceInfo& source_info);
+
+  // Emit a register transfer bytecode from |input| to |output|.
+  void OutputRegisterTransfer(
+      RegisterInfo* input, RegisterInfo* output,
+      const BytecodeSourceInfo& source_info = BytecodeSourceInfo());
+
+  // Emits a Nop to preserve source position information in the
+  // bytecode pipeline.
+  void EmitNopForSourceInfo(const BytecodeSourceInfo& source_info) const;
+
+  // Handlers for bytecode nodes for register to register transfers.
+  void DoLdar(const BytecodeNode* const node);
+  void DoMov(const BytecodeNode* const node);
+  void DoStar(const BytecodeNode* const node);
+
+  // Operand processing methods for bytecodes other than those
+  // performing register to register transfers.
+  void PrepareOperands(BytecodeNode* const node);
+  void PrepareAccumulator(BytecodeNode* const node);
+  void PrepareRegisterOperands(BytecodeNode* const node);
+
+  void PrepareRegisterOutputOperand(RegisterInfo* reg_info);
+  void PrepareRegisterRangeOutputOperand(Register start, int count);
+  void PrepareRegisterInputOperand(BytecodeNode* const node, Register reg,
+                                   int operand_index);
+  void PrepareRegisterRangeInputOperand(Register start, int count);
+
+  Register GetEquivalentRegisterForInputOperand(Register reg);
+
+  static Register GetRegisterInputOperand(int index, Bytecode bytecode,
+                                          const uint32_t* operands,
+                                          int operand_count);
+  static Register GetRegisterOutputOperand(int index, Bytecode bytecode,
+                                           const uint32_t* operands,
+                                           int operand_count);
+
+  void CreateMaterializedEquivalent(RegisterInfo* info);
+  RegisterInfo* GetMaterializedEquivalent(RegisterInfo* info);
+  RegisterInfo* GetMaterializedEquivalentNotAccumulator(RegisterInfo* info);
+  void Materialize(RegisterInfo* info);
+  void AddToEquivalenceSet(RegisterInfo* set_member,
+                           RegisterInfo* non_set_member);
+
+  // Methods for finding and creating metadata for each register.
+  RegisterInfo* GetOrCreateRegisterInfo(Register reg);
+  RegisterInfo* GetRegisterInfo(Register reg);
+  RegisterInfo* NewRegisterInfo(Register reg);
+  void GrowRegisterMap(Register reg);
+
+  bool RegisterIsTemporary(Register reg) const {
+    return reg >= temporary_base_;
+  }
+
+  bool RegisterIsObservable(Register reg) const {
+    return reg != accumulator_ && !RegisterIsTemporary(reg);
+  }
+
+  static Register OperandToRegister(uint32_t operand) {
+    return Register::FromOperand(static_cast<int32_t>(operand));
+  }
+
+  size_t GetRegisterInfoTableIndex(Register reg) const {
+    return static_cast<size_t>(reg.index() + register_info_table_offset_);
+  }
+
+  Register RegisterFromRegisterInfoTableIndex(size_t index) const {
+    return Register(static_cast<int>(index) - register_info_table_offset_);
+  }
+
+  uint32_t NextEquivalenceId() {
+    equivalence_id_++;
+    CHECK_NE(equivalence_id_, kInvalidEquivalenceId);
+    return equivalence_id_;
+  }
+
+  Zone* zone() { return zone_; }
+
+  const Register accumulator_;
+  RegisterInfo* accumulator_info_;
+  const Register temporary_base_;
+
+  // Direct mapping to register info.
+  ZoneVector<RegisterInfo*> register_info_table_;
+  int register_info_table_offset_;
+
+  // Counter for equivalence set identifiers.
+  uint32_t equivalence_id_;
+
+  BytecodePipelineStage* next_stage_;
+  bool flush_required_;
+  Zone* zone_;
+
+  DISALLOW_COPY_AND_ASSIGN(BytecodeRegisterOptimizer);
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_BYTECODE_REGISTER_OPTIMIZER_H_
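
To make the wiring contract above concrete, here is a hypothetical
pass-through stage sketched against the same interface (not part of this
patch; it assumes the BytecodePipelineStage and ZoneObject declarations from
bytecode-pipeline.h). Every method either acts on the node or forwards it to
|next_stage_|, which is also how the register optimizer behaves when it has
nothing to elide.

  class NopPipelineStage final : public BytecodePipelineStage,
                                 public ZoneObject {
   public:
    explicit NopPipelineStage(BytecodePipelineStage* next_stage)
        : next_stage_(next_stage) {}

    void Write(BytecodeNode* node) override { next_stage_->Write(node); }
    void WriteJump(BytecodeNode* node, BytecodeLabel* label) override {
      next_stage_->WriteJump(node, label);
    }
    void BindLabel(BytecodeLabel* label) override {
      next_stage_->BindLabel(label);
    }
    void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override {
      next_stage_->BindLabel(target, label);
    }
    Handle<BytecodeArray> ToBytecodeArray(
        int fixed_register_count, int parameter_count,
        Handle<FixedArray> handler_table) override {
      return next_stage_->ToBytecodeArray(fixed_register_count,
                                          parameter_count, handler_table);
    }

   private:
    BytecodePipelineStage* next_stage_;
  };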
diff --git a/src/interpreter/bytecode-traits.h b/src/interpreter/bytecode-traits.h
index e7d1432..ea3d5d4 100644
--- a/src/interpreter/bytecode-traits.h
+++ b/src/interpreter/bytecode-traits.h
@@ -30,13 +30,15 @@
 
 template <OperandType>
 struct OperandTraits {
-  typedef OperandTypeInfoTraits<OperandTypeInfo::kNone> TypeInfo;
+  typedef OperandTypeInfoTraits<OperandTypeInfo::kNone> TypeInfoTraits;
+  static const OperandTypeInfo kOperandTypeInfo = OperandTypeInfo::kNone;
 };
 
-#define DECLARE_OPERAND_TYPE_TRAITS(Name, InfoType)   \
-  template <>                                         \
-  struct OperandTraits<OperandType::k##Name> {        \
-    typedef OperandTypeInfoTraits<InfoType> TypeInfo; \
+#define DECLARE_OPERAND_TYPE_TRAITS(Name, InfoType)           \
+  template <>                                                 \
+  struct OperandTraits<OperandType::k##Name> {                \
+    typedef OperandTypeInfoTraits<InfoType> TypeInfoTraits;   \
+    static const OperandTypeInfo kOperandTypeInfo = InfoType; \
   };
 OPERAND_TYPE_LIST(DECLARE_OPERAND_TYPE_TRAITS)
 #undef DECLARE_OPERAND_TYPE_TRAITS
@@ -57,8 +59,8 @@
   };
 
   static const int kSize =
-      Helper<OperandTraits<operand_type>::TypeInfo::kIsScalable,
-             OperandTraits<operand_type>::TypeInfo::kUnscaledSize,
+      Helper<OperandTraits<operand_type>::TypeInfoTraits::kIsScalable,
+             OperandTraits<operand_type>::TypeInfoTraits::kUnscaledSize,
              operand_scale>::kSize;
   static const OperandSize kOperandSize = static_cast<OperandSize>(kSize);
 };
@@ -89,7 +91,16 @@
     return operand_types;
   }
 
-  static OperandSize GetOperandSize(int i, OperandScale operand_scale) {
+  static const OperandTypeInfo* GetOperandTypeInfos() {
+    static const OperandTypeInfo operand_type_infos[] = {
+        OperandTraits<operand_0>::kOperandTypeInfo,
+        OperandTraits<operand_1>::kOperandTypeInfo,
+        OperandTraits<operand_2>::kOperandTypeInfo,
+        OperandTraits<operand_3>::kOperandTypeInfo, OperandTypeInfo::kNone};
+    return operand_type_infos;
+  }
+
+  static const OperandSize* GetOperandSizes(OperandScale operand_scale) {
     switch (operand_scale) {
 #define CASE(Name, _)                                                  \
   case OperandScale::k##Name: {                                        \
@@ -99,14 +110,13 @@
         OperandScaler<operand_2, OperandScale::k##Name>::kOperandSize, \
         OperandScaler<operand_3, OperandScale::k##Name>::kOperandSize, \
     };                                                                 \
-    DCHECK_LT(static_cast<size_t>(i), arraysize(kOperandSizes));       \
-    return kOperandSizes[i];                                           \
+    return kOperandSizes;                                              \
   }
       OPERAND_SCALE_LIST(CASE)
 #undef CASE
     }
     UNREACHABLE();
-    return OperandSize::kNone;
+    return nullptr;
   }
 
   template <OperandType ot>
@@ -116,10 +126,10 @@
   }
 
   static inline bool IsScalable() {
-    return (OperandTraits<operand_0>::TypeInfo::kIsScalable |
-            OperandTraits<operand_1>::TypeInfo::kIsScalable |
-            OperandTraits<operand_2>::TypeInfo::kIsScalable |
-            OperandTraits<operand_3>::TypeInfo::kIsScalable);
+    return (OperandTraits<operand_0>::TypeInfoTraits::kIsScalable |
+            OperandTraits<operand_1>::TypeInfoTraits::kIsScalable |
+            OperandTraits<operand_2>::TypeInfoTraits::kIsScalable |
+            OperandTraits<operand_3>::TypeInfoTraits::kIsScalable);
   }
 
   static const AccumulatorUse kAccumulatorUse = accumulator_use;
@@ -145,7 +155,15 @@
     return operand_types;
   }
 
-  static OperandSize GetOperandSize(int i, OperandScale operand_scale) {
+  static const OperandTypeInfo* GetOperandTypeInfos() {
+    static const OperandTypeInfo operand_type_infos[] = {
+        OperandTraits<operand_0>::kOperandTypeInfo,
+        OperandTraits<operand_1>::kOperandTypeInfo,
+        OperandTraits<operand_2>::kOperandTypeInfo, OperandTypeInfo::kNone};
+    return operand_type_infos;
+  }
+
+  static const OperandSize* GetOperandSizes(OperandScale operand_scale) {
     switch (operand_scale) {
 #define CASE(Name, _)                                                  \
   case OperandScale::k##Name: {                                        \
@@ -154,14 +172,13 @@
         OperandScaler<operand_1, OperandScale::k##Name>::kOperandSize, \
         OperandScaler<operand_2, OperandScale::k##Name>::kOperandSize, \
     };                                                                 \
-    DCHECK_LT(static_cast<size_t>(i), arraysize(kOperandSizes));       \
-    return kOperandSizes[i];                                           \
+    return kOperandSizes;                                              \
   }
       OPERAND_SCALE_LIST(CASE)
 #undef CASE
     }
     UNREACHABLE();
-    return OperandSize::kNone;
+    return nullptr;
   }
 
   template <OperandType ot>
@@ -170,9 +187,9 @@
   }
 
   static inline bool IsScalable() {
-    return (OperandTraits<operand_0>::TypeInfo::kIsScalable |
-            OperandTraits<operand_1>::TypeInfo::kIsScalable |
-            OperandTraits<operand_2>::TypeInfo::kIsScalable);
+    return (OperandTraits<operand_0>::TypeInfoTraits::kIsScalable |
+            OperandTraits<operand_1>::TypeInfoTraits::kIsScalable |
+            OperandTraits<operand_2>::TypeInfoTraits::kIsScalable);
   }
 
   static const AccumulatorUse kAccumulatorUse = accumulator_use;
@@ -196,7 +213,14 @@
     return operand_types;
   }
 
-  static OperandSize GetOperandSize(int i, OperandScale operand_scale) {
+  static const OperandTypeInfo* GetOperandTypeInfos() {
+    static const OperandTypeInfo operand_type_infos[] = {
+        OperandTraits<operand_0>::kOperandTypeInfo,
+        OperandTraits<operand_1>::kOperandTypeInfo, OperandTypeInfo::kNone};
+    return operand_type_infos;
+  }
+
+  static const OperandSize* GetOperandSizes(OperandScale operand_scale) {
     switch (operand_scale) {
 #define CASE(Name, _)                                                  \
   case OperandScale::k##Name: {                                        \
@@ -204,14 +228,13 @@
         OperandScaler<operand_0, OperandScale::k##Name>::kOperandSize, \
         OperandScaler<operand_1, OperandScale::k##Name>::kOperandSize, \
     };                                                                 \
-    DCHECK_LT(static_cast<size_t>(i), arraysize(kOperandSizes));       \
-    return kOperandSizes[i];                                           \
+    return kOperandSizes;                                              \
   }
       OPERAND_SCALE_LIST(CASE)
 #undef CASE
     }
     UNREACHABLE();
-    return OperandSize::kNone;
+    return nullptr;
   }
 
   template <OperandType ot>
@@ -220,8 +243,8 @@
   }
 
   static inline bool IsScalable() {
-    return (OperandTraits<operand_0>::TypeInfo::kIsScalable |
-            OperandTraits<operand_1>::TypeInfo::kIsScalable);
+    return (OperandTraits<operand_0>::TypeInfoTraits::kIsScalable |
+            OperandTraits<operand_1>::TypeInfoTraits::kIsScalable);
   }
 
   static const AccumulatorUse kAccumulatorUse = accumulator_use;
@@ -241,21 +264,26 @@
     return operand_types;
   }
 
-  static OperandSize GetOperandSize(int i, OperandScale operand_scale) {
+  static const OperandTypeInfo* GetOperandTypeInfos() {
+    static const OperandTypeInfo operand_type_infos[] = {
+        OperandTraits<operand_0>::kOperandTypeInfo, OperandTypeInfo::kNone};
+    return operand_type_infos;
+  }
+
+  static const OperandSize* GetOperandSizes(OperandScale operand_scale) {
     switch (operand_scale) {
 #define CASE(Name, _)                                                  \
   case OperandScale::k##Name: {                                        \
     static const OperandSize kOperandSizes[] = {                       \
         OperandScaler<operand_0, OperandScale::k##Name>::kOperandSize, \
     };                                                                 \
-    DCHECK_LT(static_cast<size_t>(i), arraysize(kOperandSizes));       \
-    return kOperandSizes[i];                                           \
+    return kOperandSizes;                                              \
   }
       OPERAND_SCALE_LIST(CASE)
 #undef CASE
     }
     UNREACHABLE();
-    return OperandSize::kNone;
+    return nullptr;
   }
 
   template <OperandType ot>
@@ -264,7 +292,7 @@
   }
 
   static inline bool IsScalable() {
-    return OperandTraits<operand_0>::TypeInfo::kIsScalable;
+    return OperandTraits<operand_0>::TypeInfoTraits::kIsScalable;
   }
 
   static const AccumulatorUse kAccumulatorUse = accumulator_use;
@@ -282,9 +310,14 @@
     return operand_types;
   }
 
-  static OperandSize GetOperandSize(int i, OperandScale operand_scale) {
-    UNREACHABLE();
-    return OperandSize::kNone;
+  static const OperandTypeInfo* GetOperandTypeInfos() {
+    static const OperandTypeInfo operand_type_infos[] = {
+        OperandTypeInfo::kNone};
+    return operand_type_infos;
+  }
+
+  static const OperandSize* GetOperandSizes(OperandScale operand_scale) {
+    return nullptr;
   }
 
   template <OperandType ot>
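
A note on the call-pattern change above: the per-operand switch is replaced
by accessors returning pointers to static arrays, so the switch now runs once
per bytecode rather than once per operand. A hedged usage sketch
(ProcessOperand is hypothetical):

  // Old: one switch on every query.
  //   OperandSize size = Bytecodes::GetOperandSize(bytecode, i, scale);
  // New: one switch, then plain indexing.
  const OperandSize* sizes = Bytecodes::GetOperandSizes(bytecode, scale);
  for (int i = 0; i < Bytecodes::NumberOfOperands(bytecode); ++i) {
    ProcessOperand(sizes[i]);
  }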
diff --git a/src/interpreter/bytecodes.cc b/src/interpreter/bytecodes.cc
index 5a67847..44c5138 100644
--- a/src/interpreter/bytecodes.cc
+++ b/src/interpreter/bytecodes.cc
@@ -6,6 +6,7 @@
 
 #include <iomanip>
 
+#include "src/base/bits.h"
 #include "src/frames.h"
 #include "src/interpreter/bytecode-traits.h"
 #include "src/interpreter/interpreter.h"
@@ -100,14 +101,6 @@
   return "";
 }
 
-
-// static
-uint8_t Bytecodes::ToByte(Bytecode bytecode) {
-  DCHECK(bytecode <= Bytecode::kLast);
-  return static_cast<uint8_t>(bytecode);
-}
-
-
 // static
 Bytecode Bytecodes::FromByte(uint8_t value) {
   Bytecode bytecode = static_cast<Bytecode>(value);
@@ -115,7 +108,6 @@
   return bytecode;
 }
 
-
 // static
 Bytecode Bytecodes::GetDebugBreak(Bytecode bytecode) {
   DCHECK(!IsDebugBreak(bytecode));
@@ -148,7 +140,6 @@
   return size;
 }
 
-
 // static
 size_t Bytecodes::ReturnCount(Bytecode bytecode) {
   return bytecode == Bytecode::kReturn ? 1 : 0;
@@ -168,7 +159,6 @@
   return 0;
 }
 
-
 // static
 int Bytecodes::NumberOfRegisterOperands(Bytecode bytecode) {
   DCHECK(bytecode <= Bytecode::kLast);
@@ -285,6 +275,34 @@
 }
 
 // static
+bool Bytecodes::IsJumpWithoutEffects(Bytecode bytecode) {
+  return IsJump(bytecode) && !IsJumpIfToBoolean(bytecode);
+}
+
+// static
+bool Bytecodes::IsRegisterLoadWithoutEffects(Bytecode bytecode) {
+  switch (bytecode) {
+    case Bytecode::kMov:
+    case Bytecode::kPopContext:
+    case Bytecode::kPushContext:
+    case Bytecode::kStar:
+    case Bytecode::kLdrUndefined:
+      return true;
+    default:
+      return false;
+  }
+}
+
+// static
+bool Bytecodes::IsWithoutExternalSideEffects(Bytecode bytecode) {
+  // These bytecodes only manipulate interpreter frame state and will
+  // never throw.
+  return (IsAccumulatorLoadWithoutEffects(bytecode) ||
+          IsRegisterLoadWithoutEffects(bytecode) ||
+          bytecode == Bytecode::kNop || IsJumpWithoutEffects(bytecode));
+}
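// Reviewer note, not part of the patch: worked examples of the predicate,
// assuming the accumulator-load set covers the plain Lda* bytecodes:
//   IsWithoutExternalSideEffects(Bytecode::kLdaSmi) -> true  (accumulator load)
//   IsWithoutExternalSideEffects(Bytecode::kStar)   -> true  (register load)
//   IsWithoutExternalSideEffects(Bytecode::kJump)   -> true  (plain jump)
//   IsWithoutExternalSideEffects(Bytecode::kAdd)    -> false (may call and throw)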
+
+// static
 OperandType Bytecodes::GetOperandType(Bytecode bytecode, int i) {
   DCHECK_LE(bytecode, Bytecode::kLast);
   DCHECK_LT(i, NumberOfOperands(bytecode));
@@ -307,18 +325,39 @@
 }
 
 // static
-OperandSize Bytecodes::GetOperandSize(Bytecode bytecode, int i,
-                                      OperandScale operand_scale) {
+const OperandTypeInfo* Bytecodes::GetOperandTypeInfos(Bytecode bytecode) {
   DCHECK(bytecode <= Bytecode::kLast);
   switch (bytecode) {
 #define CASE(Name, ...)   \
   case Bytecode::k##Name: \
-    return BytecodeTraits<__VA_ARGS__>::GetOperandSize(i, operand_scale);
+    return BytecodeTraits<__VA_ARGS__>::GetOperandTypeInfos();
     BYTECODE_LIST(CASE)
 #undef CASE
   }
   UNREACHABLE();
-  return OperandSize::kNone;
+  return nullptr;
+}
+
+// static
+OperandSize Bytecodes::GetOperandSize(Bytecode bytecode, int i,
+                                      OperandScale operand_scale) {
+  DCHECK_LT(i, NumberOfOperands(bytecode));
+  return GetOperandSizes(bytecode, operand_scale)[i];
+}
+
+// static
+const OperandSize* Bytecodes::GetOperandSizes(Bytecode bytecode,
+                                              OperandScale operand_scale) {
+  DCHECK(bytecode <= Bytecode::kLast);
+  switch (bytecode) {
+#define CASE(Name, ...)   \
+  case Bytecode::k##Name: \
+    return BytecodeTraits<__VA_ARGS__>::GetOperandSizes(operand_scale);
+    BYTECODE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return nullptr;
 }
 
 // static
@@ -574,7 +613,7 @@
   switch (operand_type) {
 #define CASE(Name, _)        \
   case OperandType::k##Name: \
-    return OperandTraits<OperandType::k##Name>::TypeInfo::kIsUnsigned;
+    return OperandTraits<OperandType::k##Name>::TypeInfoTraits::kIsUnsigned;
     OPERAND_TYPE_LIST(CASE)
 #undef CASE
   }
@@ -584,9 +623,9 @@
 
 // static
 OperandSize Bytecodes::SizeForSignedOperand(int value) {
-  if (kMinInt8 <= value && value <= kMaxInt8) {
+  if (value >= kMinInt8 && value <= kMaxInt8) {
     return OperandSize::kByte;
-  } else if (kMinInt16 <= value && value <= kMaxInt16) {
+  } else if (value >= kMinInt16 && value <= kMaxInt16) {
     return OperandSize::kShort;
   } else {
     return OperandSize::kQuad;
@@ -594,8 +633,7 @@
 }
 
 // static
-OperandSize Bytecodes::SizeForUnsignedOperand(int value) {
-  DCHECK_GE(value, 0);
+OperandSize Bytecodes::SizeForUnsignedOperand(uint32_t value) {
   if (value <= kMaxUInt8) {
     return OperandSize::kByte;
   } else if (value <= kMaxUInt16) {
@@ -605,42 +643,6 @@
   }
 }
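// Reviewer note, not part of the patch: the boundaries above give, e.g.:
//   SizeForSignedOperand(-1)      -> OperandSize::kByte  (within [kMinInt8, kMaxInt8])
//   SizeForSignedOperand(40000)   -> OperandSize::kQuad  (exceeds kMaxInt16)
//   SizeForUnsignedOperand(255)   -> OperandSize::kByte
//   SizeForUnsignedOperand(256)   -> OperandSize::kShort
//   SizeForUnsignedOperand(70000) -> OperandSize::kQuad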
 
-OperandSize Bytecodes::SizeForUnsignedOperand(size_t value) {
-  if (value <= static_cast<size_t>(kMaxUInt8)) {
-    return OperandSize::kByte;
-  } else if (value <= static_cast<size_t>(kMaxUInt16)) {
-    return OperandSize::kShort;
-  } else if (value <= kMaxUInt32) {
-    return OperandSize::kQuad;
-  } else {
-    UNREACHABLE();
-    return OperandSize::kQuad;
-  }
-}
-
-OperandScale Bytecodes::OperandSizesToScale(OperandSize size0,
-                                            OperandSize size1,
-                                            OperandSize size2,
-                                            OperandSize size3) {
-  OperandSize upper = std::max(size0, size1);
-  OperandSize lower = std::max(size2, size3);
-  OperandSize result = std::max(upper, lower);
-  // Operand sizes have been scaled before calling this function.
-  // Currently all scalable operands are byte sized at
-  // OperandScale::kSingle.
-  STATIC_ASSERT(static_cast<int>(OperandSize::kByte) ==
-                    static_cast<int>(OperandScale::kSingle) &&
-                static_cast<int>(OperandSize::kShort) ==
-                    static_cast<int>(OperandScale::kDouble) &&
-                static_cast<int>(OperandSize::kQuad) ==
-                    static_cast<int>(OperandScale::kQuadruple));
-  OperandScale operand_scale = static_cast<OperandScale>(result);
-  DCHECK(operand_scale == OperandScale::kSingle ||
-         operand_scale == OperandScale::kDouble ||
-         operand_scale == OperandScale::kQuadruple);
-  return operand_scale;
-}
-
 // static
 Register Bytecodes::DecodeRegisterOperand(const uint8_t* operand_start,
                                           OperandType operand_type,
@@ -735,6 +737,7 @@
         break;
       case interpreter::OperandType::kIdx:
       case interpreter::OperandType::kRuntimeId:
+      case interpreter::OperandType::kIntrinsicId:
         os << "["
            << DecodeUnsignedOperand(operand_start, op_type, operand_scale)
            << "]";
@@ -829,6 +832,10 @@
     (InterpreterFrameConstants::kRegisterFileFromFp -
      InterpreterFrameConstants::kBytecodeOffsetFromFp) /
     kPointerSize;
+static const int kCallerPCOffsetRegisterIndex =
+    (InterpreterFrameConstants::kRegisterFileFromFp -
+     InterpreterFrameConstants::kCallerPCOffsetFromFp) /
+    kPointerSize;
 
 Register Register::FromParameterIndex(int index, int parameter_count) {
   DCHECK_GE(index, 0);
@@ -881,6 +888,11 @@
   return index() == kBytecodeOffsetRegisterIndex;
 }
 
+// static
+Register Register::virtual_accumulator() {
+  return Register(kCallerPCOffsetRegisterIndex);
+}
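// Reviewer note, not part of the patch: reusing the caller PC slot's index is
// deliberate. That frame slot can never be named by a real register operand,
// so the index is free to stand in for the accumulator inside the
// interpreter, matching the header comment on virtual_accumulator().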
+
 OperandSize Register::SizeOfOperand() const {
   int32_t operand = ToOperand();
   if (operand >= kMinInt8 && operand <= kMaxInt8) {
diff --git a/src/interpreter/bytecodes.h b/src/interpreter/bytecodes.h
index d67a390..63a69f1 100644
--- a/src/interpreter/bytecodes.h
+++ b/src/interpreter/bytecodes.h
@@ -30,6 +30,7 @@
 
 #define SCALAR_OPERAND_TYPE_LIST(V)                   \
   V(Flag8, OperandTypeInfo::kFixedUnsignedByte)       \
+  V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
   V(Idx, OperandTypeInfo::kScalableUnsignedByte)      \
   V(Imm, OperandTypeInfo::kScalableSignedByte)        \
   V(RegCount, OperandTypeInfo::kScalableUnsignedByte) \
@@ -73,188 +74,197 @@
   DEBUG_BREAK_PREFIX_BYTECODE_LIST(V)
 
 // The list of bytecodes which are interpreted by the interpreter.
-#define BYTECODE_LIST(V)                                                      \
-  /* Extended width operands */                                               \
-  V(Wide, AccumulatorUse::kNone)                                              \
-  V(ExtraWide, AccumulatorUse::kNone)                                         \
-                                                                              \
-  /* Loading the accumulator */                                               \
-  V(LdaZero, AccumulatorUse::kWrite)                                          \
-  V(LdaSmi, AccumulatorUse::kWrite, OperandType::kImm)                        \
-  V(LdaUndefined, AccumulatorUse::kWrite)                                     \
-  V(LdaNull, AccumulatorUse::kWrite)                                          \
-  V(LdaTheHole, AccumulatorUse::kWrite)                                       \
-  V(LdaTrue, AccumulatorUse::kWrite)                                          \
-  V(LdaFalse, AccumulatorUse::kWrite)                                         \
-  V(LdaConstant, AccumulatorUse::kWrite, OperandType::kIdx)                   \
-                                                                              \
-  /* Globals */                                                               \
-  V(LdaGlobal, AccumulatorUse::kWrite, OperandType::kIdx, OperandType::kIdx)  \
-  V(LdaGlobalInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx,         \
-    OperandType::kIdx)                                                        \
-  V(StaGlobalSloppy, AccumulatorUse::kRead, OperandType::kIdx,                \
-    OperandType::kIdx)                                                        \
-  V(StaGlobalStrict, AccumulatorUse::kRead, OperandType::kIdx,                \
-    OperandType::kIdx)                                                        \
-                                                                              \
-  /* Context operations */                                                    \
-  V(PushContext, AccumulatorUse::kRead, OperandType::kRegOut)                 \
-  V(PopContext, AccumulatorUse::kNone, OperandType::kReg)                     \
-  V(LdaContextSlot, AccumulatorUse::kWrite, OperandType::kReg,                \
-    OperandType::kIdx)                                                        \
-  V(StaContextSlot, AccumulatorUse::kRead, OperandType::kReg,                 \
-    OperandType::kIdx)                                                        \
-                                                                              \
-  /* Load-Store lookup slots */                                               \
-  V(LdaLookupSlot, AccumulatorUse::kWrite, OperandType::kIdx)                 \
-  V(LdaLookupSlotInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx)     \
-  V(StaLookupSlotSloppy, AccumulatorUse::kReadWrite, OperandType::kIdx)       \
-  V(StaLookupSlotStrict, AccumulatorUse::kReadWrite, OperandType::kIdx)       \
-                                                                              \
-  /* Register-accumulator transfers */                                        \
-  V(Ldar, AccumulatorUse::kWrite, OperandType::kReg)                          \
-  V(Star, AccumulatorUse::kRead, OperandType::kRegOut)                        \
-                                                                              \
-  /* Register-register transfers */                                           \
-  V(Mov, AccumulatorUse::kNone, OperandType::kReg, OperandType::kRegOut)      \
-                                                                              \
-  /* LoadIC operations */                                                     \
-  V(LoadIC, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kIdx,     \
-    OperandType::kIdx)                                                        \
-  V(KeyedLoadIC, AccumulatorUse::kReadWrite, OperandType::kReg,               \
-    OperandType::kIdx)                                                        \
-                                                                              \
-  /* StoreIC operations */                                                    \
-  V(StoreICSloppy, AccumulatorUse::kRead, OperandType::kReg,                  \
-    OperandType::kIdx, OperandType::kIdx)                                     \
-  V(StoreICStrict, AccumulatorUse::kRead, OperandType::kReg,                  \
-    OperandType::kIdx, OperandType::kIdx)                                     \
-  V(KeyedStoreICSloppy, AccumulatorUse::kRead, OperandType::kReg,             \
-    OperandType::kReg, OperandType::kIdx)                                     \
-  V(KeyedStoreICStrict, AccumulatorUse::kRead, OperandType::kReg,             \
-    OperandType::kReg, OperandType::kIdx)                                     \
-                                                                              \
-  /* Binary Operators */                                                      \
-  V(Add, AccumulatorUse::kReadWrite, OperandType::kReg)                       \
-  V(Sub, AccumulatorUse::kReadWrite, OperandType::kReg)                       \
-  V(Mul, AccumulatorUse::kReadWrite, OperandType::kReg)                       \
-  V(Div, AccumulatorUse::kReadWrite, OperandType::kReg)                       \
-  V(Mod, AccumulatorUse::kReadWrite, OperandType::kReg)                       \
-  V(BitwiseOr, AccumulatorUse::kReadWrite, OperandType::kReg)                 \
-  V(BitwiseXor, AccumulatorUse::kReadWrite, OperandType::kReg)                \
-  V(BitwiseAnd, AccumulatorUse::kReadWrite, OperandType::kReg)                \
-  V(ShiftLeft, AccumulatorUse::kReadWrite, OperandType::kReg)                 \
-  V(ShiftRight, AccumulatorUse::kReadWrite, OperandType::kReg)                \
-  V(ShiftRightLogical, AccumulatorUse::kReadWrite, OperandType::kReg)         \
-                                                                              \
-  /* Unary Operators */                                                       \
-  V(Inc, AccumulatorUse::kReadWrite)                                          \
-  V(Dec, AccumulatorUse::kReadWrite)                                          \
-  V(ToBooleanLogicalNot, AccumulatorUse::kReadWrite)                          \
-  V(LogicalNot, AccumulatorUse::kReadWrite)                                   \
-  V(TypeOf, AccumulatorUse::kReadWrite)                                       \
-  V(DeletePropertyStrict, AccumulatorUse::kReadWrite, OperandType::kReg)      \
-  V(DeletePropertySloppy, AccumulatorUse::kReadWrite, OperandType::kReg)      \
-                                                                              \
-  /* Call operations */                                                       \
-  V(Call, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg,       \
-    OperandType::kRegCount, OperandType::kIdx)                                \
-  V(TailCall, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg,   \
-    OperandType::kRegCount, OperandType::kIdx)                                \
-  V(CallRuntime, AccumulatorUse::kWrite, OperandType::kRuntimeId,             \
-    OperandType::kMaybeReg, OperandType::kRegCount)                           \
-  V(CallRuntimeForPair, AccumulatorUse::kNone, OperandType::kRuntimeId,       \
-    OperandType::kMaybeReg, OperandType::kRegCount, OperandType::kRegOutPair) \
-  V(CallJSRuntime, AccumulatorUse::kWrite, OperandType::kIdx,                 \
-    OperandType::kReg, OperandType::kRegCount)                                \
-                                                                              \
-  /* Intrinsics */                                                            \
-  V(InvokeIntrinsic, AccumulatorUse::kWrite, OperandType::kRuntimeId,         \
-    OperandType::kMaybeReg, OperandType::kRegCount)                           \
-                                                                              \
-  /* New operator */                                                          \
-  V(New, AccumulatorUse::kReadWrite, OperandType::kReg,                       \
-    OperandType::kMaybeReg, OperandType::kRegCount)                           \
-                                                                              \
-  /* Test Operators */                                                        \
-  V(TestEqual, AccumulatorUse::kReadWrite, OperandType::kReg)                 \
-  V(TestNotEqual, AccumulatorUse::kReadWrite, OperandType::kReg)              \
-  V(TestEqualStrict, AccumulatorUse::kReadWrite, OperandType::kReg)           \
-  V(TestLessThan, AccumulatorUse::kReadWrite, OperandType::kReg)              \
-  V(TestGreaterThan, AccumulatorUse::kReadWrite, OperandType::kReg)           \
-  V(TestLessThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg)       \
-  V(TestGreaterThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg)    \
-  V(TestInstanceOf, AccumulatorUse::kReadWrite, OperandType::kReg)            \
-  V(TestIn, AccumulatorUse::kReadWrite, OperandType::kReg)                    \
-                                                                              \
-  /* Cast operators */                                                        \
-  V(ToName, AccumulatorUse::kReadWrite)                                       \
-  V(ToNumber, AccumulatorUse::kReadWrite)                                     \
-  V(ToObject, AccumulatorUse::kReadWrite)                                     \
-                                                                              \
-  /* Literals */                                                              \
-  V(CreateRegExpLiteral, AccumulatorUse::kWrite, OperandType::kIdx,           \
-    OperandType::kIdx, OperandType::kFlag8)                                   \
-  V(CreateArrayLiteral, AccumulatorUse::kWrite, OperandType::kIdx,            \
-    OperandType::kIdx, OperandType::kFlag8)                                   \
-  V(CreateObjectLiteral, AccumulatorUse::kWrite, OperandType::kIdx,           \
-    OperandType::kIdx, OperandType::kFlag8)                                   \
-                                                                              \
-  /* Closure allocation */                                                    \
-  V(CreateClosure, AccumulatorUse::kWrite, OperandType::kIdx,                 \
-    OperandType::kFlag8)                                                      \
-                                                                              \
-  /* Arguments allocation */                                                  \
-  V(CreateMappedArguments, AccumulatorUse::kWrite)                            \
-  V(CreateUnmappedArguments, AccumulatorUse::kWrite)                          \
-  V(CreateRestParameter, AccumulatorUse::kWrite)                              \
-                                                                              \
-  /* Control Flow */                                                          \
-  V(Jump, AccumulatorUse::kNone, OperandType::kImm)                           \
-  V(JumpConstant, AccumulatorUse::kNone, OperandType::kIdx)                   \
-  V(JumpIfTrue, AccumulatorUse::kRead, OperandType::kImm)                     \
-  V(JumpIfTrueConstant, AccumulatorUse::kRead, OperandType::kIdx)             \
-  V(JumpIfFalse, AccumulatorUse::kRead, OperandType::kImm)                    \
-  V(JumpIfFalseConstant, AccumulatorUse::kRead, OperandType::kIdx)            \
-  V(JumpIfToBooleanTrue, AccumulatorUse::kRead, OperandType::kImm)            \
-  V(JumpIfToBooleanTrueConstant, AccumulatorUse::kRead, OperandType::kIdx)    \
-  V(JumpIfToBooleanFalse, AccumulatorUse::kRead, OperandType::kImm)           \
-  V(JumpIfToBooleanFalseConstant, AccumulatorUse::kRead, OperandType::kIdx)   \
-  V(JumpIfNull, AccumulatorUse::kRead, OperandType::kImm)                     \
-  V(JumpIfNullConstant, AccumulatorUse::kRead, OperandType::kIdx)             \
-  V(JumpIfUndefined, AccumulatorUse::kRead, OperandType::kImm)                \
-  V(JumpIfUndefinedConstant, AccumulatorUse::kRead, OperandType::kIdx)        \
-  V(JumpIfNotHole, AccumulatorUse::kRead, OperandType::kImm)                  \
-  V(JumpIfNotHoleConstant, AccumulatorUse::kRead, OperandType::kIdx)          \
-                                                                              \
-  /* Complex flow control For..in */                                          \
-  V(ForInPrepare, AccumulatorUse::kRead, OperandType::kRegOutTriple)          \
-  V(ForInDone, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg)  \
-  V(ForInNext, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg,  \
-    OperandType::kRegPair, OperandType::kIdx)                                 \
-  V(ForInStep, AccumulatorUse::kWrite, OperandType::kReg)                     \
-                                                                              \
-  /* Perform a stack guard check */                                           \
-  V(StackCheck, AccumulatorUse::kNone)                                        \
-                                                                              \
-  /* Non-local flow control */                                                \
-  V(Throw, AccumulatorUse::kRead)                                             \
-  V(ReThrow, AccumulatorUse::kRead)                                           \
-  V(Return, AccumulatorUse::kRead)                                            \
-                                                                              \
-  /* Generators */                                                            \
-  V(SuspendGenerator, AccumulatorUse::kRead, OperandType::kReg)               \
-  V(ResumeGenerator, AccumulatorUse::kWrite, OperandType::kReg)               \
-                                                                              \
-  /* Debugger */                                                              \
-  V(Debugger, AccumulatorUse::kNone)                                          \
-  DEBUG_BREAK_BYTECODE_LIST(V)                                                \
-                                                                              \
-  /* Illegal bytecode (terminates execution) */                               \
-  V(Illegal, AccumulatorUse::kNone)                                           \
-                                                                              \
-  /* No operation (used to maintain source positions for peephole */          \
-  /* eliminated bytecodes). */                                                \
+#define BYTECODE_LIST(V)                                                       \
+  /* Extended width operands */                                                \
+  V(Wide, AccumulatorUse::kNone)                                               \
+  V(ExtraWide, AccumulatorUse::kNone)                                          \
+                                                                               \
+  /* Loading the accumulator */                                                \
+  V(LdaZero, AccumulatorUse::kWrite)                                           \
+  V(LdaSmi, AccumulatorUse::kWrite, OperandType::kImm)                         \
+  V(LdaUndefined, AccumulatorUse::kWrite)                                      \
+  V(LdaNull, AccumulatorUse::kWrite)                                           \
+  V(LdaTheHole, AccumulatorUse::kWrite)                                        \
+  V(LdaTrue, AccumulatorUse::kWrite)                                           \
+  V(LdaFalse, AccumulatorUse::kWrite)                                          \
+  V(LdaConstant, AccumulatorUse::kWrite, OperandType::kIdx)                    \
+                                                                               \
+  /* Loading registers */                                                      \
+  V(LdrUndefined, AccumulatorUse::kNone, OperandType::kRegOut)                 \
+                                                                               \
+  /* Globals */                                                                \
+  V(LdaGlobal, AccumulatorUse::kWrite, OperandType::kIdx)                      \
+  V(LdrGlobal, AccumulatorUse::kNone, OperandType::kIdx, OperandType::kRegOut) \
+  V(LdaGlobalInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx)          \
+  V(StaGlobalSloppy, AccumulatorUse::kRead, OperandType::kIdx,                 \
+    OperandType::kIdx)                                                         \
+  V(StaGlobalStrict, AccumulatorUse::kRead, OperandType::kIdx,                 \
+    OperandType::kIdx)                                                         \
+                                                                               \
+  /* Context operations */                                                     \
+  V(PushContext, AccumulatorUse::kRead, OperandType::kRegOut)                  \
+  V(PopContext, AccumulatorUse::kNone, OperandType::kReg)                      \
+  V(LdaContextSlot, AccumulatorUse::kWrite, OperandType::kReg,                 \
+    OperandType::kIdx)                                                         \
+  V(LdrContextSlot, AccumulatorUse::kNone, OperandType::kReg,                  \
+    OperandType::kIdx, OperandType::kRegOut)                                   \
+  V(StaContextSlot, AccumulatorUse::kRead, OperandType::kReg,                  \
+    OperandType::kIdx)                                                         \
+                                                                               \
+  /* Load-Store lookup slots */                                                \
+  V(LdaLookupSlot, AccumulatorUse::kWrite, OperandType::kIdx)                  \
+  V(LdaLookupSlotInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx)      \
+  V(StaLookupSlotSloppy, AccumulatorUse::kReadWrite, OperandType::kIdx)        \
+  V(StaLookupSlotStrict, AccumulatorUse::kReadWrite, OperandType::kIdx)        \
+                                                                               \
+  /* Register-accumulator transfers */                                         \
+  V(Ldar, AccumulatorUse::kWrite, OperandType::kReg)                           \
+  V(Star, AccumulatorUse::kRead, OperandType::kRegOut)                         \
+                                                                               \
+  /* Register-register transfers */                                            \
+  V(Mov, AccumulatorUse::kNone, OperandType::kReg, OperandType::kRegOut)       \
+                                                                               \
+  /* Property loads (LoadIC) operations */                                     \
+  V(LdaNamedProperty, AccumulatorUse::kWrite, OperandType::kReg,               \
+    OperandType::kIdx, OperandType::kIdx)                                      \
+  V(LdrNamedProperty, AccumulatorUse::kNone, OperandType::kReg,                \
+    OperandType::kIdx, OperandType::kIdx, OperandType::kRegOut)                \
+  V(LdaKeyedProperty, AccumulatorUse::kReadWrite, OperandType::kReg,           \
+    OperandType::kIdx)                                                         \
+  V(LdrKeyedProperty, AccumulatorUse::kRead, OperandType::kReg,                \
+    OperandType::kIdx, OperandType::kRegOut)                                   \
+                                                                               \
+  /* Property stores (StoreIC) operations */                                   \
+  V(StaNamedPropertySloppy, AccumulatorUse::kRead, OperandType::kReg,          \
+    OperandType::kIdx, OperandType::kIdx)                                      \
+  V(StaNamedPropertyStrict, AccumulatorUse::kRead, OperandType::kReg,          \
+    OperandType::kIdx, OperandType::kIdx)                                      \
+  V(StaKeyedPropertySloppy, AccumulatorUse::kRead, OperandType::kReg,          \
+    OperandType::kReg, OperandType::kIdx)                                      \
+  V(StaKeyedPropertyStrict, AccumulatorUse::kRead, OperandType::kReg,          \
+    OperandType::kReg, OperandType::kIdx)                                      \
+                                                                               \
+  /* Binary Operators */                                                       \
+  V(Add, AccumulatorUse::kReadWrite, OperandType::kReg)                        \
+  V(Sub, AccumulatorUse::kReadWrite, OperandType::kReg)                        \
+  V(Mul, AccumulatorUse::kReadWrite, OperandType::kReg)                        \
+  V(Div, AccumulatorUse::kReadWrite, OperandType::kReg)                        \
+  V(Mod, AccumulatorUse::kReadWrite, OperandType::kReg)                        \
+  V(BitwiseOr, AccumulatorUse::kReadWrite, OperandType::kReg)                  \
+  V(BitwiseXor, AccumulatorUse::kReadWrite, OperandType::kReg)                 \
+  V(BitwiseAnd, AccumulatorUse::kReadWrite, OperandType::kReg)                 \
+  V(ShiftLeft, AccumulatorUse::kReadWrite, OperandType::kReg)                  \
+  V(ShiftRight, AccumulatorUse::kReadWrite, OperandType::kReg)                 \
+  V(ShiftRightLogical, AccumulatorUse::kReadWrite, OperandType::kReg)          \
+                                                                               \
+  /* Unary Operators */                                                        \
+  V(Inc, AccumulatorUse::kReadWrite)                                           \
+  V(Dec, AccumulatorUse::kReadWrite)                                           \
+  V(ToBooleanLogicalNot, AccumulatorUse::kReadWrite)                           \
+  V(LogicalNot, AccumulatorUse::kReadWrite)                                    \
+  V(TypeOf, AccumulatorUse::kReadWrite)                                        \
+  V(DeletePropertyStrict, AccumulatorUse::kReadWrite, OperandType::kReg)       \
+  V(DeletePropertySloppy, AccumulatorUse::kReadWrite, OperandType::kReg)       \
+                                                                               \
+  /* Call operations */                                                        \
+  V(Call, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg,        \
+    OperandType::kRegCount, OperandType::kIdx)                                 \
+  V(TailCall, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg,    \
+    OperandType::kRegCount, OperandType::kIdx)                                 \
+  V(CallRuntime, AccumulatorUse::kWrite, OperandType::kRuntimeId,              \
+    OperandType::kMaybeReg, OperandType::kRegCount)                            \
+  V(CallRuntimeForPair, AccumulatorUse::kNone, OperandType::kRuntimeId,        \
+    OperandType::kMaybeReg, OperandType::kRegCount, OperandType::kRegOutPair)  \
+  V(CallJSRuntime, AccumulatorUse::kWrite, OperandType::kIdx,                  \
+    OperandType::kReg, OperandType::kRegCount)                                 \
+                                                                               \
+  /* Intrinsics */                                                             \
+  V(InvokeIntrinsic, AccumulatorUse::kWrite, OperandType::kIntrinsicId,        \
+    OperandType::kMaybeReg, OperandType::kRegCount)                            \
+                                                                               \
+  /* New operator */                                                           \
+  V(New, AccumulatorUse::kReadWrite, OperandType::kReg,                        \
+    OperandType::kMaybeReg, OperandType::kRegCount)                            \
+                                                                               \
+  /* Test Operators */                                                         \
+  V(TestEqual, AccumulatorUse::kReadWrite, OperandType::kReg)                  \
+  V(TestNotEqual, AccumulatorUse::kReadWrite, OperandType::kReg)               \
+  V(TestEqualStrict, AccumulatorUse::kReadWrite, OperandType::kReg)            \
+  V(TestLessThan, AccumulatorUse::kReadWrite, OperandType::kReg)               \
+  V(TestGreaterThan, AccumulatorUse::kReadWrite, OperandType::kReg)            \
+  V(TestLessThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg)        \
+  V(TestGreaterThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg)     \
+  V(TestInstanceOf, AccumulatorUse::kReadWrite, OperandType::kReg)             \
+  V(TestIn, AccumulatorUse::kReadWrite, OperandType::kReg)                     \
+                                                                               \
+  /* Cast operators */                                                         \
+  V(ToName, AccumulatorUse::kReadWrite)                                        \
+  V(ToNumber, AccumulatorUse::kReadWrite)                                      \
+  V(ToObject, AccumulatorUse::kReadWrite)                                      \
+                                                                               \
+  /* Literals */                                                               \
+  V(CreateRegExpLiteral, AccumulatorUse::kWrite, OperandType::kIdx,            \
+    OperandType::kIdx, OperandType::kFlag8)                                    \
+  V(CreateArrayLiteral, AccumulatorUse::kWrite, OperandType::kIdx,             \
+    OperandType::kIdx, OperandType::kFlag8)                                    \
+  V(CreateObjectLiteral, AccumulatorUse::kWrite, OperandType::kIdx,            \
+    OperandType::kIdx, OperandType::kFlag8)                                    \
+                                                                               \
+  /* Closure allocation */                                                     \
+  V(CreateClosure, AccumulatorUse::kWrite, OperandType::kIdx,                  \
+    OperandType::kFlag8)                                                       \
+                                                                               \
+  /* Arguments allocation */                                                   \
+  V(CreateMappedArguments, AccumulatorUse::kWrite)                             \
+  V(CreateUnmappedArguments, AccumulatorUse::kWrite)                           \
+  V(CreateRestParameter, AccumulatorUse::kWrite)                               \
+                                                                               \
+  /* Control Flow */                                                           \
+  V(Jump, AccumulatorUse::kNone, OperandType::kImm)                            \
+  V(JumpConstant, AccumulatorUse::kNone, OperandType::kIdx)                    \
+  V(JumpIfTrue, AccumulatorUse::kRead, OperandType::kImm)                      \
+  V(JumpIfTrueConstant, AccumulatorUse::kRead, OperandType::kIdx)              \
+  V(JumpIfFalse, AccumulatorUse::kRead, OperandType::kImm)                     \
+  V(JumpIfFalseConstant, AccumulatorUse::kRead, OperandType::kIdx)             \
+  V(JumpIfToBooleanTrue, AccumulatorUse::kRead, OperandType::kImm)             \
+  V(JumpIfToBooleanTrueConstant, AccumulatorUse::kRead, OperandType::kIdx)     \
+  V(JumpIfToBooleanFalse, AccumulatorUse::kRead, OperandType::kImm)            \
+  V(JumpIfToBooleanFalseConstant, AccumulatorUse::kRead, OperandType::kIdx)    \
+  V(JumpIfNull, AccumulatorUse::kRead, OperandType::kImm)                      \
+  V(JumpIfNullConstant, AccumulatorUse::kRead, OperandType::kIdx)              \
+  V(JumpIfUndefined, AccumulatorUse::kRead, OperandType::kImm)                 \
+  V(JumpIfUndefinedConstant, AccumulatorUse::kRead, OperandType::kIdx)         \
+  V(JumpIfNotHole, AccumulatorUse::kRead, OperandType::kImm)                   \
+  V(JumpIfNotHoleConstant, AccumulatorUse::kRead, OperandType::kIdx)           \
+                                                                               \
+  /* Complex flow control For..in */                                           \
+  V(ForInPrepare, AccumulatorUse::kRead, OperandType::kRegOutTriple)           \
+  V(ForInDone, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg)   \
+  V(ForInNext, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg,   \
+    OperandType::kRegPair, OperandType::kIdx)                                  \
+  V(ForInStep, AccumulatorUse::kWrite, OperandType::kReg)                      \
+                                                                               \
+  /* Perform a stack guard check */                                            \
+  V(StackCheck, AccumulatorUse::kNone)                                         \
+                                                                               \
+  /* Non-local flow control */                                                 \
+  V(Throw, AccumulatorUse::kRead)                                              \
+  V(ReThrow, AccumulatorUse::kRead)                                            \
+  V(Return, AccumulatorUse::kRead)                                             \
+                                                                               \
+  /* Generators */                                                             \
+  V(SuspendGenerator, AccumulatorUse::kRead, OperandType::kReg)                \
+  V(ResumeGenerator, AccumulatorUse::kWrite, OperandType::kReg)                \
+                                                                               \
+  /* Debugger */                                                               \
+  V(Debugger, AccumulatorUse::kNone)                                           \
+  DEBUG_BREAK_BYTECODE_LIST(V)                                                 \
+                                                                               \
+  /* Illegal bytecode (terminates execution) */                                \
+  V(Illegal, AccumulatorUse::kNone)                                            \
+                                                                               \
+  /* No operation (used to maintain source positions for peephole */           \
+  /* eliminated bytecodes). */                                                 \
   V(Nop, AccumulatorUse::kNone)
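// Reviewer note, not part of the patch: the new Ldr* bytecodes are
// register-destination variants of the corresponding Lda* loads, so a
// load-then-store pair can be folded into a single bytecode. Illustrative
// only, with a hypothetical feedback slot index and register:
//
//   LdaGlobal [7]
//   Star r3            =>   LdrGlobal [7], r3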
 
 enum class AccumulatorUse : uint8_t {
@@ -376,6 +386,11 @@
   static Register bytecode_offset();
   bool is_bytecode_offset() const;
 
+  // Returns a register that can be used to represent the accumulator
+  // within code in the interpreter, but should never be emitted in
+  // bytecode.
+  static Register virtual_accumulator();
+
   OperandSize SizeOfOperand() const;
 
   int32_t ToOperand() const { return kRegisterFileStartOffset - index_; }
@@ -442,7 +457,10 @@
   static const char* OperandSizeToString(OperandSize operand_size);
 
   // Returns byte value of bytecode.
-  static uint8_t ToByte(Bytecode bytecode);
+  static uint8_t ToByte(Bytecode bytecode) {
+    DCHECK_LE(bytecode, Bytecode::kLast);
+    return static_cast<uint8_t>(bytecode);
+  }
 
   // Returns bytecode for |value|.
   static Bytecode FromByte(uint8_t value);
@@ -476,10 +494,22 @@
   // Return true if |bytecode| writes the accumulator with a boolean value.
   static bool WritesBooleanToAccumulator(Bytecode bytecode);
 
-  // Return true if |bytecode| is an accumulator load bytecode,
+  // Return true if |bytecode| is an accumulator load without effects,
   // e.g. LdaConstant, LdaTrue, Ldar.
   static bool IsAccumulatorLoadWithoutEffects(Bytecode bytecode);
 
+  // Return true if |bytecode| is a jump without effects,
+  // e.g. any jump excluding those that coerce the accumulator to a
+  // boolean, like JumpIfToBooleanTrue.
+  static bool IsJumpWithoutEffects(Bytecode bytecode);
+
+  // Return true if |bytecode| is a register load without effects,
+  // e.g. Mov, Star, LdrUndefined.
+  static bool IsRegisterLoadWithoutEffects(Bytecode bytecode);
+
+  // Returns true if |bytecode| has no external side effects.
+  static bool IsWithoutExternalSideEffects(Bytecode bytecode);
+
   // Returns the i-th operand of |bytecode|.
   static OperandType GetOperandType(Bytecode bytecode, int i);
 
@@ -487,10 +517,18 @@
   // OperandType::kNone.
   static const OperandType* GetOperandTypes(Bytecode bytecode);
 
+  // Returns a pointer to an array of operand type info terminated in
+  // OperandTypeInfo::kNone.
+  static const OperandTypeInfo* GetOperandTypeInfos(Bytecode bytecode);
+
   // Returns the size of the i-th operand of |bytecode|.
   static OperandSize GetOperandSize(Bytecode bytecode, int i,
                                     OperandScale operand_scale);
 
+  // Returns a pointer to an array of the operand sizes for |bytecode|.
+  static const OperandSize* GetOperandSizes(Bytecode bytecode,
+                                            OperandScale operand_scale);
+
   // Returns the offset of the i-th operand of |bytecode| relative to the start
   // of the bytecode.
   static int GetOperandOffset(Bytecode bytecode, int i,
@@ -617,17 +655,7 @@
   static OperandSize SizeForSignedOperand(int value);
 
   // Return the operand size required to hold an unsigned operand.
-  static OperandSize SizeForUnsignedOperand(int value);
-
-  // Return the operand size required to hold an unsigned operand.
-  static OperandSize SizeForUnsignedOperand(size_t value);
-
-  // Return the OperandScale required for bytecode emission of
-  // operand sizes.
-  static OperandScale OperandSizesToScale(
-      OperandSize size0, OperandSize size1 = OperandSize::kByte,
-      OperandSize size2 = OperandSize::kByte,
-      OperandSize size3 = OperandSize::kByte);
+  static OperandSize SizeForUnsignedOperand(uint32_t value);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Bytecodes);
diff --git a/src/interpreter/control-flow-builders.h b/src/interpreter/control-flow-builders.h
index 8778b26..b72d6d5 100644
--- a/src/interpreter/control-flow-builders.h
+++ b/src/interpreter/control-flow-builders.h
@@ -7,6 +7,7 @@
 
 #include "src/interpreter/bytecode-array-builder.h"
 
+#include "src/interpreter/bytecode-label.h"
 #include "src/zone-containers.h"
 
 namespace v8 {
diff --git a/src/interpreter/interpreter-assembler.cc b/src/interpreter/interpreter-assembler.cc
index 4e911eb..ee5f8be 100644
--- a/src/interpreter/interpreter-assembler.cc
+++ b/src/interpreter/interpreter-assembler.cc
@@ -31,6 +31,7 @@
                         Bytecodes::ReturnCount(bytecode)),
       bytecode_(bytecode),
       operand_scale_(operand_scale),
+      interpreted_frame_pointer_(this, MachineType::PointerRepresentation()),
       accumulator_(this, MachineRepresentation::kTagged),
       accumulator_use_(AccumulatorUse::kNone),
       made_call_(false),
@@ -50,6 +51,13 @@
   DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
 }
 
+Node* InterpreterAssembler::GetInterpretedFramePointer() {
+  if (!interpreted_frame_pointer_.IsBound()) {
+    interpreted_frame_pointer_.Bind(LoadParentFramePointer());
+  }
+  return interpreted_frame_pointer_.value();
+}
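// Reviewer note, not part of the patch: |interpreted_frame_pointer_| is bound
// lazily on first use, so each bytecode handler loads the parent frame
// pointer at most once; the LoadRegister/StoreRegister changes below all
// reuse the cached value.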
+
 Node* InterpreterAssembler::GetAccumulatorUnchecked() {
   return accumulator_.value();
 }
@@ -93,7 +101,8 @@
 }
 
 Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
-  return IntPtrAdd(LoadParentFramePointer(), RegisterFrameOffset(reg_index));
+  return IntPtrAdd(GetInterpretedFramePointer(),
+                   RegisterFrameOffset(reg_index));
 }
 
 Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
@@ -101,24 +110,24 @@
 }
 
 Node* InterpreterAssembler::LoadRegister(Register reg) {
-  return Load(MachineType::AnyTagged(), LoadParentFramePointer(),
+  return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
               IntPtrConstant(reg.ToOperand() << kPointerSizeLog2));
 }
 
 Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
-  return Load(MachineType::AnyTagged(), LoadParentFramePointer(),
+  return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
               RegisterFrameOffset(reg_index));
 }
 
 Node* InterpreterAssembler::StoreRegister(Node* value, Register reg) {
   return StoreNoWriteBarrier(
-      MachineRepresentation::kTagged, LoadParentFramePointer(),
+      MachineRepresentation::kTagged, GetInterpretedFramePointer(),
       IntPtrConstant(reg.ToOperand() << kPointerSizeLog2), value);
 }
 
 Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
   return StoreNoWriteBarrier(MachineRepresentation::kTagged,
-                             LoadParentFramePointer(),
+                             GetInterpretedFramePointer(),
                              RegisterFrameOffset(reg_index), value);
 }
 
@@ -363,6 +372,15 @@
   return BytecodeUnsignedOperand(operand_index, operand_size);
 }
 
+Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
+  DCHECK(OperandType::kIntrinsicId ==
+         Bytecodes::GetOperandType(bytecode_, operand_index));
+  OperandSize operand_size =
+      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+  DCHECK_EQ(operand_size, OperandSize::kByte);
+  return BytecodeUnsignedOperand(operand_index, operand_size);
+}
+
 Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
   Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
                                         BytecodeArray::kConstantPoolOffset);
@@ -394,10 +412,9 @@
 
 Node* InterpreterAssembler::LoadTypeFeedbackVector() {
   Node* function = LoadRegister(Register::function_closure());
-  Node* shared_info =
-      LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
+  Node* literals = LoadObjectField(function, JSFunction::kLiteralsOffset);
   Node* vector =
-      LoadObjectField(shared_info, SharedFunctionInfo::kFeedbackVectorOffset);
+      LoadObjectField(literals, LiteralsArray::kFeedbackVectorOffset);
   return vector;
 }
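
Note on GetInterpretedFramePointer() above: the parent frame pointer is now
loaded lazily, once, and cached in a CodeStubAssembler::Variable, so the many
register loads/stores in this file no longer re-emit LoadParentFramePointer.
A stand-alone toy of the same lazy-bind pattern (names here are illustrative,
not V8 API):

  #include <functional>
  #include <utility>

  class LazyValue {
   public:
    explicit LazyValue(std::function<long()> compute)
        : compute_(std::move(compute)) {}
    long Get() {
      if (!bound_) {          // mirrors Variable::IsBound()
        value_ = compute_();  // mirrors Bind(LoadParentFramePointer())
        bound_ = true;
      }
      return value_;          // later calls reuse the cached value
    }
   private:
    std::function<long()> compute_;
    bool bound_ = false;
    long value_ = 0;
  };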
 
diff --git a/src/interpreter/interpreter-assembler.h b/src/interpreter/interpreter-assembler.h
index f8d4b7c..183d4dd 100644
--- a/src/interpreter/interpreter-assembler.h
+++ b/src/interpreter/interpreter-assembler.h
@@ -41,6 +41,9 @@
   // Returns the runtime id immediate for bytecode operand
   // |operand_index| in the current bytecode.
   compiler::Node* BytecodeOperandRuntimeId(int operand_index);
+  // Returns the intrinsic id immediate for bytecode operand
+  // |operand_index| in the current bytecode.
+  compiler::Node* BytecodeOperandIntrinsicId(int operand_index);
 
   // Accumulator.
   compiler::Node* GetAccumulator();
@@ -146,6 +149,9 @@
   void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
                            BailoutReason bailout_reason);
 
+  // Returns the offset from the BytecodeArrayPointer of the current bytecode.
+  compiler::Node* BytecodeOffset();
+
  protected:
   Bytecode bytecode() const { return bytecode_; }
   static bool TargetSupportsUnalignedAccess();
@@ -153,8 +159,7 @@
  private:
   // Returns a tagged pointer to the current function's BytecodeArray object.
   compiler::Node* BytecodeArrayTaggedPointer();
-  // Returns the offset from the BytecodeArrayPointer of the current bytecode.
-  compiler::Node* BytecodeOffset();
+
   // Returns a raw pointer to first entry in the interpreter dispatch table.
   compiler::Node* DispatchTableRawPointer();
 
@@ -163,6 +168,10 @@
   // tracing as these need to bypass accumulator use validity checks.
   compiler::Node* GetAccumulatorUnchecked();
 
+  // Returns the frame pointer for the interpreted frame of the function being
+  // interpreted.
+  compiler::Node* GetInterpretedFramePointer();
+
   // Saves and restores interpreter bytecode offset to the interpreter stack
   // frame when performing a call.
   void CallPrologue() override;
@@ -229,6 +238,7 @@
 
   Bytecode bytecode_;
   OperandScale operand_scale_;
+  CodeStubAssembler::Variable interpreted_frame_pointer_;
   CodeStubAssembler::Variable accumulator_;
   AccumulatorUse accumulator_use_;
   bool made_call_;
diff --git a/src/interpreter/interpreter-intrinsics.cc b/src/interpreter/interpreter-intrinsics.cc
index 6d9917d..109bf8e 100644
--- a/src/interpreter/interpreter-intrinsics.cc
+++ b/src/interpreter/interpreter-intrinsics.cc
@@ -4,6 +4,8 @@
 
 #include "src/interpreter/interpreter-intrinsics.h"
 
+#include "src/code-factory.h"
+
 namespace v8 {
 namespace internal {
 namespace interpreter {
@@ -13,8 +15,11 @@
 #define __ assembler_->
 
 IntrinsicsHelper::IntrinsicsHelper(InterpreterAssembler* assembler)
-    : assembler_(assembler) {}
+    : isolate_(assembler->isolate()),
+      zone_(assembler->zone()),
+      assembler_(assembler) {}
 
+// static
 bool IntrinsicsHelper::IsSupported(Runtime::FunctionId function_id) {
   switch (function_id) {
 #define SUPPORTED(name, lower_case, count) case Runtime::kInline##name:
@@ -26,6 +31,36 @@
   }
 }
 
+// static
+IntrinsicsHelper::IntrinsicId IntrinsicsHelper::FromRuntimeId(
+    Runtime::FunctionId function_id) {
+  switch (function_id) {
+#define TO_RUNTIME_ID(name, lower_case, count) \
+  case Runtime::kInline##name:                 \
+    return IntrinsicId::k##name;
+    INTRINSICS_LIST(TO_RUNTIME_ID)
+#undef TO_RUNTIME_ID
+    default:
+      UNREACHABLE();
+      return static_cast<IntrinsicsHelper::IntrinsicId>(-1);
+  }
+}
+
+// static
+Runtime::FunctionId IntrinsicsHelper::ToRuntimeId(
+    IntrinsicsHelper::IntrinsicId intrinsic_id) {
+  switch (intrinsic_id) {
+#define TO_INTRINSIC_ID(name, lower_case, count) \
+  case IntrinsicId::k##name:                     \
+    return Runtime::kInline##name;
+    INTRINSICS_LIST(TO_INTRINSIC_ID)
+#undef TO_INTRINSIC_ID
+    default:
+      UNREACHABLE();
+      return static_cast<Runtime::FunctionId>(-1);
+  }
+}
+
 Node* IntrinsicsHelper::InvokeIntrinsic(Node* function_id, Node* context,
                                         Node* first_arg_reg, Node* arg_count) {
   InterpreterAssembler::Label abort(assembler_), end(assembler_);
@@ -42,25 +77,27 @@
 #undef LABEL_POINTER
 
 #define CASE(name, lower_case, count) \
-  static_cast<int32_t>(Runtime::kInline##name),
+  static_cast<int32_t>(IntrinsicId::k##name),
   int32_t cases[] = {INTRINSICS_LIST(CASE)};
 #undef CASE
 
   __ Switch(function_id, &abort, cases, labels, arraysize(cases));
 #define HANDLE_CASE(name, lower_case, expected_arg_count)   \
   __ Bind(&lower_case);                                     \
-  if (FLAG_debug_code) {                                    \
+  if (FLAG_debug_code && expected_arg_count >= 0) {         \
     AbortIfArgCountMismatch(expected_arg_count, arg_count); \
   }                                                         \
-  result.Bind(name(first_arg_reg));                         \
+  result.Bind(name(first_arg_reg, arg_count, context));     \
   __ Goto(&end);
   INTRINSICS_LIST(HANDLE_CASE)
 #undef HANDLE_CASE
 
   __ Bind(&abort);
-  __ Abort(BailoutReason::kUnexpectedFunctionIDForInvokeIntrinsic);
-  result.Bind(__ UndefinedConstant());
-  __ Goto(&end);
+  {
+    __ Abort(BailoutReason::kUnexpectedFunctionIDForInvokeIntrinsic);
+    result.Bind(__ UndefinedConstant());
+    __ Goto(&end);
+  }
 
   __ Bind(&end);
   return result.value();
@@ -74,84 +111,246 @@
 
   InterpreterAssembler::Label if_true(assembler_), if_false(assembler_),
       end(assembler_);
-  Node* condition;
   if (mode == kInstanceTypeEqual) {
-    condition = __ Word32Equal(instance_type, __ Int32Constant(type));
+    return __ Word32Equal(instance_type, __ Int32Constant(type));
   } else {
     DCHECK(mode == kInstanceTypeGreaterThanOrEqual);
-    condition =
-        __ Int32GreaterThanOrEqual(instance_type, __ Int32Constant(type));
+    return __ Int32GreaterThanOrEqual(instance_type, __ Int32Constant(type));
   }
-  __ Branch(condition, &if_true, &if_false);
+}
 
-  __ Bind(&if_true);
-  return_value.Bind(__ BooleanConstant(true));
-  __ Goto(&end);
+Node* IntrinsicsHelper::IsInstanceType(Node* input, int type) {
+  InterpreterAssembler::Variable return_value(assembler_,
+                                              MachineRepresentation::kTagged);
+  InterpreterAssembler::Label if_not_smi(assembler_), return_true(assembler_),
+      return_false(assembler_), end(assembler_);
+  Node* arg = __ LoadRegister(input);
+  __ GotoIf(__ WordIsSmi(arg), &return_false);
 
-  __ Bind(&if_false);
-  return_value.Bind(__ BooleanConstant(false));
-  __ Goto(&end);
+  Node* condition = CompareInstanceType(arg, type, kInstanceTypeEqual);
+  __ Branch(condition, &return_true, &return_false);
+
+  __ Bind(&return_true);
+  {
+    return_value.Bind(__ BooleanConstant(true));
+    __ Goto(&end);
+  }
+
+  __ Bind(&return_false);
+  {
+    return_value.Bind(__ BooleanConstant(false));
+    __ Goto(&end);
+  }
 
   __ Bind(&end);
   return return_value.value();
 }
 
-Node* IntrinsicsHelper::IsJSReceiver(Node* input) {
+Node* IntrinsicsHelper::IsJSReceiver(Node* input, Node* arg_count,
+                                     Node* context) {
   InterpreterAssembler::Variable return_value(assembler_,
                                               MachineRepresentation::kTagged);
-
-  InterpreterAssembler::Label if_smi(assembler_), if_not_smi(assembler_),
+  InterpreterAssembler::Label return_true(assembler_), return_false(assembler_),
       end(assembler_);
+
   Node* arg = __ LoadRegister(input);
+  __ GotoIf(__ WordIsSmi(arg), &return_false);
 
-  __ Branch(__ WordIsSmi(arg), &if_smi, &if_not_smi);
-  __ Bind(&if_smi);
-  return_value.Bind(__ BooleanConstant(false));
-  __ Goto(&end);
-
-  __ Bind(&if_not_smi);
   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-  return_value.Bind(CompareInstanceType(arg, FIRST_JS_RECEIVER_TYPE,
-                                        kInstanceTypeGreaterThanOrEqual));
-  __ Goto(&end);
+  Node* condition = CompareInstanceType(arg, FIRST_JS_RECEIVER_TYPE,
+                                        kInstanceTypeGreaterThanOrEqual);
+  __ Branch(condition, &return_true, &return_false);
+
+  __ Bind(&return_true);
+  {
+    return_value.Bind(__ BooleanConstant(true));
+    __ Goto(&end);
+  }
+
+  __ Bind(&return_false);
+  {
+    return_value.Bind(__ BooleanConstant(false));
+    __ Goto(&end);
+  }
 
   __ Bind(&end);
   return return_value.value();
 }
 
-Node* IntrinsicsHelper::IsArray(Node* input) {
+Node* IntrinsicsHelper::IsArray(Node* input, Node* arg_count, Node* context) {
+  return IsInstanceType(input, JS_ARRAY_TYPE);
+}
+
+Node* IntrinsicsHelper::IsJSProxy(Node* input, Node* arg_count, Node* context) {
+  return IsInstanceType(input, JS_PROXY_TYPE);
+}
+
+Node* IntrinsicsHelper::IsRegExp(Node* input, Node* arg_count, Node* context) {
+  return IsInstanceType(input, JS_REGEXP_TYPE);
+}
+
+Node* IntrinsicsHelper::IsTypedArray(Node* input, Node* arg_count,
+                                     Node* context) {
+  return IsInstanceType(input, JS_TYPED_ARRAY_TYPE);
+}
+
+Node* IntrinsicsHelper::IsSmi(Node* input, Node* arg_count, Node* context) {
   InterpreterAssembler::Variable return_value(assembler_,
                                               MachineRepresentation::kTagged);
-
   InterpreterAssembler::Label if_smi(assembler_), if_not_smi(assembler_),
       end(assembler_);
+
   Node* arg = __ LoadRegister(input);
 
   __ Branch(__ WordIsSmi(arg), &if_smi, &if_not_smi);
   __ Bind(&if_smi);
-  return_value.Bind(__ BooleanConstant(false));
-  __ Goto(&end);
+  {
+    return_value.Bind(__ BooleanConstant(true));
+    __ Goto(&end);
+  }
 
   __ Bind(&if_not_smi);
-  return_value.Bind(
-      CompareInstanceType(arg, JS_ARRAY_TYPE, kInstanceTypeEqual));
-  __ Goto(&end);
+  {
+    return_value.Bind(__ BooleanConstant(false));
+    __ Goto(&end);
+  }
 
   __ Bind(&end);
   return return_value.value();
 }
 
+Node* IntrinsicsHelper::IntrinsicAsStubCall(Node* args_reg, Node* context,
+                                            Callable const& callable) {
+  int param_count = callable.descriptor().GetParameterCount();
+  Node** args = zone()->NewArray<Node*>(param_count + 1);  // 1 for context
+  for (int i = 0; i < param_count; i++) {
+    args[i] = __ LoadRegister(args_reg);
+    args_reg = __ NextRegister(args_reg);
+  }
+  args[param_count] = context;
+
+  return __ CallStubN(callable, args);
+}
+
+Node* IntrinsicsHelper::HasProperty(Node* input, Node* arg_count,
+                                    Node* context) {
+  return IntrinsicAsStubCall(input, context,
+                             CodeFactory::HasProperty(isolate()));
+}
+
+Node* IntrinsicsHelper::MathPow(Node* input, Node* arg_count, Node* context) {
+  return IntrinsicAsStubCall(input, context, CodeFactory::MathPow(isolate()));
+}
+
+Node* IntrinsicsHelper::NewObject(Node* input, Node* arg_count, Node* context) {
+  return IntrinsicAsStubCall(input, context,
+                             CodeFactory::FastNewObject(isolate()));
+}
+
+Node* IntrinsicsHelper::NumberToString(Node* input, Node* arg_count,
+                                       Node* context) {
+  return IntrinsicAsStubCall(input, context,
+                             CodeFactory::NumberToString(isolate()));
+}
+
+Node* IntrinsicsHelper::RegExpConstructResult(Node* input, Node* arg_count,
+                                              Node* context) {
+  return IntrinsicAsStubCall(input, context,
+                             CodeFactory::RegExpConstructResult(isolate()));
+}
+
+Node* IntrinsicsHelper::RegExpExec(Node* input, Node* arg_count,
+                                   Node* context) {
+  return IntrinsicAsStubCall(input, context,
+                             CodeFactory::RegExpExec(isolate()));
+}
+
+Node* IntrinsicsHelper::SubString(Node* input, Node* arg_count, Node* context) {
+  return IntrinsicAsStubCall(input, context, CodeFactory::SubString(isolate()));
+}
+
+Node* IntrinsicsHelper::ToString(Node* input, Node* arg_count, Node* context) {
+  return IntrinsicAsStubCall(input, context, CodeFactory::ToString(isolate()));
+}
+
+Node* IntrinsicsHelper::ToName(Node* input, Node* arg_count, Node* context) {
+  return IntrinsicAsStubCall(input, context, CodeFactory::ToName(isolate()));
+}
+
+Node* IntrinsicsHelper::ToLength(Node* input, Node* arg_count, Node* context) {
+  return IntrinsicAsStubCall(input, context, CodeFactory::ToLength(isolate()));
+}
+
+Node* IntrinsicsHelper::ToInteger(Node* input, Node* arg_count, Node* context) {
+  return IntrinsicAsStubCall(input, context, CodeFactory::ToInteger(isolate()));
+}
+
+Node* IntrinsicsHelper::ToNumber(Node* input, Node* arg_count, Node* context) {
+  return IntrinsicAsStubCall(input, context, CodeFactory::ToNumber(isolate()));
+}
+
+Node* IntrinsicsHelper::ToObject(Node* input, Node* arg_count, Node* context) {
+  return IntrinsicAsStubCall(input, context, CodeFactory::ToObject(isolate()));
+}
+
+Node* IntrinsicsHelper::Call(Node* args_reg, Node* arg_count, Node* context) {
+  // First argument register contains the function target.
+  Node* function = __ LoadRegister(args_reg);
+
+  // Receiver is the second runtime call argument.
+  Node* receiver_reg = __ NextRegister(args_reg);
+  Node* receiver_arg = __ RegisterLocation(receiver_reg);
+
+  // Subtract function and receiver from arg count.
+  Node* function_and_receiver_count = __ Int32Constant(2);
+  Node* target_args_count = __ Int32Sub(arg_count, function_and_receiver_count);
+
+  if (FLAG_debug_code) {
+    InterpreterAssembler::Label arg_count_positive(assembler_);
+    Node* comparison = __ Int32LessThan(target_args_count, __ Int32Constant(0));
+    __ GotoUnless(comparison, &arg_count_positive);
+    __ Abort(kWrongArgumentCountForInvokeIntrinsic);
+    __ Goto(&arg_count_positive);
+    __ Bind(&arg_count_positive);
+  }
+
+  Node* result = __ CallJS(function, context, receiver_arg, target_args_count,
+                           TailCallMode::kDisallow);
+  return result;
+}
+
+Node* IntrinsicsHelper::ValueOf(Node* args_reg, Node* arg_count,
+                                Node* context) {
+  InterpreterAssembler::Variable return_value(assembler_,
+                                              MachineRepresentation::kTagged);
+  InterpreterAssembler::Label done(assembler_);
+
+  Node* object = __ LoadRegister(args_reg);
+  return_value.Bind(object);
+
+  // If the object is a smi, return the object.
+  __ GotoIf(__ WordIsSmi(object), &done);
+
+  // If the object is not a value type, return the object.
+  Node* condition =
+      CompareInstanceType(object, JS_VALUE_TYPE, kInstanceTypeEqual);
+  __ GotoUnless(condition, &done);
+
+  // If the object is a value type, return the value field.
+  return_value.Bind(__ LoadObjectField(object, JSValue::kValueOffset));
+  __ Goto(&done);
+
+  __ Bind(&done);
+  return return_value.value();
+}
+
 void IntrinsicsHelper::AbortIfArgCountMismatch(int expected, Node* actual) {
-  InterpreterAssembler::Label match(assembler_), mismatch(assembler_),
-      end(assembler_);
+  InterpreterAssembler::Label match(assembler_);
   Node* comparison = __ Word32Equal(actual, __ Int32Constant(expected));
-  __ Branch(comparison, &match, &mismatch);
-  __ Bind(&mismatch);
+  __ GotoIf(comparison, &match);
   __ Abort(kWrongArgumentCountForInvokeIntrinsic);
-  __ Goto(&end);
+  __ Goto(&match);
   __ Bind(&match);
-  __ Goto(&end);
-  __ Bind(&end);
 }
 
 }  // namespace interpreter
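
Note on IntrinsicAsStubCall() above: the argument array is sized
GetParameterCount() + 1 because the stub's formal parameters are drawn from
consecutive interpreter registers and the context always occupies the final
slot. A toy model of that layout (plain C++, illustrative names, not V8 API):

  #include <vector>

  std::vector<int> BuildStubArgs(const std::vector<int>& regs, int first_reg,
                                 int param_count, int context) {
    std::vector<int> args;
    args.reserve(param_count + 1);  // + 1 for the context
    for (int i = 0; i < param_count; ++i) {
      args.push_back(regs[first_reg + i]);  // consecutive registers
    }
    args.push_back(context);  // context rides in the last slot
    return args;
  }
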
diff --git a/src/interpreter/interpreter-intrinsics.h b/src/interpreter/interpreter-intrinsics.h
index e27c678..b1c0cdc 100644
--- a/src/interpreter/interpreter-intrinsics.h
+++ b/src/interpreter/interpreter-intrinsics.h
@@ -20,14 +20,43 @@
 class Node;
 }  // namespace compiler
 
-#define INTRINSICS_LIST(V)           \
-  V(IsJSReceiver, is_js_receiver, 1) \
-  V(IsArray, is_array, 1)
-
 namespace interpreter {
 
+// List of supported intrinsics, with upper-case name, lower-case name, and
+// expected number of arguments (-1 denotes a variable argument count).
+#define INTRINSICS_LIST(V)                              \
+  V(Call, call, -1)                                     \
+  V(HasProperty, has_property, 2)                       \
+  V(IsArray, is_array, 1)                               \
+  V(IsJSProxy, is_js_proxy, 1)                          \
+  V(IsJSReceiver, is_js_receiver, 1)                    \
+  V(IsRegExp, is_regexp, 1)                             \
+  V(IsSmi, is_smi, 1)                                   \
+  V(IsTypedArray, is_typed_array, 1)                    \
+  V(MathPow, math_pow, 2)                               \
+  V(NewObject, new_object, 2)                           \
+  V(NumberToString, number_to_string, 1)                \
+  V(RegExpConstructResult, reg_exp_construct_result, 3) \
+  V(RegExpExec, reg_exp_exec, 4)                        \
+  V(SubString, sub_string, 3)                           \
+  V(ToString, to_string, 1)                             \
+  V(ToName, to_name, 1)                                 \
+  V(ToLength, to_length, 1)                             \
+  V(ToInteger, to_integer, 1)                           \
+  V(ToNumber, to_number, 1)                             \
+  V(ToObject, to_object, 1)                             \
+  V(ValueOf, value_of, 1)
+
 class IntrinsicsHelper {
  public:
+  enum class IntrinsicId {
+#define DECLARE_INTRINSIC_ID(name, lower_case, count) k##name,
+    INTRINSICS_LIST(DECLARE_INTRINSIC_ID)
+#undef DECLARE_INTRINSIC_ID
+        kIdCount
+  };
+  STATIC_ASSERT(static_cast<uint32_t>(IntrinsicId::kIdCount) <= kMaxUInt8);
+
   explicit IntrinsicsHelper(InterpreterAssembler* assembler);
 
   compiler::Node* InvokeIntrinsic(compiler::Node* function_id,
@@ -36,22 +65,36 @@
                                   compiler::Node* arg_count);
 
   static bool IsSupported(Runtime::FunctionId function_id);
+  static IntrinsicId FromRuntimeId(Runtime::FunctionId function_id);
+  static Runtime::FunctionId ToRuntimeId(IntrinsicId intrinsic_id);
 
  private:
   enum InstanceTypeCompareMode {
     kInstanceTypeEqual,
     kInstanceTypeGreaterThanOrEqual
   };
+
+  compiler::Node* IsInstanceType(compiler::Node* input, int type);
   compiler::Node* CompareInstanceType(compiler::Node* map, int type,
                                       InstanceTypeCompareMode mode);
+  compiler::Node* IntrinsicAsStubCall(compiler::Node* input,
+                                      compiler::Node* context,
+                                      Callable const& callable);
   void AbortIfArgCountMismatch(int expected, compiler::Node* actual);
-  InterpreterAssembler* assembler_;
 
-#define DECLARE_INTRINSIC_HELPER(name, lower_case, count) \
-  compiler::Node* name(compiler::Node* input);
+#define DECLARE_INTRINSIC_HELPER(name, lower_case, count)                \
+  compiler::Node* name(compiler::Node* input, compiler::Node* arg_count, \
+                       compiler::Node* context);
   INTRINSICS_LIST(DECLARE_INTRINSIC_HELPER)
 #undef DECLARE_INTRINSIC_HELPER
 
+  Isolate* isolate() { return isolate_; }
+  Zone* zone() { return zone_; }
+
+  Isolate* isolate_;
+  Zone* zone_;
+  InterpreterAssembler* assembler_;
+
   DISALLOW_COPY_AND_ASSIGN(IntrinsicsHelper);
 };
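
Note on INTRINSICS_LIST above: it is an X-macro, expanded several ways across
these two files (the IntrinsicId enum, the id <-> Runtime::FunctionId
switches, the dispatch labels, and the per-intrinsic handler declarations).
A self-contained demo of the pattern (hypothetical names):

  #define DEMO_LIST(V) \
    V(Foo, foo, 1)     \
    V(Bar, bar, 2)

  enum class DemoId {
  #define DECLARE_ID(name, lower_case, count) k##name,
    DEMO_LIST(DECLARE_ID)
  #undef DECLARE_ID
    kIdCount
  };

  const char* DemoIdToString(DemoId id) {
    switch (id) {
  #define CASE(name, lower_case, count) \
    case DemoId::k##name:               \
      return #name;
      DEMO_LIST(CASE)
  #undef CASE
      default:
        return "unknown";
    }
  }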
 
diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc
index a42da50..8a05777 100644
--- a/src/interpreter/interpreter.cc
+++ b/src/interpreter/interpreter.cc
@@ -62,10 +62,11 @@
       size_t index = GetDispatchTableIndex(Bytecode::k##Name, operand_scale);  \
       dispatch_table_[index] = code->entry();                                  \
       TraceCodegen(code);                                                      \
-      LOG_CODE_EVENT(                                                          \
+      PROFILE(                                                                 \
           isolate_,                                                            \
           CodeCreateEvent(                                                     \
-              Logger::BYTECODE_HANDLER_TAG, AbstractCode::cast(*code),         \
+              CodeEventListener::BYTECODE_HANDLER_TAG,                         \
+              AbstractCode::cast(*code),                                       \
               Bytecodes::ToString(Bytecode::k##Name, operand_scale).c_str())); \
     }                                                                          \
   }
@@ -180,9 +181,8 @@
 bool Interpreter::IsDispatchTableInitialized() {
   if (FLAG_trace_ignition || FLAG_trace_ignition_codegen ||
       FLAG_trace_ignition_dispatches) {
-    // Regenerate table to add bytecode tracing operations,
-    // print the assembly code generated by TurboFan,
-    // or instrument handlers with dispatch counters.
+    // Regenerate the table to add bytecode tracing operations, print the
+    // assembly code generated by TurboFan, or instrument handlers with
+    // dispatch counters.
     return false;
   }
   return dispatch_table_[0] != nullptr;
@@ -250,7 +250,8 @@
                                     NewStringType::kNormal)
                 .ToLocalChecked();
         Local<v8::Number> counter_object = v8::Number::New(isolate, counter);
-        CHECK(counters_row->Set(context, to_name_object, counter_object)
+        CHECK(counters_row
+                  ->DefineOwnProperty(context, to_name_object, counter_object)
                   .IsJust());
       }
     }
@@ -261,7 +262,9 @@
                                 NewStringType::kNormal)
             .ToLocalChecked();
 
-    CHECK(counters_map->Set(context, from_name_object, counters_row).IsJust());
+    CHECK(
+        counters_map->DefineOwnProperty(context, from_name_object, counters_row)
+            .IsJust());
   }
 
   return counters_map;
@@ -286,19 +289,14 @@
   __ Dispatch();
 }
 
-void Interpreter::DoLoadConstant(InterpreterAssembler* assembler) {
-  Node* index = __ BytecodeOperandIdx(0);
-  Node* constant = __ LoadConstantPoolEntry(index);
-  __ SetAccumulator(constant);
-  __ Dispatch();
-}
-
-
 // LdaConstant <idx>
 //
 // Load constant literal at |idx| in the constant pool into the accumulator.
 void Interpreter::DoLdaConstant(InterpreterAssembler* assembler) {
-  DoLoadConstant(assembler);
+  Node* index = __ BytecodeOperandIdx(0);
+  Node* constant = __ LoadConstantPoolEntry(index);
+  __ SetAccumulator(constant);
+  __ Dispatch();
 }
 
 // LdaUndefined
@@ -311,6 +309,16 @@
   __ Dispatch();
 }
 
+// LdrUndefined <reg>
+//
+// Loads undefined into register |reg|.
+void Interpreter::DoLdrUndefined(InterpreterAssembler* assembler) {
+  Node* undefined_value =
+      __ HeapConstant(isolate_->factory()->undefined_value());
+  Node* destination = __ BytecodeOperandReg(0);
+  __ StoreRegister(undefined_value, destination);
+  __ Dispatch();
+}
 
 // LdaNull
 //
@@ -321,7 +329,6 @@
   __ Dispatch();
 }
 
-
 // LdaTheHole
 //
 // Load TheHole into the accumulator.
@@ -331,7 +338,6 @@
   __ Dispatch();
 }
 
-
 // LdaTrue
 //
 // Load True into the accumulator.
@@ -341,7 +347,6 @@
   __ Dispatch();
 }
 
-
 // LdaFalse
 //
 // Load False into the accumulator.
@@ -351,7 +356,6 @@
   __ Dispatch();
 }
 
-
 // Ldar <src>
 //
 // Load accumulator with value from register <src>.
@@ -362,7 +366,6 @@
   __ Dispatch();
 }
 
-
 // Star <dst>
 //
 // Store accumulator to register <dst>.
@@ -373,7 +376,6 @@
   __ Dispatch();
 }
 
-
 // Mov <src> <dst>
 //
 // Stores the value of register <src> to register <dst>.
@@ -385,48 +387,58 @@
   __ Dispatch();
 }
 
-
-void Interpreter::DoLoadGlobal(Callable ic, InterpreterAssembler* assembler) {
+Node* Interpreter::BuildLoadGlobal(Callable ic,
+                                   InterpreterAssembler* assembler) {
   // Get the global object.
   Node* context = __ GetContext();
-  Node* native_context =
-      __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
-  Node* global = __ LoadContextSlot(native_context, Context::EXTENSION_INDEX);
 
-  // Load the global via the LoadIC.
+  // Load the global via the LoadGlobalIC.
   Node* code_target = __ HeapConstant(ic.code());
-  Node* constant_index = __ BytecodeOperandIdx(0);
-  Node* name = __ LoadConstantPoolEntry(constant_index);
-  Node* raw_slot = __ BytecodeOperandIdx(1);
+  Node* raw_slot = __ BytecodeOperandIdx(0);
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  Node* result = __ CallStub(ic.descriptor(), code_target, context, global,
-                             name, smi_slot, type_feedback_vector);
-  __ SetAccumulator(result);
-  __ Dispatch();
+  return __ CallStub(ic.descriptor(), code_target, context, smi_slot,
+                     type_feedback_vector);
 }
 
-// LdaGlobal <name_index> <slot>
+// LdaGlobal <slot>
 //
 // Load the global into the accumulator using FeedBackVector slot <slot>
 // outside of a typeof.
 void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   UNINITIALIZED);
-  DoLoadGlobal(ic, assembler);
+  Callable ic =
+      CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF);
+  Node* result = BuildLoadGlobal(ic, assembler);
+  __ SetAccumulator(result);
+  __ Dispatch();
 }
 
-// LdaGlobalInsideTypeof <name_index> <slot>
+// LdrGlobal <slot> <reg>
+//
+// Load the global into register <reg> using FeedBackVector slot <slot>
+// outside of a typeof.
+void Interpreter::DoLdrGlobal(InterpreterAssembler* assembler) {
+  Callable ic =
+      CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF);
+  Node* result = BuildLoadGlobal(ic, assembler);
+  Node* destination = __ BytecodeOperandReg(1);
+  __ StoreRegister(result, destination);
+  __ Dispatch();
+}
+
+// LdaGlobalInsideTypeof <slot>
 //
 // Load the global into the accumulator using FeedBackVector slot <slot>
 // inside of a typeof.
 void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
-                                                   UNINITIALIZED);
-  DoLoadGlobal(ic, assembler);
+  Callable ic =
+      CodeFactory::LoadGlobalICInOptimizedCode(isolate_, INSIDE_TYPEOF);
+  Node* result = BuildLoadGlobal(ic, assembler);
+  __ SetAccumulator(result);
+  __ Dispatch();
 }
 
-void Interpreter::DoStoreGlobal(Callable ic, InterpreterAssembler* assembler) {
+void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) {
   // Get the global object.
   Node* context = __ GetContext();
   Node* native_context =
@@ -446,40 +458,51 @@
   __ Dispatch();
 }
 
-
 // StaGlobalSloppy <name_index> <slot>
 //
 // Store the value in the accumulator into the global with name in constant pool
 // entry <name_index> using FeedBackVector slot <slot> in sloppy mode.
 void Interpreter::DoStaGlobalSloppy(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
-  DoStoreGlobal(ic, assembler);
+  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
+  DoStaGlobal(ic, assembler);
 }
 
-
 // StaGlobalStrict <name_index> <slot>
 //
 // Store the value in the accumulator into the global with name in constant pool
 // entry <name_index> using FeedBackVector slot <slot> in strict mode.
 void Interpreter::DoStaGlobalStrict(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
-  DoStoreGlobal(ic, assembler);
+  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
+  DoStaGlobal(ic, assembler);
+}
+
+compiler::Node* Interpreter::BuildLoadContextSlot(
+    InterpreterAssembler* assembler) {
+  Node* reg_index = __ BytecodeOperandReg(0);
+  Node* context = __ LoadRegister(reg_index);
+  Node* slot_index = __ BytecodeOperandIdx(1);
+  return __ LoadContextSlot(context, slot_index);
 }
 
 // LdaContextSlot <context> <slot_index>
 //
 // Load the object in |slot_index| of |context| into the accumulator.
 void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
-  Node* reg_index = __ BytecodeOperandReg(0);
-  Node* context = __ LoadRegister(reg_index);
-  Node* slot_index = __ BytecodeOperandIdx(1);
-  Node* result = __ LoadContextSlot(context, slot_index);
+  Node* result = BuildLoadContextSlot(assembler);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
+// LdrContextSlot <context> <slot_index> <reg>
+//
+// Load the object in <slot_index> of <context> into register <reg>.
+void Interpreter::DoLdrContextSlot(InterpreterAssembler* assembler) {
+  Node* result = BuildLoadContextSlot(assembler);
+  Node* destination = __ BytecodeOperandReg(2);
+  __ StoreRegister(result, destination);
+  __ Dispatch();
+}
+
 // StaContextSlot <context> <slot_index>
 //
 // Stores the object in the accumulator into |slot_index| of |context|.
@@ -492,8 +515,8 @@
   __ Dispatch();
 }
 
-void Interpreter::DoLoadLookupSlot(Runtime::FunctionId function_id,
-                                   InterpreterAssembler* assembler) {
+void Interpreter::DoLdaLookupSlot(Runtime::FunctionId function_id,
+                                  InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* name = __ LoadConstantPoolEntry(index);
   Node* context = __ GetContext();
@@ -507,7 +530,7 @@
 // Lookup the object with the name in constant pool entry |name_index|
 // dynamically.
 void Interpreter::DoLdaLookupSlot(InterpreterAssembler* assembler) {
-  DoLoadLookupSlot(Runtime::kLoadLookupSlot, assembler);
+  DoLdaLookupSlot(Runtime::kLoadLookupSlot, assembler);
 }
 
 // LdaLookupSlotInsideTypeof <name_index>
@@ -515,11 +538,11 @@
 // Lookup the object with the name in constant pool entry |name_index|
 // dynamically without causing a NoReferenceError.
 void Interpreter::DoLdaLookupSlotInsideTypeof(InterpreterAssembler* assembler) {
-  DoLoadLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
+  DoLdaLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
 }
 
-void Interpreter::DoStoreLookupSlot(LanguageMode language_mode,
-                                    InterpreterAssembler* assembler) {
+void Interpreter::DoStaLookupSlot(LanguageMode language_mode,
+                                  InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
   Node* index = __ BytecodeOperandIdx(0);
   Node* name = __ LoadConstantPoolEntry(index);
@@ -537,19 +560,19 @@
 // Store the object in accumulator to the object with the name in constant
 // pool entry |name_index| in sloppy mode.
 void Interpreter::DoStaLookupSlotSloppy(InterpreterAssembler* assembler) {
-  DoStoreLookupSlot(LanguageMode::SLOPPY, assembler);
+  DoStaLookupSlot(LanguageMode::SLOPPY, assembler);
 }
 
-
 // StaLookupSlotStrict <name_index>
 //
 // Store the object in accumulator to the object with the name in constant
 // pool entry |name_index| in strict mode.
 void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) {
-  DoStoreLookupSlot(LanguageMode::STRICT, assembler);
+  DoStaLookupSlot(LanguageMode::STRICT, assembler);
 }
 
-void Interpreter::DoLoadIC(Callable ic, InterpreterAssembler* assembler) {
+Node* Interpreter::BuildLoadNamedProperty(Callable ic,
+                                          InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* register_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(register_index);
@@ -559,23 +582,35 @@
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
   Node* context = __ GetContext();
-  Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
-                             name, smi_slot, type_feedback_vector);
+  return __ CallStub(ic.descriptor(), code_target, context, object, name,
+                     smi_slot, type_feedback_vector);
+}
+
+// LdaNamedProperty <object> <name_index> <slot>
+//
+// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
+// constant pool entry <name_index>.
+void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
+  Node* result = BuildLoadNamedProperty(ic, assembler);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
-// LoadIC <object> <name_index> <slot>
+// LdrNamedProperty <object> <name_index> <slot> <reg>
 //
 // Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
-// constant pool entry <name_index>.
-void Interpreter::DoLoadIC(InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   UNINITIALIZED);
-  DoLoadIC(ic, assembler);
+// constant pool entry <name_index> and puts the result into register <reg>.
+void Interpreter::DoLdrNamedProperty(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
+  Node* result = BuildLoadNamedProperty(ic, assembler);
+  Node* destination = __ BytecodeOperandReg(3);
+  __ StoreRegister(result, destination);
+  __ Dispatch();
 }
 
-void Interpreter::DoKeyedLoadIC(Callable ic, InterpreterAssembler* assembler) {
+Node* Interpreter::BuildLoadKeyedProperty(Callable ic,
+                                          InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(reg_index);
@@ -584,20 +619,31 @@
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
   Node* context = __ GetContext();
-  Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
-                             name, smi_slot, type_feedback_vector);
-  __ SetAccumulator(result);
-  __ Dispatch();
+  return __ CallStub(ic.descriptor(), code_target, context, object, name,
+                     smi_slot, type_feedback_vector);
 }
 
 // KeyedLoadIC <object> <slot>
 //
 // Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
 // in the accumulator.
-void Interpreter::DoKeyedLoadIC(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, UNINITIALIZED);
-  DoKeyedLoadIC(ic, assembler);
+void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
+  Node* result = BuildLoadKeyedProperty(ic, assembler);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+// LdrKeyedProperty <object> <slot> <reg>
+//
+// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
+// in the accumulator and puts the result in register <reg>.
+void Interpreter::DoLdrKeyedProperty(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
+  Node* result = BuildLoadKeyedProperty(ic, assembler);
+  Node* destination = __ BytecodeOperandReg(2);
+  __ StoreRegister(result, destination);
+  __ Dispatch();
 }
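
Note on the Ldr* bytecodes added above (LdrUndefined, LdrGlobal,
LdrContextSlot, LdrNamedProperty, LdrKeyedProperty): unlike their Lda*
counterparts they write an explicit destination register and leave the
accumulator untouched, presumably so the bytecode generator can avoid
emitting a following Star. A toy register-file model of the split
(illustrative only):

  #include <array>

  struct ToyFrame {
    std::array<int, 8> regs{};
    int accumulator = 0;
  };

  void LdaToy(ToyFrame* f, int value) { f->accumulator = value; }

  void LdrToy(ToyFrame* f, int value, int dst) {
    f->regs[dst] = value;  // accumulator untouched; no Star needed after
  }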
 
 void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
@@ -616,27 +662,23 @@
   __ Dispatch();
 }
 
-
-// StoreICSloppy <object> <name_index> <slot>
+// StaNamedPropertySloppy <object> <name_index> <slot>
 //
 // Calls the sloppy mode StoreIC at FeedBackVector slot <slot> for <object> and
 // the name in constant pool entry <name_index> with the value in the
 // accumulator.
-void Interpreter::DoStoreICSloppy(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+void Interpreter::DoStaNamedPropertySloppy(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
   DoStoreIC(ic, assembler);
 }
 
-
-// StoreICStrict <object> <name_index> <slot>
+// StaNamedPropertyStrict <object> <name_index> <slot>
 //
 // Calls the strict mode StoreIC at FeedBackVector slot <slot> for <object> and
 // the name in constant pool entry <name_index> with the value in the
 // accumulator.
-void Interpreter::DoStoreICStrict(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+void Interpreter::DoStaNamedPropertyStrict(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
   DoStoreIC(ic, assembler);
 }
 
@@ -656,25 +698,21 @@
   __ Dispatch();
 }
 
-
-// KeyedStoreICSloppy <object> <key> <slot>
+// StaKeyedPropertySloppy <object> <key> <slot>
 //
 // Calls the sloppy mode KeyedStoreIC at FeedBackVector slot <slot> for <object>
 // and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICSloppy(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+void Interpreter::DoStaKeyedPropertySloppy(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY);
   DoKeyedStoreIC(ic, assembler);
 }
 
-
-// KeyedStoreICStore <object> <key> <slot>
+// StaKeyedPropertyStrict <object> <key> <slot>
 //
 // Calls the strict mode KeyedStoreIC at FeedBackVector slot <slot> for <object>
 // and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICStrict(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+void Interpreter::DoStaKeyedPropertyStrict(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT);
   DoKeyedStoreIC(ic, assembler);
 }
 
@@ -691,7 +729,6 @@
   __ Dispatch();
 }
 
-
 // PopContext <context>
 //
 // Pops the current context and sets <context> as the new context.
@@ -702,33 +739,6 @@
   __ Dispatch();
 }
 
-void Interpreter::DoBinaryOp(Callable callable,
-                             InterpreterAssembler* assembler) {
-  // TODO(bmeurer): Collect definition side type feedback for various
-  // binary operations.
-  Node* target = __ HeapConstant(callable.code());
-  Node* reg_index = __ BytecodeOperandReg(0);
-  Node* lhs = __ LoadRegister(reg_index);
-  Node* rhs = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* result = __ CallStub(callable.descriptor(), target, context, lhs, rhs);
-  __ SetAccumulator(result);
-  __ Dispatch();
-}
-
-void Interpreter::DoBinaryOp(Runtime::FunctionId function_id,
-                             InterpreterAssembler* assembler) {
-  // TODO(rmcilroy): Call ICs which back-patch bytecode with type specialized
-  // operations, instead of calling builtins directly.
-  Node* reg_index = __ BytecodeOperandReg(0);
-  Node* lhs = __ LoadRegister(reg_index);
-  Node* rhs = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* result = __ CallRuntime(function_id, context, lhs, rhs);
-  __ SetAccumulator(result);
-  __ Dispatch();
-}
-
 template <class Generator>
 void Interpreter::DoBinaryOp(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(0);
@@ -747,7 +757,6 @@
   DoBinaryOp<AddStub>(assembler);
 }
 
-
 // Sub <src>
 //
 // Subtract register <src> from accumulator.
@@ -755,7 +764,6 @@
   DoBinaryOp<SubtractStub>(assembler);
 }
 
-
 // Mul <src>
 //
 // Multiply accumulator by register <src>.
@@ -763,7 +771,6 @@
   DoBinaryOp<MultiplyStub>(assembler);
 }
 
-
 // Div <src>
 //
 // Divide register <src> by accumulator.
@@ -771,7 +778,6 @@
   DoBinaryOp<DivideStub>(assembler);
 }
 
-
 // Mod <src>
 //
 // Modulo register <src> by accumulator.
@@ -779,7 +785,6 @@
   DoBinaryOp<ModulusStub>(assembler);
 }
 
-
 // BitwiseOr <src>
 //
 // BitwiseOr register <src> to accumulator.
@@ -787,7 +792,6 @@
   DoBinaryOp<BitwiseOrStub>(assembler);
 }
 
-
 // BitwiseXor <src>
 //
 // BitwiseXor register <src> to accumulator.
@@ -795,7 +799,6 @@
   DoBinaryOp<BitwiseXorStub>(assembler);
 }
 
-
 // BitwiseAnd <src>
 //
 // BitwiseAnd register <src> to accumulator.
@@ -803,7 +806,6 @@
   DoBinaryOp<BitwiseAndStub>(assembler);
 }
 
-
 // ShiftLeft <src>
 //
 // Left shifts register <src> by the count specified in the accumulator.
@@ -814,7 +816,6 @@
   DoBinaryOp<ShiftLeftStub>(assembler);
 }
 
-
 // ShiftRight <src>
 //
 // Right shifts register <src> by the count specified in the accumulator.
@@ -825,7 +826,6 @@
   DoBinaryOp<ShiftRightStub>(assembler);
 }
 
-
 // ShiftRightLogical <src>
 //
 // Right shifts register <src> by the count specified in the accumulator.
@@ -836,6 +836,17 @@
   DoBinaryOp<ShiftRightLogicalStub>(assembler);
 }
 
+void Interpreter::DoUnaryOp(Callable callable,
+                            InterpreterAssembler* assembler) {
+  Node* target = __ HeapConstant(callable.code());
+  Node* accumulator = __ GetAccumulator();
+  Node* context = __ GetContext();
+  Node* result =
+      __ CallStub(callable.descriptor(), target, context, accumulator);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
 template <class Generator>
 void Interpreter::DoUnaryOp(InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
@@ -845,6 +856,27 @@
   __ Dispatch();
 }
 
+// ToName
+//
+// Cast the object referenced by the accumulator to a name.
+void Interpreter::DoToName(InterpreterAssembler* assembler) {
+  DoUnaryOp(CodeFactory::ToName(isolate_), assembler);
+}
+
+// ToNumber
+//
+// Cast the object referenced by the accumulator to a number.
+void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
+  DoUnaryOp(CodeFactory::ToNumber(isolate_), assembler);
+}
+
+// ToObject
+//
+// Cast the object referenced by the accumulator to a JSObject.
+void Interpreter::DoToObject(InterpreterAssembler* assembler) {
+  DoUnaryOp(CodeFactory::ToObject(isolate_), assembler);
+}
+
 // Inc
 //
 // Increments value in the accumulator by one.
@@ -859,14 +891,22 @@
   DoUnaryOp<DecStub>(assembler);
 }
 
-void Interpreter::DoLogicalNotOp(Node* value, InterpreterAssembler* assembler) {
+Node* Interpreter::BuildToBoolean(Node* value,
+                                  InterpreterAssembler* assembler) {
+  Node* context = __ GetContext();
+  return ToBooleanStub::Generate(assembler, value, context);
+}
+
+Node* Interpreter::BuildLogicalNot(Node* value,
+                                   InterpreterAssembler* assembler) {
+  Variable result(assembler, MachineRepresentation::kTagged);
   Label if_true(assembler), if_false(assembler), end(assembler);
   Node* true_value = __ BooleanConstant(true);
   Node* false_value = __ BooleanConstant(false);
   __ BranchIfWordEqual(value, true_value, &if_true, &if_false);
   __ Bind(&if_true);
   {
-    __ SetAccumulator(false_value);
+    result.Bind(false_value);
     __ Goto(&end);
   }
   __ Bind(&if_false);
@@ -875,24 +915,23 @@
       __ AbortIfWordNotEqual(value, false_value,
                              BailoutReason::kExpectedBooleanValue);
     }
-    __ SetAccumulator(true_value);
+    result.Bind(true_value);
     __ Goto(&end);
   }
   __ Bind(&end);
+  return result.value();
 }
 
 // ToBooleanLogicalNot
 //
 // Perform logical-not on the accumulator, first casting the
 // accumulator to a boolean value if required.
 void Interpreter::DoToBooleanLogicalNot(InterpreterAssembler* assembler) {
-  Callable callable = CodeFactory::ToBoolean(isolate_);
-  Node* target = __ HeapConstant(callable.code());
-  Node* accumulator = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* to_boolean_value =
-      __ CallStub(callable.descriptor(), target, context, accumulator);
-  DoLogicalNotOp(to_boolean_value, assembler);
+  Node* value = __ GetAccumulator();
+  Node* to_boolean_value = BuildToBoolean(value, assembler);
+  Node* result = BuildLogicalNot(to_boolean_value, assembler);
+  __ SetAccumulator(result);
   __ Dispatch();
 }
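
Note on the Build* refactor above: BuildToBoolean and BuildLogicalNot return
the result node instead of writing the accumulator themselves, so handlers
can compose them: DoToBooleanLogicalNot chains both, DoLogicalNot uses only
the latter. A toy illustration of why value-returning helpers compose:

  int ToBooleanToy(int v) { return v != 0 ? 1 : 0; }  // pure: returns a value
  int LogicalNotToy(int b) { return b ? 0 : 1; }      // pure: returns a value

  int ToBooleanLogicalNotToy(int v) {
    return LogicalNotToy(ToBooleanToy(v));  // trivial composition
  }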
 
@@ -902,7 +941,8 @@
 // value.
 void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
-  DoLogicalNotOp(value, assembler);
+  Node* result = BuildLogicalNot(value, assembler);
+  __ SetAccumulator(result);
   __ Dispatch();
 }
 
@@ -911,14 +951,7 @@
 // Load the accumulator with the string representing the type of the
 // object in the accumulator.
 void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
-  Callable callable = CodeFactory::Typeof(isolate_);
-  Node* target = __ HeapConstant(callable.code());
-  Node* accumulator = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* result =
-      __ CallStub(callable.descriptor(), target, context, accumulator);
-  __ SetAccumulator(result);
-  __ Dispatch();
+  DoUnaryOp(CodeFactory::Typeof(isolate_), assembler);
 }
 
 void Interpreter::DoDelete(Runtime::FunctionId function_id,
@@ -932,7 +965,6 @@
   __ Dispatch();
 }
 
-
 // DeletePropertyStrict
 //
 // Delete the property specified in the accumulator from the object
@@ -941,7 +973,6 @@
   DoDelete(Runtime::kDeleteProperty_Strict, assembler);
 }
 
-
 // DeletePropertySloppy
 //
 // Delete the property specified in the accumulator from the object
@@ -967,7 +998,6 @@
   __ Dispatch();
 }
 
-
 // Call <callable> <receiver> <arg_count>
 //
 // Call a JSFunction or Callable in |callable| with the |receiver| and
@@ -995,7 +1025,6 @@
   __ Dispatch();
 }
 
-
 // CallRuntime <function_id> <first_arg> <arg_count>
 //
 // Call the runtime function |function_id| with the first argument in
@@ -1011,7 +1040,7 @@
 // |function_id| with the first argument in |first_arg| and |arg_count|
 // arguments in subsequent registers.
 void Interpreter::DoInvokeIntrinsic(InterpreterAssembler* assembler) {
-  Node* function_id = __ BytecodeOperandRuntimeId(0);
+  Node* function_id = __ BytecodeOperandIntrinsicId(0);
   Node* first_arg_reg = __ BytecodeOperandReg(1);
   Node* arg_count = __ BytecodeOperandCount(2);
   Node* context = __ GetContext();
@@ -1042,7 +1071,6 @@
   __ Dispatch();
 }
 
-
 // CallRuntimeForPair <function_id> <first_arg> <arg_count> <first_return>
 //
 // Call the runtime function |function_id| which returns a pair, with the
@@ -1074,7 +1102,6 @@
   __ Dispatch();
 }
 
-
 // CallJSRuntime <context_index> <receiver> <arg_count>
 //
 // Call the JS runtime function that has the |context_index| with the receiver
@@ -1098,7 +1125,6 @@
   __ Dispatch();
 }
 
-
 // New <constructor> <first_arg> <arg_count>
 //
 // Call operator new with |constructor| and the first argument in
@@ -1113,109 +1139,67 @@
 //
 // Test if the value in the <src> register equals the accumulator.
 void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
-  DoBinaryOp(CodeFactory::Equal(isolate_), assembler);
+  DoBinaryOp<EqualStub>(assembler);
 }
 
-
 // TestNotEqual <src>
 //
 // Test if the value in the <src> register is not equal to the accumulator.
 void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
-  DoBinaryOp(CodeFactory::NotEqual(isolate_), assembler);
+  DoBinaryOp<NotEqualStub>(assembler);
 }
 
-
 // TestEqualStrict <src>
 //
 // Test if the value in the <src> register is strictly equal to the accumulator.
 void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
-  DoBinaryOp(CodeFactory::StrictEqual(isolate_), assembler);
+  DoBinaryOp<StrictEqualStub>(assembler);
 }
 
-
 // TestLessThan <src>
 //
 // Test if the value in the <src> register is less than the accumulator.
 void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
-  DoBinaryOp(CodeFactory::LessThan(isolate_), assembler);
+  DoBinaryOp<LessThanStub>(assembler);
 }
 
-
 // TestGreaterThan <src>
 //
 // Test if the value in the <src> register is greater than the accumulator.
 void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
-  DoBinaryOp(CodeFactory::GreaterThan(isolate_), assembler);
+  DoBinaryOp<GreaterThanStub>(assembler);
 }
 
-
 // TestLessThanOrEqual <src>
 //
 // Test if the value in the <src> register is less than or equal to the
 // accumulator.
 void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
-  DoBinaryOp(CodeFactory::LessThanOrEqual(isolate_), assembler);
+  DoBinaryOp<LessThanOrEqualStub>(assembler);
 }
 
-
 // TestGreaterThanOrEqual <src>
 //
 // Test if the value in the <src> register is greater than or equal to the
 // accumulator.
 void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
-  DoBinaryOp(CodeFactory::GreaterThanOrEqual(isolate_), assembler);
+  DoBinaryOp<GreaterThanOrEqualStub>(assembler);
 }
 
-
 // TestIn <src>
 //
 // Test if the object referenced by the register operand is a property of the
 // object referenced by the accumulator.
 void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
-  DoBinaryOp(CodeFactory::HasProperty(isolate_), assembler);
+  DoBinaryOp<HasPropertyStub>(assembler);
 }
 
-
 // TestInstanceOf <src>
 //
 // Test if the object referenced by the <src> register is an instance of the
 // type referenced by the accumulator.
 void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
-  DoBinaryOp(CodeFactory::InstanceOf(isolate_), assembler);
-}
-
-void Interpreter::DoTypeConversionOp(Callable callable,
-                                     InterpreterAssembler* assembler) {
-  Node* target = __ HeapConstant(callable.code());
-  Node* accumulator = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* result =
-      __ CallStub(callable.descriptor(), target, context, accumulator);
-  __ SetAccumulator(result);
-  __ Dispatch();
-}
-
-// ToName
-//
-// Cast the object referenced by the accumulator to a name.
-void Interpreter::DoToName(InterpreterAssembler* assembler) {
-  DoTypeConversionOp(CodeFactory::ToName(isolate_), assembler);
-}
-
-
-// ToNumber
-//
-// Cast the object referenced by the accumulator to a number.
-void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
-  DoTypeConversionOp(CodeFactory::ToNumber(isolate_), assembler);
-}
-
-
-// ToObject
-//
-// Cast the object referenced by the accumulator to a JSObject.
-void Interpreter::DoToObject(InterpreterAssembler* assembler) {
-  DoTypeConversionOp(CodeFactory::ToObject(isolate_), assembler);
+  DoBinaryOp<InstanceOfStub>(assembler);
 }
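
Note on the DoBinaryOp<Stub> conversions above: each comparison handler now
instantiates the Generator template, so one template body serves every
operator and the per-operator stub type supplies the code generation. A
minimal stand-alone demo of the pattern (toy types, not the V8 stubs):

  #include <cstdio>

  struct AddToy { static int Generate(int a, int b) { return a + b; } };
  struct SubToy { static int Generate(int a, int b) { return a - b; } };

  template <class Generator>
  int DoBinaryOpToy(int lhs, int rhs) {
    return Generator::Generate(lhs, rhs);  // one body, many operators
  }

  int main() {
    std::printf("%d %d\n", DoBinaryOpToy<AddToy>(2, 3),
                DoBinaryOpToy<SubToy>(5, 2));  // prints "5 3"
  }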
 
 // Jump <imm>
@@ -1289,12 +1273,8 @@
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is true when the object is cast to boolean.
 void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
-  Callable callable = CodeFactory::ToBoolean(isolate_);
-  Node* target = __ HeapConstant(callable.code());
   Node* accumulator = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* to_boolean_value =
-      __ CallStub(callable.descriptor(), target, context, accumulator);
+  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
   Node* relative_jump = __ BytecodeOperandImm(0);
   Node* true_value = __ BooleanConstant(true);
   __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
@@ -1307,12 +1287,8 @@
 // to boolean.
 void Interpreter::DoJumpIfToBooleanTrueConstant(
     InterpreterAssembler* assembler) {
-  Callable callable = CodeFactory::ToBoolean(isolate_);
-  Node* target = __ HeapConstant(callable.code());
   Node* accumulator = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* to_boolean_value =
-      __ CallStub(callable.descriptor(), target, context, accumulator);
+  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
   Node* relative_jump = __ SmiUntag(constant);
@@ -1325,12 +1301,8 @@
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is false when the object is cast to boolean.
 void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
-  Callable callable = CodeFactory::ToBoolean(isolate_);
-  Node* target = __ HeapConstant(callable.code());
   Node* accumulator = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* to_boolean_value =
-      __ CallStub(callable.descriptor(), target, context, accumulator);
+  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
   Node* relative_jump = __ BytecodeOperandImm(0);
   Node* false_value = __ BooleanConstant(false);
   __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
@@ -1343,12 +1315,8 @@
 // to boolean.
 void Interpreter::DoJumpIfToBooleanFalseConstant(
     InterpreterAssembler* assembler) {
-  Callable callable = CodeFactory::ToBoolean(isolate_);
-  Node* target = __ HeapConstant(callable.code());
   Node* accumulator = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* to_boolean_value =
-      __ CallStub(callable.descriptor(), target, context, accumulator);
+  Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
   Node* relative_jump = __ SmiUntag(constant);
@@ -1579,7 +1547,6 @@
   }
 }
 
-
 // CreateUnmappedArguments
 //
 // Creates a new unmapped arguments object.
@@ -1639,7 +1606,6 @@
   __ Abort(kUnexpectedReturnFromThrow);
 }
 
-
 // ReThrow
 //
 // Re-throws the exception in the accumulator.
@@ -1651,7 +1617,6 @@
   __ Abort(kUnexpectedReturnFromThrow);
 }
 
-
 // Return
 //
 // Return the value in the accumulator.
@@ -1821,11 +1786,23 @@
 // SuspendGenerator <generator>
 //
 // Exports the register file and stores it into the generator.  Also stores the
-// current context and the state given in the accumulator into the generator.
+// current context, the state given in the accumulator, and the current bytecode
+// offset (for debugging purposes) into the generator.
 void Interpreter::DoSuspendGenerator(InterpreterAssembler* assembler) {
   Node* generator_reg = __ BytecodeOperandReg(0);
   Node* generator = __ LoadRegister(generator_reg);
 
+  Label if_stepping(assembler, Label::kDeferred), ok(assembler);
+  Node* step_action_address = __ ExternalConstant(
+      ExternalReference::debug_last_step_action_address(isolate_));
+  Node* step_action = __ Load(MachineType::Int8(), step_action_address);
+  STATIC_ASSERT(StepIn > StepNext);
+  STATIC_ASSERT(StepFrame > StepNext);
+  STATIC_ASSERT(LastStepAction == StepFrame);
+  Node* step_next = __ Int32Constant(StepNext);
+  __ BranchIfInt32LessThanOrEqual(step_next, step_action, &if_stepping, &ok);
+  __ Bind(&ok);
+
   Node* array =
       __ LoadObjectField(generator, JSGeneratorObject::kOperandStackOffset);
   Node* context = __ GetContext();
@@ -1835,7 +1812,18 @@
   __ StoreObjectField(generator, JSGeneratorObject::kContextOffset, context);
   __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset, state);
 
+  Node* offset = __ SmiTag(__ BytecodeOffset());
+  __ StoreObjectField(generator, JSGeneratorObject::kInputOrDebugPosOffset,
+                      offset);
+
   __ Dispatch();
+
+  __ Bind(&if_stepping);
+  {
+    Node* context = __ GetContext();
+    __ CallRuntime(Runtime::kDebugRecordAsyncFunction, context, generator);
+    __ Goto(&ok);
+  }
 }
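// A minimal sketch of the stepping check emitted above, assuming only the
// ordering that the STATIC_ASSERTs pin down (StepIn and StepFrame above
// StepNext, StepFrame last); the concrete enum values here are illustrative.
// The deferred path is taken whenever StepNext <= last step action, i.e.
// whenever the debugger is stepping, and records the suspended generator
// before resuming dispatch.
enum StepActionSketch {
  StepNone = -1,
  StepOut = 0,
  StepNext = 1,
  StepIn = 2,     // StepIn > StepNext
  StepFrame = 3,  // StepFrame > StepNext, LastStepAction == StepFrame
};

bool IsDebuggerStepping(StepActionSketch last_step_action) {
  // Mirrors BranchIfInt32LessThanOrEqual(step_next, step_action, ...).
  return last_step_action >= StepNext;
}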
 
 // ResumeGenerator <generator>
diff --git a/src/interpreter/interpreter.h b/src/interpreter/interpreter.h
index d774d8b..468486c 100644
--- a/src/interpreter/interpreter.h
+++ b/src/interpreter/interpreter.h
@@ -70,17 +70,13 @@
   BYTECODE_LIST(DECLARE_BYTECODE_HANDLER_GENERATOR)
 #undef DECLARE_BYTECODE_HANDLER_GENERATOR
 
-  // Generates code to perform the binary operation via |callable|.
-  void DoBinaryOp(Callable callable, InterpreterAssembler* assembler);
-
-  // Generates code to perform the binary operation via |function_id|.
-  void DoBinaryOp(Runtime::FunctionId function_id,
-                  InterpreterAssembler* assembler);
-
   // Generates code to perform the binary operation via |Generator|.
   template <class Generator>
   void DoBinaryOp(InterpreterAssembler* assembler);
 
+  // Generates code to perform the unary operation via |callable|.
+  void DoUnaryOp(Callable callable, InterpreterAssembler* assembler);
+
   // Generates code to perform the unary operation via |Generator|.
   template <class Generator>
   void DoUnaryOp(InterpreterAssembler* assembler);
@@ -89,22 +85,10 @@
   // |compare_op|.
   void DoCompareOp(Token::Value compare_op, InterpreterAssembler* assembler);
 
-  // Generates code to load a constant from the constant pool.
-  void DoLoadConstant(InterpreterAssembler* assembler);
-
-  // Generates code to perform a global load via |ic|.
-  void DoLoadGlobal(Callable ic, InterpreterAssembler* assembler);
-
   // Generates code to perform a global store via |ic|.
-  void DoStoreGlobal(Callable ic, InterpreterAssembler* assembler);
+  void DoStaGlobal(Callable ic, InterpreterAssembler* assembler);
 
-  // Generates code to perform a named property load via |ic|.
-  void DoLoadIC(Callable ic, InterpreterAssembler* assembler);
-
-  // Generates code to perform a keyed property load via |ic|.
-  void DoKeyedLoadIC(Callable ic, InterpreterAssembler* assembler);
-
-  // Generates code to perform a namedproperty store via |ic|.
+  // Generates code to perform a named property store via |ic|.
   void DoStoreIC(Callable ic, InterpreterAssembler* assembler);
 
   // Generates code to perform a keyed property store via |ic|.
@@ -125,23 +109,44 @@
   // Generates code to perform a constructor call.
   void DoCallConstruct(InterpreterAssembler* assembler);
 
-  // Generates code to perform a type conversion.
-  void DoTypeConversionOp(Callable callable, InterpreterAssembler* assembler);
-
-  // Generates code to perform logical-not on boolean |value|.
-  void DoLogicalNotOp(compiler::Node* value, InterpreterAssembler* assembler);
-
   // Generates code to perform delete via function_id.
   void DoDelete(Runtime::FunctionId function_id,
                 InterpreterAssembler* assembler);
 
   // Generates code to perform a lookup slot load via |function_id|.
-  void DoLoadLookupSlot(Runtime::FunctionId function_id,
-                        InterpreterAssembler* assembler);
+  void DoLdaLookupSlot(Runtime::FunctionId function_id,
+                       InterpreterAssembler* assembler);
 
   // Generates code to perform a lookup slot store depending on |language_mode|.
-  void DoStoreLookupSlot(LanguageMode language_mode,
-                         InterpreterAssembler* assembler);
+  void DoStaLookupSlot(LanguageMode language_mode,
+                       InterpreterAssembler* assembler);
+
+  // Generates a node with the undefined constant.
+  compiler::Node* BuildLoadUndefined(InterpreterAssembler* assembler);
+
+  // Generates code to load a context slot.
+  compiler::Node* BuildLoadContextSlot(InterpreterAssembler* assembler);
+
+  // Generates code to load a global.
+  compiler::Node* BuildLoadGlobal(Callable ic, InterpreterAssembler* assembler);
+
+  // Generates code to load a named property.
+  compiler::Node* BuildLoadNamedProperty(Callable ic,
+                                         InterpreterAssembler* assembler);
+
+  // Generates code to load a keyed property.
+  compiler::Node* BuildLoadKeyedProperty(Callable ic,
+                                         InterpreterAssembler* assembler);
+
+  // Generates code to perform logical-not on boolean |value| and returns the
+  // result.
+  compiler::Node* BuildLogicalNot(compiler::Node* value,
+                                  InterpreterAssembler* assembler);
+
+  // Generates code to convert |value| to a boolean and returns the
+  // result.
+  compiler::Node* BuildToBoolean(compiler::Node* value,
+                                 InterpreterAssembler* assembler);
 
   uintptr_t GetDispatchCounter(Bytecode from, Bytecode to) const;
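// A minimal sketch (illustrative types, not V8's compiler::Node) of why the
// removed Do* handlers above became Build* helpers that return their result:
// the returned value can feed further code in the caller, so one conversion
// is shared by several handlers instead of each ending in its own stub call.
using NodeValueSketch = bool;  // stand-in for a compiler::Node* result

NodeValueSketch BuildToBooleanSketch(int accumulator) {
  return accumulator != 0;
}
NodeValueSketch BuildLogicalNotSketch(NodeValueSketch value) { return !value; }

// Two hypothetical callers composing the shared helper:
NodeValueSketch ToBooleanLogicalNotSketch(int acc) {
  return BuildLogicalNotSketch(BuildToBooleanSketch(acc));
}
NodeValueSketch JumpConditionSketch(int acc) {
  return BuildToBooleanSketch(acc);
}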
 
diff --git a/src/interpreter/source-position-table.cc b/src/interpreter/source-position-table.cc
index 65bfa20..579c6c4 100644
--- a/src/interpreter/source-position-table.cc
+++ b/src/interpreter/source-position-table.cc
@@ -23,19 +23,13 @@
 // - we record the difference from the previous position,
 // - we just stuff one bit for the type into the bytecode offset,
 // - we write least-significant bits first,
-// - negative numbers occur only rarely, so we use a denormalized
-//   most-significant byte (a byte with all zeros, which normally wouldn't
-//   make any sense) to encode a negative sign, so that we 'pay' nothing for
-//   positive numbers, but have to pay a full byte for negative integers.
+// - we use zig-zag encoding to encode both positive and negative numbers.
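// A minimal sketch of the delta step named in the first bullet above: each
// entry is stored as the difference from the previous entry, so the varint
// bytes encoded below stay short for offsets and positions that grow slowly.
// The helper name is illustrative; the field layout follows PositionTableEntry
// as used in this file.
struct PositionTableEntrySketch {
  int bytecode_offset;
  int source_position;
  bool is_statement;
};

void SubtractFromPrevious(PositionTableEntrySketch& entry,
                          PositionTableEntrySketch& previous) {
  PositionTableEntrySketch absolute = entry;
  entry.bytecode_offset -= previous.bytecode_offset;
  entry.source_position -= previous.source_position;
  previous = absolute;  // remember absolutes for the next delta
}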
 
 namespace {
 
-// A zero-value in the most-significant byte is used to mark negative numbers.
-const int kNegativeSignMarker = 0;
-
 // Each byte is encoded as MoreBit | ValueBits.
 class MoreBit : public BitField8<bool, 7, 1> {};
-class ValueBits : public BitField8<int, 0, 7> {};
+class ValueBits : public BitField8<unsigned, 0, 7> {};
 
 // Helper: Add the offsets from 'other' to 'value'. Also set is_statement.
 void AddAndSetEntry(PositionTableEntry& value,
@@ -54,62 +48,57 @@
 
 // Helper: Encode an integer.
 void EncodeInt(ZoneVector<byte>& bytes, int value) {
-  bool sign = false;
-  if (value < 0) {
-    sign = true;
-    value = -value;
-  }
-
+  // Zig-zag encoding.
+  static const int kShift = kIntSize * kBitsPerByte - 1;
+  value = ((value << 1) ^ (value >> kShift));
+  DCHECK_GE(value, 0);
+  unsigned int encoded = static_cast<unsigned int>(value);
   bool more;
   do {
-    more = value > ValueBits::kMax;
-    bytes.push_back(MoreBit::encode(more || sign) |
-                    ValueBits::encode(value & ValueBits::kMax));
-    value >>= ValueBits::kSize;
+    more = encoded > ValueBits::kMax;
+    bytes.push_back(MoreBit::encode(more) |
+                    ValueBits::encode(encoded & ValueBits::kMask));
+    encoded >>= ValueBits::kSize;
   } while (more);
-
-  if (sign) {
-    bytes.push_back(MoreBit::encode(false) |
-                    ValueBits::encode(kNegativeSignMarker));
-  }
 }
 
 // Encode a PositionTableEntry.
 void EncodeEntry(ZoneVector<byte>& bytes, const PositionTableEntry& entry) {
-  // 1 bit for sign + is_statement each, which leaves 30b for the value.
-  DCHECK(abs(entry.bytecode_offset) < (1 << 30));
-  EncodeInt(bytes, (entry.is_statement ? 1 : 0) | (entry.bytecode_offset << 1));
+  // We only accept ascending bytecode offsets.
+  DCHECK(entry.bytecode_offset >= 0);
+  // Since bytecode_offset is not negative, we use sign to encode is_statement.
+  EncodeInt(bytes, entry.is_statement ? entry.bytecode_offset
+                                      : -entry.bytecode_offset - 1);
   EncodeInt(bytes, entry.source_position);
 }
 
 // Helper: Decode an integer.
 void DecodeInt(ByteArray* bytes, int* index, int* v) {
   byte current;
-  int n = 0;
-  int value = 0;
+  int shift = 0;
+  int decoded = 0;
   bool more;
   do {
     current = bytes->get((*index)++);
-    value |= ValueBits::decode(current) << (n * ValueBits::kSize);
-    n++;
+    decoded |= ValueBits::decode(current) << shift;
     more = MoreBit::decode(current);
+    shift += ValueBits::kSize;
   } while (more);
-
-  if (ValueBits::decode(current) == kNegativeSignMarker) {
-    value = -value;
-  }
-  *v = value;
+  DCHECK_GE(decoded, 0);
+  decoded = (decoded >> 1) ^ (-(decoded & 1));
+  *v = decoded;
 }
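// A self-contained sketch of the zig-zag varint scheme implemented by
// EncodeInt/DecodeInt above: zig-zag interleaves signed values onto the
// unsigned line (0, -1, 1, -2, ... -> 0, 1, 2, 3, ...), then 7 value bits go
// out per byte, least-significant first, with bit 7 as the "more" flag. This
// version uses unsigned arithmetic throughout; the constants correspond to
// ValueBits/MoreBit.
#include <cstdint>
#include <vector>

void EncodeIntSketch(std::vector<uint8_t>& bytes, int32_t value) {
  uint32_t encoded = (static_cast<uint32_t>(value) << 1) ^
                     static_cast<uint32_t>(value >> 31);  // zig-zag
  bool more;
  do {
    more = encoded > 0x7f;  // ValueBits::kMax
    bytes.push_back((more ? 0x80 : 0x00) | (encoded & 0x7f));
    encoded >>= 7;          // ValueBits::kSize
  } while (more);
}

int32_t DecodeIntSketch(const std::vector<uint8_t>& bytes, size_t* index) {
  uint32_t decoded = 0;
  int shift = 0;
  uint8_t current;
  do {
    current = bytes[(*index)++];
    decoded |= static_cast<uint32_t>(current & 0x7f) << shift;
    shift += 7;
  } while (current & 0x80);  // MoreBit
  // Invert zig-zag: even -> non-negative, odd -> negative.
  return static_cast<int32_t>((decoded >> 1) ^ -(decoded & 1u));
}

// Round trip, e.g.: after EncodeIntSketch(buf, -150), buf holds {0xAB, 0x02}
// and DecodeIntSketch(buf, &i) with i = 0 yields -150.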
 
 void DecodeEntry(ByteArray* bytes, int* index, PositionTableEntry* entry) {
   int tmp;
   DecodeInt(bytes, index, &tmp);
-  entry->is_statement = (tmp & 1);
-
-  // Note that '>>' needs to be arithmetic shift in order to handle negative
-  // numbers properly.
-  entry->bytecode_offset = (tmp >> 1);
-
+  if (tmp >= 0) {
+    entry->is_statement = true;
+    entry->bytecode_offset = tmp;
+  } else {
+    entry->is_statement = false;
+    entry->bytecode_offset = -(tmp + 1);
+  }
   DecodeInt(bytes, index, &entry->source_position);
 }
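// A minimal sketch of the sign trick in EncodeEntry/DecodeEntry above:
// bytecode-offset deltas are never negative here, so the is_statement flag is
// folded into the sign, with -offset - 1 keeping offset zero representable on
// the "false" side. Function names are illustrative.
#include <cassert>

int FoldStatementBit(int bytecode_offset, bool is_statement) {
  assert(bytecode_offset >= 0);
  return is_statement ? bytecode_offset : -bytecode_offset - 1;
}

void UnfoldStatementBit(int folded, int* bytecode_offset, bool* is_statement) {
  *is_statement = folded >= 0;
  *bytecode_offset = folded >= 0 ? folded : -(folded + 1);
}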